From 8a2ceda67659f52a83212ca15aff7b057e63d50c Mon Sep 17 00:00:00 2001
From: liaogang
Date: Tue, 27 Dec 2016 10:48:37 +0800
Subject: [PATCH 01/51] Add external Google dependencies
---
 cmake/external/gflags.cmake   | 39 ++++++++++++++++++++++++++
 cmake/external/glog.cmake     | 40 +++++++++++++++++++++++++++
 cmake/external/gtest.cmake    | 48 ++++++++++++++++++++++++++++++++
 cmake/external/protobuf.cmake | 52 +++++++++++++++++++++++++++++++++++
 4 files changed, 179 insertions(+)
 create mode 100644 cmake/external/gflags.cmake
 create mode 100644 cmake/external/glog.cmake
 create mode 100644 cmake/external/gtest.cmake
 create mode 100644 cmake/external/protobuf.cmake

diff --git a/cmake/external/gflags.cmake b/cmake/external/gflags.cmake
new file mode 100644
index 0000000000..128d50cec8
--- /dev/null
+++ b/cmake/external/gflags.cmake
@@ -0,0 +1,39 @@
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

INCLUDE(ExternalProject)

SET(GFLAGS_SOURCES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/gflags)
SET(GFLAGS_INSTALL_DIR ${PROJECT_BINARY_DIR}/gflags)

ExternalProject_Add(
    gflags
    GIT_REPOSITORY "https://github.com/gflags/gflags.git"
    PREFIX ${GFLAGS_SOURCES_DIR}
    CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${GFLAGS_INSTALL_DIR}
    CMAKE_ARGS -DBUILD_TESTING=OFF
    LOG_DOWNLOAD ON
    UPDATE_COMMAND ""
)

SET(GFLAGS_INCLUDE_DIR "${GFLAGS_INSTALL_DIR}/include" CACHE PATH "gflags include directory." FORCE)
INCLUDE_DIRECTORIES(${GFLAGS_INCLUDE_DIR})

IF(WIN32)
    set(GFLAGS_LIBRARIES "${GFLAGS_INSTALL_DIR}/lib/gflags.lib" CACHE FILEPATH "GFLAGS_LIBRARIES" FORCE)
ELSE(WIN32)
    set(GFLAGS_LIBRARIES "${GFLAGS_INSTALL_DIR}/lib/libgflags.a" CACHE FILEPATH "GFLAGS_LIBRARIES" FORCE)
ENDIF(WIN32)

LIST(APPEND external_project_dependencies gflags)
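A usage sketch, not part of the patch itself: a consumer target can be wired to the cache variables this script exports. The target name my_tool is hypothetical and is only here to illustrate the intended consumption pattern.

    ADD_EXECUTABLE(my_tool main.cpp)
    # Make sure the gflags ExternalProject has been built before the consumer.
    ADD_DEPENDENCIES(my_tool gflags)
    # GFLAGS_LIBRARIES points at the static archive installed above.
    TARGET_LINK_LIBRARIES(my_tool ${GFLAGS_LIBRARIES})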
diff --git a/cmake/external/glog.cmake b/cmake/external/glog.cmake
new file mode 100644
index 0000000000..8a4b9d5996
--- /dev/null
+++ b/cmake/external/glog.cmake
@@ -0,0 +1,40 @@
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

INCLUDE(ExternalProject)

SET(GLOG_SOURCES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/glog)
SET(GLOG_INSTALL_DIR ${PROJECT_BINARY_DIR}/glog)

ExternalProject_Add(
    glog
    GIT_REPOSITORY "https://github.com/google/glog.git"
    PREFIX ${GLOG_SOURCES_DIR}
    CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${GLOG_INSTALL_DIR}
    CMAKE_ARGS -DWITH_GFLAGS=OFF
    CMAKE_ARGS -DBUILD_TESTING=OFF
    LOG_DOWNLOAD ON
    UPDATE_COMMAND ""
)

SET(GLOG_INCLUDE_DIR "${GLOG_INSTALL_DIR}/include" CACHE PATH "glog include directory." FORCE)
INCLUDE_DIRECTORIES(${GLOG_INCLUDE_DIR})

IF(WIN32)
    SET(GLOG_LIBRARIES "${GLOG_INSTALL_DIR}/lib/libglog.lib" CACHE FILEPATH "glog library." FORCE)
ELSE(WIN32)
    SET(GLOG_LIBRARIES "${GLOG_INSTALL_DIR}/lib/libglog.a" CACHE FILEPATH "glog library." FORCE)
ENDIF(WIN32)

LIST(APPEND external_project_dependencies glog)

diff --git a/cmake/external/gtest.cmake b/cmake/external/gtest.cmake
new file mode 100644
index 0000000000..533104422a
--- /dev/null
+++ b/cmake/external/gtest.cmake
@@ -0,0 +1,48 @@
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

INCLUDE(ExternalProject)

SET(GTEST_SOURCES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/gtest)
SET(GTEST_INSTALL_DIR ${PROJECT_BINARY_DIR}/gtest)

ExternalProject_Add(
    gtest
    GIT_REPOSITORY "https://github.com/google/googletest.git"
    GIT_TAG "release-1.8.0"
    PREFIX ${GTEST_SOURCES_DIR}
    CMAKE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${GTEST_INSTALL_DIR}
    CMAKE_ARGS -DBUILD_GMOCK=ON
    CMAKE_ARGS -Dgtest_disable_pthreads=ON
    CMAKE_ARGS -Dgtest_force_shared_crt=ON
    LOG_DOWNLOAD ON
    UPDATE_COMMAND ""
)

SET(GTEST_INCLUDE_DIR "${GTEST_INSTALL_DIR}/include" CACHE PATH "gtest include directory." FORCE)
INCLUDE_DIRECTORIES(${GTEST_INCLUDE_DIR})

IF(WIN32)
    set(GTEST_LIBRARIES
        "${GTEST_INSTALL_DIR}/lib/gtest.lib"
        "${GTEST_INSTALL_DIR}/lib/gtest_main.lib" CACHE FILEPATH "gtest libraries." FORCE)
ELSE(WIN32)
    set(GTEST_LIBRARIES
        "${GTEST_INSTALL_DIR}/lib/libgtest.a"
        "${GTEST_INSTALL_DIR}/lib/libgtest_main.a" CACHE FILEPATH "gtest libraries." FORCE)
ENDIF(WIN32)

ENABLE_TESTING()

LIST(APPEND external_project_dependencies gtest)
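Because gtest.cmake above already calls ENABLE_TESTING(), registering a unit test against the installed archives only takes the usual steps. A minimal sketch with hypothetical names; note that the script configures gtest with gtest_disable_pthreads=ON, so no thread library is linked here:

    ADD_EXECUTABLE(my_unittest my_unittest.cpp)
    # Build the gtest ExternalProject before compiling the test binary.
    ADD_DEPENDENCIES(my_unittest gtest)
    TARGET_LINK_LIBRARIES(my_unittest ${GTEST_LIBRARIES})
    ADD_TEST(NAME my_unittest COMMAND my_unittest)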
diff --git a/cmake/external/protobuf.cmake b/cmake/external/protobuf.cmake
new file mode 100644
index 0000000000..8acc6325b9
--- /dev/null
+++ b/cmake/external/protobuf.cmake
@@ -0,0 +1,52 @@
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

INCLUDE(ExternalProject)

SET(PROTOBUF_SOURCES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/protobuf)
SET(PROTOBUF_INSTALL_DIR ${PROJECT_BINARY_DIR}/protobuf)

ExternalProject_Add(
    protobuf
    PREFIX ${PROTOBUF_SOURCES_DIR}
    DEPENDS zlib
    GIT_REPOSITORY "https://github.com/google/protobuf.git"
    GIT_TAG "v3.0.0"
    CONFIGURE_COMMAND
        ${CMAKE_COMMAND} ${PROTOBUF_SOURCES_DIR}/src/protobuf/cmake
            -Dprotobuf_BUILD_TESTS=OFF
            -DCMAKE_POSITION_INDEPENDENT_CODE=ON
            -DCMAKE_BUILD_TYPE=Release
            -DCMAKE_INSTALL_PREFIX=${PROTOBUF_INSTALL_DIR}
    UPDATE_COMMAND ""
)

SET(PROTOBUF_INCLUDE_DIR "${PROTOBUF_INSTALL_DIR}/include" CACHE PATH "protobuf include directory." FORCE)
INCLUDE_DIRECTORIES(${PROTOBUF_INCLUDE_DIR})

IF(WIN32)
    SET(PROTOBUF_LIBRARIES
        "${PROTOBUF_INSTALL_DIR}/lib/libprotobuf-lite.lib"
        "${PROTOBUF_INSTALL_DIR}/lib/libprotobuf.lib"
        "${PROTOBUF_INSTALL_DIR}/lib/libprotoc.lib" CACHE FILEPATH "protobuf libraries." FORCE)
    SET(PROTOBUF_PROTOC_EXECUTABLE "${PROTOBUF_INSTALL_DIR}/bin/protoc.exe" CACHE FILEPATH "protobuf executable." FORCE)
ELSE(WIN32)
    SET(PROTOBUF_LIBRARIES
        "${PROTOBUF_INSTALL_DIR}/lib/libprotobuf-lite.a"
        "${PROTOBUF_INSTALL_DIR}/lib/libprotobuf.a"
        "${PROTOBUF_INSTALL_DIR}/lib/libprotoc.a" CACHE FILEPATH "protobuf libraries." FORCE)
    SET(PROTOBUF_PROTOC_EXECUTABLE "${PROTOBUF_INSTALL_DIR}/bin/protoc" CACHE FILEPATH "protobuf executable." FORCE)
ENDIF(WIN32)

LIST(APPEND external_project_dependencies protobuf)
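Since the script caches PROTOBUF_PROTOC_EXECUTABLE, generated sources can be produced at build time once the external project has installed. A hedged sketch of that wiring; example.proto is a placeholder, not a file added by this patch:

    ADD_CUSTOM_COMMAND(
        OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/example.pb.cc
               ${CMAKE_CURRENT_BINARY_DIR}/example.pb.h
        COMMAND ${PROTOBUF_PROTOC_EXECUTABLE}
                -I${CMAKE_CURRENT_SOURCE_DIR} --cpp_out=${CMAKE_CURRENT_BINARY_DIR}
                ${CMAKE_CURRENT_SOURCE_DIR}/example.proto
        # Depend on the ExternalProject target so protoc exists first.
        DEPENDS protobuf ${CMAKE_CURRENT_SOURCE_DIR}/example.proto)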
From 280994fafac08ac77e22b796aced5535fdc3a009 Mon Sep 17 00:00:00 2001
From: liaogang
Date: Tue, 27 Dec 2016 11:25:28 +0800
Subject: [PATCH 02/51] Add other external dependencies
---
 cmake/external/numpy.cmake    | 57 ++++++++++++++++++++++++
 cmake/external/openblas.cmake | 44 +++++++++++++++++++
 cmake/external/python.cmake   | 83 +++++++++++++++++++++++++++++++++++
 cmake/external/swig.cmake     | 75 +++++++++++++++++++++++++++++++
 cmake/external/warpctc.cmake  | 39 ++++++++++++++++
 cmake/external/zlib.cmake     | 44 +++++++++++++++++++
 6 files changed, 342 insertions(+)
 create mode 100644 cmake/external/numpy.cmake
 create mode 100644 cmake/external/openblas.cmake
 create mode 100644 cmake/external/python.cmake
 create mode 100644 cmake/external/swig.cmake
 create mode 100644 cmake/external/warpctc.cmake
 create mode 100644 cmake/external/zlib.cmake

diff --git a/cmake/external/numpy.cmake b/cmake/external/numpy.cmake
new file mode 100644
index 0000000000..de3e6492cd
--- /dev/null
+++ b/cmake/external/numpy.cmake
@@ -0,0 +1,57 @@
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

INCLUDE(ExternalProject)

SET(NUMPY_SOURCES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/numpy)
SET(NUMPY_INSTALL_DIR ${PROJECT_BINARY_DIR}/numpy)
set(NUMPY_VERSION "v1.11.3")

# setuptools
ExternalProject_Add(setuptools
    PREFIX ${PYTHON_SOURCES_DIR}/setuptools
    URL http://pypi.python.org/packages/source/s/setuptools/setuptools-0.6c11.tar.gz
    URL_MD5 7df2a529a074f613b509fb44feefe74e
    BUILD_IN_SOURCE 1
    UPDATE_COMMAND ""
    PATCH_COMMAND ""
    CONFIGURE_COMMAND ""
    INSTALL_COMMAND ""
    BUILD_COMMAND ${PYTHON_EXECUTABLE} setup.py install
    DEPENDS python zlib
)

ExternalProject_Add(cython
    PREFIX ${PYTHON_SOURCES_DIR}/cython
    GIT_REPOSITORY https://github.com/cython/cython.git
    BUILD_IN_SOURCE 1
    CONFIGURE_COMMAND ""
    UPDATE_COMMAND ""
    PATCH_COMMAND ""
    INSTALL_COMMAND ""
    BUILD_COMMAND ${PYTHON_EXECUTABLE} setup.py install
    DEPENDS python
)

ExternalProject_Add(numpy
    GIT_REPOSITORY https://github.com/numpy/numpy.git
    GIT_TAG ${NUMPY_VERSION}
    CONFIGURE_COMMAND ""
    UPDATE_COMMAND ""
    PREFIX ${NUMPY_SOURCES_DIR}
    BUILD_COMMAND ${PYTHON_EXECUTABLE} setup.py build
    INSTALL_COMMAND ${PYTHON_EXECUTABLE} setup.py install
    BUILD_IN_SOURCE 1
    DEPENDS python setuptools cython
)

diff --git a/cmake/external/openblas.cmake b/cmake/external/openblas.cmake
new file mode 100644
index 0000000000..d1220036a0
--- /dev/null
+++ b/cmake/external/openblas.cmake
@@ -0,0 +1,44 @@
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# INCLUDE(cblas)

INCLUDE(ExternalProject)

SET(CBLAS_SOURCES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/openblas)
SET(CBLAS_INSTALL_DIR ${PROJECT_BINARY_DIR}/openblas)

ExternalProject_Add(
    openblas
    GIT_REPOSITORY "https://github.com/xianyi/OpenBLAS.git"
    GIT_TAG v0.2.19
    PREFIX ${CBLAS_SOURCES_DIR}
    INSTALL_DIR ${CBLAS_INSTALL_DIR}
    BUILD_IN_SOURCE 1
    UPDATE_COMMAND ""
    CONFIGURE_COMMAND ""
    BUILD_COMMAND cd ${CBLAS_SOURCES_DIR}/src/openblas && make -j4
    INSTALL_COMMAND cd ${CBLAS_SOURCES_DIR}/src/openblas && make install PREFIX=
)

SET(CBLAS_INCLUDE_DIR "${CBLAS_INSTALL_DIR}/include" CACHE PATH "openblas include directory." FORCE)
INCLUDE_DIRECTORIES(${CBLAS_INCLUDE_DIR})

IF(WIN32)
    set(CBLAS_LIBRARIES "${CBLAS_INSTALL_DIR}/lib/openblas.lib" CACHE FILEPATH "openblas library." FORCE)
ELSE(WIN32)
    set(CBLAS_LIBRARIES "${CBLAS_INSTALL_DIR}/lib/libopenblas.a" CACHE FILEPATH "openblas library" FORCE)
ENDIF(WIN32)

LIST(APPEND external_project_dependencies openblas)

diff --git a/cmake/external/python.cmake b/cmake/external/python.cmake
new file mode 100644
index 0000000000..b459913314
--- /dev/null
+++ b/cmake/external/python.cmake
@@ -0,0 +1,83 @@
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +INCLUDE(ExternalProject) + +SET(PYTHON_SOURCES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/python) +SET(PYTHON_INSTALL_DIR ${PROJECT_BINARY_DIR}/python) + +if(MSVC) + list(APPEND EXTERNAL_PROJECT_OPTIONAL_ARGS + PATCH_COMMAND ${CMAKE_COMMAND} + -DPYTHON_SRC_DIR:PATH=${_python_SOURCE_DIR} + -P ${CMAKE_CURRENT_LIST_DIR}/PythonPatch.cmake + ) +endif() + +if(APPLE) + list(APPEND EXTERNAL_PROJECT_OPTIONAL_CMAKE_ARGS + -DCMAKE_BUILD_WITH_INSTALL_RPATH:BOOL=ON + ) +endif() + +set(EXTERNAL_PROJECT_OPTIONAL_CMAKE_CACHE_ARGS) + +# Force Python build to "Release". +if(CMAKE_CONFIGURATION_TYPES) + set(SAVED_CMAKE_CFG_INTDIR ${CMAKE_CFG_INTDIR}) + set(CMAKE_CFG_INTDIR "Release") +else() + list(APPEND EXTERNAL_PROJECT_OPTIONAL_CMAKE_CACHE_ARGS + -DCMAKE_BUILD_TYPE:STRING=Release) +endif() + +ExternalProject_Add(python + GIT_REPOSITORY "https://github.com/python-cmake-buildsystem/python-cmake-buildsystem.git" + GIT_TAG "ed5f9bcee540e47f82fa17f8360b820591aa6d66" + PREFIX ${PYTHON_SOURCES_DIR} + UPDATE_COMMAND "" + CMAKE_CACHE_ARGS + -DCMAKE_INSTALL_PREFIX:PATH=${PYTHON_INSTALL_DIR} + -DBUILD_SHARED:BOOL=OFF + -DBUILD_STATIC:BOOL=ON + -DUSE_SYSTEM_LIBRARIES:BOOL=OFF + -DZLIB_ROOT:FILEPATH=${ZLIB_ROOT} + -DZLIB_INCLUDE_DIR:PATH=${ZLIB_INCLUDE_DIR} + -DZLIB_LIBRARY:FILEPATH=${ZLIB_LIBRARIES} + -DDOWNLOAD_SOURCES:BOOL=ON + -DINSTALL_WINDOWS_TRADITIONAL:BOOL=OFF + ${EXTERNAL_PROJECT_OPTIONAL_CMAKE_CACHE_ARGS} + ${EXTERNAL_PROJECT_OPTIONAL_CMAKE_ARGS} + DEPENDS zlib +) + +set(_python_DIR ${PYTHON_INSTALL_DIR}) + +if(UNIX) + set(_python_IMPORT_SUFFIX so) + if(APPLE) + set(_python_IMPORT_SUFFIX dylib) + endif() + set(PYTHON_INCLUDE_DIR "${PYTHON_INSTALL_DIR}/include/python2.7" CACHE PATH "Python include dir" FORCE) + set(PYTHON_LIBRARY "${PYTHON_INSTALL_DIR}/lib/libpython2.7.${_python_IMPORT_SUFFIX}" CACHE FILEPATH "Python library" FORCE) + set(PYTHON_EXECUTABLE ${PYTHON_INSTALL_DIR}/bin/python CACHE FILEPATH "Python executable" FORCE) + set(PY_SITE_PACKAGES_PATH "${PYTHON_INSTALL_DIR}/lib/python2.7/site-packages" CACHE PATH "Python site-packages path" FORCE) +elseif(WIN32) + set(PYTHON_INCLUDE_DIR "${PYTHON_INSTALL_DIR}/include" CACHE PATH "Python include dir" FORCE) + set(PYTHON_LIBRARY "${PYTHON_INSTALL_DIR}/libs/python27.lib" CACHE FILEPATH "Python library" FORCE) + set(PYTHON_EXECUTABLE "${PYTHON_INSTALL_DIR}/bin/python.exe" CACHE FILEPATH "Python executable" FORCE) + set(PY_SITE_PACKAGES_PATH "${PYTHON_INSTALL_DIR}/Lib/site-packages" CACHE PATH "Python site-packages path" FORCE) +else() + message(FATAL_ERROR "Unknown system !") +endif() diff --git a/cmake/external/swig.cmake b/cmake/external/swig.cmake new file mode 100644 index 0000000000..9dc112b98e --- /dev/null +++ b/cmake/external/swig.cmake @@ -0,0 +1,75 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Look for system swig +# FIND_PACKAGE(SWIG) + +#IF(NOT ${SWIG_FOUND}) + # build swig as an external project + INCLUDE(ExternalProject) + SET(SWIG_SOURCES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/swig) + SET(SWIG_INSTALL_DIR ${PROJECT_BINARY_DIR}/swig) + SET(SWIG_TARGET_VERSION "3.0.2") + SET(SWIG_DOWNLOAD_SRC_MD5 "62f9b0d010cef36a13a010dc530d0d41") + SET(SWIG_DOWNLOAD_WIN_MD5 "3f18de4fc09ab9abb0d3be37c11fbc8f") + + IF(WIN32) + # swig.exe available as pre-built binary on Windows: + ExternalProject_Add(swig + URL http://prdownloads.sourceforge.net/swig/swigwin-${SWIG_TARGET_VERSION}.zip + URL_MD5 ${SWIG_DOWNLOAD_WIN_MD5} + SOURCE_DIR ${SWIG_SOURCES_DIR} + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND "" + ) + SET(SWIG_DIR ${SWIG_SOURCES_DIR} CACHE FILEPATH "SWIG Directory" FORCE) + SET(SWIG_EXECUTABLE ${SWIG_SOURCES_DIR}/swig.exe CACHE FILEPATH "SWIG Executable" FORCE) + + ELSE(WIN32) + # From PCRE configure + ExternalProject_Add(pcre + GIT_REPOSITORY https://github.com/svn2github/pcre.git + PREFIX ${SWIG_SOURCES_DIR}/pcre + UPDATE_COMMAND "" + CMAKE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${SWIG_INSTALL_DIR}/pcre + ) + + # swig uses bison find it by cmake and pass it down + FIND_PACKAGE(BISON) + + # From SWIG configure + ExternalProject_Add(swig + URL https://github.com/swig/swig/archive/rel-3.0.10.tar.gz + PREFIX ${SWIG_SOURCES_DIR} + UPDATE_COMMAND "" + CONFIGURE_COMMAND cd ${SWIG_SOURCES_DIR}/src/swig && ./autogen.sh + CONFIGURE_COMMAND cd ${SWIG_SOURCES_DIR}/src/swig && + env "PCRE_LIBS=${SWIG_INSTALL_DIR}/pcre/lib/libpcre.a \ + ${SWIG_INSTALL_DIR}/pcre/lib/libpcrecpp.a \ + ${SWIG_INSTALL_DIR}/pcre/lib/libpcreposix.a" + ./configure + --prefix=${SWIG_INSTALL_DIR} + --with-pcre-prefix=${SWIG_INSTALL_DIR}/pcre + --with-python=${PYTHON_EXECUTABLE} + BUILD_COMMAND cd ${SWIG_SOURCES_DIR}/src/swig && make + INSTALL_COMMAND cd ${SWIG_SOURCES_DIR}/src/swig && make install + DEPENDS pcre python + ) + + set(SWIG_DIR ${SWIG_INSTALL_DIR}/share/swig/${SWIG_TARGET_VERSION} CACHE FILEPATH "SWIG Directory" FORCE) + set(SWIG_EXECUTABLE ${SWIG_INSTALL_DIR}/bin/swig CACHE FILEPATH "SWIG Executable" FORCE) + ENDIF(WIN32) +#ENDIF() + diff --git a/cmake/external/warpctc.cmake b/cmake/external/warpctc.cmake new file mode 100644 index 0000000000..57864aca69 --- /dev/null +++ b/cmake/external/warpctc.cmake @@ -0,0 +1,39 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
INCLUDE(ExternalProject)

SET(WARPCTC_SOURCES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/warpctc)
SET(WARPCTC_INSTALL_DIR ${PROJECT_BINARY_DIR}/warpctc)

ExternalProject_Add(
    warpctc
    GIT_REPOSITORY "https://github.com/gangliao/warp-ctc.git"
    PREFIX ${WARPCTC_SOURCES_DIR}
    CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${WARPCTC_INSTALL_DIR}
    CMAKE_ARGS -DWITH_GPU=${CUDA_FOUND}
)

SET(WARPCTC_INCLUDE_DIR "${WARP_INSTALL_DIR}/include" CACHE PATH "Warp-ctc Directory" FORCE)
INCLUDE_DIRECTORIES(${WARPCTC_INCLUDE_DIR})

IF(WIN32)
    set(WARPCTC_LIBRARIES
        "${WARPCTC_INSTALL_DIR}/lib/warpctc.dll" CACHE FILEPATH "Warp-ctc Library" FORCE)
ELSE(WIN32)
    set(WARPCTC_LIBRARIES
        "${WARPCTC_INSTALL_DIR}/lib/libwarpctc.so" CACHE FILEPATH "Warp-ctc Library" FORCE)
ENDIF(WIN32)

LIST(APPEND external_project_dependencies warpctc)

diff --git a/cmake/external/zlib.cmake b/cmake/external/zlib.cmake
new file mode 100644
index 0000000000..ec44467aa7
--- /dev/null
+++ b/cmake/external/zlib.cmake
@@ -0,0 +1,44 @@
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

INCLUDE(ExternalProject)

SET(ZLIB_SOURCES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/zlib)
SET(ZLIB_INSTALL_DIR ${PROJECT_BINARY_DIR}/zlib)

ExternalProject_Add(
    zlib
    GIT_REPOSITORY "https://github.com/madler/zlib.git"
    GIT_TAG "v1.2.8"
    PREFIX ${ZLIB_SOURCES_DIR}
    CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${ZLIB_INSTALL_DIR}
    CMAKE_ARGS -DBUILD_SHARED_LIBS=OFF
    CMAKE_ARGS -DCMAKE_POSITION_INDEPENDENT_CODE=ON
    CMAKE_ARGS -DCMAKE_MACOSX_RPATH=ON
    LOG_DOWNLOAD ON
    UPDATE_COMMAND ""
)

SET(ZLIB_ROOT ${ZLIB_INSTALL_DIR} CACHE PATH "zlib root directory." FORCE)

SET(ZLIB_INCLUDE_DIR "${ZLIB_INSTALL_DIR}/include" CACHE PATH "zlib include directory." FORCE)
INCLUDE_DIRECTORIES(${ZLIB_INCLUDE_DIR})

IF(WIN32)
    SET(ZLIB_LIBRARIES "${ZLIB_INSTALL_DIR}/lib/zlibstatic.lib" CACHE FILEPATH "zlib library." FORCE)
ELSE(WIN32)
    set(ZLIB_LIBRARIES "${ZLIB_INSTALL_DIR}/lib/libz.a" CACHE FILEPATH "zlib library." FORCE)
ENDIF(WIN32)

LIST(APPEND external_project_dependencies zlib)

From 6cd4b6e041c09a4ba12a5bfd236da2891364dcb4 Mon Sep 17 00:00:00 2001
From: liaogang
Date: Tue, 27 Dec 2016 17:11:26 +0800
Subject: [PATCH 03/51] Update external libs
---
 cmake/external/numpy.cmake    | 104 ++++++++++++++++-----------
 cmake/external/openblas.cmake |   2 -
 cmake/external/python.cmake   | 131 ++++++++++++++++++----------------
 cmake/external/swig.cmake     |  24 +++++--
 cmake/external/warpctc.cmake  |  11 ++-
 5 files changed, 159 insertions(+), 113 deletions(-)

diff --git a/cmake/external/numpy.cmake b/cmake/external/numpy.cmake
index de3e6492cd..607ff31789 100644
--- a/cmake/external/numpy.cmake
+++ b/cmake/external/numpy.cmake
@@ -12,46 +12,64 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-INCLUDE(ExternalProject) - -SET(NUMPY_SOURCES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/numpy) -SET(NUMPY_INSTALL_DIR ${PROJECT_BINARY_DIR}/numpy) -set(NUMPY_VERSION "v1.11.3") - -# setuptools -ExternalProject_Add(setuptools - PREFIX ${PYTHON_SOURCES_DIR}/setuptools - URL http://pypi.python.org/packages/source/s/setuptools/setuptools-0.6c11.tar.gz - URL_MD5 7df2a529a074f613b509fb44feefe74e - BUILD_IN_SOURCE 1 - UPDATE_COMMAND "" - PATCH_COMMAND "" - CONFIGURE_COMMAND "" - INSTALL_COMMAND "" - BUILD_COMMAND ${PYTHON_EXECUTABLE} setup.py install - DEPENDS python zlib -) - -ExternalProject_Add(cython - PREFIX ${PYTHON_SOURCES_DIR}/cython - GIT_REPOSITORY https://github.com/cython/cython.git - BUILD_IN_SOURCE 1 - CONFIGURE_COMMAND "" - UPDATE_COMMAND "" - PATCH_COMMAND "" - INSTALL_COMMAND "" - BUILD_COMMAND ${PYTHON_EXECUTABLE} setup.py install - DEPENDS python -) - -ExternalProject_Add(numpy - GIT_REPOSITORY https://github.com/numpy/numpy.git - GIT_TAG ${NUMPY_VERSION} - CONFIGURE_COMMAND "" - UPDATE_COMMAND "" - PREFIX ${NUMPY_SOURCES_DIR} - BUILD_COMMAND ${PYTHON_EXECUTABLE} setup.py build - INSTALL_COMMAND ${PYTHON_EXECUTABLE} setup.py install - BUILD_IN_SOURCE 1 - DEPENDS python setuptools cython -) +FIND_PACKAGE(NumPy) + +IF(NOT ${NUMPY_FOUND}) + + INCLUDE(ExternalProject) + + SET(NUMPY_SOURCES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/numpy) + SET(NUMPY_INSTALL_DIR ${PROJECT_BINARY_DIR}/numpy) + set(NUMPY_VERSION "v1.11.3") + + ExternalProject_Add(setuptools + PREFIX ${PYTHON_SOURCES_DIR}/setuptools + URL http://pypi.python.org/packages/source/s/setuptools/setuptools-0.6c11.tar.gz + URL_MD5 7df2a529a074f613b509fb44feefe74e + BUILD_IN_SOURCE 1 + UPDATE_COMMAND "" + PATCH_COMMAND "" + CONFIGURE_COMMAND "" + INSTALL_COMMAND "" + BUILD_COMMAND ${PYTHON_EXECUTABLE} setup.py install + DEPENDS python zlib + ) + + ExternalProject_Add(cython + PREFIX ${PYTHON_SOURCES_DIR}/cython + GIT_REPOSITORY https://github.com/cython/cython.git + BUILD_IN_SOURCE 1 + CONFIGURE_COMMAND "" + UPDATE_COMMAND "" + PATCH_COMMAND "" + INSTALL_COMMAND "" + BUILD_COMMAND ${PYTHON_EXECUTABLE} setup.py install + DEPENDS python + ) + + ExternalProject_Add(numpy + GIT_REPOSITORY https://github.com/numpy/numpy.git + GIT_TAG ${NUMPY_VERSION} + CONFIGURE_COMMAND "" + UPDATE_COMMAND "" + PREFIX ${NUMPY_SOURCES_DIR} + BUILD_COMMAND ${PYTHON_EXECUTABLE} setup.py build + INSTALL_COMMAND ${PYTHON_EXECUTABLE} setup.py install + BUILD_IN_SOURCE 1 + DEPENDS python setuptools cython + ) + + # find numpy include directory + FILE(WRITE ${PROJECT_BINARY_DIR}/FindNumpyPath.py + "try: import numpy; print(numpy.get_include())\nexcept:pass\n") + + EXEC_PROGRAM("${PYTHON_EXECUTABLE}" ${PROJECT_BINARY_DIR} + ARGS "FindNumpyPath.py" + OUTPUT_VARIABLE NUMPY_PATH) + + FIND_PATH(PYTHON_NUMPY_INCLUDE_DIR numpy/arrayobject.h + HINTS "${NUMPY_PATH}" "${PYTHON_INCLUDE_PATH}") + + INCLUDE_DIRECTORIES(${PYTHON_NUMPY_INCLUDE_DIR}) + +ENDIF() diff --git a/cmake/external/openblas.cmake b/cmake/external/openblas.cmake index d1220036a0..2683153b49 100644 --- a/cmake/external/openblas.cmake +++ b/cmake/external/openblas.cmake @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-# INCLUDE(cblas) - INCLUDE(ExternalProject) SET(CBLAS_SOURCES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/openblas) diff --git a/cmake/external/python.cmake b/cmake/external/python.cmake index b459913314..2354f555db 100644 --- a/cmake/external/python.cmake +++ b/cmake/external/python.cmake @@ -12,72 +12,81 @@ # See the License for the specific language governing permissions and # limitations under the License. -INCLUDE(ExternalProject) +FIND_PACKAGE(PythonLibs 2.7) +FIND_PACKAGE(PythonInterp 2.7) -SET(PYTHON_SOURCES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/python) -SET(PYTHON_INSTALL_DIR ${PROJECT_BINARY_DIR}/python) +IF((NOT ${PYTHONINTERP_FOUND}) OR (NOT ${PYTHONLIBS_FOUND})) -if(MSVC) - list(APPEND EXTERNAL_PROJECT_OPTIONAL_ARGS - PATCH_COMMAND ${CMAKE_COMMAND} - -DPYTHON_SRC_DIR:PATH=${_python_SOURCE_DIR} - -P ${CMAKE_CURRENT_LIST_DIR}/PythonPatch.cmake - ) -endif() + INCLUDE(ExternalProject) -if(APPLE) - list(APPEND EXTERNAL_PROJECT_OPTIONAL_CMAKE_ARGS - -DCMAKE_BUILD_WITH_INSTALL_RPATH:BOOL=ON - ) -endif() + SET(PYTHON_SOURCES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/python) + SET(PYTHON_INSTALL_DIR ${PROJECT_BINARY_DIR}/python) -set(EXTERNAL_PROJECT_OPTIONAL_CMAKE_CACHE_ARGS) + IF(MSVC) + LIST(APPEND EXTERNAL_PROJECT_OPTIONAL_ARGS + PATCH_COMMAND ${CMAKE_COMMAND} + -DPYTHON_SRC_DIR:PATH=${_python_SOURCE_DIR} + -P ${CMAKE_CURRENT_LIST_DIR}/PythonPatch.cmake + ) + ENDIF() -# Force Python build to "Release". -if(CMAKE_CONFIGURATION_TYPES) - set(SAVED_CMAKE_CFG_INTDIR ${CMAKE_CFG_INTDIR}) - set(CMAKE_CFG_INTDIR "Release") -else() - list(APPEND EXTERNAL_PROJECT_OPTIONAL_CMAKE_CACHE_ARGS - -DCMAKE_BUILD_TYPE:STRING=Release) -endif() + IF(APPLE) + LIST(APPEND EXTERNAL_PROJECT_OPTIONAL_CMAKE_ARGS + -DCMAKE_BUILD_WITH_INSTALL_RPATH:BOOL=ON + ) + ENDIF() -ExternalProject_Add(python - GIT_REPOSITORY "https://github.com/python-cmake-buildsystem/python-cmake-buildsystem.git" - GIT_TAG "ed5f9bcee540e47f82fa17f8360b820591aa6d66" - PREFIX ${PYTHON_SOURCES_DIR} - UPDATE_COMMAND "" - CMAKE_CACHE_ARGS - -DCMAKE_INSTALL_PREFIX:PATH=${PYTHON_INSTALL_DIR} - -DBUILD_SHARED:BOOL=OFF - -DBUILD_STATIC:BOOL=ON - -DUSE_SYSTEM_LIBRARIES:BOOL=OFF - -DZLIB_ROOT:FILEPATH=${ZLIB_ROOT} - -DZLIB_INCLUDE_DIR:PATH=${ZLIB_INCLUDE_DIR} - -DZLIB_LIBRARY:FILEPATH=${ZLIB_LIBRARIES} - -DDOWNLOAD_SOURCES:BOOL=ON - -DINSTALL_WINDOWS_TRADITIONAL:BOOL=OFF - ${EXTERNAL_PROJECT_OPTIONAL_CMAKE_CACHE_ARGS} - ${EXTERNAL_PROJECT_OPTIONAL_CMAKE_ARGS} - DEPENDS zlib -) + SET(EXTERNAL_PROJECT_OPTIONAL_CMAKE_CACHE_ARGS) -set(_python_DIR ${PYTHON_INSTALL_DIR}) + # Force Python build to "Release". 
+ IF(CMAKE_CONFIGURATION_TYPES) + SET(SAVED_CMAKE_CFG_INTDIR ${CMAKE_CFG_INTDIR}) + SET(CMAKE_CFG_INTDIR "Release") + ELSE() + LIST(APPEND EXTERNAL_PROJECT_OPTIONAL_CMAKE_CACHE_ARGS + -DCMAKE_BUILD_TYPE:STRING=Release) + ENDIF() -if(UNIX) - set(_python_IMPORT_SUFFIX so) - if(APPLE) - set(_python_IMPORT_SUFFIX dylib) - endif() - set(PYTHON_INCLUDE_DIR "${PYTHON_INSTALL_DIR}/include/python2.7" CACHE PATH "Python include dir" FORCE) - set(PYTHON_LIBRARY "${PYTHON_INSTALL_DIR}/lib/libpython2.7.${_python_IMPORT_SUFFIX}" CACHE FILEPATH "Python library" FORCE) - set(PYTHON_EXECUTABLE ${PYTHON_INSTALL_DIR}/bin/python CACHE FILEPATH "Python executable" FORCE) - set(PY_SITE_PACKAGES_PATH "${PYTHON_INSTALL_DIR}/lib/python2.7/site-packages" CACHE PATH "Python site-packages path" FORCE) -elseif(WIN32) - set(PYTHON_INCLUDE_DIR "${PYTHON_INSTALL_DIR}/include" CACHE PATH "Python include dir" FORCE) - set(PYTHON_LIBRARY "${PYTHON_INSTALL_DIR}/libs/python27.lib" CACHE FILEPATH "Python library" FORCE) - set(PYTHON_EXECUTABLE "${PYTHON_INSTALL_DIR}/bin/python.exe" CACHE FILEPATH "Python executable" FORCE) - set(PY_SITE_PACKAGES_PATH "${PYTHON_INSTALL_DIR}/Lib/site-packages" CACHE PATH "Python site-packages path" FORCE) -else() - message(FATAL_ERROR "Unknown system !") -endif() + ExternalProject_Add(python + GIT_REPOSITORY "https://github.com/python-cmake-buildsystem/python-cmake-buildsystem.git" + GIT_TAG "ed5f9bcee540e47f82fa17f8360b820591aa6d66" + PREFIX ${PYTHON_SOURCES_DIR} + UPDATE_COMMAND "" + CMAKE_CACHE_ARGS + -DCMAKE_INSTALL_PREFIX:PATH=${PYTHON_INSTALL_DIR} + -DBUILD_SHARED:BOOL=OFF + -DBUILD_STATIC:BOOL=ON + -DUSE_SYSTEM_LIBRARIES:BOOL=OFF + -DZLIB_ROOT:FILEPATH=${ZLIB_ROOT} + -DZLIB_INCLUDE_DIR:PATH=${ZLIB_INCLUDE_DIR} + -DZLIB_LIBRARY:FILEPATH=${ZLIB_LIBRARIES} + -DDOWNLOAD_SOURCES:BOOL=ON + -DINSTALL_WINDOWS_TRADITIONAL:BOOL=OFF + ${EXTERNAL_PROJECT_OPTIONAL_CMAKE_CACHE_ARGS} + ${EXTERNAL_PROJECT_OPTIONAL_CMAKE_ARGS} + DEPENDS zlib + ) + + SET(_python_DIR ${PYTHON_INSTALL_DIR}) + + IF(UNIX) + SET(_python_IMPORT_SUFFIX a) + IF(APPLE) + SET(_python_IMPORT_SUFFIX lib) + ENDIF() + SET(PYTHON_INCLUDE_DIR "${PYTHON_INSTALL_DIR}/include/python2.7" CACHE PATH "Python include dir" FORCE) + SET(PYTHON_LIBRARIES "${PYTHON_INSTALL_DIR}/lib/libpython2.7.${_python_IMPORT_SUFFIX}" CACHE FILEPATH "Python library" FORCE) + SET(PYTHON_EXECUTABLE ${PYTHON_INSTALL_DIR}/bin/python CACHE FILEPATH "Python executable" FORCE) + SET(PY_SITE_PACKAGES_PATH "${PYTHON_INSTALL_DIR}/lib/python2.7/site-packages" CACHE PATH "Python site-packages path" FORCE) + ELSEIF(WIN32) + SET(PYTHON_INCLUDE_DIR "${PYTHON_INSTALL_DIR}/include" CACHE PATH "Python include dir" FORCE) + SET(PYTHON_LIBRARIES "${PYTHON_INSTALL_DIR}/libs/python27.lib" CACHE FILEPATH "Python library" FORCE) + SET(PYTHON_EXECUTABLE "${PYTHON_INSTALL_DIR}/bin/python.exe" CACHE FILEPATH "Python executable" FORCE) + SET(PY_SITE_PACKAGES_PATH "${PYTHON_INSTALL_DIR}/Lib/site-packages" CACHE PATH "Python site-packages path" FORCE) + ELSE() + MESSAGE(FATAL_ERROR "Unknown system !") + ENDIF() + +INCLUDE_DIRECTORIES(${PYTHON_INCLUDE_DIR}) + +ENDIF() diff --git a/cmake/external/swig.cmake b/cmake/external/swig.cmake index 9dc112b98e..1ec61660bc 100644 --- a/cmake/external/swig.cmake +++ b/cmake/external/swig.cmake @@ -13,9 +13,9 @@ # limitations under the License. 
# Look for system swig -# FIND_PACKAGE(SWIG) +FIND_PACKAGE(SWIG) -#IF(NOT ${SWIG_FOUND}) +IF(NOT ${SWIG_FOUND}) # build swig as an external project INCLUDE(ExternalProject) SET(SWIG_SOURCES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/swig) @@ -62,14 +62,28 @@ ./configure --prefix=${SWIG_INSTALL_DIR} --with-pcre-prefix=${SWIG_INSTALL_DIR}/pcre - --with-python=${PYTHON_EXECUTABLE} BUILD_COMMAND cd ${SWIG_SOURCES_DIR}/src/swig && make INSTALL_COMMAND cd ${SWIG_SOURCES_DIR}/src/swig && make install - DEPENDS pcre python + DEPENDS pcre ) set(SWIG_DIR ${SWIG_INSTALL_DIR}/share/swig/${SWIG_TARGET_VERSION} CACHE FILEPATH "SWIG Directory" FORCE) set(SWIG_EXECUTABLE ${SWIG_INSTALL_DIR}/bin/swig CACHE FILEPATH "SWIG Executable" FORCE) ENDIF(WIN32) -#ENDIF() +ENDIF() +FUNCTION(generate_python_api target_name) + ADD_CUSTOM_COMMAND(OUTPUT ${PROJ_ROOT}/paddle/py_paddle/swig_paddle.py + ${PROJ_ROOT}/paddle/Paddle_wrap.cxx + ${PROJ_ROOT}/paddle/Paddle_wrap.h + COMMAND ${SWIG_EXECUTABLE} -python -c++ -outcurrentdir -I../ api/Paddle.swig + && mv ${PROJ_ROOT}/paddle/swig_paddle.py ${PROJ_ROOT}/paddle/py_paddle/swig_paddle.py + DEPENDS ${PROJ_ROOT}/paddle/api/Paddle.swig + ${PROJ_ROOT}/paddle/api/PaddleAPI.h + WORKING_DIRECTORY ${PROJ_ROOT}/paddle + COMMENT "Generate Python API from swig") + ADD_CUSTOM_TARGET(${target_name} ALL DEPENDS + ${PROJ_ROOT}/paddle/Paddle_wrap.cxx + ${PROJ_ROOT}/paddle/Paddle_wrap.h + ${PROJ_ROOT}/paddle/py_paddle/swig_paddle.py) +ENDFUNCTION(generate_python_api) diff --git a/cmake/external/warpctc.cmake b/cmake/external/warpctc.cmake index 57864aca69..4fdd47acdb 100644 --- a/cmake/external/warpctc.cmake +++ b/cmake/external/warpctc.cmake @@ -17,22 +17,29 @@ INCLUDE(ExternalProject) SET(WARPCTC_SOURCES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/warpctc) SET(WARPCTC_INSTALL_DIR ${PROJECT_BINARY_DIR}/warpctc) +IF(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") + SET(USE_OMP ON) +ELSE() + SET(USE_OMP OFF) +ENDIF() + ExternalProject_Add( warpctc GIT_REPOSITORY "https://github.com/gangliao/warp-ctc.git" PREFIX ${WARPCTC_SOURCES_DIR} CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${WARPCTC_INSTALL_DIR} CMAKE_ARGS -DWITH_GPU=${CUDA_FOUND} + CMAKE_ARGS -DWITH_OMP=${USE_OMP} ) SET(WARPCTC_INCLUDE_DIR "${WARP_INSTALL_DIR}/include" CACHE PATH "Warp-ctc Directory" FORCE) INCLUDE_DIRECTORIES(${WARPCTC_INCLUDE_DIR}) IF(WIN32) - set(WARPCTC_LIBRARIES + SET(WARPCTC_LIBRARIES "${WARPCTC_INSTALL_DIR}/lib/warpctc.dll" CACHE FILEPATH "Warp-ctc Library" FORCE) ELSE(WIN32) - set(WARPCTC_LIBRARIES + SET(WARPCTC_LIBRARIES "${WARPCTC_INSTALL_DIR}/lib/libwarpctc.so" CACHE FILEPATH "Warp-ctc Library" FORCE) ENDIF(WIN32) From 662f174b856353105b5291be8f3a855ed54f5f6d Mon Sep 17 00:00:00 2001 From: liaogang Date: Tue, 27 Dec 2016 17:12:39 +0800 Subject: [PATCH 04/51] Refine cmake file names --- .gitignore | 1 + cmake/{FindPythonModule.cmake => python_module.cmake} | 0 cmake/{FindAVX.cmake => simd.cmake} | 0 3 files changed, 1 insertion(+) rename cmake/{FindPythonModule.cmake => python_module.cmake} (100%) rename cmake/{FindAVX.cmake => simd.cmake} (100%) diff --git a/.gitignore b/.gitignore index 1c9730a5ad..0a15b996e2 100644 --- a/.gitignore +++ b/.gitignore @@ -9,6 +9,7 @@ build/ .pydevproject Makefile .test_env/ +third_party/ *~ bazel-* diff --git a/cmake/FindPythonModule.cmake b/cmake/python_module.cmake similarity index 100% rename from cmake/FindPythonModule.cmake rename to cmake/python_module.cmake diff --git a/cmake/FindAVX.cmake b/cmake/simd.cmake similarity index 100% rename from cmake/FindAVX.cmake rename to cmake/simd.cmake 
From 62b55cc6ab9c21c9a09b3ad0c9fd94e5e8a3bfd6 Mon Sep 17 00:00:00 2001 From: liaogang Date: Tue, 27 Dec 2016 17:13:21 +0800 Subject: [PATCH 05/51] Remove paddle internals --- python/CMakeLists.txt | 5 ----- python/setup.py.in | 5 ----- 2 files changed, 10 deletions(-) diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index dce0b90952..6b80e4d58e 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -10,11 +10,6 @@ set(PY_FILES paddle/__init__.py ${HELPERS_PY_FILES} ${UTILS_PY_FILES}) -set(PADDLE_INTERNAL_PACKAGE "") -if (PADDLE_WITH_INTERNAL) - set(PADDLE_INTERNAL_PACKAGE "paddle.internals") -endif() - configure_file(${CMAKE_CURRENT_SOURCE_DIR}/setup.py.in ${CMAKE_CURRENT_BINARY_DIR}/setup.py) diff --git a/python/setup.py.in b/python/setup.py.in index d2fb95f27f..b66a42e87c 100644 --- a/python/setup.py.in +++ b/python/setup.py.in @@ -1,16 +1,11 @@ from setuptools import setup -INTERNAL_PACKAGE='${PADDLE_INTERNAL_PACKAGE}' - packages=['paddle', 'paddle.proto', 'paddle.trainer', 'paddle.trainer_config_helpers', 'paddle.utils'] -if len(INTERNAL_PACKAGE) != 0: - packages.append(INTERNAL_PACKAGE) - setup(name='paddle', version='${PADDLE_VERSION}', description='Parallel Distributed Deep Learning', From a02ec8c9323558d035fc2ca78d5076decca52c6f Mon Sep 17 00:00:00 2001 From: liaogang Date: Tue, 27 Dec 2016 17:13:39 +0800 Subject: [PATCH 06/51] Refine CMakeLists --- CMakeLists.txt | 180 ++++++++++--------------------------- cmake/check_packages.cmake | 31 +------ cmake/definitions.cmake | 62 +++++++++++++ cmake/rdma.cmake | 132 ++++++++++++++------------- cmake/swig.cmake | 15 ---- cmake/util.cmake | 34 +------ cmake/version.cmake | 1 + 7 files changed, 184 insertions(+), 271 deletions(-) create mode 100644 cmake/definitions.cmake delete mode 100644 cmake/swig.cmake diff --git a/CMakeLists.txt b/CMakeLists.txt index 65fbbb481c..7db4c9f1d1 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -4,163 +4,75 @@ project(paddle CXX C) set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake") set(PROJ_ROOT ${CMAKE_SOURCE_DIR}) -include(package) -find_package(SWIG 2.0) -find_package(CUDA QUIET) -find_package(Protobuf REQUIRED) - -# Check protobuf library version. 
-execute_process(COMMAND ${PROTOBUF_PROTOC_EXECUTABLE} --version
-                OUTPUT_VARIABLE PROTOBUF_VERSION)
-string(REPLACE "libprotoc " "" PROTOBUF_VERSION ${PROTOBUF_VERSION})
-set(PROTOBUF_3 OFF)
-if (${PROTOBUF_VERSION} VERSION_GREATER "3.0.0" OR ${PROTOBUF_VERSION} VERSION_EQUAL "3.0.0")
-  set(PROTOBUF_3 ON)
-endif()
-
-find_package(PythonLibs 2.7 REQUIRED)
-find_package(PythonInterp 2.7 REQUIRED)
-find_package(ZLIB REQUIRED)
-find_package(NumPy REQUIRED)
-find_package(Threads REQUIRED)
-find_package(AVX QUIET)
-find_package(Glog REQUIRED)
-find_package(Gflags REQUIRED)
-find_package(GTest)
 find_package(Sphinx)
 find_package(Doxygen)
-include(cblas)
-find_program(M4_EXECUTABLE m4)
-###################### Configurations ###########################
+find_package(CUDA QUIET)
+find_package(Git REQUIRED)
+find_package(Threads REQUIRED)
+
+include(simd)
+
+###################### Configurations ############################
 option(WITH_DSO "Compile PaddlePaddle with dynamic linked libraries" ON)
 option(WITH_GPU "Compile PaddlePaddle with gpu" ${CUDA_FOUND})
 option(WITH_DOUBLE "Compile PaddlePaddle with double precision, otherwise use single precision" OFF)
 option(WITH_AVX "Compile PaddlePaddle with avx intrinsics" ${AVX_FOUND})
 option(WITH_PYTHON "Compile PaddlePaddle with python interpreter" ON)
-option(WITH_STYLE_CHECK "Style Check for PaddlePaddle" ${PYTHONINTERP_FOUND})
+option(WITH_STYLE_CHECK "Style Check for PaddlePaddle" ON)
 option(WITH_RDMA "Compile PaddlePaddle with rdma support" OFF)
 option(WITH_TIMER "Compile PaddlePaddle use timer" OFF)
 option(WITH_PROFILER "Compile PaddlePaddle use gpu profiler" OFF)
-option(WITH_TESTING "Compile and run unittest for PaddlePaddle" ${GTEST_FOUND})
+option(WITH_TESTING "Compile and run unittest for PaddlePaddle" ON)
 option(WITH_DOC "Compile PaddlePaddle with documentation" OFF)
-option(WITH_SWIG_PY "Compile PaddlePaddle with py PaddlePaddle prediction api" ${SWIG_FOUND})
+option(WITH_SWIG_PY "Compile PaddlePaddle with py PaddlePaddle prediction api" ON)
 option(ON_TRAVIS "Running test on travis-ci or not." OFF)
 option(ON_COVERALLS "Generating code coverage data on coveralls or not." OFF)
 option(COVERALLS_UPLOAD "Uploading the generated coveralls json." ON)
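All of these switches remain ordinary CMake cache options, so they can still be overridden at configure time (for example `cmake -DWITH_GPU=ON -DWITH_TESTING=OFF ..`). Equivalently, a cache preload script can pin them; a hedged sketch in which the file name build_options.cmake is hypothetical and would be passed as `cmake -C build_options.cmake ..`:

    # build_options.cmake: preload cache entries before the first configure.
    SET(WITH_GPU OFF CACHE BOOL "Compile PaddlePaddle with gpu" FORCE)
    SET(WITH_TESTING ON CACHE BOOL "Compile and run unittest for PaddlePaddle" FORCE)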
+include(external/zlib)      # download, build, install zlib
+include(external/gflags)    # download, build, install gflags
+include(external/glog)      # download, build, install glog
+include(external/gtest)     # download, build, install gtest
+include(external/protobuf)  # download, build, install protobuf
+include(external/openblas)  # download, build, install openblas
+include(external/python)    # download, build, install python
+include(external/numpy)     # download, build, install numpy
+include(external/swig)      # download, build, install swig
+include(external/warpctc)   # download, build, install warpctc
+
+include(package)        # set paddle packages
+include(cpplint)        # set paddle c++ style
+include(ccache)         # set ccache for compilation
+include(util)           # set unittest and link libs
+include(rdma)           # set rdma libraries
+include(flags)          # set paddle compile flags
+include(cudnn)          # set cudnn libraries
+include(version)        # set PADDLE_VERSION
+include(coveralls)      # set code coverage
+include(python_module)  # set python module
+
+include(check_packages) # check configuration
+include(definitions)    # add paddle definitions
-include(cpplint)
-include(ccache)
-if(WITH_RDMA)
-  include(rdma)
-endif()
-include(util)
-include(flags)
-include(cudnn)
-include(FindPythonModule)
-include(check_packages)
-include(swig)
-include(coveralls)
-
-# Set PaddlePaddle version to Git tag name or Git commit ID.
-find_package(Git REQUIRED)
-# version.cmake will get the current PADDLE_VERSION
-include(version)
-add_definitions(-DPADDLE_VERSION=${PADDLE_VERSION})
-
-if(NOT WITH_GPU)
-  add_definitions(-DPADDLE_ONLY_CPU)
-  add_definitions(-DHPPL_STUB_FUNC)
-
-  list(APPEND CMAKE_CXX_SOURCE_FILE_EXTENSIONS cu)
-else()
-  if(${CUDA_VERSION_MAJOR} VERSION_LESS 7)
-    message(FATAL_ERROR "Paddle need CUDA >= 7.0 to compile")
-  endif()
-
-  if(NOT CUDNN_FOUND)
-    message(FATAL_ERROR "Paddle need cudnn to compile")
-  endif()
-
-  if(WITH_AVX)
-    set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS} "-Xcompiler ${AVX_FLAG}")
-  else(WITH_AVX)
-    set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS} "-Xcompiler ${SSE3_FLAG}")
-  endif(WITH_AVX)
-
-  # Include cuda and cudnn
-  include_directories(${CUDNN_INCLUDE_DIR})
-  include_directories(${CUDA_TOOLKIT_INCLUDE})
-endif(NOT WITH_GPU)
-
-if(WITH_DSO)
-  add_definitions(-DPADDLE_USE_DSO)
-endif(WITH_DSO)
-
-if(WITH_DOUBLE)
-  add_definitions(-DPADDLE_TYPE_DOUBLE)
-  set(ACCURACY double)
-else(WITH_DOUBLE)
-  set(ACCURACY float)
-endif(WITH_DOUBLE)
-
-if(NOT WITH_TIMER)
-  add_definitions(-DPADDLE_DISABLE_TIMER)
-endif(NOT WITH_TIMER)
-
-if(NOT WITH_PROFILER)
-  add_definitions(-DPADDLE_DISABLE_PROFILER)
-endif(NOT WITH_PROFILER)
-
-if(WITH_AVX)
-  set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${AVX_FLAG}")
-  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${AVX_FLAG}")
-else(WITH_AVX)
-  set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SSE3_FLAG}")
-  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SSE3_FLAG}")
-endif(WITH_AVX)
-
-if(WITH_PYTHON)
-  include_directories(${PYTHON_INCLUDE_DIR})
-  include_directories(${PYTHON_NUMPY_INCLUDE_DIR})
-else(WITH_PYTHON)
-  add_definitions(-DPADDLE_NO_PYTHON)
-endif(WITH_PYTHON)
-
-if(WITH_RDMA)
-  include_directories("${RDMA_INC_DIR}")
-else(WITH_RDMA)
-  add_definitions(-DPADDLE_DISABLE_RDMA)
-endif(WITH_RDMA)
-
-# glog
-include_directories(${LIBGLOG_INCLUDE_DIR})
-
-#gflags
-add_definitions(-DGFLAGS_NS=${GFLAGS_NAMESPACE})
-include_directories(${GFLAGS_INCLUDE_DIRS})
-
-if(WITH_TESTING)
-  enable_testing()
-  include_directories(${GTEST_INCLUDE_DIRS})
-endif()
-
-include_directories("${CBLAS_INC_DIR}")
 include_directories("${PROJ_ROOT}")
include_directories("${PROJ_ROOT}/paddle/cuda/include") -include_directories(${PROTOBUF_INCLUDE_DIRS}) include_directories("${CMAKE_CURRENT_BINARY_DIR}/proto") -if(EXISTS "${PROJ_ROOT}/paddle/internals/CMakeLists.txt") - set(PADDLE_WITH_INTERNAL ON) - include(paddle/internals/CMakeLists.txt) -else() - set(PADDLE_WITH_INTERNAL OFF) - set(INTERNAL_PROTO_PATH "") -endif() + +set(EXTERNAL_LIBS + # have not include gtest here. + ${GFLAGS_LIBRARIES} + ${GLOG_LIBRARIES} + ${CBLAS_LIBRARIES} + ${PROTOBUF_LIBRARIES} + ${WARPCTC_LIBRARIES} + ${ZLIB_LIBRARIES} +) + add_subdirectory(proto) add_subdirectory(paddle) add_subdirectory(python) + if(WITH_DOC) add_subdirectory(doc) endif() diff --git a/cmake/check_packages.cmake b/cmake/check_packages.cmake index afb84c6ff5..8f0ed26256 100644 --- a/cmake/check_packages.cmake +++ b/cmake/check_packages.cmake @@ -2,38 +2,13 @@ if(WITH_GPU) find_package(CUDA REQUIRED) # CUDA is required when use gpu -endif() - -if(WITH_PYTHON) - find_package(PythonLibs 2.6 REQUIRED) - find_package(PythonInterp REQUIRED) - find_package(NumPy REQUIRED) -endif() - -if(WITH_STYLE_CHECK) - find_package(PythonInterp REQUIRED) -endif() - -find_package(Glog REQUIRED) - -find_package(Gflags REQUIRED) - -if(WITH_TESTING) - find_package(GTest REQUIRED) -endif() +endif(WITH_GPU) if(WITH_DOC) find_package(Sphinx REQUIRED) find_python_module(recommonmark REQUIRED) -endif() +endif(WITH_DOC) if(WITH_SWIG_PY) - if(NOT SWIG_FOUND) - message(FATAL_ERROR "SWIG is not found. Please install swig or disable WITH_SWIG_PY") - endif() find_python_module(wheel REQUIRED) # package wheel -endif() - -if(NOT M4_EXECUTABLE) - message(FATAL_ERROR "Paddle need m4 to generate proto file.") -endif() +endif(WITH_SWIG_PY) diff --git a/cmake/definitions.cmake b/cmake/definitions.cmake new file mode 100644 index 0000000000..99a52ad764 --- /dev/null +++ b/cmake/definitions.cmake @@ -0,0 +1,62 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +if(WITH_DSO) + add_definitions(-DPADDLE_USE_DSO) +endif(WITH_DSO) + +if(WITH_DOUBLE) + add_definitions(-DPADDLE_TYPE_DOUBLE) +endif(WITH_DOUBLE) + +if(NOT WITH_TIMER) + add_definitions(-DPADDLE_DISABLE_TIMER) +endif(NOT WITH_TIMER) + +if(NOT WITH_PROFILER) + add_definitions(-DPADDLE_DISABLE_PROFILER) +endif(NOT WITH_PROFILER) + +if(NOT WITH_GPU) + add_definitions(-DPADDLE_ONLY_CPU) + add_definitions(-DHPPL_STUB_FUNC) + + list(APPEND CMAKE_CXX_SOURCE_FILE_EXTENSIONS cu) +else() + if(${CUDA_VERSION_MAJOR} VERSION_LESS 7) + message(FATAL_ERROR "Paddle need CUDA >= 7.0 to compile") + endif() + + if(NOT CUDNN_FOUND) + message(FATAL_ERROR "Paddle need cudnn to compile") + endif() + + if(WITH_AVX) + set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS} "-Xcompiler ${AVX_FLAG}") + else(WITH_AVX) + set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS} "-Xcompiler ${SSE3_FLAG}") + endif(WITH_AVX) + + # Include cuda and cudnn + include_directories(${CUDNN_INCLUDE_DIR}) + include_directories(${CUDA_TOOLKIT_INCLUDE}) +endif(NOT WITH_GPU) + +if(WITH_AVX) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${AVX_FLAG}") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${AVX_FLAG}") +else(WITH_AVX) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SSE3_FLAG}") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SSE3_FLAG}") +endif(WITH_AVX) diff --git a/cmake/rdma.cmake b/cmake/rdma.cmake index e9a4da79aa..9ff1a77cac 100644 --- a/cmake/rdma.cmake +++ b/cmake/rdma.cmake @@ -5,72 +5,76 @@ # svn co https://svn.baidu.com/sys/ip/trunk/rdma/thirdparty rdma/ # we use static output in svn repositories to avoid implict bugs from not standard runtime env. -set(RDMA_ROOT $ENV{RDMA_ROOT} CACHE PATH "Folder contains RDMA sock library and thirdparty library") +if(WITH_RDMA) + set(RDMA_ROOT $ENV{RDMA_ROOT} CACHE PATH "Folder contains RDMA sock library and thirdparty library") -function(generate_rdma_links) - #redirect to current DIR to isolate the pollution from system runtime environment - #it can benifits unified control for different gcc environment. - #e.g, by default gcc48 did not refer /usr/lib64 which could contain low version - #runtime libraries that will crash process while loading it. That redirect trick - #can fix it. 
- execute_process( - COMMAND mkdir -p librdma - COMMAND ln -s -f /usr/lib64/libibverbs.so.1.0.0 librdma/libibverbs.so.1 - COMMAND ln -s -f /usr/lib64/libibverbs.so.1.0.0 librdma/libibverbs.so - COMMAND ln -s -f /usr/lib64/librdmacm.so.1.0.0 librdma/librdmacm.so.1 - COMMAND ln -s -f /usr/lib64/librdmacm.so.1.0.0 librdma/librdmacm.so - WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} - ) -endfunction(generate_rdma_links) - - -#check and set headers -find_path(RDMA_INC_SXISOCK sxi_sock.h PATHS ${RDMA_ROOT}/sockrdmav1/output/include) -find_path(RDMA_INC_XIO libxio.h PATHS ${RDMA_ROOT}/thirdparty/output/accelio) -find_path(RDMA_INC_EVENT event2 PATHS ${RDMA_ROOT}/thirdparty/output/libevent) -find_path(RDMA_INC_NUMA numa.h PATHS ${RDMA_ROOT}/thirdparty/output/libnuma) - -#check and set libs -find_library(RDMA_LIB_SXISOCK NAMES sxisock PATHS ${RDMA_ROOT}/sockrdmav1/output) -find_library(RDMA_LIB_XIO NAMES xio PATHS ${RDMA_ROOT}/thirdparty/output/accelio) -find_library(RDMA_LIB_EVENT NAMES event PATHS ${RDMA_ROOT}/thirdparty/output/libevent) -find_library(RDMA_LIB_EVENT_CORE NAMES event_core PATHS ${RDMA_ROOT}/thirdparty/output/libevent) -find_library(RDMA_LIB_EVENT_EXTRA NAMES event_extra PATHS ${RDMA_ROOT}/thirdparty/output/libevent) -find_library(RDMA_LIB_EVENT_PTHREADS NAMES event_pthreads PATHS ${RDMA_ROOT}/thirdparty/output/libevent) -find_library(RDMA_LIB_NUMA NAMES numa PATHS ${RDMA_ROOT}/thirdparty/output/libnuma) - -if( - RDMA_INC_SXISOCK AND - RDMA_INC_XIO AND - RDMA_INC_EVENT AND - RDMA_INC_NUMA AND - RDMA_LIB_SXISOCK AND - RDMA_LIB_XIO AND - RDMA_LIB_EVENT AND - RDMA_LIB_EVENT_CORE AND - RDMA_LIB_EVENT_EXTRA AND - RDMA_LIB_EVENT_PTHREADS AND - RDMA_LIB_NUMA + function(generate_rdma_links) + #redirect to current DIR to isolate the pollution from system runtime environment + #it can benifits unified control for different gcc environment. + #e.g, by default gcc48 did not refer /usr/lib64 which could contain low version + #runtime libraries that will crash process while loading it. That redirect trick + #can fix it. 
+ execute_process( + COMMAND mkdir -p librdma + COMMAND ln -s -f /usr/lib64/libibverbs.so.1.0.0 librdma/libibverbs.so.1 + COMMAND ln -s -f /usr/lib64/libibverbs.so.1.0.0 librdma/libibverbs.so + COMMAND ln -s -f /usr/lib64/librdmacm.so.1.0.0 librdma/librdmacm.so.1 + COMMAND ln -s -f /usr/lib64/librdmacm.so.1.0.0 librdma/librdmacm.so + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} ) + endfunction(generate_rdma_links) - set(RDMA_INC_DIR - ${RDMA_INC_SXISOCK} - ${RDMA_INC_XIO} - ${RDMA_INC_EVENT} - ${RDMA_INC_NUMA}) - set(RDMA_LIBS - ${RDMA_LIB_SXISOCK} - ${RDMA_LIB_XIO} - ${RDMA_LIB_EVENT} - ${RDMA_LIB_EVENT_CORE} - ${RDMA_LIB_EVENT_EXTRA} - ${RDMA_LIB_EVENT_PTHREADS} - ${RDMA_LIB_NUMA} - ) - set(RDMA_LD_FLAGS "-L./librdma -libverbs -lrdmacm -Xlinker -rpath ./librdma") - return() -endif() + #check and set headers + find_path(RDMA_INC_SXISOCK sxi_sock.h PATHS ${RDMA_ROOT}/sockrdmav1/output/include) + find_path(RDMA_INC_XIO libxio.h PATHS ${RDMA_ROOT}/thirdparty/output/accelio) + find_path(RDMA_INC_EVENT event2 PATHS ${RDMA_ROOT}/thirdparty/output/libevent) + find_path(RDMA_INC_NUMA numa.h PATHS ${RDMA_ROOT}/thirdparty/output/libnuma) + + #check and set libs + find_library(RDMA_LIB_SXISOCK NAMES sxisock PATHS ${RDMA_ROOT}/sockrdmav1/output) + find_library(RDMA_LIB_XIO NAMES xio PATHS ${RDMA_ROOT}/thirdparty/output/accelio) + find_library(RDMA_LIB_EVENT NAMES event PATHS ${RDMA_ROOT}/thirdparty/output/libevent) + find_library(RDMA_LIB_EVENT_CORE NAMES event_core PATHS ${RDMA_ROOT}/thirdparty/output/libevent) + find_library(RDMA_LIB_EVENT_EXTRA NAMES event_extra PATHS ${RDMA_ROOT}/thirdparty/output/libevent) + find_library(RDMA_LIB_EVENT_PTHREADS NAMES event_pthreads PATHS ${RDMA_ROOT}/thirdparty/output/libevent) + find_library(RDMA_LIB_NUMA NAMES numa PATHS ${RDMA_ROOT}/thirdparty/output/libnuma) -#if this module is not called, RDMA_INC_DIR RDMA_LIBS will be null, so top module always refer this variable + if( + RDMA_INC_SXISOCK AND + RDMA_INC_XIO AND + RDMA_INC_EVENT AND + RDMA_INC_NUMA AND + RDMA_LIB_SXISOCK AND + RDMA_LIB_XIO AND + RDMA_LIB_EVENT AND + RDMA_LIB_EVENT_CORE AND + RDMA_LIB_EVENT_EXTRA AND + RDMA_LIB_EVENT_PTHREADS AND + RDMA_LIB_NUMA + ) -message(FATAL_ERROR, "RDMA libraries are not found, try to set RDMA_ROOT or check all related libraries.") + set(RDMA_INC_DIR + ${RDMA_INC_SXISOCK} + ${RDMA_INC_XIO} + ${RDMA_INC_EVENT} + ${RDMA_INC_NUMA}) + set(RDMA_LIBS + ${RDMA_LIB_SXISOCK} + ${RDMA_LIB_XIO} + ${RDMA_LIB_EVENT} + ${RDMA_LIB_EVENT_CORE} + ${RDMA_LIB_EVENT_EXTRA} + ${RDMA_LIB_EVENT_PTHREADS} + ${RDMA_LIB_NUMA} + ) + set(RDMA_LD_FLAGS "-L./librdma -libverbs -lrdmacm -Xlinker -rpath ./librdma") + include_directories("${RDMA_INC_DIR}") + else() + #if this module is not called, RDMA_INC_DIR RDMA_LIBS will be null, so top module always refer this variable + message(FATAL_ERROR, "RDMA libraries are not found, try to set RDMA_ROOT or check all related libraries.") + endif() +else(WITH_RDMA) + set(RDMA_LIBS "") + set(RDMA_LD_FLAGS "") + add_definitions(-DPADDLE_DISABLE_RDMA) +endif(WITH_RDMA) diff --git a/cmake/swig.cmake b/cmake/swig.cmake deleted file mode 100644 index 97e87aa947..0000000000 --- a/cmake/swig.cmake +++ /dev/null @@ -1,15 +0,0 @@ -function(generate_python_api target_name) - add_custom_command(OUTPUT ${PROJ_ROOT}/paddle/py_paddle/swig_paddle.py - ${PROJ_ROOT}/paddle/Paddle_wrap.cxx - ${PROJ_ROOT}/paddle/Paddle_wrap.h - COMMAND swig -python -c++ -outcurrentdir -I../ api/Paddle.swig - && mv ${PROJ_ROOT}/paddle/swig_paddle.py ${PROJ_ROOT}/paddle/py_paddle/swig_paddle.py - 
DEPENDS ${PROJ_ROOT}/paddle/api/Paddle.swig - ${PROJ_ROOT}/paddle/api/PaddleAPI.h - WORKING_DIRECTORY ${PROJ_ROOT}/paddle - COMMENT "Generate Python API from swig") - add_custom_target(${target_name} ALL DEPENDS - ${PROJ_ROOT}/paddle/Paddle_wrap.cxx - ${PROJ_ROOT}/paddle/Paddle_wrap.h - ${PROJ_ROOT}/paddle/py_paddle/swig_paddle.py) -endfunction(generate_python_api) diff --git a/cmake/util.cmake b/cmake/util.cmake index 8a71b23c62..1b1e630661 100644 --- a/cmake/util.cmake +++ b/cmake/util.cmake @@ -81,18 +81,6 @@ function(link_paddle_exe TARGET_NAME) set(METRIC_LIBS "") endif() - if(PADDLE_WITH_INTERNAL) - set(INTERAL_LIBS paddle_internal_gserver paddle_internal_parameter) - target_circle_link_libraries(${TARGET_NAME} - ARCHIVE_START - paddle_internal_gserver - paddle_internal_owlqn - ARCHIVE_END - paddle_internal_parameter) - else() - set(INTERAL_LIBS "") - endif() - target_circle_link_libraries(${TARGET_NAME} ARCHIVE_START paddle_gserver @@ -109,20 +97,11 @@ function(link_paddle_exe TARGET_NAME) paddle_cuda paddle_test_main ${METRIC_LIBS} - ${PROTOBUF_LIBRARY} - ${LIBGLOG_LIBRARY} - ${GFLAGS_LIBRARIES} + ${EXTERNAL_LIBS} ${CMAKE_THREAD_LIBS_INIT} - ${CBLAS_LIBS} - ${ZLIB_LIBRARIES} - ${INTERAL_LIBS} - ${CMAKE_DL_LIBS}) - - if(WITH_RDMA) - target_link_libraries(${TARGET_NAME} - ${RDMA_LD_FLAGS} - ${RDMA_LIBS}) - endif() + ${CMAKE_DL_LIBS} + ${RDMA_LD_FLAGS} + ${RDMA_LIBS}) if(WITH_PYTHON) target_link_libraries(${TARGET_NAME} @@ -142,11 +121,6 @@ function(link_paddle_exe TARGET_NAME) target_link_libraries(${TARGET_NAME} rt) endif() endif() - - if(NOT WITH_DSO) - target_link_libraries(${TARGET_NAME} - ${WARPCTC_LIBRARY}) - endif() endfunction() # link_paddle_test diff --git a/cmake/version.cmake b/cmake/version.cmake index a0518e07e8..ac1583a24c 100644 --- a/cmake/version.cmake +++ b/cmake/version.cmake @@ -21,4 +21,5 @@ while ("${PADDLE_VERSION}" STREQUAL "") endif() endwhile() +add_definitions(-DPADDLE_VERSION=${PADDLE_VERSION}) message(STATUS "Paddle version is ${PADDLE_VERSION}") From 46cadaeaa98668ba868f7f9eef406a59b0176f46 Mon Sep 17 00:00:00 2001 From: liaogang Date: Tue, 27 Dec 2016 23:23:25 +0800 Subject: [PATCH 07/51] Update external dependencies cmake --- cmake/external/numpy.cmake | 5 +++-- cmake/external/protobuf.cmake | 2 +- cmake/external/python.cmake | 8 ++++---- cmake/external/warpctc.cmake | 7 ++++--- 4 files changed, 12 insertions(+), 10 deletions(-) diff --git a/cmake/external/numpy.cmake b/cmake/external/numpy.cmake index 607ff31789..9d686ecaac 100644 --- a/cmake/external/numpy.cmake +++ b/cmake/external/numpy.cmake @@ -70,6 +70,7 @@ IF(NOT ${NUMPY_FOUND}) FIND_PATH(PYTHON_NUMPY_INCLUDE_DIR numpy/arrayobject.h HINTS "${NUMPY_PATH}" "${PYTHON_INCLUDE_PATH}") - INCLUDE_DIRECTORIES(${PYTHON_NUMPY_INCLUDE_DIR}) - ENDIF() + +INCLUDE_DIRECTORIES(${PYTHON_NUMPY_INCLUDE_DIR}) + diff --git a/cmake/external/protobuf.cmake b/cmake/external/protobuf.cmake index 8acc6325b9..0138f082d9 100644 --- a/cmake/external/protobuf.cmake +++ b/cmake/external/protobuf.cmake @@ -22,7 +22,7 @@ ExternalProject_Add( PREFIX ${PROTOBUF_SOURCES_DIR} DEPENDS zlib GIT_REPOSITORY "https://github.com/google/protobuf.git" - GIT_TAG "v3.0.0" +# GIT_TAG "v3.1.0" CONFIGURE_COMMAND ${CMAKE_COMMAND} ${PROTOBUF_SOURCES_DIR}/src/protobuf/cmake -Dprotobuf_BUILD_TESTS=OFF diff --git a/cmake/external/python.cmake b/cmake/external/python.cmake index 2354f555db..7b66cb44e4 100644 --- a/cmake/external/python.cmake +++ b/cmake/external/python.cmake @@ -19,8 +19,8 @@ IF((NOT ${PYTHONINTERP_FOUND}) OR (NOT 
diff --git a/cmake/external/python.cmake b/cmake/external/python.cmake index 2354f555db..7b66cb44e4 100644 --- a/cmake/external/python.cmake +++ b/cmake/external/python.cmake @@ -19,8 +19,8 @@ IF((NOT ${PYTHONINTERP_FOUND}) OR (NOT ${PYTHONLIBS_FOUND})) INCLUDE(ExternalProject) - SET(PYTHON_SOURCES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/python) - SET(PYTHON_INSTALL_DIR ${PROJECT_BINARY_DIR}/python) + SET(PYTHON_SOURCES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/Python) + SET(PYTHON_INSTALL_DIR ${PROJECT_BINARY_DIR}/Python) IF(MSVC) LIST(APPEND EXTERNAL_PROJECT_OPTIONAL_ARGS @@ -87,6 +87,6 @@ IF((NOT ${PYTHONINTERP_FOUND}) OR (NOT ${PYTHONLIBS_FOUND})) MESSAGE(FATAL_ERROR "Unknown system !") ENDIF() -INCLUDE_DIRECTORIES(${PYTHON_INCLUDE_DIR}) - ENDIF() + +INCLUDE_DIRECTORIES(${PYTHON_INCLUDE_DIR}) diff --git a/cmake/external/warpctc.cmake b/cmake/external/warpctc.cmake index 4fdd47acdb..d59e5e9c3a 100644 --- a/cmake/external/warpctc.cmake +++ b/cmake/external/warpctc.cmake @@ -17,7 +17,7 @@ INCLUDE(ExternalProject) SET(WARPCTC_SOURCES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/warpctc) SET(WARPCTC_INSTALL_DIR ${PROJECT_BINARY_DIR}/warpctc) -IF(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") +IF(CMAKE_CXX_COMPILER_ID STREQUAL "Clang" OR CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang" ) SET(USE_OMP ON) ELSE() SET(USE_OMP OFF) @@ -28,8 +28,9 @@ ExternalProject_Add( GIT_REPOSITORY "https://github.com/gangliao/warp-ctc.git" PREFIX ${WARPCTC_SOURCES_DIR} CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${WARPCTC_INSTALL_DIR} - CMAKE_ARGS -DWITH_GPU=${CUDA_FOUND} + CMAKE_ARGS -DWITH_GPU=${WITH_GPU} CMAKE_ARGS -DWITH_OMP=${USE_OMP} + UPDATE_COMMAND "" ) SET(WARPCTC_INCLUDE_DIR "${WARP_INSTALL_DIR}/include" CACHE PATH "Warp-ctc Directory" FORCE) INCLUDE_DIRECTORIES(${WARPCTC_INCLUDE_DIR}) IF(WIN32) SET(WARPCTC_LIBRARIES "${WARPCTC_INSTALL_DIR}/lib/warpctc.dll" CACHE FILEPATH "Warp-ctc Library" FORCE) ELSE(WIN32) SET(WARPCTC_LIBRARIES - "${WARPCTC_INSTALL_DIR}/lib/libwarpctc.so" CACHE FILEPATH "Warp-ctc Library" FORCE) + "${WARPCTC_INSTALL_DIR}/lib/libwarpctc.dylib" CACHE FILEPATH "Warp-ctc Library" FORCE) ENDIF(WIN32) LIST(APPEND external_project_dependencies warpctc) From de3c15277f2b08e88c3b7f84156064fef0831b90 Mon Sep 17 00:00:00 2001 From: liaogang Date: Tue, 27 Dec 2016 23:25:32 +0800 Subject: [PATCH 08/51] Fix glog check type mismatch --- paddle/function/cross_map_normal_op.cpp | 16 ++++++++-------- paddle/gserver/layers/ConvProjection.cpp | 3 ++- paddle/gserver/tests/LayerGradUtil.cpp | 2 +- paddle/gserver/tests/test_BatchNorm.cpp | 4 ++-- paddle/gserver/tests/test_PyDataProvider2.cpp | 2 +- paddle/math/Matrix.cpp | 2 +- 6 files changed, 15 insertions(+), 14 deletions(-) diff --git a/paddle/function/cross_map_normal_op.cpp b/paddle/function/cross_map_normal_op.cpp index a9c7693830..74094bc4fc 100644 --- a/paddle/function/cross_map_normal_op.cpp +++ b/paddle/function/cross_map_normal_op.cpp @@ -128,11 +128,11 @@ public: void calc(const Arguments& inputs, const Arguments& outputs, const Arguments& inouts) override { - CHECK_EQ(1, inputs.size()); - CHECK_EQ(2, outputs.size()); - CHECK_EQ(0, inouts.size()); + CHECK_EQ(1, static_cast<int>(inputs.size())); + CHECK_EQ(2, static_cast<int>(outputs.size())); + CHECK_EQ(0, static_cast<int>(inouts.size())); - CHECK_EQ(inputs[0].dims_.size(), 4); + CHECK_EQ(static_cast<int>(inputs[0].dims_.size()), 4); for (size_t i = 0; i < inputs[0].dims_.size(); i++) { CHECK_EQ(inputs[0].dims_[i], outputs[0].dims_[i]); CHECK_EQ(inputs[0].dims_[i], outputs[1].dims_[i]); @@ -180,11 +180,11 @@ public: void calc(const Arguments& inputs, const Arguments& outputs, const Arguments& inouts) override { - CHECK_EQ(4, inputs.size()); - CHECK_EQ(1, outputs.size()); - CHECK_EQ(0, inouts.size()); + CHECK_EQ(4, static_cast<int>(inputs.size())); + CHECK_EQ(1, static_cast<int>(outputs.size())); + CHECK_EQ(0,
static_cast<int>(inouts.size())); - CHECK_EQ(inputs[0].dims_.size(), 4); + CHECK_EQ(static_cast<int>(inputs[0].dims_.size()), 4); for (size_t i = 0; i < inputs[0].dims_.size(); i++) { CHECK_EQ(inputs[0].dims_[i], inputs[1].dims_[i]); CHECK_EQ(inputs[0].dims_[i], inputs[2].dims_[i]); diff --git a/paddle/gserver/layers/ConvProjection.cpp b/paddle/gserver/layers/ConvProjection.cpp index e1c4b91ace..0281170bc5 100644 --- a/paddle/gserver/layers/ConvProjection.cpp +++ b/paddle/gserver/layers/ConvProjection.cpp @@ -130,7 +130,8 @@ void ConvProjection::reshapeTensorDesc(int batchSize) { void ConvProjection::reshape(int batchSize) { size_t width = calOutputSize(); CHECK_EQ(width, out_->value->getWidth()); - CHECK_EQ(channels_ * imageH_ * imageW_, in_->value->getWidth()) + CHECK_EQ(static_cast<size_t>(channels_ * imageH_ * imageW_), + in_->value->getWidth()) << "Wrong input size for convolution" << " channels=" << channels_ << " imageH=" << imageH_ << " imageW=" << imageW_ << " inputSize=" << in_->value->getWidth(); diff --git a/paddle/gserver/tests/LayerGradUtil.cpp b/paddle/gserver/tests/LayerGradUtil.cpp index 57c176810f..ae016e74ea 100644 --- a/paddle/gserver/tests/LayerGradUtil.cpp +++ b/paddle/gserver/tests/LayerGradUtil.cpp @@ -310,7 +310,7 @@ void initDataLayer(TestConfig testConf, testConf.inputDefs[i].labelSeqStartPositions; if (labelSeqStartPositions.size() != 0) { CHECK(!sequenceStartPositions); - CHECK_GE(labelSeqStartPositions.size(), 2); + CHECK_GE(static_cast<int>(labelSeqStartPositions.size()), 2); sequenceStartPositions = ICpuGpuVector::create(labelSeqStartPositions.size(), useGpu); diff --git a/paddle/gserver/tests/test_BatchNorm.cpp b/paddle/gserver/tests/test_BatchNorm.cpp index 7f5fcb670b..e000c69944 100644 --- a/paddle/gserver/tests/test_BatchNorm.cpp +++ b/paddle/gserver/tests/test_BatchNorm.cpp @@ -114,8 +114,8 @@ TEST(Layer, batchNorm) { bnLayer->forward(PASS_GC); convLayer->forward(PASS_GC); - CHECK_EQ(convLayer->getOutputValue()->getHeight(), 100); - CHECK_EQ(convLayer->getOutputValue()->getWidth(), 576); + CHECK_EQ(static_cast<int>(convLayer->getOutputValue()->getHeight()), 100); + CHECK_EQ(static_cast<int>(convLayer->getOutputValue()->getWidth()), 576); } int main(int argc, char** argv) { diff --git a/paddle/gserver/tests/test_PyDataProvider2.cpp b/paddle/gserver/tests/test_PyDataProvider2.cpp index 5f8bc5ecd0..7e193eb31a 100644 --- a/paddle/gserver/tests/test_PyDataProvider2.cpp +++ b/paddle/gserver/tests/test_PyDataProvider2.cpp @@ -293,7 +293,7 @@ TEST(PyDataProvider2, can_over_batch_size) { while (true) { int64_t realBatchSize = provider->getNextBatchInternal(batchSize, &batch); if (realBatchSize) { - CHECK_LE(realBatchSize, batchSize); + CHECK_LE(static_cast<size_t>(realBatchSize), batchSize); } else { break; } diff --git a/paddle/math/Matrix.cpp b/paddle/math/Matrix.cpp index 50d2e3eb67..b281d5eb02 100644 --- a/paddle/math/Matrix.cpp +++ b/paddle/math/Matrix.cpp @@ -2268,7 +2268,7 @@ void CpuMatrix::contextProjectionBackward(Matrix* inputGrad, int64_t inputDim = inputGrad ? inputGrad->getWidth() : weightGrad ?
weightGrad->getWidth() : 0; - CHECK_EQ(getWidth(), inputDim * contextLength); + CHECK_EQ(getWidth(), static_cast<size_t>(inputDim * contextLength)); const int* starts = sequence.getData(); size_t numSequences = sequence.getSize() - 1; From 0b956711d994d101f7e68aa59b586d2f21645195 Mon Sep 17 00:00:00 2001 From: liaogang Date: Tue, 27 Dec 2016 23:25:50 +0800 Subject: [PATCH 09/51] Add external_project_dependencies for targets --- cmake/util.cmake | 1 + paddle/cuda/CMakeLists.txt | 2 ++ paddle/cuda/include/hl_warpctc_wrap.h | 2 +- paddle/function/CMakeLists.txt | 2 ++ proto/CMakeLists.txt | 8 ++++---- 5 files changed, 10 insertions(+), 5 deletions(-) diff --git a/cmake/util.cmake b/cmake/util.cmake index 1b1e630661..b8d20266f4 100644 --- a/cmake/util.cmake +++ b/cmake/util.cmake @@ -121,6 +121,7 @@ function(link_paddle_exe TARGET_NAME) target_link_libraries(${TARGET_NAME} rt) endif() endif() + add_dependencies(${TARGET_NAME} ${external_project_dependencies}) endfunction() # link_paddle_test diff --git a/paddle/cuda/CMakeLists.txt b/paddle/cuda/CMakeLists.txt index aa1ff4a771..57fb89608f 100755 --- a/paddle/cuda/CMakeLists.txt +++ b/paddle/cuda/CMakeLists.txt @@ -88,6 +88,8 @@ else() ${CUDA_CXX_SOURCES}) endif() +add_dependencies(paddle_cuda ${external_project_dependencies}) + add_style_check_target(paddle_cuda ${CUDA_SOURCES} ${CUDA_HEADERS} diff --git a/paddle/cuda/include/hl_warpctc_wrap.h b/paddle/cuda/include/hl_warpctc_wrap.h index 79bf6c3db7..7885ae5701 100644 --- a/paddle/cuda/include/hl_warpctc_wrap.h +++ b/paddle/cuda/include/hl_warpctc_wrap.h @@ -15,8 +15,8 @@ limitations under the License. */ #ifndef HL_WARPCTC_WRAP_H_ #define HL_WARPCTC_WRAP_H_ +#include "ctc.h" #include "hl_base.h" -#include "warp-ctc/include/ctc.h" typedef ctcStatus_t hl_warpctc_status_t; typedef ctcOptions hl_warpctc_options_t; diff --git a/paddle/function/CMakeLists.txt b/paddle/function/CMakeLists.txt index 0697842bbe..1de887b7dd 100644 --- a/paddle/function/CMakeLists.txt +++ b/paddle/function/CMakeLists.txt @@ -10,8 +10,10 @@ if(WITH_GPU) endif() add_library(paddle_function STATIC ${cpp_files} ${cu_objs}) +add_dependencies(paddle_function ${external_project_dependencies}) add_library(paddle_test_main STATIC TestMain.cpp) +add_dependencies(paddle_test_main ${external_project_dependencies}) if(WITH_GPU) # TODO: diff --git a/proto/CMakeLists.txt b/proto/CMakeLists.txt index 2c40070eca..c4e170b10f 100644 --- a/proto/CMakeLists.txt +++ b/proto/CMakeLists.txt @@ -20,8 +20,8 @@ foreach(filename ${proto_filenames}) add_custom_command(OUTPUT ${CUR_PROTO_GEN} COMMAND ${PROTOBUF_PROTOC_EXECUTABLE} --cpp_out ${CMAKE_CURRENT_BINARY_DIR} - --proto_path ${PROJ_ROOT}/proto ${PROJ_ROOT}/proto/${filename} - DEPENDS ${filename}) + --proto_path ${PROJ_ROOT}/proto ${PROJ_ROOT}/proto/${filename} + DEPENDS ${filename} ${external_project_dependencies}) set(CUR_PROTO_GEN_PY ${PROJ_ROOT}/paddle/python/paddle/proto/${base_filename}_pb2.py) @@ -30,8 +30,8 @@ foreach(filename ${proto_filenames}) ${PROTO_GEN_PY}) add_custom_command(OUTPUT ${CUR_PROTO_GEN_PY} COMMAND ${PROTOBUF_PROTOC_EXECUTABLE} --python_out ${PROJ_ROOT}/python/paddle/proto - --proto_path ${PROJ_ROOT}/proto ${PROJ_ROOT}/proto/${filename} - DEPENDS ${filename}) + --proto_path ${PROJ_ROOT}/proto ${PROJ_ROOT}/proto/${filename} + DEPENDS ${filename} ${external_project_dependencies}) endforeach() include_directories(${CMAKE_CURRENT_BINARY_DIR}/proto)
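Patch 09 above wires ${external_project_dependencies} into every target so that nothing compiles before the ExternalProject builds have installed their headers and archives. A minimal sketch of the pattern, with hypothetical names my_dep and my_lib:

    # Register the external build and remember it in the shared list, the same
    # way the numpy/swig/warpctc modules append themselves to it.
    ExternalProject_Add(my_dep
        GIT_REPOSITORY "https://github.com/example/my_dep.git"  # placeholder
        PREFIX ${CMAKE_CURRENT_BINARY_DIR}/my_dep)
    list(APPEND external_project_dependencies my_dep)

    # A target that includes my_dep's headers must build after it; plain
    # include_directories() alone creates no such ordering edge.
    add_library(my_lib STATIC my_lib.cpp)
    add_dependencies(my_lib ${external_project_dependencies})

Without the explicit edge, a parallel make can race the external download and fail with missing headers, which is exactly what the patch title refers to.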
From 338dc3ee923e0757420b6466f91dc60d19850b2a Mon Sep 17 00:00:00 2001 From: liaogang Date: Wed, 28 Dec 2016 00:20:15 +0800 Subject: [PATCH 10/51] Fix external dependencies build order --- cmake/external/numpy.cmake | 2 ++ cmake/external/protobuf.cmake | 12 +++++++++--- cmake/external/python.cmake | 3 +++ cmake/external/swig.cmake | 7 ++++++- cmake/external/warpctc.cmake | 8 +++++++- 5 files changed, 27 insertions(+), 5 deletions(-) diff --git a/cmake/external/numpy.cmake b/cmake/external/numpy.cmake index 9d686ecaac..d01cff9722 100644 --- a/cmake/external/numpy.cmake +++ b/cmake/external/numpy.cmake @@ -59,6 +59,8 @@ IF(NOT ${NUMPY_FOUND}) DEPENDS python setuptools cython ) + LIST(APPEND external_project_dependencies numpy) + # find numpy include directory FILE(WRITE ${PROJECT_BINARY_DIR}/FindNumpyPath.py "try: import numpy; print(numpy.get_include())\nexcept:pass\n") diff --git a/cmake/external/protobuf.cmake b/cmake/external/protobuf.cmake index 0138f082d9..efd07eb807 100644 --- a/cmake/external/protobuf.cmake +++ b/cmake/external/protobuf.cmake @@ -42,10 +42,16 @@ IF(WIN32) "${PROTOBUF_INSTALL_DIR}/lib/libprotoc.lib" CACHE FILEPATH "protobuf libraries." FORCE) SET(PROTOBUF_PROTOC_EXECUTABLE "${PROTOBUF_INSTALL_DIR}/bin/protoc.exe" CACHE FILEPATH "protobuf executable." FORCE) ELSE(WIN32) + FIND_PATH(PROTOBUF_LIBS_DIR libprotoc.a + PATHS + ${PROTOBUF_INSTALL_DIR}/lib64 + ${PROTOBUF_INSTALL_DIR}/lib + NO_DEFAULT_PATH + ) SET(PROTOBUF_LIBRARIES - "${PROTOBUF_INSTALL_DIR}/lib/libprotobuf-lite.a" - "${PROTOBUF_INSTALL_DIR}/lib/libprotobuf.a" - "${PROTOBUF_INSTALL_DIR}/lib/libprotoc.a" CACHE FILEPATH "protobuf libraries." FORCE) + "${PROTOBUF_LIBS_DIR}/libprotobuf-lite.a" + "${PROTOBUF_LIBS_DIR}/libprotobuf.a" + "${PROTOBUF_LIBS_DIR}/libprotoc.a" CACHE FILEPATH "protobuf libraries." FORCE) SET(PROTOBUF_PROTOC_EXECUTABLE "${PROTOBUF_INSTALL_DIR}/bin/protoc" CACHE FILEPATH "protobuf executable." FORCE) ENDIF(WIN32)
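The FIND_PATH hunk above exists because some distributions install static archives under lib64 rather than lib. A sketch of the same probe with hypothetical DEP_* names; NO_DEFAULT_PATH is the important part, since it stops a system-wide copy from shadowing the freshly built one:

    find_path(DEP_LIBS_DIR libdep.a          # probe for one known archive
        PATHS
        ${DEP_INSTALL_DIR}/lib64             # e.g. CentOS/Fedora layout
        ${DEP_INSTALL_DIR}/lib               # e.g. Debian/macOS layout
        NO_DEFAULT_PATH)                     # never fall back to system dirs
    set(DEP_LIBRARIES "${DEP_LIBS_DIR}/libdep.a"
        CACHE FILEPATH "dep library." FORCE)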
diff --git a/cmake/external/python.cmake b/cmake/external/python.cmake index 7b66cb44e4..d6cdf535fe 100644 --- a/cmake/external/python.cmake +++ b/cmake/external/python.cmake @@ -87,6 +87,9 @@ IF((NOT ${PYTHONINTERP_FOUND}) OR (NOT ${PYTHONLIBS_FOUND})) MESSAGE(FATAL_ERROR "Unknown system !") ENDIF() +LIST(APPEND external_project_dependencies python) + ENDIF() INCLUDE_DIRECTORIES(${PYTHON_INCLUDE_DIR}) + diff --git a/cmake/external/swig.cmake b/cmake/external/swig.cmake index 1ec61660bc..2da826d375 100644 --- a/cmake/external/swig.cmake +++ b/cmake/external/swig.cmake @@ -70,6 +70,9 @@ IF(NOT ${SWIG_FOUND}) set(SWIG_DIR ${SWIG_INSTALL_DIR}/share/swig/${SWIG_TARGET_VERSION} CACHE FILEPATH "SWIG Directory" FORCE) set(SWIG_EXECUTABLE ${SWIG_INSTALL_DIR}/bin/swig CACHE FILEPATH "SWIG Executable" FORCE) ENDIF(WIN32) + + LIST(APPEND external_project_dependencies swig) + ENDIF() FUNCTION(generate_python_api target_name) @@ -80,10 +83,12 @@ FUNCTION(generate_python_api target_name) && mv ${PROJ_ROOT}/paddle/swig_paddle.py ${PROJ_ROOT}/paddle/py_paddle/swig_paddle.py DEPENDS ${PROJ_ROOT}/paddle/api/Paddle.swig ${PROJ_ROOT}/paddle/api/PaddleAPI.h + ${external_project_dependencies} WORKING_DIRECTORY ${PROJ_ROOT}/paddle COMMENT "Generate Python API from swig") ADD_CUSTOM_TARGET(${target_name} ALL DEPENDS ${PROJ_ROOT}/paddle/Paddle_wrap.cxx ${PROJ_ROOT}/paddle/Paddle_wrap.h - ${PROJ_ROOT}/paddle/py_paddle/swig_paddle.py) + ${PROJ_ROOT}/paddle/py_paddle/swig_paddle.py + ${external_project_dependencies}) ENDFUNCTION(generate_python_api) diff --git a/cmake/external/warpctc.cmake b/cmake/external/warpctc.cmake index d59e5e9c3a..2e678aadcf 100644 --- a/cmake/external/warpctc.cmake +++ b/cmake/external/warpctc.cmake @@ -40,8 +40,14 @@ IF(WIN32) SET(WARPCTC_LIBRARIES "${WARPCTC_INSTALL_DIR}/lib/warpctc.dll" CACHE FILEPATH "Warp-ctc Library" FORCE) ELSE(WIN32) + IF(APPLE) + SET(_warpctc_SHARED_SUFFIX dylib) + ELSE(APPLE) + SET(_warpctc_SHARED_SUFFIX so) + ENDIF(APPLE) + SET(WARPCTC_LIBRARIES - "${WARPCTC_INSTALL_DIR}/lib/libwarpctc.dylib" CACHE FILEPATH "Warp-ctc Library" FORCE) + "${WARPCTC_INSTALL_DIR}/lib/libwarpctc.${_warpctc_SHARED_SUFFIX}" CACHE FILEPATH "Warp-ctc Library" FORCE) ENDIF(WIN32) LIST(APPEND external_project_dependencies warpctc) From 2ffb6dbb171b426ef85db88df4f5ac2493abb8d8 Mon Sep 17 00:00:00 2001 From: liaogang Date: Wed, 28 Dec 2016 09:58:54 +0800 Subject: [PATCH 11/51] Fix warpctc header directory --- cmake/external/warpctc.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/external/warpctc.cmake b/cmake/external/warpctc.cmake index 2e678aadcf..b3dea19ceb 100644 --- a/cmake/external/warpctc.cmake +++ b/cmake/external/warpctc.cmake @@ -33,7 +33,7 @@ ExternalProject_Add( UPDATE_COMMAND "" ) -SET(WARPCTC_INCLUDE_DIR "${WARP_INSTALL_DIR}/include" CACHE PATH "Warp-ctc Directory" FORCE) +SET(WARPCTC_INCLUDE_DIR "${WARPCTC_INSTALL_DIR}/include" CACHE PATH "Warp-ctc Directory" FORCE) INCLUDE_DIRECTORIES(${WARPCTC_INCLUDE_DIR}) IF(WIN32)
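The one-character fix above works because an undefined CMake variable silently expands to an empty string: the misspelled ${WARP_INSTALL_DIR}/include simply became /include. A defensive sketch (the guard itself is not part of the patch):

    # Turn a misspelled prefix variable into a configure-time error instead of
    # a silently wrong include path.
    if(NOT DEFINED WARPCTC_INSTALL_DIR)
      message(FATAL_ERROR "WARPCTC_INSTALL_DIR is not set")
    endif()
    set(WARPCTC_INCLUDE_DIR "${WARPCTC_INSTALL_DIR}/include")

Running cmake with --warn-uninitialized surfaces the same class of typo without any code changes.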
From aee0857838ee41f46237f0b6561242d523116a0c Mon Sep 17 00:00:00 2001 From: liaogang Date: Wed, 28 Dec 2016 13:49:01 +0800 Subject: [PATCH 12/51] Clean Travis CI and fix bug --- .gitmodules | 3 --- .pre-commit-config.yaml | 4 ++-- .travis.yml | 6 ----- CMakeLists.txt | 1 - cmake/external/gflags.cmake | 1 + cmake/external/glog.cmake | 1 + cmake/external/gtest.cmake | 1 + cmake/external/protobuf.cmake | 23 +++++++++++-------- cmake/external/warpctc.cmake | 1 + paddle/api/paddle_api_config.py.in | 10 ++++---- paddle/api/paddle_ld_flags.py | 6 ++--- paddle/cuda/CMakeLists.txt | 2 +- paddle/gserver/tests/CMakeLists.txt | 2 +- paddle/scripts/travis/before_install.linux.sh | 18 --------------- paddle/scripts/travis/before_install.osx.sh | 8 +------ paddle/scripts/travis/build_submodules.sh | 20 ---------------- 16 files changed, 30 insertions(+), 77 deletions(-) delete mode 100644 .gitmodules delete mode 100755 paddle/scripts/travis/before_install.linux.sh delete mode 100755 paddle/scripts/travis/build_submodules.sh diff --git a/.gitmodules b/.gitmodules deleted file mode 100644 index f635e65784..0000000000 --- a/.gitmodules +++ /dev/null @@ -1,3 +0,0 @@ -[submodule "warp-ctc"] - path = warp-ctc - url = https://github.com/baidu-research/warp-ctc.git diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b9902a863d..a6e45028eb 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -2,7 +2,7 @@ sha: c25201a00e6b0514370501050cf2a8538ac12270 hooks: - id: remove-crlf - files: (?!.*warp-ctc)^.*$ + files: (?!.*third_party)^.*$ - repo: https://github.com/reyoung/mirrors-yapf.git sha: v0.13.2 hooks: @@ -15,7 +15,7 @@ - id: check-merge-conflict - id: check-symlinks - id: detect-private-key - files: (?!.*warp-ctc)^.*$ + files: (?!.*third_party)^.*$ - id: end-of-file-fixer - repo: https://github.com/PaddlePaddle/clang-format-pre-commit-hook.git sha: 28c0ea8a67a3e2dbbf4822ef44e85b63a0080a29 diff --git a/.travis.yml b/.travis.yml index 047ca6ffe7..b49d4638d7 100644 --- a/.travis.yml +++ b/.travis.yml @@ -24,16 +24,11 @@ addons: - wget - git - build-essential - - libatlas-base-dev - python - python-pip - python2.7-dev - - m4 - python-numpy - python-wheel - - libgoogle-glog-dev - - libgflags-dev - - libgtest-dev - curl - lcov - graphviz @@ -53,7 +48,6 @@ before_install: fi fi fi - - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then sudo paddle/scripts/travis/before_install.linux.sh; fi - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then paddle/scripts/travis/before_install.osx.sh; fi - if [[ "$JOB" == "PRE_COMMIT" ]]; then sudo ln -s /usr/bin/clang-format-3.8 /usr/bin/clang-format; fi - pip install wheel protobuf sphinx recommonmark virtualenv numpy sphinx_rtd_theme pre-commit requests==2.9.2 LinkChecker diff --git a/CMakeLists.txt b/CMakeLists.txt index 7db4c9f1d1..784876f089 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -65,7 +65,6 @@ set(EXTERNAL_LIBS ${GLOG_LIBRARIES} ${CBLAS_LIBRARIES} ${PROTOBUF_LIBRARIES} - ${WARPCTC_LIBRARIES} ${ZLIB_LIBRARIES} ) diff --git a/cmake/external/gflags.cmake b/cmake/external/gflags.cmake index 128d50cec8..55f9a4c3e6 100644 --- a/cmake/external/gflags.cmake +++ b/cmake/external/gflags.cmake @@ -22,6 +22,7 @@ ExternalProject_Add( GIT_REPOSITORY "https://github.com/gflags/gflags.git" PREFIX ${GFLAGS_SOURCES_DIR} CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${GFLAGS_INSTALL_DIR} + CMAKE_ARGS -DCMAKE_POSITION_INDEPENDENT_CODE=ON CMAKE_ARGS -DBUILD_TESTING=OFF LOG_DOWNLOAD =ON UPDATE_COMMAND "" diff --git a/cmake/external/glog.cmake b/cmake/external/glog.cmake index 8a4b9d5996..473071a72a 100644 --- a/cmake/external/glog.cmake +++ b/cmake/external/glog.cmake @@ -22,6 +22,7 @@ ExternalProject_Add( GIT_REPOSITORY "https://github.com/google/glog.git" PREFIX ${GLOG_SOURCES_DIR} CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${GLOG_INSTALL_DIR} + CMAKE_ARGS -DCMAKE_POSITION_INDEPENDENT_CODE=ON CMAKE_ARGS -DWITH_GFLAGS=OFF CMAKE_ARGS -DBUILD_TESTING=OFF LOG_DOWNLOAD
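The +CMAKE_POSITION_INDEPENDENT_CODE=ON hunks above (and the matching gtest one below) make the external static archives PIC, so they can later be linked into shared objects such as the Python extension module. The per-target equivalent, sketched with a hypothetical target name:

    # Same effect as the global flag, scoped to one library: compile with -fPIC
    # so the .a archive can be absorbed into a shared library later.
    add_library(my_static_lib STATIC foo.cpp)
    set_target_properties(my_static_lib PROPERTIES
                          POSITION_INDEPENDENT_CODE ON)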
diff --git a/cmake/external/gtest.cmake b/cmake/external/gtest.cmake index 533104422a..a6ed9e9b9f 100644 --- a/cmake/external/gtest.cmake +++ b/cmake/external/gtest.cmake @@ -23,6 +23,7 @@ ExternalProject_Add( GIT_TAG "release-1.8.0" PREFIX ${GTEST_SOURCES_DIR} CMAKE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${GTEST_INSTALL_DIR} + CMAKE_ARGS -DCMAKE_POSITION_INDEPENDENT_CODE=ON CMAKE_ARGS -DBUILD_GMOCK=ON CMAKE_ARGS -Dgtest_disable_pthreads=ON CMAKE_ARGS -Dgtest_force_shared_crt=ON diff --git a/cmake/external/protobuf.cmake b/cmake/external/protobuf.cmake index efd07eb807..f42e42ef68 100644 --- a/cmake/external/protobuf.cmake +++ b/cmake/external/protobuf.cmake @@ -36,22 +36,25 @@ SET(PROTOBUF_INCLUDE_DIR "${PROTOBUF_INSTALL_DIR}/include" CACHE PATH "protobuf INCLUDE_DIRECTORIES(${PROTOBUF_INCLUDE_DIR}) IF(WIN32) - SET(PROTOBUF_LIBRARIES - "${PROTOBUF_INSTALL_DIR}/lib/libprotobuf-lite.lib" - "${PROTOBUF_INSTALL_DIR}/lib/libprotobuf.lib" - "${PROTOBUF_INSTALL_DIR}/lib/libprotoc.lib" CACHE FILEPATH "protobuf libraries." FORCE) + SET(PROTOBUF_LITE_LIBRARY + "${PROTOBUF_INSTALL_DIR}/lib/libprotobuf-lite.lib" CACHE FILEPATH "protobuf lite library." FORCE) + SET(PROTOBUF_LIBRARY + "${PROTOBUF_INSTALL_DIR}/lib/libprotobuf.lib" CACHE FILEPATH "protobuf library." FORCE) + SET(PROTOBUF_PROTOC_LIBRARY + "${PROTOBUF_INSTALL_DIR}/lib/libprotoc.lib" CACHE FILEPATH "protoc library." FORCE) SET(PROTOBUF_PROTOC_EXECUTABLE "${PROTOBUF_INSTALL_DIR}/bin/protoc.exe" CACHE FILEPATH "protobuf executable." FORCE) ELSE(WIN32) FIND_PATH(PROTOBUF_LIBS_DIR libprotoc.a - PATHS - ${PROTOBUF_INSTALL_DIR}/lib64 ${PROTOBUF_INSTALL_DIR}/lib + ${PROTOBUF_INSTALL_DIR}/lib64 NO_DEFAULT_PATH ) - SET(PROTOBUF_LIBRARIES - "${PROTOBUF_LIBS_DIR}/libprotobuf-lite.a" - "${PROTOBUF_LIBS_DIR}/libprotobuf.a" - "${PROTOBUF_LIBS_DIR}/libprotoc.a" CACHE FILEPATH "protobuf libraries." FORCE) + SET(PROTOBUF_LITE_LIBRARY + "${PROTOBUF_LIBS_DIR}/libprotobuf-lite.a" CACHE FILEPATH "protobuf lite library." FORCE) + SET(PROTOBUF_LIBRARY + "${PROTOBUF_LIBS_DIR}/libprotobuf.a" CACHE FILEPATH "protobuf library." FORCE) + SET(PROTOBUF_PROTOC_LIBRARY + "${PROTOBUF_LIBS_DIR}/libprotoc.a" CACHE FILEPATH "protoc library." FORCE) SET(PROTOBUF_PROTOC_EXECUTABLE "${PROTOBUF_INSTALL_DIR}/bin/protoc" CACHE FILEPATH "protobuf executable."
FORCE) ENDIF(WIN32) diff --git a/cmake/external/warpctc.cmake b/cmake/external/warpctc.cmake index b3dea19ceb..6a88c87df6 100644 --- a/cmake/external/warpctc.cmake +++ b/cmake/external/warpctc.cmake @@ -36,6 +36,7 @@ ExternalProject_Add( SET(WARPCTC_INCLUDE_DIR "${WARPCTC_INSTALL_DIR}/include" CACHE PATH "Warp-ctc Directory" FORCE) INCLUDE_DIRECTORIES(${WARPCTC_INCLUDE_DIR}) +SET(WARPCTC_LIB_DIR "${WARPCTC_INSTALL_DIR}/lib" CACHE PATH "Warp-ctc Library Directory" FORCE) IF(WIN32) SET(WARPCTC_LIBRARIES "${WARPCTC_INSTALL_DIR}/lib/warpctc.dll" CACHE FILEPATH "Warp-ctc Library" FORCE) diff --git a/paddle/api/paddle_api_config.py.in b/paddle/api/paddle_api_config.py.in index 23542b952b..e11ee92036 100644 --- a/paddle/api/paddle_api_config.py.in +++ b/paddle/api/paddle_api_config.py.in @@ -1,17 +1,17 @@ PADDLE_BUILD_DIR="@CMAKE_CURRENT_BINARY_DIR@/../" WITH_GPU="@WITH_GPU@" -PROTOBUF_LIB="@PROTOBUF_LIBRARY@" -ZLIB_LIB="@ZLIB_LIBRARIES@" +PROTOBUF_LIBRARY="@PROTOBUF_LIBRARY@" +ZLIB_LIBRARIES="@ZLIB_LIBRARIES@" CMAKE_THREAD_LIB="@CMAKE_THREAD_LIBS_INIT@" CMAKE_DL_LIBS="@CMAKE_DL_LIBS@" WITH_PYTHON="@WITH_PYTHON@" PYTHON_LIBRARIES="@PYTHON_LIBRARIES@" -LIBGLOG_LIBRARY="@LIBGLOG_LIBRARY@" +GLOG_LIBRARIES="@GLOG_LIBRARIES@" GFLAGS_LIBRARIES="@GFLAGS_LIBRARIES@" GFLAGS_LOCATION="@GFLAGS_LOCATION@" -CBLAS_LIBRARIES="@CBLAS_LIBS@" +CBLAS_LIBRARIES="@CBLAS_LIBRARIES@" -CUDA_LIBRARIES="@CUDA_LIBRARIES@" +CUDA_LIBRARIES="@CUDA_cudart_shared_LIBRARY@" WITH_COVERALLS="@ON_COVERALLS@" diff --git a/paddle/api/paddle_ld_flags.py b/paddle/api/paddle_ld_flags.py index b4d27b1cc7..ad5dce209b 100644 --- a/paddle/api/paddle_ld_flags.py +++ b/paddle/api/paddle_ld_flags.py @@ -40,14 +40,14 @@ try: self.paddle_build_dir = PADDLE_BUILD_DIR self.paddle_build_dir = os.path.abspath(self.paddle_build_dir) self.with_gpu = PaddleLDFlag.cmake_bool(WITH_GPU) - self.protolib = PROTOBUF_LIB - self.zlib = ZLIB_LIB + self.protolib = PROTOBUF_LIBRARY + self.zlib = ZLIB_LIBRARIES self.thread = CMAKE_THREAD_LIB self.dl_libs = CMAKE_DL_LIBS self.with_python = PaddleLDFlag.cmake_bool(WITH_PYTHON) self.python_libs = PYTHON_LIBRARIES - self.glog_libs = LIBGLOG_LIBRARY + self.glog_libs = GLOG_LIBRARIES self.with_coverage = PaddleLDFlag.cmake_bool(WITH_COVERALLS) self.gflags_libs = GFLAGS_LIBRARIES diff --git a/paddle/cuda/CMakeLists.txt b/paddle/cuda/CMakeLists.txt index 57fb89608f..0a05897854 100755 --- a/paddle/cuda/CMakeLists.txt +++ b/paddle/cuda/CMakeLists.txt @@ -88,7 +88,7 @@ else() ${CUDA_CXX_SOURCES}) endif() -add_dependencies(paddle_cuda ${external_project_dependencies}) +add_dependencies(paddle_cuda warpctc) add_style_check_target(paddle_cuda ${CUDA_SOURCES} diff --git a/paddle/gserver/tests/CMakeLists.txt b/paddle/gserver/tests/CMakeLists.txt index c26a2a7f06..4190892db1 100644 --- a/paddle/gserver/tests/CMakeLists.txt +++ b/paddle/gserver/tests/CMakeLists.txt @@ -92,7 +92,7 @@ if(NOT WITH_DOUBLE) TestUtil.cpp) add_test(NAME test_WarpCTCLayer - COMMAND ${CMAKE_CURRENT_BINARY_DIR}/test_WarpCTCLayer --warpctc_dir=${PROJ_ROOT}/warp-ctc/build + COMMAND ${CMAKE_CURRENT_BINARY_DIR}/test_WarpCTCLayer --warpctc_dir=${WARPCTC_LIB_DIR} WORKING_DIRECTORY ${PROJ_ROOT}/paddle) endif() diff --git a/paddle/scripts/travis/before_install.linux.sh b/paddle/scripts/travis/before_install.linux.sh deleted file mode 100755 index 9620bff6bc..0000000000 --- a/paddle/scripts/travis/before_install.linux.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -set -e -pip install protobuf -cd /tmp -wget https://github.com/google/protobuf/archive/v3.0.2.tar.gz -O 
protobuf.tar.gz -tar xf protobuf.tar.gz -cd protobuf* -./autogen.sh -./configure --prefix=/usr/ -make -j 2 install -cd .. -rm -rf protobuf* - -pushd /usr/src/gtest -cmake . -make -sudo cp *.a /usr/lib -popd diff --git a/paddle/scripts/travis/before_install.osx.sh b/paddle/scripts/travis/before_install.osx.sh index bd88ed3913..89742d67f5 100755 --- a/paddle/scripts/travis/before_install.osx.sh +++ b/paddle/scripts/travis/before_install.osx.sh @@ -3,10 +3,4 @@ brew update brew tap homebrew/science brew install python sudo pip install --upgrade protobuf -brew install cmake python glog gflags openblas wget md5sha1sum protobuf - -wget https://github.com/google/googletest/archive/release-1.8.0.tar.gz -O gtest.tar.gz -tar xf gtest.tar.gz -cd googletest-release-1.8.0/ -cmake . -make install +brew install cmake python wget md5sha1sum diff --git a/paddle/scripts/travis/build_submodules.sh b/paddle/scripts/travis/build_submodules.sh deleted file mode 100755 index d458bf92bf..0000000000 --- a/paddle/scripts/travis/build_submodules.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash -set -e -WORK_DIR=$PWD -PROJ_ROOT=$(git rev-parse --show-cdup) -SUBMODULES=$(grep path ${PROJ_ROOT}.gitmodules | sed 's/^.*path = //') - -for module in $SUBMODULES -do - case $module in - "warp-ctc") - if [ -d ${PROJ_ROOT}warp-ctc/build ]; then - rm -rf ${PROJ_ROOT}warp-ctc/build - fi - mkdir ${PROJ_ROOT}warp-ctc/build - cd ${PROJ_ROOT}warp-ctc/build - cmake ..; make - ;; - esac -done -cd $WORK_DIR From 2b174a04bf0cb266fd455842a79e25de67818672 Mon Sep 17 00:00:00 2001 From: Zhizhong Su Date: Wed, 28 Dec 2016 08:34:35 +0000 Subject: [PATCH 13/51] add new_layer_cn doc --- doc/howto/dev/new_layer_cn.rst | 390 +++++++++++++++++++++++++++++++++ 1 file changed, 390 insertions(+) create mode 100644 doc/howto/dev/new_layer_cn.rst diff --git a/doc/howto/dev/new_layer_cn.rst b/doc/howto/dev/new_layer_cn.rst new file mode 100644 index 0000000000..ebec5fa206 --- /dev/null +++ b/doc/howto/dev/new_layer_cn.rst @@ -0,0 +1,390 @@ +================ +实现新的网络层 +================ + +这份教程指导你在PaddlePaddle中实现一个自定义的网络层。在这里我们使用全连接层作为例子来指导你完成实现新网络层需要的几个步骤。 + +- 推导该层前向和后向传递的方程。 +- 实现该层的C++类。 +- 写梯度检测的测试单元,以保证梯度的正确计算。 +- 实现该层的python封装。 + +推导方程 +================ + +首先我们需要推导该网络层的*前向传播*和*后向传播*的方程。前向传播给定输入,计算输出。后向传播给定输出的梯度,计算输入和参数的梯度。 + +下图是一个全连接层的示意图。在全连接层中,每个输出节点都连接到所有的输入节点上。 + +.. image:: FullyConnected.jpg + :align: center + :scale: 60 % + +一个网络层的前向传播部分把输入转化为相应的输出。 +全连接层以一个维度为:math:`D_i`稠密的向量作为输入。其用一个尺度为:math:`D_i \times D_o`的变换矩阵:math:`W`把:math:`x`映射到一个维度为:math:`D_o`的向量,并在其上再加上维度为:math:`D_o`的偏置向量:math:`b`。 + +.. math:: + + y = f(W^T x + b) + +其中:math:`f(.)`是一个非线性的*激活方程*,例如sigmoid, tanh,以及Relu。 + +变换矩阵:math:`W`和偏置向量:math:`b`是该网络层的*参数*。一个网络层的参数是在*反向传播*时被训练的。反向传播对所有的参数和输入都计算输出函数的梯度。优化器则用链式法则来对每个参数计算损失函数的梯度。 + +假设我们的损失函数是:math:`c(y)`,那么 + +.. math:: + + \frac{\partial c(y)}{\partial x} = \frac{\partial c(y)}{\partial y} \frac{\partial y}{\partial x} + +假设:math:`z = W^T x + b`,那么 + +.. math:: + + \frac{\partial y}{\partial z} = \frac{\partial f(z)}{\partial z} + +我们的base layer类可以自动计算上面的导数。 + +因而,对全连接层来说,我们需要计算: + +.. math:: + + \frac{\partial z}{\partial x} = W, \frac{\partial z_j}{\partial W_{ij}} = x_i, \frac{\partial z}{\partial b} = \mathbf 1 + +其中:math:`\mathbf 1`是一个全1的向量,:math:`W_{ij}`是矩阵:math:`W`第i行第j列的数值,:math:`z_j`是向量math:`z`的第j个值,:math:`x_i`是向量:math:`x`的第i个值。 + +最后我们使用链式法则计算:math:`\frac{\partial z}{\partial x}`以及:math:`\frac{\partial z}{\partial W}`。计算的细节将在下面的小节给出。
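作为对上面结果的一个按元素展开的示例(原文把细节留到了后面的小节),代入链式法则可以得到各参数的梯度:

.. math::

    \frac{\partial c}{\partial W_{ij}} = \frac{\partial c}{\partial z_j} x_i, \qquad \frac{\partial c}{\partial b_j} = \frac{\partial c}{\partial z_j}

这正对应后文 backward 实现中用输入的转置乘以输出梯度来累加权重梯度的做法。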
+ +实现C++类 +=================== + +一个网络层的C++类需要实现初始化,前向和后向。全连接层的实现位于:code:`paddle/gserver/layers/FullyConnectedLayer.h`及:code:`paddle/gserver/layers/FullyConnectedLayer.cpp`。这里我们展示一份简化过的代码。 + +这个类需要继承:code:`paddle::Layer`这个基类,并且需要重写以下基类中的虚函数: + +- 类的构造函数和析构析构函数。 +- :code:`init`函数。用于初始化参数和设置。 +- :code:`forward`。实现网络层的前向传播。 +- :code:`backward`。实现网络层的后向传播。 +- :code:`prefetch`。用于确定由参数服务器预取的行相关的参数矩阵。如果该网络层不需要远程稀疏更新的话,你不需要重写该函数。(大多数网络层不需要支持远程稀疏更新) + + +头文件在下面列出: + +.. code-block:: c++ + + namespace paddle { + /** + * 全连接层的每个输出都连接到上一层的所有的神经元上。 + * 其用一些学习过的参数做内积并加上偏置(可选)。 + * + * 配置文件接口是fc_layer。 + */ + + class FullyConnectedLayer : public Layer { + protected: + WeightList weights_; + std::unique_ptr<Weight> biases_; + + public: + explicit FullyConnectedLayer(const LayerConfig& config) + : Layer(config) {} + ~FullyConnectedLayer() {} + + bool init(const LayerMap& layerMap, const ParameterMap& parameterMap); + + Weight& getWeight(int idx) { return *weights_[idx]; } + + void prefetch(); + void forward(PassType passType); + void backward(const UpdateCallback& callback = nullptr); + }; + } // namespace paddle + +头文件中把参数定位为类的成员变量。我们使用:code:`Weight`类作为参数的抽象,它支持多线程更新。该类的实现细节在“实现细节”中由详细介绍。 + +- :code:`weights_`是存有变换矩阵的一系列权重。在当前的实现方式下,网络层可以有多个输入。因此,它可能有不止一个权重。每个权重对应一个输入。 +- :code:`biases_`是存有偏置向量的权重。 + +全连接层没有网络层配置的超参数。如果一个网络层需要配置的话,通常的做法是将配置存于:code:`LayerConfig& config`中,并在类构建函数中把它放入一个类成员变量里。 + +下面的代码片段实现了:code:`init`函数。 + +- 首先,所有的:code:`init`函数必须先调用基类中的函数:code:`Layer::init(layerMap, parameterMap);`。该语句会为每个层初始化其所需要的变量和连接。 +- 之后初始化所有的权重矩阵:math:`W`。当前的实现方式下,网络层可以有多个输入。因此,它可能有不止一个权重。 +- 最后,初始化偏置向量。 + + +.. code-block:: c++ + + bool FullyConnectedLayer::init(const LayerMap& layerMap, + const ParameterMap& parameterMap) { + /* 初始化父类 */ + Layer::init(layerMap, parameterMap); + + /* 初始化权重表 */ + CHECK(inputLayers_.size() == parameters_.size()); + for (size_t i = 0; i < inputLayers_.size(); i++) { + // 获得参数尺寸 + size_t height = inputLayers_[i]->getSize(); + size_t width = getSize(); + + // 新建一个权重 + if (parameters_[i]->isSparse()) { + CHECK_LE(parameters_[i]->getSize(), width * height); + } else { + CHECK_EQ(parameters_[i]->getSize(), width * height); + } + Weight* w = new Weight(height, width, parameters_[i]); + + // 将新建的权重加入权重表 + weights_.emplace_back(w); + } + + /* 初始化biases_ */ + if (biasParameter_.get() != NULL) { + biases_ = std::unique_ptr<Weight>(new Weight(1, getSize(), biasParameter_)); + } + + return true; + } + +实现前向传播的部分有下面几个步骤。 + +- 每个层在其:code:`forward`函数的开头必须调用:code:`Layer::forward(passType);`。 +- 之后使用:code:`reserveOutput(batchSize, size);`为输出分配内存。由于我们支持训练数据有不同的批次大小,所以这一步是必要的。:code:`reserveOutput` 会相应地改变输出的尺寸。为了保证效率,如果需要扩大矩阵,我们会重新分配内存;如果需要缩减矩阵,我们会继续使用现有的内存块。 +- 之后使用矩阵运算函数来计算:math:`\sum_i W_i x + b`。:code:`getInput(i).value`返回第i个输入矩阵。每个输入都是一个:math:`batchSize \times dim`的矩阵,每行表示一个批次中的单个输入。对于我们支持的全部矩阵操作,请参考:code:`paddle/math/Matrix.h`和:code:`paddle/math/BaseMatrix.h`。 +- 最终,使用:code:`forwardActivation();`进行激活操作。这会自动进行网络配置中声明的激活操作。 + + +..
code-block:: c++ + + void FullyConnectedLayer::forward(PassType passType) { + Layer::forward(passType); + + /* 若有必要,为output_申请内存 */ + int batchSize = getInput(0).getBatchSize(); + int size = getSize(); + + { + // 设置输出的尺寸 + reserveOutput(batchSize, size); + } + + MatrixPtr outV = getOutputValue(); + + // 对每个输入乘上转化矩阵 + for (size_t i = 0; i != inputLayers_.size(); ++i) { + auto input = getInput(i); + CHECK(input.value) << "The input of 'fc' layer must be matrix"; + i == 0 ? outV->mul(input.value, weights_[i]->getW(), 1, 0) + : outV->mul(input.value, weights_[i]->getW(), 1, 1); + } + + /* 加上偏置向量 */ + if (biases_.get() != NULL) { + outV->addBias(*(biases_->getW()), 1); + } + + /* 激活 */ { + forwardActivation(); + } + } + +实现后向传播的部分有下面几个步骤。 + +- :code:`backwardActivation()`计算激活函数的梯度。梯度会就地(不使用额外空间)乘上输出的梯度,并可以通过:code:`getOutputGrad()`来获得。 +- 计算偏置的梯度。注意,我们使用:code:`biases_->getWGrad()`来得到某个特定参数的梯度矩阵。在一个参数的梯度被更新后,**必须**要调用:code:`getParameterPtr()->incUpdate(callback);`。这是用来在多线程和多机上更新参数的。 +- 之后,计算转换矩阵和输入的梯度,并对相应的参数调用:code:`incUpdate`。这给了框架一个机会去了解自己是否已经把所有的梯度收集到一个参数中,使得框架可以进行有时间重叠的工作。(例如,网络通信) + + +.. code-block:: c++ + + void FullyConnectedLayer::backward(const UpdateCallback& callback) { + /* 对激活求导 */ { + backwardActivation(); + } + + if (biases_ && biases_->getWGrad()) { + biases_->getWGrad()->collectBias(*getOutputGrad(), 1); + + /* 加上偏置的梯度 */ + biases_->getParameterPtr()->incUpdate(callback); + } + + bool syncFlag = hl_get_sync_flag(); + + for (size_t i = 0; i != inputLayers_.size(); ++i) { + /* 计算当前层权重的梯度 */ + if (weights_[i]->getWGrad()) { + MatrixPtr input_T = getInputValue(i)->getTranspose(); + MatrixPtr oGrad = getOutputGrad(); + { + weights_[i]->getWGrad()->mul(input_T, oGrad, 1, 1); + } + } + + + /* 计算输入层的偏差 */ + MatrixPtr preGrad = getInputGrad(i); + if (NULL != preGrad) { + MatrixPtr weights_T = weights_[i]->getW()->getTranspose(); + preGrad->mul(getOutputGrad(), weights_T, 1, 1); + } + + { + weights_[i]->getParameterPtr()->incUpdate(callback); + } + } + } + +:code:`prefetch`函数指出了在训练时需要从参数服务器取出的行。仅在远程稀疏训练时有效。在远程稀疏训练时,完整的参数矩阵被分布式的保存在参数服务器上。当网络层用一个批次做训练时,该批次中,输入仅有一个子集是非零的。因此,该层仅需要这些非零样本位置所对应的转换矩阵的那些行。:code:`prefetch`表明了这些行的标号。 + +大多数层不需要远程稀疏训练函数。这种情况下不需要重写该函数。 + +.. code-block:: c++ + + void FullyConnectedLayer::prefetch() { + for (size_t i = 0; i != inputLayers_.size(); ++i) { + auto* sparseParam = + dynamic_cast<SparsePrefetchRowCpuMatrix*>(weights_[i]->getW().get()); + if (sparseParam) { + MatrixPtr input = getInputValue(i); + sparseParam->addRows(input); + } + } + } + +最后,使用:code:`REGISTER_LAYER(fc, FullyConnectedLayer);`来注册该层。:code:`fc`是该层的标识符,:code:`FullyConnectedLayer`是该层的类名。 + +..
code-block:: c++ + + namespace paddle { + REGISTER_LAYER(fc, FullyConnectedLayer); + } + +若:code:`cpp`被放在:code:`paddle/gserver/layers`目录下,其会自动被加入编译列表。 + + +写梯度检查单元测试 +=============================== + +写梯度检查单元测试是一个验证新实现的层是否正确的相对简单的办法。梯度检查单元测试通过有限差分法来验证一个层的梯度。首先对输入做一个小的扰动:math:`\Delta x`,然后观察到输出的变化为:math:`\Delta y`,那么,梯度就可以通过这个方程计算得到:math:`\frac{\Delta y}{\Delta x }`。之后,再用这个梯度去和:code:`backward`函数得到的梯度去对比,以保证梯度计算的正确性。需要注意的是梯度检查仅仅验证了梯度的计算,并不保证:code:`forward`和:code:`backward`函数的实现是正确的。你需要一些更复杂的单元测试来保证你实现的网络层是正确的。 + +所有的梯度检测单侧都位于:code:`paddle/gserver/tests/test_LayerGrad.cpp`。我们建议你在写新网络层时把测试代码放入新的文件中。下面列出了全连接层的梯度检查单元测试。它包含以下几步: + ++ 生成网络层配置。网络层配置包含以下几项: + - 偏置参数的大小。(例子中是4096) + - 层的类型。(例子中是fc) + - 层的大小。(例子中是4096) + - 激活的类型。(例子中是softmax) + - dropout的比例。(例子中是0.1) ++ 配置网络层的输入。在这个例子里,我们仅有一个输入。 + - 输入的类型(:code:`INPUT_DATA`),可以是以下几种: + - :code:`INPUT_DATA`:稠密向量。 + - :code:`INPUT_LABEL`:整数。 + - :code:`INPUT_DATA_TARGET`:稠密向量,但不用于计算梯度。 + - :code:`INPUT_SEQUENCE_DATA`:含有序列信息的稠密向量。 + - :code:`INPUT_HASSUB_SEQUENCE_DATA`:含有序列信息和子序列信息的稠密向量。 + - :code:`INPUT_SEQUENCE_LABEL`:含有序列信息的整数。 + - :code:`INPUT_SPARSE_NON_VALUE_DATA`:0-1稀疏数据。 + - :code:`INPUT_SPARSE_FLOAT_VALUE_DATA`:浮点稀疏数据。 + - 输入的名字。(例子中是:code:`layer_0`) + - 输入的大小。(例子中是8192) + - 非零数字的个数,仅对稀疏数据有效。 + - 稀疏数据的格式,仅对稀疏数据有效。 ++ 对每个输入,都需要调用一次:code:`config.layerConfig.add_inputs();`。 ++ 调用:code:`testLayerGrad`来做梯度检查。它包含下面的 It has the following arguments. + - 层和输入的配置。(例子中是:code:`config`) + - 输入的类型。(例子中是:code:`fc`) + - 梯度检查的批次大小。(例子中是100) + - 输入是否是转置的。大多数层需要设置为:code:`false`。(例子中是:code:`false`) + - 是否使用权重。有些层或者激活需要做归一化以保证它们的输出的和是一个常数。例如,softm激活的输出的和总是1。在这种情况下,我们不能通过常规的梯度检查的方式来计算梯度。因此我们采用输出的加权和(非常数)来计算梯度。(例子中是:code:`true`,因为全连接层的激活可以是softmax) + +.. code-block:: c++ + + void testFcLayer(string format, size_t nnz) { + // Create layer configuration. + TestConfig config; + config.biasSize = 4096; + config.layerConfig.set_type("fc"); + config.layerConfig.set_size(4096); + config.layerConfig.set_active_type("sigmoid"); + config.layerConfig.set_drop_rate(0.1); + // Setup inputs. + config.inputDefs.push_back( + {INPUT_DATA, "layer_0", 8192, nnz, ParaSparse(format)}); + config.layerConfig.add_inputs(); + LOG(INFO) << config.inputDefs[0].sparse.sparse << " " + << config.inputDefs[0].sparse.format; + for (auto useGpu : {false, true}) { + testLayerGrad(config, "fc", 100, /* trans */ false, useGpu, + /* weight */ true); + } + } + +如果你要为了测试而增加新的文件,例如:code:`paddle/gserver/tests/testFCGrad.cpp`,你需要把该文件加入:code:`paddle/gserver/tests/CMakeLists.txt`中。下面给出了一个例子。当你执行命令:code:`make tests`时,所有的单侧都会被执行一次。注意,有些层可能需要高精度来保证梯度检查单侧正确执行。你需要在配置cmake时将:code:`WITH_DOUBLE`设置为`ON`。 + +.. code-block:: bash + + add_unittest_without_exec(test_FCGrad + test_FCGrad.cpp + LayerGradUtil.cpp + TestUtil.cpp) + + add_test(NAME test_FCGrad + COMMAND test_FCGrad) + + +实现python封装 +======================== + +python封装的实现使得我们可以在配置文件中使用新实现的网络层。所有的python封装都在:code:`python/paddle/trainer/config_parser.py`中。全连接层python封装的例子中包含下面几步: + +- 所有的Python封装都使用:code:`@config_layer('fc')`这样的装饰器。网络层的标识符为:code:`fc`。 +- 实现构造函数:code:`__init__`。 + - 它首先调用基构造函数:code:`super(FCLayer, self).__init__(name, 'fc', size, inputs=inputs, **xargs)`。:code:`FCLayer`是Python封装的类名。:code:`fc`是网络层的标识符。为了封装能够正确工作,这些名字必须要写对。 + - 之后,计算转换矩阵的大小和格式(是否稀疏)。 + +.. 
code-block:: python + + @config_layer('fc') + class FCLayer(LayerBase): + def __init__( + self, + name, + size, + inputs, + bias=True, + **xargs): + super(FCLayer, self).__init__(name, 'fc', size, inputs=inputs, **xargs) + for input_index in xrange(len(self.inputs)): + input_layer = self.get_input_layer(input_index) + psize = self.config.size * input_layer.size + dims = [input_layer.size, self.config.size] + format = self.inputs[input_index].format + sparse = format == "csr" or format == "csc" + if sparse: + psize = self.inputs[input_index].nnz + self.create_input_parameter(input_index, psize, dims, sparse, format) + self.create_bias_parameter(bias, self.config.size) + +在网络配置中,网络层的细节可以通过下面这些代码片段来指定。这个类的参数包括: + +- :code:`name`是网络层实例的名字标识符。 +- :code:`type`是网络层的类型,通过网络层的标识符来指定。 +- :code:`size`是网络层输出的大小。 +- :code:`bias`表明这个层的一个实例是否需要偏置。 +- :code:`inputs`说明这个层的输入,输入是由一个list中的网络层实例的名字组成的。 + +.. code-block:: python + + Layer( + name = "fc1", + type = "fc", + size = 64, + bias = True, + inputs = [Input("pool3")] + ) + +我们建议你为你的Python封装实现一个“助手”,使得搭模型时更方便。具体可以参考:code:`python/paddle/trainer_config_helpers/layers.py`。 From 9284a6c1dc7234a6d4e18a4ebcecfaeee754800e Mon Sep 17 00:00:00 2001 From: Zhizhong Su Date: Wed, 28 Dec 2016 09:05:10 +0000 Subject: [PATCH 14/51] fix format problem in new_layer_cn.rst --- doc/howto/dev/new_layer_cn.rst | 116 ++++++++++++++++----------------- 1 file changed, 58 insertions(+), 58 deletions(-) diff --git a/doc/howto/dev/new_layer_cn.rst b/doc/howto/dev/new_layer_cn.rst index ebec5fa206..8f5df0b36a 100644 --- a/doc/howto/dev/new_layer_cn.rst +++ b/doc/howto/dev/new_layer_cn.rst @@ -21,23 +21,23 @@ :scale: 60 % 一个网络层的前向传播部分把输入转化为相应的输出。 -全连接层以一个维度为:math:`D_i`稠密的向量作为输入。其用一个尺度为:math:`D_i \times D_o`的变换矩阵:math:`W`把:math:`x`映射到一个维度为:math:`D_o`的向量,并在其上再加上维度为:math:`D_o`的偏置向量:math:`b`。 +全连接层以一个维度为 :math:`D_i` 稠密的向量作为输入。其用一个尺度为 :math:`D_i \times D_o` 的变换矩阵 :math:`W` 把 :math:`x` 映射到一个维度为 :math:`D_o` 的向量,并在其上再加上维度为 :math:`D_o` 的偏置向量 :math:`b` 。 .. math:: y = f(W^T x + b) -其中:math:`f(.)`是一个非线性的*激活方程*,例如sigmoid, tanh,以及Relu。 +其中 :math:`f(.)` 是一个非线性的*激活方程*,例如sigmoid, tanh,以及Relu。 -变换矩阵:math:`W`和偏置向量:math:`b`是该网络层的*参数*。一个网络层的参数是在*反向传播*时被训练的。反向传播对所有的参数和输入都计算输出函数的梯度。优化器则用链式法则来对每个参数计算损失函数的梯度。 +变换矩阵 :math:`W` 和偏置向量 :math:`b` 是该网络层的*参数*。一个网络层的参数是在*反向传播*时被训练的。反向传播对所有的参数和输入都计算输出函数的梯度。优化器则用链式法则来对每个参数计算损失函数的梯度。 -假设我们的损失函数是:math:`c(y)`,那么 +假设我们的损失函数是 :math:`c(y)` ,那么 .. math:: \frac{\partial c(y)}{\partial x} = \frac{\partial c(y)}{\partial y} \frac{\partial y}{\partial x} -假设:math:`z = W^T x + b`,那么 +假设 :math:`z = W^T x + b` ,那么 ..
math:: @@ -51,22 +51,22 @@ \frac{\partial z}{\partial x} = W, \frac{\partial z_j}{\partial W_{ij}} = x_i, \frac{\partial z}{\partial b} = \mathbf 1 -其中:math:`\mathbf 1`是一个全1的向量,:math:`W_{ij}`是矩阵:math:`W`第i行第j列的数值,:math:`z_j`是向量math:`z`的第j个值,:math:`x_i`是向量:math:`x`的第i个值。 +其中 :math:`\mathbf 1` 是一个全1的向量, :math:`W_{ij}` 是矩阵 :math:`W` 第i行第j列的数值, :math:`z_j` 是向量 :math:`z` 的第j个值, :math:`x_i` 是向量 :math:`x` 的第i个值。 -最后我们使用链式法则计算:math:`\frac{\partial z}{\partial x}`以及:math:`\frac{\partial z}{\partial W}`。计算的细节将在下面的小节给出。 +最后我们使用链式法则计算 :math:`\frac{\partial z}{\partial x}` 以及 :math:`\frac{\partial z}{\partial W}` 。计算的细节将在下面的小节给出。 实现C++类 =================== 一个网络层的C++类需要实现初始化,前向和后向。全连接层的实现位于:code:`paddle/gserver/layers/FullyConnectedLayer.h`及:code:`paddle/gserver/layers/FullyConnectedLayer.cpp`。这里我们展示一份简化过的代码。 -这个类需要继承:code:`paddle::Layer`这个基类,并且需要重写以下基类中的虚函数: +这个类需要继承 :code:`paddle::Layer` 这个基类,并且需要重写以下基类中的虚函数: - 类的构造函数和析构析构函数。 -- :code:`init`函数。用于初始化参数和设置。 -- :code:`forward`。实现网络层的前向传播。 -- :code:`backward`。实现网络层的后向传播。 -- :code:`prefetch`。用于确定由参数服务器预取的行相关的参数矩阵。如果该网络层不需要远程稀疏更新的话,你不需要重写该函数。(大多数网络层不需要支持远程稀疏更新) +- :code:`init` 函数。用于初始化参数和设置。 +- :code:`forward` 。实现网络层的前向传播。 +- :code:`backward` 。实现网络层的后向传播。 +- :code:`prefetch` 。用于确定由参数服务器预取的行相关的参数矩阵。如果该网络层不需要远程稀疏更新的话,你不需要重写该函数。(大多数网络层不需要支持远程稀疏更新) 头文件在下面列出: @@ -101,17 +101,17 @@ }; } // namespace paddle -头文件中把参数定位为类的成员变量。我们使用:code:`Weight`类作为参数的抽象,它支持多线程更新。该类的实现细节在“实现细节”中由详细介绍。 +头文件中把参数定位为类的成员变量。我们使用 :code:`Weight` 类作为参数的抽象,它支持多线程更新。该类的实现细节在“实现细节”中由详细介绍。 -- :code:`weights_`是存有变换矩阵的一系列权重。在当前的实现方式下,网络层可以有多个输入。因此,它可能有不止一个权重。每个权重对应一个输入。 -- :code:`biases_`是存有偏置向量的权重。 +- :code:`weights_` 是存有变换矩阵的一系列权重。在当前的实现方式下,网络层可以有多个输入。因此,它可能有不止一个权重。每个权重对应一个输入。 +- :code:`biases_` 是存有偏置向量的权重。 -全连接层没有网络层配置的超参数。如果一个网络层需要配置的话,通常的做法是将配置存于:code:`LayerConfig& config`中,并在类构建函数中把它放入一个类成员变量里。 +全连接层没有网络层配置的超参数。如果一个网络层需要配置的话,通常的做法是将配置存于 :code:`LayerConfig& config` 中,并在类构建函数中把它放入一个类成员变量里。 -下面的代码片段实现了:code:`init`函数。 +下面的代码片段实现了 :code:`init` 函数。 -- 首先,所有的:code:`init`函数必须先调用基类中的函数:code:`Layer::init(layerMap, parameterMap);`。该语句会为每个层初始化其所需要的变量和连接。 -- 之后初始化所有的权重矩阵:math:`W`。当前的实现方式下,网络层可以有多个输入。因此,它可能有不止一个权重。 +- 首先,所有的 :code:`init` 函数必须先调用基类中的函数 :code:`Layer::init(layerMap, parameterMap);` 。该语句会为每个层初始化其所需要的变量和连接。 +- 之后初始化所有的权重矩阵 :math:`W` 。当前的实现方式下,网络层可以有多个输入。因此,它可能有不止一个权重。 - 最后,初始化偏置向量。 @@ -151,10 +151,10 @@ 实现前向传播的部分有下面几个步骤。 -- 每个层在其:code:`forward`函数的开头必须调用:code:`Layer::forward(passType);`。 -- 之后使用:code:`reserveOutput(batchSize, size);`为输出分配内存。由于我们支持训练数据有不同的批次大小,所以这一步是必要的。:code:`reserveOutput` 会相应地改变输出的尺寸。为了保证效率,如果需要扩大矩阵,我们会重新分配内存;如果需要缩减矩阵,我们会继续使用现有的内存块。 -- 之后使用矩阵运算函数来计算:math:`\sum_i W_i x + b`。:code:`getInput(i).value`返回第i个输入矩阵。每个输入都是一个:math:`batchSize \times dim`的矩阵,每行表示一个批次中的单个输入。对于我们支持的全部矩阵操作,请参考:code:`paddle/math/Matrix.h`和:code:`paddle/math/BaseMatrix.h`。 -- 最终,使用:code:`forwardActivation();`进行激活操作。这会自动进行网络配置中声明的激活操作。 +- 每个层在其 :code:`forward` 函数的开头必须调用 :code:`Layer::forward(passType);` 。 +- 之后使用 :code:`reserveOutput(batchSize, size);` 为输出分配内存。由于我们支持训练数据有不同的批次大小,所以这一步是必要的。 :code:`reserveOutput` 会相应地改变输出的尺寸。为了保证效率,如果需要扩大矩阵,我们会重新分配内存;如果需要缩减矩阵,我们会继续使用现有的内存块。 +- 之后使用矩阵运算函数来计算 :math:`\sum_i W_i x + b`。:code:`getInput(i).value` 返回第i个输入矩阵。每个输入都是一个 :math:`batchSize \times dim` 的矩阵,每行表示一个批次中的单个输入。对于我们支持的全部矩阵操作,请参考 :code:`paddle/math/Matrix.h`和:code:`paddle/math/BaseMatrix.h` 。 +- 最终,使用 :code:`forwardActivation();` 进行激活操作。这会自动进行网络配置中声明的激活操作。 .. 
code-block:: c++ @@ -193,9 +193,9 @@ 实现后向传播的部分有下面几个步骤。 -- :code:`backwardActivation()`计算激活函数的梯度。梯度会就地(不使用额外空间)乘上输出的梯度,并可以通过:code:`getOutputGrad()`来获得。 -- 计算偏置的梯度。注意,我们使用:code:`biases_->getWGrad()`来得到某个特定参数的梯度矩阵。在一个参数的梯度被更新后,**必须**要调用:code:`getParameterPtr()->incUpdate(callback);`。这是用来在多线程和多机上更新参数的。 -- 之后,计算转换矩阵和输入的梯度,并对相应的参数调用:code:`incUpdate`。这给了框架一个机会去了解自己是否已经把所有的梯度收集到一个参数中,使得框架可以进行有时间重叠的工作。(例如,网络通信) +- :code:`backwardActivation()` 计算激活函数的梯度。梯度会就地(不使用额外空间)乘上输出的梯度,并可以通过 :code:`getOutputGrad()` 来获得。 +- 计算偏置的梯度。注意,我们使用 :code:`biases_->getWGrad()` 来得到某个特定参数的梯度矩阵。在一个参数的梯度被更新后,**必须**要调用 :code:`getParameterPtr()->incUpdate(callback);` 。这是用来在多线程和多机上更新参数的。 +- 之后,计算转换矩阵和输入的梯度,并对相应的参数调用 :code:`incUpdate` 。这给了框架一个机会去了解自己是否已经把所有的梯度收集到一个参数中,使得框架可以进行有时间重叠的工作。(例如,网络通信) .. code-block:: c++ @@ -238,7 +238,7 @@ } } -:code:`prefetch`函数指出了在训练时需要从参数服务器取出的行。仅在远程稀疏训练时有效。在远程稀疏训练时,完整的参数矩阵被分布式的保存在参数服务器上。当网络层用一个批次做训练时,该批次中,输入仅有一个子集是非零的。因此,该层仅需要这些非零样本位置所对应的转换矩阵的那些行。:code:`prefetch`表明了这些行的标号。 + :code:`prefetch` 函数指出了在训练时需要从参数服务器取出的行。仅在远程稀疏训练时有效。在远程稀疏训练时,完整的参数矩阵被分布式的保存在参数服务器上。当网络层用一个批次做训练时,该批次中,输入仅有一个子集是非零的。因此,该层仅需要这些非零样本位置所对应的转换矩阵的那些行。 :code:`prefetch` 表明了这些行的标号。 大多数层不需要远程稀疏训练函数。这种情况下不需要重写该函数。 @@ -255,7 +255,7 @@ } } -最后,使用:code:`REGISTER_LAYER(fc, FullyConnectedLayer);`来注册该层。:code:`fc`是该层的标识符,:code:`FullyConnectedLayer`是该层的类名。 +最后,使用 :code:`REGISTER_LAYER(fc, FullyConnectedLayer);` 来注册该层。 :code:`fc` 是该层的标识符, :code:`FullyConnectedLayer` 是该层的类名。 .. code-block:: c++ @@ -263,15 +263,15 @@ REGISTER_LAYER(fc, FullyConnectedLayer); } -若:code:`cpp`被放在:code:`paddle/gserver/layers`目录下,其会自动被加入编译列表。 +若 :code:`cpp` 被放在 :code:`paddle/gserver/layers` 目录下,其会自动被加入编译列表。 写梯度检查单元测试 =============================== -写梯度检查单元测试是一个验证新实现的层是否正确的相对简单的办法。梯度检查单元测试通过有限差分法来验证一个层的梯度。首先对输入做一个小的扰动:math:`\Delta x`,然后观察到输出的变化为:math:`\Delta y`,那么,梯度就可以通过这个方程计算得到:math:`\frac{\Delta y}{\Delta x }`。之后,再用这个梯度去和:code:`backward`函数得到的梯度去对比,以保证梯度计算的正确性。需要注意的是梯度检查仅仅验证了梯度的计算,并不保证:code:`forward`和:code:`backward`函数的实现是正确的。你需要一些更复杂的单元测试来保证你实现的网络层是正确的。 +写梯度检查单元测试是一个验证新实现的层是否正确的相对简单的办法。梯度检查单元测试通过有限差分法来验证一个层的梯度。首先对输入做一个小的扰动 :math:`\Delta x` ,然后观察到输出的变化为 :math:`\Delta y` ,那么,梯度就可以通过这个方程计算得到 :math:`\frac{\Delta y}{\Delta x }` 。之后,再用这个梯度去和 :code:`backward` 函数得到的梯度去对比,以保证梯度计算的正确性。需要注意的是梯度检查仅仅验证了梯度的计算,并不保证 :code:`forward` 和 :code:`backward` 函数的实现是正确的。你需要一些更复杂的单元测试来保证你实现的网络层是正确的。 -所有的梯度检测单侧都位于:code:`paddle/gserver/tests/test_LayerGrad.cpp`。我们建议你在写新网络层时把测试代码放入新的文件中。下面列出了全连接层的梯度检查单元测试。它包含以下几步: +所有的梯度检测单侧都位于 :code:`paddle/gserver/tests/test_LayerGrad.cpp` 。我们建议你在写新网络层时把测试代码放入新的文件中。下面列出了全连接层的梯度检查单元测试。它包含以下几步: + 生成网络层配置。网络层配置包含以下几项: - 偏置参数的大小。(例子中是4096) @@ -280,26 +280,26 @@ - 激活的类型。(例子中是softmax) - dropout的比例。(例子中是0.1) + 配置网络层的输入。在这个例子里,我们仅有一个输入。 - - 输入的类型(:code:`INPUT_DATA`),可以是以下几种: - - :code:`INPUT_DATA`:稠密向量。 - - :code:`INPUT_LABEL`:整数。 - - :code:`INPUT_DATA_TARGET`:稠密向量,但不用于计算梯度。 - - :code:`INPUT_SEQUENCE_DATA`:含有序列信息的稠密向量。 - - :code:`INPUT_HASSUB_SEQUENCE_DATA`:含有序列信息和子序列信息的稠密向量。 - - :code:`INPUT_SEQUENCE_LABEL`:含有序列信息的整数。 - - :code:`INPUT_SPARSE_NON_VALUE_DATA`:0-1稀疏数据。 - - :code:`INPUT_SPARSE_FLOAT_VALUE_DATA`:浮点稀疏数据。 - - 输入的名字。(例子中是:code:`layer_0`) + - 输入的类型( :code:`INPUT_DATA` ),可以是以下几种: + - :code:`INPUT_DATA` :稠密向量。 + - :code:`INPUT_LABEL` :整数。 + - :code:`INPUT_DATA_TARGET` :稠密向量,但不用于计算梯度。 + - :code:`INPUT_SEQUENCE_DATA` :含有序列信息的稠密向量。 + - :code:`INPUT_HASSUB_SEQUENCE_DATA` :含有序列信息和子序列信息的稠密向量。 + - :code:`INPUT_SEQUENCE_LABEL` :含有序列信息的整数。 + - :code:`INPUT_SPARSE_NON_VALUE_DATA` :0-1稀疏数据。 + - :code:`INPUT_SPARSE_FLOAT_VALUE_DATA` :浮点稀疏数据。 + - 输入的名字。(例子中是 :code:`layer_0` ) 
- 输入的大小。(例子中是8192) - 非零数字的个数,仅对稀疏数据有效。 - 稀疏数据的格式,仅对稀疏数据有效。 -+ 对每个输入,都需要调用一次:code:`config.layerConfig.add_inputs();`。 -+ 调用:code:`testLayerGrad`来做梯度检查。它包含下面的 It has the following arguments. - - 层和输入的配置。(例子中是:code:`config`) - - 输入的类型。(例子中是:code:`fc`) ++ 对每个输入,都需要调用一次 :code:`config.layerConfig.add_inputs();` 。 ++ 调用 :code:`testLayerGrad` 来做梯度检查。它包含下面的参数。 + - 层和输入的配置。(例子中是 :code:`config` ) + - 输入的类型。(例子中是 :code:`fc` ) - 梯度检查的批次大小。(例子中是100) - - 输入是否是转置的。大多数层需要设置为:code:`false`。(例子中是:code:`false`) - - 是否使用权重。有些层或者激活需要做归一化以保证它们的输出的和是一个常数。例如,softm激活的输出的和总是1。在这种情况下,我们不能通过常规的梯度检查的方式来计算梯度。因此我们采用输出的加权和(非常数)来计算梯度。(例子中是:code:`true`,因为全连接层的激活可以是softmax) + - 输入是否是转置的。大多数层需要设置为 :code:`false` 。(例子中是 :code:`false` ) + - 是否使用权重。有些层或者激活需要做归一化以保证它们的输出的和是一个常数。例如,softmax激活的输出的和总是1。在这种情况下,我们不能通过常规的梯度检查的方式来计算梯度。因此我们采用输出的加权和(非常数)来计算梯度。(例子中是 :code:`true` ,因为全连接层的激活可以是softmax) .. code-block:: c++ @@ -323,7 +323,7 @@ } } -如果你要为了测试而增加新的文件,例如:code:`paddle/gserver/tests/testFCGrad.cpp`,你需要把该文件加入:code:`paddle/gserver/tests/CMakeLists.txt`中。下面给出了一个例子。当你执行命令:code:`make tests`时,所有的单侧都会被执行一次。注意,有些层可能需要高精度来保证梯度检查单侧正确执行。你需要在配置cmake时将:code:`WITH_DOUBLE`设置为`ON`。 +如果你要为了测试而增加新的文件,例如 :code:`paddle/gserver/tests/testFCGrad.cpp` ,你需要把该文件加入 :code:`paddle/gserver/tests/CMakeLists.txt` 中。下面给出了一个例子。当你执行命令 :code:`make tests` 时,所有的单侧都会被执行一次。注意,有些层可能需要高精度来保证梯度检查单侧正确执行。你需要在配置cmake时将 :code:`WITH_DOUBLE` 设置为 `ON` 。 .. code-block:: bash @@ -339,11 +339,11 @@ 实现python封装 ======================== -python封装的实现使得我们可以在配置文件中使用新实现的网络层。所有的python封装都在:code:`python/paddle/trainer/config_parser.py`中。全连接层python封装的例子中包含下面几步: +python封装的实现使得我们可以在配置文件中使用新实现的网络层。所有的python封装都在 :code:`python/paddle/trainer/config_parser.py` 中。全连接层python封装的例子中包含下面几步: -- 所有的Python封装都使用:code:`@config_layer('fc')`这样的装饰器。网络层的标识符为:code:`fc`。 -- 实现构造函数:code:`__init__`。 - - 它首先调用基构造函数:code:`super(FCLayer, self).__init__(name, 'fc', size, inputs=inputs, **xargs)`。:code:`FCLayer`是Python封装的类名。:code:`fc`是网络层的标识符。为了封装能够正确工作,这些名字必须要写对。 +- 所有的Python封装都使用 :code:`@config_layer('fc')` 这样的装饰器。网络层的标识符为 :code:`fc` 。 +- 实现构造函数 :code:`__init__` 。 + - 它首先调用基构造函数 :code:`super(FCLayer, self).__init__(name, 'fc', size, inputs=inputs, **xargs)` 。 :code:`FCLayer` 是Python封装的类名。 :code:`fc` 是网络层的标识符。为了封装能够正确工作,这些名字必须要写对。 - 之后,计算转换矩阵的大小和格式(是否稀疏)。 .. code-block:: python @@ -371,11 +371,11 @@ python封装的实现使得我们可以在配置文件中使用新实现的网 在网络配置中,网络层的细节可以通过下面这些代码片段来指定。这个类的参数包括: -- :code:`name`是网络层实例的名字标识符。 -- :code:`type`是网络层的类型,通过网络层的标识符来指定。 -- :code:`size`是网络层输出的大小。 -- :code:`bias`表明这个层的一个实例是否需要偏置。 -- :code:`inputs`说明这个层的输入,输入是由一个list中的网络层实例的名字组成的。 +- :code:`name` 是网络层实例的名字标识符。 +- :code:`type` 是网络层的类型,通过网络层的标识符来指定。 +- :code:`size` 是网络层输出的大小。 +- :code:`bias` 表明这个层的一个实例是否需要偏置。 +- :code:`inputs` 说明这个层的输入,输入是由一个list中的网络层实例的名字组成的。 .. code-block:: python @@ -387,4 +387,4 @@ python封装的实现使得我们可以在配置文件中使用新实现的网 inputs = [Input("pool3")] ) -我们建议你为你的Python封装实现一个“助手”,使得搭模型时更方便。具体可以参考:code:`python/paddle/trainer_config_helpers/layers.py`。 +我们建议你为你的Python封装实现一个“助手”,使得搭模型时更方便。具体可以参考 :code:`python/paddle/trainer_config_helpers/layers.py` 。 From 68c89bcc522f8dfac53302f06547872bc92597f8 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Mon, 26 Dec 2016 21:24:10 +0800 Subject: [PATCH 15/51] Add jupyter notebook support in Docker images. 
--- paddle/scripts/docker/Dockerfile | 13 +++++++++++-- paddle/scripts/docker/Dockerfile.gpu | 13 +++++++++++-- paddle/scripts/docker/build.sh | 4 +++- paddle/scripts/docker/run_all | 8 ++++++++ 4 files changed, 33 insertions(+), 5 deletions(-) create mode 100755 paddle/scripts/docker/run_all diff --git a/paddle/scripts/docker/Dockerfile b/paddle/scripts/docker/Dockerfile index b01de499bd..46363b05b7 100644 --- a/paddle/scripts/docker/Dockerfile +++ b/paddle/scripts/docker/Dockerfile @@ -15,7 +15,7 @@ RUN apt-get update \ && apt-get clean -y RUN cd /usr/src/gtest && cmake . && make && cp *.a /usr/lib RUN pip install -U BeautifulSoup docopt PyYAML pillow \ - sphinx sphinx_rtd_theme recommonmark + sphinx sphinx_rtd_theme recommonmark jupyter ARG WITH_AVX ARG WITH_DOC @@ -43,4 +43,13 @@ RUN echo 'root:root' | chpasswd RUN sed -ri 's/^PermitRootLogin\s+.*/PermitRootLogin yes/' /etc/ssh/sshd_config RUN sed -ri 's/UsePAM yes/#UsePAM yes/g' /etc/ssh/sshd_config EXPOSE 22 -CMD ["/usr/sbin/sshd", "-D"] + +# Jupyter Notebook directory. +RUN mkdir /notes/ +WORKDIR "/notes" +EXPOSE 8888 + +RUN mkdir -p /opt/bin +COPY ./paddle/scripts/docker/run_all /opt/bin/ + +CMD ["/opt/bin/run_all"] diff --git a/paddle/scripts/docker/Dockerfile.gpu b/paddle/scripts/docker/Dockerfile.gpu index a68cc79b84..072c144818 100644 --- a/paddle/scripts/docker/Dockerfile.gpu +++ b/paddle/scripts/docker/Dockerfile.gpu @@ -15,7 +15,7 @@ RUN apt-get update \ && apt-get clean -y RUN cd /usr/src/gtest && cmake . && make && cp *.a /usr/lib RUN pip install -U BeautifulSoup docopt PyYAML pillow \ - sphinx sphinx_rtd_theme recommonmark + sphinx sphinx_rtd_theme recommonmark jupyter ARG WITH_AVX ARG WITH_DOC @@ -43,4 +43,13 @@ RUN echo 'root:root' | chpasswd RUN sed -ri 's/^PermitRootLogin\s+.*/PermitRootLogin yes/' /etc/ssh/sshd_config RUN sed -ri 's/UsePAM yes/#UsePAM yes/g' /etc/ssh/sshd_config EXPOSE 22 -CMD ["/usr/sbin/sshd", "-D"] + +# Jupyter Notebook directory. 
+RUN mkdir /notes/ +WORKDIR "/notes" +EXPOSE 8888 + +RUN mkdir -p /opt/bin +COPY ./paddle/scripts/docker/run_all /opt/bin/ + +CMD ["/opt/bin/run_all"] diff --git a/paddle/scripts/docker/build.sh b/paddle/scripts/docker/build.sh index ca3f1c3f18..7edba3dd09 100755 --- a/paddle/scripts/docker/build.sh +++ b/paddle/scripts/docker/build.sh @@ -43,5 +43,7 @@ cp -rv /woboq/data $WOBOQ_OUT/../data -o $WOBOQ_OUT \ -p paddle:/paddle /woboq/indexgenerator/codebrowser_indexgenerator $WOBOQ_OUT - +cd /woboq +make clean +rm -rf /paddle/build trap : 0 diff --git a/paddle/scripts/docker/run_all b/paddle/scripts/docker/run_all new file mode 100755 index 0000000000..87083467f5 --- /dev/null +++ b/paddle/scripts/docker/run_all @@ -0,0 +1,8 @@ +#!/bin/bash +LOG=/var/log/all + +touch $LOG + +/usr/sbin/sshd -D >> $LOG & +jupyter notebook --ip=0.0.0.0 /notes/ >> $LOG & +tail -f $LOG From e0a81dca263df428deeca10dee7783346d64411b Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Tue, 3 Jan 2017 19:42:51 +0800 Subject: [PATCH 16/51] add translated chinese docs into catalog --- doc/howto/deep_model/rnn/index_cn.rst | 1 + doc/howto/deep_model/rnn/rnn_cn.md | 226 ------------------ .../deep_model/{ => rnn}/rnn_config_cn.rst | 59 +++-- doc/howto/index_cn.rst | 5 +- doc/howto/index_en.rst | 4 +- doc/howto/usage/cmd_parameter/index_cn.rst | 11 + doc/howto/usage/cmd_parameter/index_en.md | 8 - doc/howto/usage/cmd_parameter/index_en.rst | 11 + .../k8s-aws/README.md => k8s/k8s_aws_en.md} | 8 +- doc/howto/usage/{cluster => }/k8s/k8s_cn.md | 2 +- .../{cluster => }/k8s/k8s_distributed_cn.md | 4 +- doc/howto/usage/{cluster => }/k8s/k8s_en.md | 0 .../usage/{cluster/k8s => k8s/src}/Dockerfile | 0 .../src}/add_security_group.png | Bin .../k8s-aws => k8s/src}/create_efs.png | Bin .../k8s-aws => k8s/src}/efs_mount.png | Bin .../usage/{cluster/k8s => k8s/src}/job.yaml | 0 .../k8s => k8s/src}/k8s-paddle-arch.png | Bin .../k8s-aws => k8s/src}/managed_policy.png | Bin .../usage/{cluster/k8s => k8s/src}/start.sh | 0 .../{cluster/k8s => k8s/src}/start_paddle.py | 0 doc/tutorials/index_cn.md | 2 + doc/tutorials/index_en.md | 1 + 23 files changed, 67 insertions(+), 275 deletions(-) delete mode 100644 doc/howto/deep_model/rnn/rnn_cn.md rename doc/howto/deep_model/{ => rnn}/rnn_config_cn.rst (88%) create mode 100644 doc/howto/usage/cmd_parameter/index_cn.rst delete mode 100644 doc/howto/usage/cmd_parameter/index_en.md create mode 100644 doc/howto/usage/cmd_parameter/index_en.rst rename doc/howto/usage/{cluster/k8s-aws/README.md => k8s/k8s_aws_en.md} (99%) rename doc/howto/usage/{cluster => }/k8s/k8s_cn.md (99%) rename doc/howto/usage/{cluster => }/k8s/k8s_distributed_cn.md (99%) rename doc/howto/usage/{cluster => }/k8s/k8s_en.md (100%) rename doc/howto/usage/{cluster/k8s => k8s/src}/Dockerfile (100%) rename doc/howto/usage/{cluster/k8s-aws => k8s/src}/add_security_group.png (100%) rename doc/howto/usage/{cluster/k8s-aws => k8s/src}/create_efs.png (100%) rename doc/howto/usage/{cluster/k8s-aws => k8s/src}/efs_mount.png (100%) rename doc/howto/usage/{cluster/k8s => k8s/src}/job.yaml (100%) rename doc/howto/usage/{cluster/k8s => k8s/src}/k8s-paddle-arch.png (100%) rename doc/howto/usage/{cluster/k8s-aws => k8s/src}/managed_policy.png (100%) rename doc/howto/usage/{cluster/k8s => k8s/src}/start.sh (100%) rename doc/howto/usage/{cluster/k8s => k8s/src}/start_paddle.py (100%) diff --git a/doc/howto/deep_model/rnn/index_cn.rst b/doc/howto/deep_model/rnn/index_cn.rst index 9e805ca851..9ecab5594c 100644 --- a/doc/howto/deep_model/rnn/index_cn.rst +++ 
b/doc/howto/deep_model/rnn/index_cn.rst @@ -4,6 +4,7 @@ RNN相关模型 .. toctree:: :maxdepth: 1 + rnn_config_cn.rst recurrent_group_cn.md hierarchical_layer_cn.rst hrnn_rnn_api_compare_cn.rst diff --git a/doc/howto/deep_model/rnn/rnn_cn.md b/doc/howto/deep_model/rnn/rnn_cn.md deleted file mode 100644 index 5ec05b2cab..0000000000 --- a/doc/howto/deep_model/rnn/rnn_cn.md +++ /dev/null @@ -1,226 +0,0 @@ -RNN 配置 -================= - -本教程将指导你如何在 PaddlePaddle 中配置循环神经网络(RNN)。PaddlePaddle 高度支持灵活和高效的循环神经网络配置。 在本教程中,您将了解如何: - -- 准备用来学习循环神经网络的序列数据。 -- 配置循环神经网络架构。 -- 使用学习完成的循环神经网络模型生成序列。 - -我们将使用 vanilla 循环神经网络和 sequence to sequence 模型来指导你完成这些步骤。sequence to sequence 模型的代码可以在`demo / seqToseq`找到。 - -准备序列数据 ---------------------- - -PaddlePaddle 不需要对序列数据进行任何预处理,例如填充。唯一需要做的是将相应类型设置为输入。例如,以下代码段定义了三个输入。 它们都是序列,它们的大小是`src_dict`,`trg_dict`和`trg_dict`: - -``` sourceCode -settings.input_types = [ - integer_value_sequence(len(settings.src_dict)), - integer_value_sequence(len(settings.trg_dict)), - integer_value_sequence(len(settings.trg_dict))] -``` - -在`process`函数中,每个`yield`函数将返回三个整数列表。每个整数列表被视为一个整数序列: - -``` sourceCode -yield src_ids, trg_ids, trg_ids_next -``` - -有关如何编写数据提供程序的更多细节描述,请参考 [PyDataProvider2](../../ui/data_provider/index.html)。完整的数据提供文件在 `demo/seqToseq/dataprovider.py`。 - -配置循环神经网络架构 ------------------------------------------------ - -### 简单门控循环神经网络(Gated Recurrent Neural Network) - -循环神经网络在每个时间步骤顺序地处理序列。下面列出了 LSTM 的架构的示例。 - -![image](../../../tutorials/sentiment_analysis/bi_lstm.jpg) - -一般来说,循环网络从 *t* = 1 到 *t* = *T* 或者反向地从 *t* = *T* 到 *t* = 1 执行以下操作。 - -*x**t* + 1 = *f**x*(*x**t*),*y**t* = *f**y*(*x**t*) - -其中 *f**x*(.) 称为**单步函数**(即单时间步执行的函数,step function),而 *f**y*(.) 称为**输出函数**。在 vanilla 循环神经网络中,单步函数和输出函数都非常简单。然而,PaddlePaddle 可以通过修改这两个函数来实现复杂的网络配置。我们将使用 sequence to sequence 模型演示如何配置复杂的循环神经网络模型。在本节中,我们将使用简单的 vanilla 循环神经网络作为使用`recurrent_group`配置简单循环神经网络的例子。 注意,如果你只需要使用简单的RNN,GRU或LSTM,那么推荐使用`grumemory`和`lstmemory`,因为它们的计算效率比`recurrent_group`更高。 - -对于 vanilla RNN,在每个时间步长,**单步函数**为: - -*x**t* + 1 = *W**x**x**t* + *W**i**I**t* + *b* - -其中 *x**t* 是RNN状态,并且 *I**t* 是输入,*W**x* 和 *W**i* 分别是RNN状态和输入的变换矩阵。*b* 是偏差。它的**输出函数**只需要*x**t*作为输出。 - -`recurrent_group`是构建循环神经网络的最重要的工具。 它定义了**单步函数**,**输出函数**和循环神经网络的输入。注意,这个函数的`step`参数需要实现`step function`(单步函数)和`output function`(输出函数): - - -``` sourceCode -def simple_rnn(input, - size=None, - name=None, - reverse=False, - rnn_bias_attr=None, - act=None, - rnn_layer_attr=None): - def __rnn_step__(ipt): - out_mem = memory(name=name, size=size) - rnn_out = mixed_layer(input = [full_matrix_projection(ipt), - full_matrix_projection(out_mem)], - name = name, - bias_attr = rnn_bias_attr, - act = act, - layer_attr = rnn_layer_attr, - size = size) - return rnn_out - return recurrent_group(name='%s_recurrent_group' % name, - step=__rnn_step__, - reverse=reverse, - input=input) -``` - -PaddlePaddle 使用“Memory”(记忆模块)实现单步函数。**Memory**是在PaddlePaddle中构造循环神经网络时最重要的概念。 Memory是在单步函数中循环使用的状态,例如*x**t* + 1 = *f**x*(*x**t*)。 一个Memory包含**输出**和**输入**。当前时间步处的Memory的输出作为下一时间步Memory的输入。Memory也可以具有**boot layer(引导层)**,其输出被用作Memory的初始值。 在我们的例子中,门控循环单元的输出被用作输出Memory。请注意,`rnn_out`层的名称与`out_mem`的名称相同。这意味着`rnn_out` (*x**t* + 1)的输出被用作`out_mem`Memory的**输出**。 - -Memory也可以是序列。在这种情况下,在每个时间步中,我们有一个序列作为循环神经网络的状态。这在构造非常复杂的循环神经网络时是有用的。 其他高级功能包括定义多个Memory,以及使用子序列来定义分级循环神经网络架构。 - -我们在函数的结尾返回`rnn_out`。 这意味着 `rnn_out` 层的输出被用作门控循环神经网络的**输出**函数。 - -### Sequence to Sequence Model with Attention - -我们将使用 sequence to sequence model with attention 作为例子演示如何配置复杂的循环神经网络模型。该模型的说明如下图所示。 - 
-![image](../../../tutorials/text_generation/encoder-decoder-attention-model.png) - -在这个模型中,源序列 *S* = {*s*1, …, *s**T*} 用双向门控循环神经网络编码。双向门控循环神经网络的隐藏状态 *H**S* = {*H*1, …, *H**T*} 被称为 *编码向量*。解码器是门控循环神经网络。当解读每一个*y**t*时, 这个门控循环神经网络生成一系列权重 *W**S**t* = {*W*1*t*, …, *W**T**t*}, 用于计算编码向量的加权和。加权和用来生成*y**t*。 - -模型的编码器部分如下所示。它叫做`grumemory`来表示门控循环神经网络。如果网络架构简单,那么推荐使用循环神经网络的方法,因为它比 `recurrent_group` 更快。我们已经实现了大多数常用的循环神经网络架构,可以参考 [Layers](../../ui/api/trainer_config_helpers/layers_index.html) 了解更多细节。 - -我们还将编码向量投射到 `decoder_size` 维空间。这通过获得反向循环网络的第一个实例,并将其投射到 `decoder_size` 维空间完成: - -``` sourceCode -# 定义源语句的数据层 -src_word_id = data_layer(name='source_language_word', size=source_dict_dim) -# 计算每个词的词向量 -src_embedding = embedding_layer( - input=src_word_id, - size=word_vector_dim, - param_attr=ParamAttr(name='_source_language_embedding')) -# 应用前向循环神经网络 -src_forward = grumemory(input=src_embedding, size=encoder_size) -# 应用反向递归神经网络(reverse=True表示反向循环神经网络) -src_backward = grumemory(input=src_embedding, - size=encoder_size, - reverse=True) -# 将循环神经网络的前向和反向部分混合在一起 -encoded_vector = concat_layer(input=[src_forward, src_backward]) - -# 投射编码向量到 decoder_size -encoder_proj = mixed_layer(input = [full_matrix_projection(encoded_vector)], - size = decoder_size) - -# 计算反向RNN的第一个实例 -backward_first = first_seq(input=src_backward) - -# 投射反向RNN的第一个实例到 decoder size -decoder_boot = mixed_layer(input=[full_matrix_projection(backward_first)], size=decoder_size, act=TanhActivation()) -``` - -解码器使用 `recurrent_group` 来定义循环神经网络。单步函数和输出函数在 `gru_decoder_with_attention` 中定义: - -``` sourceCode -group_inputs=[StaticInput(input=encoded_vector,is_seq=True), - StaticInput(input=encoded_proj,is_seq=True)] -trg_embedding = embedding_layer( - input=data_layer(name='target_language_word', - size=target_dict_dim), - size=word_vector_dim, - param_attr=ParamAttr(name='_target_language_embedding')) -group_inputs.append(trg_embedding) - -# 对于配备有注意力机制的解码器,在训练中, -# 目标向量(groudtruth)是数据输入, -# 而源序列的编码向量可以被无边界的memory访问 -# StaticInput 意味着不同时间步的输入都是相同的值, -# 否则它以一个序列输入,不同时间步的输入是不同的。 -# 所有输入序列应该有相同的长度。 -decoder = recurrent_group(name=decoder_group_name, - step=gru_decoder_with_attention, - input=group_inputs) -``` - -单步函数的实现如下所示。首先,它定义解码网络的**Memory**。然后定义 attention,门控循环单元单步函数和输出函数: - -``` sourceCode -def gru_decoder_with_attention(enc_vec, enc_proj, current_word): - # 定义解码器的Memory - # Memory的输出定义在 gru_step 内 - # 注意 gru_step 应该与它的Memory名字相同 - decoder_mem = memory(name='gru_decoder', - size=decoder_size, - boot_layer=decoder_boot) - # 计算 attention 加权编码向量 - context = simple_attention(encoded_sequence=enc_vec, - encoded_proj=enc_proj, - decoder_state=decoder_mem) - # 混合当前词向量和attention加权编码向量 - decoder_inputs = mixed_layer(inputs = [full_matrix_projection(context), - full_matrix_projection(current_word)], - size = decoder_size * 3) - # 定义门控循环单元循环神经网络单步函数 - gru_step = gru_step_layer(name='gru_decoder', - input=decoder_inputs, - output_mem=decoder_mem, - size=decoder_size) - # 定义输出函数 - out = mixed_layer(input=[full_matrix_projection(input=gru_step)], - size=target_dict_dim, - bias_attr=True, - act=SoftmaxActivation()) - return out -``` - -生成序列 ------------------ - -训练模型后,我们可以使用它来生成序列。通常的做法是使用**beam search** 生成序列。以下代码片段定义 beam search 算法。注意,`beam_search` 函数假设 `step` 的输出函数返回的是下一个时刻输出词的 softmax 归一化概率向量。我们对模型进行了以下更改。 - -- 使用 `GeneratedInput` 来表示 trg\_embedding。 `GeneratedInput` 将上一时间步所生成的词的向量来作为当前时间步的输入。 -- 使用 `beam_search` 函数。这个函数需要设置: - - `bos_id`: 开始标记。每个句子都以开始标记开头。 - - `eos_id`: 结束标记。每个句子都以结束标记结尾。 - - `beam_size`: beam search 算法中的beam大小。 - - `max_length`: 生成序列的最大长度。 -- 
使用 `seqtext_printer_evaluator` 根据索引矩阵和字典打印文本。这个函数需要设置: - - `id_input`: 数据的整数ID,用于标识生成的文件中的相应输出。 - - `dict_file`: 用于将词ID转换为词的字典文件。 - - `result_file`: 生成结果文件的路径。 - -代码如下: - -``` sourceCode -group_inputs=[StaticInput(input=encoded_vector,is_seq=True), - StaticInput(input=encoded_proj,is_seq=True)] -# 在生成时,解码器基于编码源序列和最后生成的目标词预测下一目标词。 -# 编码源序列(编码器输出)必须由只读Memory的 StaticInput 指定。 -# 这里, GeneratedInputs 自动获取上一个生成的词,并在最开始初始化为起始词,如 。 -trg_embedding = GeneratedInput( - size=target_dict_dim, - embedding_name='_target_language_embedding', - embedding_size=word_vector_dim) -group_inputs.append(trg_embedding) -beam_gen = beam_search(name=decoder_group_name, - step=gru_decoder_with_attention, - input=group_inputs, - bos_id=0, # Beginnning token. - eos_id=1, # End of sentence token. - beam_size=beam_size, - max_length=max_length) - -seqtext_printer_evaluator(input=beam_gen, - id_input=data_layer(name="sent_id", size=1), - dict_file=trg_dict_path, - result_file=gen_trans_file) -outputs(beam_gen) -``` - -注意,这种生成技术只用于类似解码器的生成过程。如果你正在处理序列标记任务,请参阅 [Semantic Role Labeling Demo](../../demo/semantic_role_labeling/index.html) 了解更多详细信息。 - -完整的配置文件在`demo/seqToseq/seqToseq_net.py`。 diff --git a/doc/howto/deep_model/rnn_config_cn.rst b/doc/howto/deep_model/rnn/rnn_config_cn.rst similarity index 88% rename from doc/howto/deep_model/rnn_config_cn.rst rename to doc/howto/deep_model/rnn/rnn_config_cn.rst index e6d8c1133a..8d65b3512d 100644 --- a/doc/howto/deep_model/rnn_config_cn.rst +++ b/doc/howto/deep_model/rnn/rnn_config_cn.rst @@ -1,4 +1,4 @@ -RNN 配置 +RNN配置 ======== 本教程将指导你如何在 PaddlePaddle @@ -20,7 +20,7 @@ PaddlePaddle 不需要对序列数据进行任何预处理,例如填充。唯一需要做的是将相应类型设置为输入。例如,以下代码段定义了三个输入。 它们都是序列,它们的大小是\ ``src_dict``\ ,\ ``trg_dict``\ 和\ ``trg_dict``\ : -.. code:: sourcecode +.. code:: python settings.input_types = [ integer_value_sequence(len(settings.src_dict)), @@ -29,7 +29,7 @@ PaddlePaddle 在\ ``process``\ 函数中,每个\ ``yield``\ 函数将返回三个整数列表。每个整数列表被视为一个整数序列: -.. code:: sourcecode +.. code:: python yield src_ids, trg_ids, trg_ids_next @@ -45,18 +45,17 @@ PaddlePaddle 循环神经网络在每个时间步骤顺序地处理序列。下面列出了 LSTM 的架构的示例。 -.. figure:: ../../../tutorials/sentiment_analysis/bi_lstm.jpg - :alt: image +.. image:: ../../../tutorials/sentiment_analysis/bi_lstm.jpg + :align: center - image +一般来说,循环网络从 :math:`t=1` 到 :math:`t=T` 或者反向地从 :math:`t=T` 到 :math:`t=1` 执行以下操作。 -一般来说,循环网络从 *t* = 1 到 *t* = *T* 或者反向地从 *t* = *T* 到 *t* -= 1 执行以下操作。 +.. math:: -*x*\ \ *t* + 1 = *f*\ \ *x*\ (*x*\ \ *t*\ ),\ *y*\ \ *t*\  = *f*\ \ *y*\ (*x*\ \ *t*\ ) + x_{t+1} = f_x(x_t), y_t = f_y(x_t) -其中 *f*\ \ *x*\ (.) 称为\ **单步函数**\ (即单时间步执行的函数,step -function),而 *f*\ \ *y*\ (.) 称为\ **输出函数**\ 。在 vanilla +其中 :math:`f_x(.)` 称为\ **单步函数**\ (即单时间步执行的函数,step +function),而 :math:`f_y(.)` 称为\ **输出函数**\ 。在 vanilla 循环神经网络中,单步函数和输出函数都非常简单。然而,PaddlePaddle 可以通过修改这两个函数来实现复杂的网络配置。我们将使用 sequence to sequence @@ -67,16 +66,17 @@ vanilla 对于 vanilla RNN,在每个时间步长,\ **单步函数**\ 为: -*x*\ \ *t* + 1 = *W*\ \ *x*\ \ *x*\ \ *t*\  + *W*\ \ *i*\ \ *I*\ \ *t*\  + *b* +.. math:: -其中 *x*\ \ *t*\ 是RNN状态,并且 *I*\ \ *t*\ 是输入,\ *W*\ \ *x*\ 和 -*W*\ \ *i*\ 分别是RNN状态和输入的变换矩阵。\ *b* -是偏差。它的\ **输出函数**\ 只需要\ *x*\ \ *t*\ 作为输出。 + x_{t+1} = W_x x_t + W_i I_t + b + +其中 :math:`x_t` 是RNN状态,并且 :math:`I_t` 是输入,:math:`W_x` 和 +:math:`W_i` 分别是RNN状态和输入的变换矩阵。:math:`b` 是偏差。它的\ **输出函数**\ 只需要 :math:`x_t` 作为输出。 ``recurrent_group``\ 是构建循环神经网络的最重要的工具。 它定义了\ **单步函数**\ ,\ **输出函数**\ 和循环神经网络的输入。注意,这个函数的\ ``step``\ 参数需要实现\ ``step function``\ (单步函数)和\ ``output function``\ (输出函数): -.. code:: sourcecode +.. 
code:: python def simple_rnn(input, size=None, @@ -102,7 +102,7 @@ vanilla PaddlePaddle 使用“Memory”(记忆模块)实现单步函数。\ **Memory**\ 是在PaddlePaddle中构造循环神经网络时最重要的概念。 -Memory是在单步函数中循环使用的状态,例如\ *x*\ \ *t* + 1 = *f*\ \ *x*\ (*x*\ \ *t*\ )。 +Memory是在单步函数中循环使用的状态,例如 :math:`x_{t+1} = f_x(x_t)` 。 一个Memory包含\ **输出**\ 和\ **输入**\ 。当前时间步处的Memory的输出作为下一时间步Memory的输入。Memory也可以具有\ **boot layer(引导层)**\ ,其输出被用作Memory的初始值。 在我们的例子中,门控循环单元的输出被用作输出Memory。请注意,\ ``rnn_out``\ 层的名称与\ ``out_mem``\ 的名称相同。这意味着\ ``rnn_out`` @@ -120,18 +120,15 @@ Sequence to Sequence Model with Attention 我们将使用 sequence to sequence model with attention 作为例子演示如何配置复杂的循环神经网络模型。该模型的说明如下图所示。 -.. figure:: ../../../tutorials/text_generation/encoder-decoder-attention-model.png - :alt: image - - image +.. image:: ../../../tutorials/text_generation/encoder-decoder-attention-model.png + :align: center -在这个模型中,源序列 *S* = {*s*\ 1, …, \ *s*\ \ *T*\ } +在这个模型中,源序列 :math:`S = \{s_1, \dots, s_T\}` 用双向门控循环神经网络编码。双向门控循环神经网络的隐藏状态 -*H*\ \ *S*\  = {*H*\ 1, …, \ *H*\ \ *T*\ } 被称为 -*编码向量*\ 。解码器是门控循环神经网络。当解读每一个\ *y*\ \ *t*\ 时, -这个门控循环神经网络生成一系列权重 -*W*\ \ *S*\ \ *t*\  = {*W*\ 1\ *t*\ , …, \ *W*\ \ *T*\ \ *t*\ }, -用于计算编码向量的加权和。加权和用来生成\ *y*\ \ *t*\ 。 +:math:`H_S = \{H_1, \dots, H_T\}` 被称为 +*编码向量*\ 。解码器是门控循环神经网络。当解读每一个 :math:`y_t` 时, +这个门控循环神经网络生成一系列权重 :math:`W_S^t = \{W_1^t, \dots, W_T^t\}` , +用于计算编码向量的加权和。加权和用来生成 :math:`y_t` 。 模型的编码器部分如下所示。它叫做\ ``grumemory``\ 来表示门控循环神经网络。如果网络架构简单,那么推荐使用循环神经网络的方法,因为它比 ``recurrent_group`` @@ -143,7 +140,7 @@ Sequence to Sequence Model with Attention 维空间。这通过获得反向循环网络的第一个实例,并将其投射到 ``decoder_size`` 维空间完成: -.. code:: sourcecode +.. code:: python # 定义源语句的数据层 src_word_id = data_layer(name='source_language_word', size=source_dict_dim) @@ -174,7 +171,7 @@ Sequence to Sequence Model with Attention 解码器使用 ``recurrent_group`` 来定义循环神经网络。单步函数和输出函数在 ``gru_decoder_with_attention`` 中定义: -.. code:: sourcecode +.. code:: python group_inputs=[StaticInput(input=encoded_vector,is_seq=True), StaticInput(input=encoded_proj,is_seq=True)] @@ -198,7 +195,7 @@ Sequence to Sequence Model with Attention 单步函数的实现如下所示。首先,它定义解码网络的\ **Memory**\ 。然后定义 attention,门控循环单元单步函数和输出函数: -.. code:: sourcecode +.. code:: python def gru_decoder_with_attention(enc_vec, enc_proj, current_word): # 定义解码器的Memory @@ -253,7 +250,7 @@ attention,门控循环单元单步函数和输出函数: 代码如下: -.. code:: sourcecode +.. code:: python group_inputs=[StaticInput(input=encoded_vector,is_seq=True), StaticInput(input=encoded_proj,is_seq=True)] diff --git a/doc/howto/index_cn.rst b/doc/howto/index_cn.rst index 6a14ce8ae7..bd3d0ec292 100644 --- a/doc/howto/index_cn.rst +++ b/doc/howto/index_cn.rst @@ -7,10 +7,11 @@ .. toctree:: :maxdepth: 1 + usage/cmd_parameter/index_cn.rst usage/concepts/use_concepts_cn.rst usage/cluster/cluster_train_cn.md - usage/cluster/k8s/k8s_cn.md - usage/cluster/k8s/k8s_distributed_cn.md + usage/k8s/k8s_cn.md + usage/k8s/k8s_distributed_cn.md 开发标准 -------- diff --git a/doc/howto/index_en.rst b/doc/howto/index_en.rst index 983dc743eb..1fbfcd260b 100644 --- a/doc/howto/index_en.rst +++ b/doc/howto/index_en.rst @@ -7,8 +7,10 @@ Usage .. toctree:: :maxdepth: 1 - usage/cmd_parameter/index_en.md + usage/cmd_parameter/index_en.rst usage/cluster/cluster_train_en.md + usage/k8s/k8s_en.md + usage/k8s/k8s_aws_en.md Development ------------ diff --git a/doc/howto/usage/cmd_parameter/index_cn.rst b/doc/howto/usage/cmd_parameter/index_cn.rst new file mode 100644 index 0000000000..4c87298211 --- /dev/null +++ b/doc/howto/usage/cmd_parameter/index_cn.rst @@ -0,0 +1,11 @@ +.. _cmd_line_index: + +设置命令行参数 +=============== + +.. 
toctree:: + :maxdepth: 1 + + use_case_cn.md + arguments_cn.md + detail_introduction_cn.md diff --git a/doc/howto/usage/cmd_parameter/index_en.md b/doc/howto/usage/cmd_parameter/index_en.md deleted file mode 100644 index 2a96e7e976..0000000000 --- a/doc/howto/usage/cmd_parameter/index_en.md +++ /dev/null @@ -1,8 +0,0 @@ -```eval_rst -.. _cmd_line_index: -``` -# Set Command-line Parameters - -* [Use Case](use_case_en.md) -* [Arguments](arguments_en.md) -* [Detailed Descriptions](detail_introduction_en.md) diff --git a/doc/howto/usage/cmd_parameter/index_en.rst b/doc/howto/usage/cmd_parameter/index_en.rst new file mode 100644 index 0000000000..0e3c72d27a --- /dev/null +++ b/doc/howto/usage/cmd_parameter/index_en.rst @@ -0,0 +1,11 @@ +.. _cmd_line_index: + +Set Command-line Parameters +=========================== + +.. toctree:: + :maxdepth: 1 + + use_case_en.md + arguments_en.md + detail_introduction_en.md diff --git a/doc/howto/usage/cluster/k8s-aws/README.md b/doc/howto/usage/k8s/k8s_aws_en.md similarity index 99% rename from doc/howto/usage/cluster/k8s-aws/README.md rename to doc/howto/usage/k8s/k8s_aws_en.md index 5931584288..201bcae48d 100644 --- a/doc/howto/usage/cluster/k8s-aws/README.md +++ b/doc/howto/usage/k8s/k8s_aws_en.md @@ -1,4 +1,4 @@ -# PaddlePaddle on AWS with Kubernetes +# Kubernetes on AWS ## Create AWS Account and IAM Account @@ -331,15 +331,15 @@ For sharing the training data across all the Kubernetes nodes, we use EFS (Elast 1. Make sure you added AmazonElasticFileSystemFullAccess policy in your group. 1. Create the Elastic File System in AWS console, and attach the new VPC with it. - + 1. Modify the Kubernetes security group under ec2/Security Groups, add additional inbound policy "All TCP TCP 0 - 65535 0.0.0.0/0" for Kubernetes default VPC security group. - + 1. Follow the EC2 mount instruction to mount the disk onto all the Kubernetes nodes, we recommend to mount EFS disk onto ~/efs. - + Before starting the training, you should place your user config and divided training data onto EFS. When the training start, each task will copy related files from EFS into container, and it will also write the training results back onto EFS, we will show you how to place the data later in this article. 
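The "EC2 mount instruction" referenced above reduces to a single NFS mount per node. A minimal sketch, assuming a placeholder file system ID `fs-12345678` in `us-west-2` (neither value appears in this patch; the real DNS name comes from the EFS console):

```bash
# Mount the shared EFS volume at ~/efs on every Kubernetes node.
# fs-12345678 and us-west-2 are assumed placeholders for your own values.
sudo apt-get install -y nfs-common   # NFSv4.1 client (Debian/Ubuntu package name)
mkdir -p ~/efs
sudo mount -t nfs4 -o nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2 \
  fs-12345678.efs.us-west-2.amazonaws.com:/ ~/efs
```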
diff --git a/doc/howto/usage/cluster/k8s/k8s_cn.md b/doc/howto/usage/k8s/k8s_cn.md similarity index 99% rename from doc/howto/usage/cluster/k8s/k8s_cn.md rename to doc/howto/usage/k8s/k8s_cn.md index 2575701053..ab07cb9cd5 100644 --- a/doc/howto/usage/cluster/k8s/k8s_cn.md +++ b/doc/howto/usage/k8s/k8s_cn.md @@ -1,4 +1,4 @@ -# Kubernetes 单机训练 +# Kubernetes单机训练 在这篇文档里,我们介绍如何在 Kubernetes 集群上启动一个单机使用CPU的Paddle训练作业。在下一篇中,我们将介绍如何启动分布式训练作业。 diff --git a/doc/howto/usage/cluster/k8s/k8s_distributed_cn.md b/doc/howto/usage/k8s/k8s_distributed_cn.md similarity index 99% rename from doc/howto/usage/cluster/k8s/k8s_distributed_cn.md rename to doc/howto/usage/k8s/k8s_distributed_cn.md index 53d0b4676c..b63b8437a0 100644 --- a/doc/howto/usage/cluster/k8s/k8s_distributed_cn.md +++ b/doc/howto/usage/k8s/k8s_distributed_cn.md @@ -1,4 +1,4 @@ -# Kubernetes 分布式训练 +# Kubernetes分布式训练 前一篇文章介绍了如何在Kubernetes集群上启动一个单机PaddlePaddle训练作业 (Job)。在这篇文章里,我们介绍如何在Kubernetes集群上进行分布式PaddlePaddle训练作业。关于PaddlePaddle的分布式训练,文章 [Cluster Training](https://github.com/baidu/Paddle/blob/develop/doc/cluster/opensource/cluster_train.md)介绍了一种通过SSH远程分发任务,进行分布式训练的方法,与此不同的是,本文将介绍在Kubernetes容器管理平台上快速构建PaddlePaddle容器集群,进行分布式训练的方案。 @@ -22,7 +22,7 @@ 首先,我们需要拥有一个Kubernetes集群,在这个集群中所有node与pod都可以互相通信。关于Kubernetes集群搭建,可以参考[官方文档](http://kubernetes.io/docs/getting-started-guides/kubeadm/),在以后的文章中我们也会介绍AWS上搭建的方案。本文假设大家能找到几台物理机,并且可以按照官方文档在上面部署Kubernetes。在本文的环境中,Kubernetes集群中所有node都挂载了一个[MFS](http://moosefs.org/)(Moose filesystem,一种分布式文件系统)共享目录,我们通过这个目录来存放训练文件与最终输出的模型。关于MFS的安装部署,可以参考[MooseFS documentation](https://moosefs.com/documentation.html)。在训练之前,用户将配置与训练数据切分好放在MFS目录中,训练时,程序从此目录拷贝文件到容器内进行训练,将结果保存到此目录里。整体的结构图如下: -![paddle on kubernetes结构图](k8s-paddle-arch.png) +![paddle on kubernetes结构图](src/k8s-paddle-arch.png) 上图描述了一个3节点的分布式训练场景,Kubernetes集群的每个node上都挂载了一个MFS目录,这个目录可以通过volume的形式挂载到容器中。Kubernetes为这次训练创建了3个pod并且调度到了3个node上运行,每个pod包含一个PaddlePaddle容器。在容器创建后,会启动pserver与trainer进程,读取volume中的数据进行这次分布式训练。 diff --git a/doc/howto/usage/cluster/k8s/k8s_en.md b/doc/howto/usage/k8s/k8s_en.md similarity index 100% rename from doc/howto/usage/cluster/k8s/k8s_en.md rename to doc/howto/usage/k8s/k8s_en.md diff --git a/doc/howto/usage/cluster/k8s/Dockerfile b/doc/howto/usage/k8s/src/Dockerfile similarity index 100% rename from doc/howto/usage/cluster/k8s/Dockerfile rename to doc/howto/usage/k8s/src/Dockerfile diff --git a/doc/howto/usage/cluster/k8s-aws/add_security_group.png b/doc/howto/usage/k8s/src/add_security_group.png similarity index 100% rename from doc/howto/usage/cluster/k8s-aws/add_security_group.png rename to doc/howto/usage/k8s/src/add_security_group.png diff --git a/doc/howto/usage/cluster/k8s-aws/create_efs.png b/doc/howto/usage/k8s/src/create_efs.png similarity index 100% rename from doc/howto/usage/cluster/k8s-aws/create_efs.png rename to doc/howto/usage/k8s/src/create_efs.png diff --git a/doc/howto/usage/cluster/k8s-aws/efs_mount.png b/doc/howto/usage/k8s/src/efs_mount.png similarity index 100% rename from doc/howto/usage/cluster/k8s-aws/efs_mount.png rename to doc/howto/usage/k8s/src/efs_mount.png diff --git a/doc/howto/usage/cluster/k8s/job.yaml b/doc/howto/usage/k8s/src/job.yaml similarity index 100% rename from doc/howto/usage/cluster/k8s/job.yaml rename to doc/howto/usage/k8s/src/job.yaml diff --git a/doc/howto/usage/cluster/k8s/k8s-paddle-arch.png b/doc/howto/usage/k8s/src/k8s-paddle-arch.png similarity index 100% rename from doc/howto/usage/cluster/k8s/k8s-paddle-arch.png rename to doc/howto/usage/k8s/src/k8s-paddle-arch.png diff --git 
a/doc/howto/usage/cluster/k8s-aws/managed_policy.png b/doc/howto/usage/k8s/src/managed_policy.png similarity index 100% rename from doc/howto/usage/cluster/k8s-aws/managed_policy.png rename to doc/howto/usage/k8s/src/managed_policy.png diff --git a/doc/howto/usage/cluster/k8s/start.sh b/doc/howto/usage/k8s/src/start.sh similarity index 100% rename from doc/howto/usage/cluster/k8s/start.sh rename to doc/howto/usage/k8s/src/start.sh diff --git a/doc/howto/usage/cluster/k8s/start_paddle.py b/doc/howto/usage/k8s/src/start_paddle.py similarity index 100% rename from doc/howto/usage/cluster/k8s/start_paddle.py rename to doc/howto/usage/k8s/src/start_paddle.py diff --git a/doc/tutorials/index_cn.md b/doc/tutorials/index_cn.md index 97014d5376..6a27004d58 100644 --- a/doc/tutorials/index_cn.md +++ b/doc/tutorials/index_cn.md @@ -2,6 +2,7 @@ * [快速入门](quick_start/index_cn.rst) * [个性化推荐](rec/ml_regression_cn.rst) +* [图像分类](image_classification/index_cn.md) * [情感分析](sentiment_analysis/index_cn.md) * [语义角色标注](semantic_role_labeling/index_cn.md) * [机器翻译](text_generation/index_cn.md) @@ -9,3 +10,4 @@ ## 常用模型 * [ResNet模型](imagenet_model/resnet_model_cn.md) +* [词向量模型](embedding_model/index_cn.md) diff --git a/doc/tutorials/index_en.md b/doc/tutorials/index_en.md index cce9d3a176..77331a703b 100644 --- a/doc/tutorials/index_en.md +++ b/doc/tutorials/index_en.md @@ -7,6 +7,7 @@ There are several examples and demos here. * [Sentiment Analysis](sentiment_analysis/index_en.md) * [Semantic Role Labeling](semantic_role_labeling/index_en.md) * [Text Generation](text_generation/index_en.md) +* [Image Auto-Generation](gan/index_en.md) ## Model Zoo * [ImageNet: ResNet](imagenet_model/resnet_model_en.md) From 936b0ed11863d92dc323b4c0430d6c1dc5e2bfd4 Mon Sep 17 00:00:00 2001 From: xutianbing Date: Tue, 3 Jan 2017 12:33:08 -0800 Subject: [PATCH 17/51] add paddle_test_util static lib to simplify unit test. 
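With the static library in place, exercising one of the relinked tests is a short loop. A minimal sketch, assuming an out-of-source build; `WITH_TESTING` and the `test_LayerGrad` target come from the CMake files changed below, while the rest of the flow is assumed:

```bash
# Configure with testing enabled, then build and run a single relinked test.
mkdir -p build && cd build
cmake .. -DWITH_TESTING=ON
make -j"$(nproc)" test_LayerGrad
ctest -R test_LayerGrad --output-on-failure   # add_test() registers it with CTest
```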
--- cmake/util.cmake | 1 + paddle/function/CMakeLists.txt | 4 +- paddle/function/ContextProjectionOpTest.cpp | 2 +- paddle/gserver/tests/CMakeLists.txt | 37 +-- paddle/gserver/tests/LayerGradUtil.h | 2 +- paddle/gserver/tests/TestUtil.cpp | 219 ------------------ paddle/gserver/tests/TestUtil.h | 78 ------- paddle/gserver/tests/test_ActivationGrad.cpp | 2 +- paddle/gserver/tests/test_BatchNorm.cpp | 2 +- paddle/gserver/tests/test_ConvTrans.cpp | 2 +- paddle/gserver/tests/test_ConvUnify.cpp | 2 +- paddle/gserver/tests/test_Evaluator.cpp | 2 +- paddle/gserver/tests/test_LayerGrad.cpp | 2 +- paddle/gserver/tests/test_NetworkCompare.cpp | 2 +- paddle/gserver/tests/test_PriorBox.cpp | 2 +- .../gserver/tests/test_ProtoDataProvider.cpp | 2 +- paddle/gserver/tests/test_PyDataProvider.cpp | 2 +- paddle/gserver/tests/test_RecurrentLayer.cpp | 2 +- paddle/gserver/tests/test_WarpCTCLayer.cpp | 2 +- paddle/math/tests/CMakeLists.txt | 3 +- paddle/math/tests/test_GpuProfiler.cpp | 2 +- paddle/math/tests/test_matrixCompare.cpp | 2 +- paddle/testing/CMakeLists.txt | 2 + 23 files changed, 33 insertions(+), 343 deletions(-) delete mode 100644 paddle/gserver/tests/TestUtil.cpp delete mode 100644 paddle/gserver/tests/TestUtil.h diff --git a/cmake/util.cmake b/cmake/util.cmake index 43a56378df..38299a87e9 100644 --- a/cmake/util.cmake +++ b/cmake/util.cmake @@ -156,6 +156,7 @@ function(link_paddle_test TARGET_NAME) link_paddle_exe(${TARGET_NAME}) target_link_libraries(${TARGET_NAME} paddle_test_main + paddle_test_util ${GTEST_LIBRARIES}) endfunction() diff --git a/paddle/function/CMakeLists.txt b/paddle/function/CMakeLists.txt index 0b3126155d..42a9bd470c 100644 --- a/paddle/function/CMakeLists.txt +++ b/paddle/function/CMakeLists.txt @@ -17,9 +17,7 @@ if(WITH_TESTING) # file(GLOB test_files . *OpTest.cpp) # add_executable(${test_bin} EXCLUDE_FROM_ALL ${test_files}) add_simple_unittest(CrossMapNormalOpTest) - add_unittest(ContextProjectionOpTest - ContextProjectionOpTest.cpp - ../gserver/tests/TestUtil.cpp) + add_simple_unittest(ContextProjectionOpTest) endif() endif() diff --git a/paddle/function/ContextProjectionOpTest.cpp b/paddle/function/ContextProjectionOpTest.cpp index 359428fc03..6223d2fd23 100644 --- a/paddle/function/ContextProjectionOpTest.cpp +++ b/paddle/function/ContextProjectionOpTest.cpp @@ -14,8 +14,8 @@ limitations under the License. 
*/ #include #include "FunctionTest.h" -#include "paddle/gserver/tests/TestUtil.h" #include "paddle/math/Matrix.h" +#include "paddle/testing/TestUtil.h" using namespace paddle; // NOLINT diff --git a/paddle/gserver/tests/CMakeLists.txt b/paddle/gserver/tests/CMakeLists.txt index c26a2a7f06..6775563b2b 100644 --- a/paddle/gserver/tests/CMakeLists.txt +++ b/paddle/gserver/tests/CMakeLists.txt @@ -2,8 +2,7 @@ ################### test_ProtoDataProvider ############ add_unittest_without_exec(test_ProtoDataProvider - test_ProtoDataProvider.cpp - TestUtil.cpp) + test_ProtoDataProvider.cpp) # test_ProtoDataProvider will mkdir as same name, # so if WORKING_DIRECTORY is default directory, then @@ -15,53 +14,46 @@ add_test(NAME test_ProtoDataProvider ################# test_LayerGrad ####################### add_unittest_without_exec(test_LayerGrad test_LayerGrad.cpp - LayerGradUtil.cpp - TestUtil.cpp) + LayerGradUtil.cpp) add_test(NAME test_LayerGrad COMMAND test_LayerGrad) add_unittest_without_exec(test_ActivationGrad test_ActivationGrad.cpp - LayerGradUtil.cpp - TestUtil.cpp) + LayerGradUtil.cpp) add_test(NAME test_ActivationGrad COMMAND test_ActivationGrad) ################# test_ConvTrans ####################### add_unittest_without_exec(test_ConvTrans test_ConvTrans.cpp - LayerGradUtil.cpp - TestUtil.cpp) + LayerGradUtil.cpp) add_test(NAME test_ConvTrans COMMAND test_ConvTrans) ################# test_PriorBox ####################### add_unittest_without_exec(test_PriorBox test_PriorBox.cpp - LayerGradUtil.cpp - TestUtil.cpp) + LayerGradUtil.cpp) add_test(NAME test_PriorBox COMMAND test_PriorBox) ################# test_ConvUnify ####################### add_unittest_without_exec(test_ConvUnify test_ConvUnify.cpp - LayerGradUtil.cpp - TestUtil.cpp) + LayerGradUtil.cpp) add_test(NAME test_ConvUnify COMMAND test_ConvUnify) ################# test_BatchNorm ####################### add_unittest_without_exec(test_BatchNorm test_BatchNorm.cpp - LayerGradUtil.cpp - TestUtil.cpp) + LayerGradUtil.cpp) add_test(NAME test_BatchNorm COMMAND test_BatchNorm) ################## test_Evaluator ####################### add_unittest(test_Evaluator - test_Evaluator.cpp - TestUtil.cpp) + test_Evaluator.cpp) ################ test_LinearChainCRF #################### add_simple_unittest(test_LinearChainCRF) @@ -72,8 +64,7 @@ add_simple_unittest(test_MultinomialSampler) ############## test_PyDataProvider ######################## if(WITH_PYTHON) add_unittest_without_exec(test_PyDataProvider - test_PyDataProvider.cpp - TestUtil.cpp) + test_PyDataProvider.cpp) add_test(NAME test_PyDataProvider COMMAND .set_python_path.sh -d ./gserver/tests:${PROJ_ROOT}/python/ ${CMAKE_CURRENT_BINARY_DIR}/test_PyDataProvider @@ -81,15 +72,12 @@ if(WITH_PYTHON) endif() ############### test_RecurrentLayer ####################### -add_unittest(test_RecurrentLayer - test_RecurrentLayer.cpp - TestUtil.cpp) +add_simple_unittest(test_RecurrentLayer) ############### test_WarpCTCLayer ####################### if(NOT WITH_DOUBLE) add_unittest_without_exec(test_WarpCTCLayer - test_WarpCTCLayer.cpp - TestUtil.cpp) + test_WarpCTCLayer.cpp) add_test(NAME test_WarpCTCLayer COMMAND ${CMAKE_CURRENT_BINARY_DIR}/test_WarpCTCLayer --warpctc_dir=${PROJ_ROOT}/warp-ctc/build @@ -108,8 +96,7 @@ add_test(NAME test_RecurrentGradientMachine WORKING_DIRECTORY ${PROJ_ROOT}/paddle) add_unittest_without_exec(test_NetworkCompare - test_NetworkCompare.cpp - TestUtil.cpp) + test_NetworkCompare.cpp) if(WITH_GPU) add_test(NAME test_NetworkCompare COMMAND .set_python_path.sh -d 
${PROJ_ROOT}/python ${CMAKE_CURRENT_BINARY_DIR}/test_NetworkCompare --use_gpu=true diff --git a/paddle/gserver/tests/LayerGradUtil.h b/paddle/gserver/tests/LayerGradUtil.h index 4e88ac0e81..9f68eb64d0 100644 --- a/paddle/gserver/tests/LayerGradUtil.h +++ b/paddle/gserver/tests/LayerGradUtil.h @@ -17,7 +17,7 @@ limitations under the License. */ #include "paddle/gserver/layers/DataLayer.h" #include "paddle/trainer/Trainer.h" -#include "TestUtil.h" +#include "paddle/testing/TestUtil.h" using namespace std; // NOLINT namespace paddle { diff --git a/paddle/gserver/tests/TestUtil.cpp b/paddle/gserver/tests/TestUtil.cpp deleted file mode 100644 index c691fe2625..0000000000 --- a/paddle/gserver/tests/TestUtil.cpp +++ /dev/null @@ -1,219 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#include "TestUtil.h" -#include -#include "paddle/math/SparseMatrix.h" - -DEFINE_int32(fixed_seq_length, 0, "Produce some sequence of fixed length"); - -namespace paddle { - -std::string randStr(const int len) { - std::string str = - "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"; - std::string s = ""; - for (int i = 0; i < len; ++i) s += str[(rand() % 62)]; // NOLINT - return s; -} - -MatrixPtr makeRandomSparseMatrix(size_t height, - size_t width, - bool withValue, - bool useGpu, - bool equalNnzPerSample) { - std::vector ids(height); - std::vector indices(height + 1); - indices[0] = 0; - - std::function randomer = [] { return uniformRandom(10); }; - if (equalNnzPerSample) { - size_t n = 0; - do { - n = uniformRandom(10); - } while (!n); - randomer = [=] { return n; }; - } - for (size_t i = 0; i < height; ++i) { - indices[i + 1] = indices[i] + std::min(randomer(), width); - ids[i] = i; - } - - if (!withValue) { - std::vector data; - data.resize(indices[height] - indices[0]); - for (size_t i = 0; i < data.size(); ++i) { - data[i].col = uniformRandom(width); - } - auto mat = Matrix::createSparseMatrix( - height, width, data.size(), NO_VALUE, SPARSE_CSR, false, useGpu); - if (useGpu) { - std::dynamic_pointer_cast(mat)->copyFrom( - ids.data(), indices.data(), data.data(), HPPL_STREAM_DEFAULT); - } else { - std::dynamic_pointer_cast(mat)->copyFrom( - ids.data(), indices.data(), data.data()); - } - return mat; - } else { - std::vector data; - data.resize(indices[height] - indices[0]); - for (size_t i = 0; i < data.size(); ++i) { - data[i].col = uniformRandom(width); - data[i].value = rand() / static_cast(RAND_MAX); // NOLINT - } - auto mat = Matrix::createSparseMatrix( - height, width, data.size(), FLOAT_VALUE, SPARSE_CSR, false, useGpu); - if (useGpu) { - std::dynamic_pointer_cast(mat)->copyFrom( - ids.data(), indices.data(), data.data(), HPPL_STREAM_DEFAULT); - } else { - std::dynamic_pointer_cast(mat)->copyFrom( - ids.data(), indices.data(), data.data()); - } - return mat; - } -} - -void generateSequenceStartPositions(size_t batchSize, - IVectorPtr& sequenceStartPositions) { - ICpuGpuVectorPtr gpuCpuVec; - generateSequenceStartPositions(batchSize, 
gpuCpuVec); - sequenceStartPositions = gpuCpuVec->getMutableVector(false); -} - -void generateSequenceStartPositions(size_t batchSize, - ICpuGpuVectorPtr& sequenceStartPositions) { - int numSeqs; - if (FLAGS_fixed_seq_length != 0) { - numSeqs = std::ceil((float)batchSize / (float)FLAGS_fixed_seq_length); - } else { - numSeqs = batchSize / 10 + 1; - } - sequenceStartPositions = - ICpuGpuVector::create(numSeqs + 1, /* useGpu= */ false); - int* buf = sequenceStartPositions->getMutableData(false); - int64_t pos = 0; - int len = FLAGS_fixed_seq_length; - int maxLen = 2 * batchSize / numSeqs; - for (int i = 0; i < numSeqs; ++i) { - if (FLAGS_fixed_seq_length == 0) { - len = uniformRandom( - std::min(maxLen, batchSize - pos - numSeqs + i)) + - 1; - } - buf[i] = pos; - pos += len; - VLOG(1) << " len=" << len; - } - buf[numSeqs] = batchSize; -} - -void generateSubSequenceStartPositions( - const ICpuGpuVectorPtr& sequenceStartPositions, - ICpuGpuVectorPtr& subSequenceStartPositions) { - int numSeqs = sequenceStartPositions->getSize() - 1; - const int* buf = sequenceStartPositions->getData(false); - int numOnes = 0; - for (int i = 0; i < numSeqs; ++i) { - if (buf[i + 1] - buf[i] == 1) { - ++numOnes; - } - } - // each seq has two sub-seq except length 1 - int numSubSeqs = numSeqs * 2 - numOnes; - subSequenceStartPositions = - ICpuGpuVector::create(numSubSeqs + 1, /* useGpu= */ false); - int* subBuf = subSequenceStartPositions->getMutableData(false); - int j = 0; - for (int i = 0; i < numSeqs; ++i) { - if (buf[i + 1] - buf[i] == 1) { - subBuf[j++] = buf[i]; - } else { - int len = uniformRandom(buf[i + 1] - buf[i] - 1) + 1; - subBuf[j++] = buf[i]; - subBuf[j++] = buf[i] + len; - } - } - subBuf[j] = buf[numSeqs]; -} - -void generateMDimSequenceData(const IVectorPtr& sequenceStartPositions, - IVectorPtr& cpuSequenceDims) { - /* generate sequences with 2 dims */ - int numSeqs = sequenceStartPositions->getSize() - 1; - int numDims = 2; - - cpuSequenceDims = IVector::create(numSeqs * numDims, /* useGpu= */ false); - int* bufStarts = sequenceStartPositions->getData(); - int* bufDims = cpuSequenceDims->getData(); - - for (int i = 0; i < numSeqs; i++) { - int len = bufStarts[i + 1] - bufStarts[i]; - /* get width and height randomly */ - std::vector dimVec; - for (int j = 0; j < len; j++) { - if (len % (j + 1) == 0) { - dimVec.push_back(1); - } - } - int idx = rand() % dimVec.size(); // NOLINT use rand_r - bufDims[i * numDims] = dimVec[idx]; - bufDims[i * numDims + 1] = len / dimVec[idx]; - } -} - -void generateMDimSequenceData(const ICpuGpuVectorPtr& sequenceStartPositions, - IVectorPtr& cpuSequenceDims) { - /* generate sequences with 2 dims */ - int numSeqs = sequenceStartPositions->getSize() - 1; - int numDims = 2; - - cpuSequenceDims = IVector::create(numSeqs * numDims, /* useGpu= */ false); - const int* bufStarts = sequenceStartPositions->getData(false); - int* bufDims = cpuSequenceDims->getData(); - - for (int i = 0; i < numSeqs; i++) { - int len = bufStarts[i + 1] - bufStarts[i]; - /* get width and height randomly */ - std::vector dimVec; - for (int j = 0; j < len; j++) { - if (len % (j + 1) == 0) { - dimVec.push_back(1); - } - } - int idx = rand() % dimVec.size(); // NOLINT use rand_r - bufDims[i * numDims] = dimVec[idx]; - bufDims[i * numDims + 1] = len / dimVec[idx]; - } -} - -void checkMatrixEqual(const MatrixPtr& a, const MatrixPtr& b) { - EXPECT_EQ(a->getWidth(), b->getWidth()); - EXPECT_EQ(a->getHeight(), b->getHeight()); - EXPECT_EQ(a->isTransposed(), b->isTransposed()); - for (size_t r = 0; r < 
a->getHeight(); ++r) { - for (size_t c = 0; c < a->getWidth(); ++c) { - EXPECT_FLOAT_EQ(a->getElement(r, c), b->getElement(r, c)); - } - } -} - -void checkVectorEqual(const IVectorPtr& a, const IVectorPtr& b) { - EXPECT_EQ(a->getSize(), b->getSize()); - for (size_t r = 0; r < a->getSize(); ++r) { - EXPECT_FLOAT_EQ(a->get(r), b->get(r)); - } -} -} // namespace paddle diff --git a/paddle/gserver/tests/TestUtil.h b/paddle/gserver/tests/TestUtil.h deleted file mode 100644 index ec86469aeb..0000000000 --- a/paddle/gserver/tests/TestUtil.h +++ /dev/null @@ -1,78 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once - -#include -#include "paddle/math/Matrix.h" - -namespace paddle { - -std::string randStr(const int len); - -inline int uniformRandom(int n) { return n == 0 ? 0 : rand() % n; } - -inline bool approximatelyEqual(float a, float b, float epsilon) { - return fabs(a - b) <= ((fabs(a) < fabs(b) ? fabs(b) : fabs(a)) * epsilon); -} - -MatrixPtr makeRandomSparseMatrix(size_t height, - size_t width, - bool withValue, - bool useGpu, - bool equalNnzPerSample = false); - -/** - * @brief generate sequenceStartPositions for INPUT_SEQUENCE_DATA, - * INPUT_HASSUB_SEQUENCE_DATA and INPUT_SEQUENCE_LABEL - * - * @param batchSize batchSize - * sequenceStartPositions[out] generation output - */ -void generateSequenceStartPositions(size_t batchSize, - IVectorPtr& sequenceStartPositions); - -void generateSequenceStartPositions(size_t batchSize, - ICpuGpuVectorPtr& sequenceStartPositions); - -/** - * @brief generate subSequenceStartPositions for INPUT_HASSUB_SEQUENCE_DATA - * according to sequenceStartPositions - * - * @param sequenceStartPositions[in] input - * subSequenceStartPositions[out] generation output - */ -void generateSubSequenceStartPositions(const IVectorPtr& sequenceStartPositions, - IVectorPtr& subSequenceStartPositions); - -void generateSubSequenceStartPositions( - const ICpuGpuVectorPtr& sequenceStartPositions, - ICpuGpuVectorPtr& subSequenceStartPositions); - -/** - * @brief generate cpuSequenceDims for INPUT_SEQUENCE_MDIM_DATA according to - * sequenceStartPositions - * - * @param sequenceStartPositions[in] input - * cpuSequenceDims[out] generation output - */ -void generateMDimSequenceData(const IVectorPtr& sequenceStartPositions, - IVectorPtr& cpuSequenceDims); -void generateMDimSequenceData(const ICpuGpuVectorPtr& sequenceStartPositions, - IVectorPtr& cpuSequenceDims); - -void checkMatrixEqual(const MatrixPtr& a, const MatrixPtr& b); - -void checkVectorEqual(const IVectorPtr& a, const IVectorPtr& b); -} // namespace paddle diff --git a/paddle/gserver/tests/test_ActivationGrad.cpp b/paddle/gserver/tests/test_ActivationGrad.cpp index 7d7e68da5c..b201ba8a5a 100644 --- a/paddle/gserver/tests/test_ActivationGrad.cpp +++ b/paddle/gserver/tests/test_ActivationGrad.cpp @@ -20,7 +20,7 @@ limitations under the License. 
*/ #include "paddle/trainer/Trainer.h" #include "LayerGradUtil.h" -#include "TestUtil.h" +#include "paddle/testing/TestUtil.h" using namespace paddle; // NOLINT using namespace std; // NOLINT diff --git a/paddle/gserver/tests/test_BatchNorm.cpp b/paddle/gserver/tests/test_BatchNorm.cpp index 7f5fcb670b..822db5a3c4 100644 --- a/paddle/gserver/tests/test_BatchNorm.cpp +++ b/paddle/gserver/tests/test_BatchNorm.cpp @@ -22,7 +22,7 @@ limitations under the License. */ #include "paddle/utils/GlobalConstants.h" #include "LayerGradUtil.h" -#include "TestUtil.h" +#include "paddle/testing/TestUtil.h" using namespace paddle; // NOLINT using namespace std; // NOLINT diff --git a/paddle/gserver/tests/test_ConvTrans.cpp b/paddle/gserver/tests/test_ConvTrans.cpp index dd3378304b..40bb1e2d73 100644 --- a/paddle/gserver/tests/test_ConvTrans.cpp +++ b/paddle/gserver/tests/test_ConvTrans.cpp @@ -23,7 +23,7 @@ limitations under the License. */ #include "paddle/utils/GlobalConstants.h" #include "LayerGradUtil.h" -#include "TestUtil.h" +#include "paddle/testing/TestUtil.h" using namespace paddle; // NOLINT using namespace std; // NOLINT diff --git a/paddle/gserver/tests/test_ConvUnify.cpp b/paddle/gserver/tests/test_ConvUnify.cpp index ad99b50245..207fc0566f 100644 --- a/paddle/gserver/tests/test_ConvUnify.cpp +++ b/paddle/gserver/tests/test_ConvUnify.cpp @@ -23,7 +23,7 @@ limitations under the License. */ #include "paddle/utils/GlobalConstants.h" #include "LayerGradUtil.h" -#include "TestUtil.h" +#include "paddle/testing/TestUtil.h" using namespace paddle; // NOLINT using namespace std; // NOLINT diff --git a/paddle/gserver/tests/test_Evaluator.cpp b/paddle/gserver/tests/test_Evaluator.cpp index e07066dad8..8165eb8269 100644 --- a/paddle/gserver/tests/test_Evaluator.cpp +++ b/paddle/gserver/tests/test_Evaluator.cpp @@ -15,7 +15,7 @@ limitations under the License. */ #include #include #include "ModelConfig.pb.h" -#include "TestUtil.h" +#include "paddle/testing/TestUtil.h" #include "paddle/trainer/Trainer.h" using namespace paddle; // NOLINT diff --git a/paddle/gserver/tests/test_LayerGrad.cpp b/paddle/gserver/tests/test_LayerGrad.cpp index 2cc25f6b21..66a70ecd41 100644 --- a/paddle/gserver/tests/test_LayerGrad.cpp +++ b/paddle/gserver/tests/test_LayerGrad.cpp @@ -21,7 +21,7 @@ limitations under the License. */ #include "paddle/trainer/Trainer.h" #include "LayerGradUtil.h" -#include "TestUtil.h" +#include "paddle/testing/TestUtil.h" using namespace paddle; // NOLINT using namespace std; // NOLINT diff --git a/paddle/gserver/tests/test_NetworkCompare.cpp b/paddle/gserver/tests/test_NetworkCompare.cpp index 0d26105955..4db30f37a5 100644 --- a/paddle/gserver/tests/test_NetworkCompare.cpp +++ b/paddle/gserver/tests/test_NetworkCompare.cpp @@ -18,7 +18,7 @@ limitations under the License. */ #include #include -#include "TestUtil.h" +#include "paddle/testing/TestUtil.h" #include "paddle/trainer/Trainer.h" #include "paddle/utils/Stat.h" diff --git a/paddle/gserver/tests/test_PriorBox.cpp b/paddle/gserver/tests/test_PriorBox.cpp index a6d6a24269..ae0e3bc3d2 100644 --- a/paddle/gserver/tests/test_PriorBox.cpp +++ b/paddle/gserver/tests/test_PriorBox.cpp @@ -17,7 +17,7 @@ limitations under the License. 
*/ #include #include "LayerGradUtil.h" -#include "TestUtil.h" +#include "paddle/testing/TestUtil.h" using namespace paddle; // NOLINT using namespace std; // NOLINT diff --git a/paddle/gserver/tests/test_ProtoDataProvider.cpp b/paddle/gserver/tests/test_ProtoDataProvider.cpp index 8fc0aaab69..e11bf402c2 100644 --- a/paddle/gserver/tests/test_ProtoDataProvider.cpp +++ b/paddle/gserver/tests/test_ProtoDataProvider.cpp @@ -20,7 +20,7 @@ limitations under the License. */ #include "paddle/gserver/dataproviders/ProtoDataProvider.h" #include "paddle/utils/Util.h" -#include "TestUtil.h" +#include "paddle/testing/TestUtil.h" using namespace std; // NOLINT diff --git a/paddle/gserver/tests/test_PyDataProvider.cpp b/paddle/gserver/tests/test_PyDataProvider.cpp index 0f264ecf91..db883543c3 100644 --- a/paddle/gserver/tests/test_PyDataProvider.cpp +++ b/paddle/gserver/tests/test_PyDataProvider.cpp @@ -20,7 +20,7 @@ limitations under the License. */ #include "paddle/gserver/dataproviders/PyDataProvider.h" #include "paddle/utils/Util.h" -#include "TestUtil.h" +#include "paddle/testing/TestUtil.h" using namespace std; // NOLINT using namespace paddle; // NOLINT diff --git a/paddle/gserver/tests/test_RecurrentLayer.cpp b/paddle/gserver/tests/test_RecurrentLayer.cpp index f91c788863..16ab0e6aec 100644 --- a/paddle/gserver/tests/test_RecurrentLayer.cpp +++ b/paddle/gserver/tests/test_RecurrentLayer.cpp @@ -19,7 +19,7 @@ limitations under the License. */ #include "paddle/gserver/layers/DataLayer.h" #include "paddle/gserver/layers/Layer.h" -#include "TestUtil.h" +#include "paddle/testing/TestUtil.h" using namespace paddle; // NOLINT using namespace std; // NOLINT diff --git a/paddle/gserver/tests/test_WarpCTCLayer.cpp b/paddle/gserver/tests/test_WarpCTCLayer.cpp index dab6366588..23ae95852e 100644 --- a/paddle/gserver/tests/test_WarpCTCLayer.cpp +++ b/paddle/gserver/tests/test_WarpCTCLayer.cpp @@ -20,7 +20,7 @@ limitations under the License. */ #include "paddle/gserver/layers/Layer.h" #include "paddle/gserver/layers/WarpCTCLayer.h" -#include "TestUtil.h" +#include "paddle/testing/TestUtil.h" using namespace paddle; // NOLINT using namespace std; // NOLINT diff --git a/paddle/math/tests/CMakeLists.txt b/paddle/math/tests/CMakeLists.txt index a3ea078509..06fc10bae7 100644 --- a/paddle/math/tests/CMakeLists.txt +++ b/paddle/math/tests/CMakeLists.txt @@ -7,8 +7,7 @@ add_simple_unittest(test_SparseMatrix) # TODO(yuyang18): Refactor TestUtil.cpp. Remove this cross module reference. add_unittest(test_matrixCompare - test_matrixCompare.cpp - ../../gserver/tests/TestUtil.cpp) + test_matrixCompare.cpp) add_simple_unittest(test_sparseMatrixCompare) add_simple_unittest(test_perturbation) diff --git a/paddle/math/tests/test_GpuProfiler.cpp b/paddle/math/tests/test_GpuProfiler.cpp index d490078d90..e6b5dba446 100644 --- a/paddle/math/tests/test_GpuProfiler.cpp +++ b/paddle/math/tests/test_GpuProfiler.cpp @@ -15,9 +15,9 @@ limitations under the License. */ #ifndef PADDLE_ONLY_CPU #include -#include "paddle/gserver/tests/TestUtil.h" #include "paddle/math/Matrix.h" #include "paddle/math/SparseMatrix.h" +#include "paddle/testing/TestUtil.h" #include "paddle/utils/Stat.h" #include "paddle/utils/Util.h" diff --git a/paddle/math/tests/test_matrixCompare.cpp b/paddle/math/tests/test_matrixCompare.cpp index 98d63438a5..3a780d26c0 100644 --- a/paddle/math/tests/test_matrixCompare.cpp +++ b/paddle/math/tests/test_matrixCompare.cpp @@ -18,9 +18,9 @@ limitations under the License. 
*/ #include #include "TensorCheck.h" -#include "paddle/gserver/tests/TestUtil.h" #include "paddle/math/Matrix.h" #include "paddle/math/SparseMatrix.h" +#include "paddle/testing/TestUtil.h" #include "paddle/utils/Stat.h" #include "paddle/utils/Util.h" diff --git a/paddle/testing/CMakeLists.txt b/paddle/testing/CMakeLists.txt index 584498c860..c47add04b0 100644 --- a/paddle/testing/CMakeLists.txt +++ b/paddle/testing/CMakeLists.txt @@ -3,4 +3,6 @@ if(WITH_TESTING) add_library(paddle_test_main STATIC TestMain.cpp) add_dependencies(paddle_test_main gen_proto_cpp) + add_library(paddle_test_util STATIC TestUtil.cpp) + add_dependencies(paddle_test_util gen_proto_cpp) endif() From 17953b3a4d8a8f15402968fc56f77e9ad2f71e4d Mon Sep 17 00:00:00 2001 From: xutianbing Date: Tue, 3 Jan 2017 18:12:32 -0800 Subject: [PATCH 18/51] add TestUtil.h and TestUtil.cpp, moving from gserver/tests/ to testing/ --- paddle/testing/TestUtil.cpp | 219 ++++++++++++++++++++++++++++++++++++ paddle/testing/TestUtil.h | 78 +++++++++++++ 2 files changed, 297 insertions(+) create mode 100644 paddle/testing/TestUtil.cpp create mode 100644 paddle/testing/TestUtil.h diff --git a/paddle/testing/TestUtil.cpp b/paddle/testing/TestUtil.cpp new file mode 100644 index 0000000000..c691fe2625 --- /dev/null +++ b/paddle/testing/TestUtil.cpp @@ -0,0 +1,219 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "TestUtil.h" +#include +#include "paddle/math/SparseMatrix.h" + +DEFINE_int32(fixed_seq_length, 0, "Produce some sequence of fixed length"); + +namespace paddle { + +std::string randStr(const int len) { + std::string str = + "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"; + std::string s = ""; + for (int i = 0; i < len; ++i) s += str[(rand() % 62)]; // NOLINT + return s; +} + +MatrixPtr makeRandomSparseMatrix(size_t height, + size_t width, + bool withValue, + bool useGpu, + bool equalNnzPerSample) { + std::vector ids(height); + std::vector indices(height + 1); + indices[0] = 0; + + std::function randomer = [] { return uniformRandom(10); }; + if (equalNnzPerSample) { + size_t n = 0; + do { + n = uniformRandom(10); + } while (!n); + randomer = [=] { return n; }; + } + for (size_t i = 0; i < height; ++i) { + indices[i + 1] = indices[i] + std::min(randomer(), width); + ids[i] = i; + } + + if (!withValue) { + std::vector data; + data.resize(indices[height] - indices[0]); + for (size_t i = 0; i < data.size(); ++i) { + data[i].col = uniformRandom(width); + } + auto mat = Matrix::createSparseMatrix( + height, width, data.size(), NO_VALUE, SPARSE_CSR, false, useGpu); + if (useGpu) { + std::dynamic_pointer_cast(mat)->copyFrom( + ids.data(), indices.data(), data.data(), HPPL_STREAM_DEFAULT); + } else { + std::dynamic_pointer_cast(mat)->copyFrom( + ids.data(), indices.data(), data.data()); + } + return mat; + } else { + std::vector data; + data.resize(indices[height] - indices[0]); + for (size_t i = 0; i < data.size(); ++i) { + data[i].col = uniformRandom(width); + data[i].value = rand() / static_cast(RAND_MAX); // NOLINT + } + auto mat = Matrix::createSparseMatrix( + height, width, data.size(), FLOAT_VALUE, SPARSE_CSR, false, useGpu); + if (useGpu) { + std::dynamic_pointer_cast(mat)->copyFrom( + ids.data(), indices.data(), data.data(), HPPL_STREAM_DEFAULT); + } else { + std::dynamic_pointer_cast(mat)->copyFrom( + ids.data(), indices.data(), data.data()); + } + return mat; + } +} + +void generateSequenceStartPositions(size_t batchSize, + IVectorPtr& sequenceStartPositions) { + ICpuGpuVectorPtr gpuCpuVec; + generateSequenceStartPositions(batchSize, gpuCpuVec); + sequenceStartPositions = gpuCpuVec->getMutableVector(false); +} + +void generateSequenceStartPositions(size_t batchSize, + ICpuGpuVectorPtr& sequenceStartPositions) { + int numSeqs; + if (FLAGS_fixed_seq_length != 0) { + numSeqs = std::ceil((float)batchSize / (float)FLAGS_fixed_seq_length); + } else { + numSeqs = batchSize / 10 + 1; + } + sequenceStartPositions = + ICpuGpuVector::create(numSeqs + 1, /* useGpu= */ false); + int* buf = sequenceStartPositions->getMutableData(false); + int64_t pos = 0; + int len = FLAGS_fixed_seq_length; + int maxLen = 2 * batchSize / numSeqs; + for (int i = 0; i < numSeqs; ++i) { + if (FLAGS_fixed_seq_length == 0) { + len = uniformRandom( + std::min(maxLen, batchSize - pos - numSeqs + i)) + + 1; + } + buf[i] = pos; + pos += len; + VLOG(1) << " len=" << len; + } + buf[numSeqs] = batchSize; +} + +void generateSubSequenceStartPositions( + const ICpuGpuVectorPtr& sequenceStartPositions, + ICpuGpuVectorPtr& subSequenceStartPositions) { + int numSeqs = sequenceStartPositions->getSize() - 1; + const int* buf = sequenceStartPositions->getData(false); + int numOnes = 0; + for (int i = 0; i < numSeqs; ++i) { + if (buf[i + 1] - buf[i] == 1) { + ++numOnes; + } + } + // each seq has two sub-seq except length 1 + int numSubSeqs = numSeqs * 2 - numOnes; + subSequenceStartPositions = + 
ICpuGpuVector::create(numSubSeqs + 1, /* useGpu= */ false); + int* subBuf = subSequenceStartPositions->getMutableData(false); + int j = 0; + for (int i = 0; i < numSeqs; ++i) { + if (buf[i + 1] - buf[i] == 1) { + subBuf[j++] = buf[i]; + } else { + int len = uniformRandom(buf[i + 1] - buf[i] - 1) + 1; + subBuf[j++] = buf[i]; + subBuf[j++] = buf[i] + len; + } + } + subBuf[j] = buf[numSeqs]; +} + +void generateMDimSequenceData(const IVectorPtr& sequenceStartPositions, + IVectorPtr& cpuSequenceDims) { + /* generate sequences with 2 dims */ + int numSeqs = sequenceStartPositions->getSize() - 1; + int numDims = 2; + + cpuSequenceDims = IVector::create(numSeqs * numDims, /* useGpu= */ false); + int* bufStarts = sequenceStartPositions->getData(); + int* bufDims = cpuSequenceDims->getData(); + + for (int i = 0; i < numSeqs; i++) { + int len = bufStarts[i + 1] - bufStarts[i]; + /* get width and height randomly */ + std::vector dimVec; + for (int j = 0; j < len; j++) { + if (len % (j + 1) == 0) { + dimVec.push_back(1); + } + } + int idx = rand() % dimVec.size(); // NOLINT use rand_r + bufDims[i * numDims] = dimVec[idx]; + bufDims[i * numDims + 1] = len / dimVec[idx]; + } +} + +void generateMDimSequenceData(const ICpuGpuVectorPtr& sequenceStartPositions, + IVectorPtr& cpuSequenceDims) { + /* generate sequences with 2 dims */ + int numSeqs = sequenceStartPositions->getSize() - 1; + int numDims = 2; + + cpuSequenceDims = IVector::create(numSeqs * numDims, /* useGpu= */ false); + const int* bufStarts = sequenceStartPositions->getData(false); + int* bufDims = cpuSequenceDims->getData(); + + for (int i = 0; i < numSeqs; i++) { + int len = bufStarts[i + 1] - bufStarts[i]; + /* get width and height randomly */ + std::vector dimVec; + for (int j = 0; j < len; j++) { + if (len % (j + 1) == 0) { + dimVec.push_back(1); + } + } + int idx = rand() % dimVec.size(); // NOLINT use rand_r + bufDims[i * numDims] = dimVec[idx]; + bufDims[i * numDims + 1] = len / dimVec[idx]; + } +} + +void checkMatrixEqual(const MatrixPtr& a, const MatrixPtr& b) { + EXPECT_EQ(a->getWidth(), b->getWidth()); + EXPECT_EQ(a->getHeight(), b->getHeight()); + EXPECT_EQ(a->isTransposed(), b->isTransposed()); + for (size_t r = 0; r < a->getHeight(); ++r) { + for (size_t c = 0; c < a->getWidth(); ++c) { + EXPECT_FLOAT_EQ(a->getElement(r, c), b->getElement(r, c)); + } + } +} + +void checkVectorEqual(const IVectorPtr& a, const IVectorPtr& b) { + EXPECT_EQ(a->getSize(), b->getSize()); + for (size_t r = 0; r < a->getSize(); ++r) { + EXPECT_FLOAT_EQ(a->get(r), b->get(r)); + } +} +} // namespace paddle diff --git a/paddle/testing/TestUtil.h b/paddle/testing/TestUtil.h new file mode 100644 index 0000000000..ec86469aeb --- /dev/null +++ b/paddle/testing/TestUtil.h @@ -0,0 +1,78 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include +#include "paddle/math/Matrix.h" + +namespace paddle { + +std::string randStr(const int len); + +inline int uniformRandom(int n) { return n == 0 ? 
0 : rand() % n; } + +inline bool approximatelyEqual(float a, float b, float epsilon) { + return fabs(a - b) <= ((fabs(a) < fabs(b) ? fabs(b) : fabs(a)) * epsilon); +} + +MatrixPtr makeRandomSparseMatrix(size_t height, + size_t width, + bool withValue, + bool useGpu, + bool equalNnzPerSample = false); + +/** + * @brief generate sequenceStartPositions for INPUT_SEQUENCE_DATA, + * INPUT_HASSUB_SEQUENCE_DATA and INPUT_SEQUENCE_LABEL + * + * @param batchSize batchSize + * sequenceStartPositions[out] generation output + */ +void generateSequenceStartPositions(size_t batchSize, + IVectorPtr& sequenceStartPositions); + +void generateSequenceStartPositions(size_t batchSize, + ICpuGpuVectorPtr& sequenceStartPositions); + +/** + * @brief generate subSequenceStartPositions for INPUT_HASSUB_SEQUENCE_DATA + * according to sequenceStartPositions + * + * @param sequenceStartPositions[in] input + * subSequenceStartPositions[out] generation output + */ +void generateSubSequenceStartPositions(const IVectorPtr& sequenceStartPositions, + IVectorPtr& subSequenceStartPositions); + +void generateSubSequenceStartPositions( + const ICpuGpuVectorPtr& sequenceStartPositions, + ICpuGpuVectorPtr& subSequenceStartPositions); + +/** + * @brief generate cpuSequenceDims for INPUT_SEQUENCE_MDIM_DATA according to + * sequenceStartPositions + * + * @param sequenceStartPositions[in] input + * cpuSequenceDims[out] generation output + */ +void generateMDimSequenceData(const IVectorPtr& sequenceStartPositions, + IVectorPtr& cpuSequenceDims); +void generateMDimSequenceData(const ICpuGpuVectorPtr& sequenceStartPositions, + IVectorPtr& cpuSequenceDims); + +void checkMatrixEqual(const MatrixPtr& a, const MatrixPtr& b); + +void checkVectorEqual(const IVectorPtr& a, const IVectorPtr& b); +} // namespace paddle From a080aa7a1c3936fd3e6ad27091873947c178c0dc Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 4 Jan 2017 10:27:34 +0800 Subject: [PATCH 19/51] Change run_all => entrypoint. 
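The renamed script still launches sshd and a Jupyter notebook server on port 8888 (see the entrypoint script earlier in this series), so a container built from these Dockerfiles can be run roughly as follows. A minimal sketch; the image tag and the host-side SSH port are assumed placeholders, not taken from this patch:

```bash
# Start the container with its default CMD, /opt/bin/entrypoint.
# paddledev/paddle:cpu-latest is a placeholder tag; 2202 is an arbitrary host port.
docker run -d --name paddle -p 8888:8888 -p 2202:22 paddledev/paddle:cpu-latest
# Jupyter then serves the /notes directory at http://<host>:8888.
```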
--- paddle/scripts/docker/Dockerfile | 4 ++-- paddle/scripts/docker/Dockerfile.gpu | 4 ++-- paddle/scripts/docker/{run_all => entrypoint} | 0 3 files changed, 4 insertions(+), 4 deletions(-) rename paddle/scripts/docker/{run_all => entrypoint} (100%) diff --git a/paddle/scripts/docker/Dockerfile b/paddle/scripts/docker/Dockerfile index 46363b05b7..1522be023f 100644 --- a/paddle/scripts/docker/Dockerfile +++ b/paddle/scripts/docker/Dockerfile @@ -50,6 +50,6 @@ WORKDIR "/notes" EXPOSE 8888 RUN mkdir -p /opt/bin -COPY ./paddle/scripts/docker/run_all /opt/bin/ +COPY ./paddle/scripts/docker/entrypoint /opt/bin/ -CMD ["/opt/bin/run_all"] +CMD ["/opt/bin/entrypoint"] diff --git a/paddle/scripts/docker/Dockerfile.gpu b/paddle/scripts/docker/Dockerfile.gpu index 072c144818..09f07043e2 100644 --- a/paddle/scripts/docker/Dockerfile.gpu +++ b/paddle/scripts/docker/Dockerfile.gpu @@ -50,6 +50,6 @@ WORKDIR "/notes" EXPOSE 8888 RUN mkdir -p /opt/bin -COPY ./paddle/scripts/docker/run_all /opt/bin/ +COPY ./paddle/scripts/docker/entrypoint /opt/bin/ -CMD ["/opt/bin/run_all"] +CMD ["/opt/bin/entrypoint"] diff --git a/paddle/scripts/docker/run_all b/paddle/scripts/docker/entrypoint similarity index 100% rename from paddle/scripts/docker/run_all rename to paddle/scripts/docker/entrypoint From e0a85db7f5609da5b881707505144de99613e49c Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Wed, 4 Jan 2017 17:16:28 +0800 Subject: [PATCH 20/51] fix dead links, reduce image size --- doc/howto/deep_model/rnn/rnn_config_cn.rst | 12 +++--------- doc/howto/usage/k8s/k8s_aws_en.md | 6 +++--- doc/tutorials/gan/gan.png | Bin 33275 -> 17810 bytes doc/tutorials/gan/index_en.md | 12 +++--------- doc/tutorials/gan/uniform_sample.png | Bin 20609 -> 24880 bytes 5 files changed, 9 insertions(+), 21 deletions(-) diff --git a/doc/howto/deep_model/rnn/rnn_config_cn.rst b/doc/howto/deep_model/rnn/rnn_config_cn.rst index 8d65b3512d..ac2bd0775f 100644 --- a/doc/howto/deep_model/rnn/rnn_config_cn.rst +++ b/doc/howto/deep_model/rnn/rnn_config_cn.rst @@ -33,8 +33,7 @@ PaddlePaddle yield src_ids, trg_ids, trg_ids_next -有关如何编写数据提供程序的更多细节描述,请参考 -`PyDataProvider2 <../../ui/data_provider/index.html>`__\ 。完整的数据提供文件在 +有关如何编写数据提供程序的更多细节描述,请参考 :ref:`api_pydataprovider2` 。完整的数据提供文件在 ``demo/seqToseq/dataprovider.py``\ 。 配置循环神经网络架构 @@ -132,9 +131,7 @@ Sequence to Sequence Model with Attention 模型的编码器部分如下所示。它叫做\ ``grumemory``\ 来表示门控循环神经网络。如果网络架构简单,那么推荐使用循环神经网络的方法,因为它比 ``recurrent_group`` -更快。我们已经实现了大多数常用的循环神经网络架构,可以参考 -`Layers <../../ui/api/trainer_config_helpers/layers_index.html>`__ -了解更多细节。 +更快。我们已经实现了大多数常用的循环神经网络架构,可以参考 :ref:`api_trainer_config_helpers_layers` 了解更多细节。 我们还将编码向量投射到 ``decoder_size`` 维空间。这通过获得反向循环网络的第一个实例,并将其投射到 @@ -276,9 +273,6 @@ attention,门控循环单元单步函数和输出函数: result_file=gen_trans_file) outputs(beam_gen) -注意,这种生成技术只用于类似解码器的生成过程。如果你正在处理序列标记任务,请参阅 -`Semantic Role Labeling -Demo <../../demo/semantic_role_labeling/index.html>`__ -了解更多详细信息。 +注意,这种生成技术只用于类似解码器的生成过程。如果你正在处理序列标记任务,请参阅 :ref:`semantic_role_labeling` 了解更多详细信息。 完整的配置文件在\ ``demo/seqToseq/seqToseq_net.py``\ 。 diff --git a/doc/howto/usage/k8s/k8s_aws_en.md b/doc/howto/usage/k8s/k8s_aws_en.md index 201bcae48d..422dc3bd81 100644 --- a/doc/howto/usage/k8s/k8s_aws_en.md +++ b/doc/howto/usage/k8s/k8s_aws_en.md @@ -331,15 +331,15 @@ For sharing the training data across all the Kubernetes nodes, we use EFS (Elast 1. Make sure you added AmazonElasticFileSystemFullAccess policy in your group. 1. Create the Elastic File System in AWS console, and attach the new VPC with it. - +
![](src/create_efs.png)
1. Modify the Kubernetes security group under ec2/Security Groups, add additional inbound policy "All TCP TCP 0 - 65535 0.0.0.0/0" for Kubernetes default VPC security group.
-
+![](src/add_security_group.png)
1. Follow the EC2 mount instruction to mount the disk onto all the Kubernetes nodes, we recommend to mount EFS disk onto ~/efs.
-
+![](src/efs_mount.png)
Before starting the training, you should place your user config and divided training data onto EFS. When the training start, each task will copy related files from EFS into container, and it will also write the training results back onto EFS, we will show you how to place the data later in this article. diff --git a/doc/tutorials/gan/gan.png b/doc/tutorials/gan/gan.png index 001ed6cc19e8911f9b10f63211c9658160b3a06e..0eafd7cb49b545f412f8e775804bcd0b22c42454 100644 GIT binary patch literal 17810 zcmeIaWl&sC^o9w9B!eWl1$TD~?!hHE!9BQJ2qAcIcPB`Ypuyb<8rk4`2Ae#=Rc{JH_ zi=jt*e9RIF88nfqGMRb0cx>3KzvCa~84lm!)hp3#5L$iPd2|4(0< zP;&J1&sCL!9$nqHEr|Hb-vnm;L(7RD73h+CQT-IhcSo&_RPrfx7t{JeC#Ja^7~hpe zprS)%D;b}5X|Hbg%I0N< z-&mErKdjEltEqL;@+;(>)~YRU8H_t;=EUrxxkuMkXrp8zDBhKnifvXyzQZ?6_Ueh**3 z(f>@Er}d3n!v-A|txp~rjD>COLr(Q`^gF~SD zq*0XLY^62^V^BAQGPk^?-qVL@Ad8ekjiE%Lq|U5^OEQz!l?Ohn#Huo z*%0^8hvtnKSppUQQ~6wuvxA7j7U&IIjt=kZ%e$TClvaDbk6e)E81mqKwdi)xTKj83 z|M3*ZZQiCf+0kT}OeT`QVOrNMo!sGB>EyK~BSr4hiXY0tq=x>%m&Y4(r^Xtq>EX#D zwE;%0%C4bwzG;`;iQm9PhCzfnU@+uJV+**K#bwx+50iH3p?4jyX=p`oQ8pE%^Q9_u`df^QQ|oSxnP~MmM`EIxF-pRPTghT@*aVNoTAMnr1Fzs21|9aRd? zz38l>O*HUwCwDH;$m?b^afprLO%21IeWI>w#f4a=*QJ)zI2-ovvxt3um%S+$LOxe# zAW^LkE=Nm?E{6*|>vQq-5KS3&oTHFk3h1?KQK9NooQ%Inv{CX8Gd;x{h^mm<$O&a~ z#GsP1-%fYSsMqH~v7NIY#M00)R5yN$vz{Pzwl$=pzSS2^uKV=(zzFlr*sl3Po!g`j z^=hxI)vP`~-kX{2yq?95&SkM&<)K(Z~^G~ichLgxv%!=c~PC=&5-Q|JGq^L#9Ne_O_M=lvQ2m&gu zo4Ci?yG8f)a9M&*OiUr>@yDn_m4dlA42ItVmouIDS-D8ly+fhZ4PXlRgitsVk2Q7O zMP{t4h8Do$&@fxf<#0NzDHH|6b|Xik5pO2Q{9~Y>?1PbSK;)T8t zhvib0se-X9N&%y+7rpogo(ZH9fDPz;@x@Dd1c`tA!{ua{n8#} zdC8P3yx~w$5jFrqQ)BuI0*To`fo&BHZQLy?W>eQ3t-?bq1e)^|qMFtQRrrRzQY#fM zw(wZB`~68LcvywO5ehWV79<-(Tg2oLsof~)Tn=`$u`kf^)s%)??~Qb0ZaVgb zMl(vf5jRp+*ia2(&;5#oG!p&q{$gz?XEM@t_I;#56dBxfdC5o3nin`wIiaKY^HZC9 z8z-Z*gMMo&k0U=>`%0|(pVFjJMMd5iRUxR#u%~jkF#V_hzEmW6T7POa!ZH3$Z66gZ z?U41@L_Jjhh2arXzQ&n}EbDP9{0ki{cr&&jv{eU>$P z)Yu@Ee7fwvH)mx5hk$*4ptkJoVd~AvKC9z6mGji*ICm<%rZs6o35cWp7Dd)$@T^Hs*)==c9Z*Zypk zZYVaRPS9(@1-sUVEh@!~eZjkZ_9`kKSD#zCt4enZR zw(z0;s&U!-X>szW_VfXeC(7OPzuzMp)PH`MT^?5yve`bGD2#==fI_bvr|n_e`|A^l zd3_j)e+xT@qTG76vXA~9a?lT8B`$^Tbsf5~dQ7HDwBGMc{gmBr+{sT>ZnABCaGiGp zw@e((c&TnWRB7A=6dqiyCr<9XB^ zT3P1*4(Q6ofyTN-Rvkjt%#^O%VxP9hXr$01u1E0zrXx~7rEOPjW1i*^7_U`fB(cHxe!j;A8aEqXEFU!<3DP?~ zTujfq*`>cDmy8G#yg%-QNQ5pp4lue7*E?;$TY?M!bh&R~`o2F7kKeuAEAy>`(Z4-)Vu~6y#&BpJ z_<fG%w7OQUo!#V7(_%8g7scC_Hs`}c@b0qBCh=;r7CtFh`Ux%IP zvP>KA^>A)$_Dt?2cVkZnDY9-`3p&&v?gD3)^$<(}pM3e$2B=r|y4@+@(%ZobX4L|r zalf_paKF`UvSEm8e$Zoc?Q+hF8UM^=AKkD7^#rjFegv48#mN$_@fru)4dCC(ova&n z@?Bdpzuf-r5ku=yj(+y~?TX@6z{wz~Sm4ZC%JpQBA22u|8)tm0MOEn?2Ch@kuH|Nn za(KExk@)Tr+&1|&?1A;asZ#HE8s?78fhz7|fxA^gwyjY;-7S&#D8G6o5lsAK+>}K@ zx|y7BC3)ZWjB%ukB%w>(8FoJ3QPY`DnO<|rAaT#!)8oq|L%wA{(Q{ihH!yg!9j8IV ztn0EPl3@8#HmGtaeTDzy7KX-VcD-nf;caD#SY_DGf0Z@rlY4hVnSfB{@fV2I>TYCvkS`Qg>{g6q5u;d+%||MZsYC;IWpVvP|G+1ci&$Gd!I zJXurxUZ>%@7|CRr7&MRpo204AfYEA$pz5q|%g^CbwhM&qWeYz{pd&TZ@0B*DWj22N z3C(dB>O~-RA_zpze?-i9KH(HnK5t7AcX-|&@&GKlH8LAynj*Rbq^yQAp=X9SKkK(r z^7*Odk_^_<@oj#+N0_swmI`)Ig!94mezaKM~=ra_dco>z#qA~IDSea`=UGzGOih>VTq86G|9j{UT+ueOd_pEB-O21URU*EuX)4~*%c4Z!CuEMbRSC(xQj5( zg#YbM4~o$JA77K#g4Zh>FAGhoG3wSr=og|$#n`6AWZ22cuz<-%_1S=L0r?tI!m477 z5d72!E5=&jIKr?LIP!m|rR|It;CNAe3*(?Kgc)_c*qe5`X++@4x$&^UDtV~? 
diff --git a/doc/tutorials/gan/index_en.md b/doc/tutorials/gan/index_en.md
index 99c8d73011..ac9ed37b22 100644
--- a/doc/tutorials/gan/index_en.md
+++ b/doc/tutorials/gan/index_en.md
@@ -4,9 +4,7 @@ This demo implements GAN training described in the original [GAN paper](https://
 The high-level structure of GAN is shown in Figure 1 below. It is composed of two major parts: a generator and a discriminator, both of which are based on neural networks. The generator takes in some kind of noise with a known distribution and transforms it into an image. The discriminator takes in an image and determines whether it was artificially generated by the generator or is a real image. So the generator and the discriminator are in a competitive game, in which the generator tries to generate images that look as real as possible to fool the discriminator, while the discriminator tries to distinguish between real and fake images.
-
-
-
+![](./gan.png)

 Figure 1. GAN-Model-Structure figure credit
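The competitive game described in the paragraph above is conventionally written as the minimax objective below, a standard statement from the GAN paper the tutorial cites; here $G$ is the generator, $D$ the discriminator, $p_{\text{data}}$ the data distribution, and $p_z$ the noise prior:

$$
\min_G \max_D V(D, G) = \mathbb{E}_{x \sim p_{\text{data}}(x)}\left[\log D(x)\right] + \mathbb{E}_{z \sim p_z(z)}\left[\log\left(1 - D(G(z))\right)\right]
$$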

@@ -111,9 +109,7 @@ $python gan_trainer.py -d uniform --useGpu 1
 ```
 The generated samples can be found in ./uniform_samples/ and one example is shown below as Figure 2. One can see that it roughly recovers the 2D uniform distribution.
-
-
-
+![](./uniform_sample.png)

 Figure 2. Uniform Sample

@@ -135,9 +131,7 @@ To train the GAN model on mnist data, one can use the following command:
 $python gan_trainer.py -d mnist --useGpu 1
 ```
 The generated sample images can be found at ./mnist_samples/ and one example is shown below as Figure 3.
-
-
-
+![](./mnist_sample.png)

 Figure 3. MNIST Sample

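The binary update to uniform_sample.png below is a regenerated sample plot. A sketch of reproducing such samples, using only the commands and output folders documented in the hunks above:

```
# Train on the 2-D uniform toy distribution, then on MNIST.
python gan_trainer.py -d uniform --useGpu 1
python gan_trainer.py -d mnist --useGpu 1
# Per the tutorial text, the generated sample plots land here.
ls uniform_samples/ mnist_samples/
```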
diff --git a/doc/tutorials/gan/uniform_sample.png b/doc/tutorials/gan/uniform_sample.png
index 4a96c45cae82673f5a1df986f2643a8026da7937..e716c48e782019a757bed0cb443f2ed97386cbe2 100644
Binary files a/doc/tutorials/gan/uniform_sample.png and b/doc/tutorials/gan/uniform_sample.png differ
zSWZi!Wm;%c?)g!rE9E|)gO$6aq06FFe&ZCxB9jpiD5t@6r-$0Frk_3%)>Q}4Grc<# z^I~}*H>pn>2JYFOlgI`zve&5{7dbaj;}f6(sjiis2!J1XbL?imV796gf;y3AsI!Q8 zBzl;~uT_2GcR2*q7(}T130h3PRg#x~4(j>$X~IWBT5L)=-zdN}#c$Ys4JsQ3WF2j1 zSNN&Gmb42#txi=aHQsIJYWq0)tM*3or%zWl=2J6y{isDvpDqo@Rn8RULS1nf?90z@ z-h8>cmFYTM{iEa&{fzC;Y7g`yd$4_dWR_3k2xZqWb(vz;$Tq%dUpT+Q?*bFD<<%?9 z4cMZ1(Ssse-A|_hFg}N8v}_2VVN-ttw0f=>`{_#?f}^{>_ArnUP@FRzciXcw!&|RYaQ zo_@jFI#sIGd}d$6{5#R~RX-tgIIB$=!XiiE_TeyZraKcP(=!rNbuD~>Hk7smZgd)? z>kGQnF2CY_TTcPjh4c)V0KXY_TDtQ1a0QwdYC+mW`)8`b!v2w93>tA_+YHxAO%*6ri5k^@m*9dX^%J{}W)1 z#*$CQP_0q|%Wexiql(HeZZu3%j9N6Ysx=CzZ|rUgEJ@PO6x9HOs1u$z-5O=2E2qX7 zxKRrB+~AN9`ZpdF!VcZ*dnHfkXMVkK-H!leQC9I+2Ys?jr3OSd?He7^|EOS>x_-Z+ ze)3=h-t?l@C<5`^2(anL;SaOXS>z!nqQ7k!aE7>`%=vH z*+%JAn}WR)EWM>cwn0(la^_6v4e>5rzXKb}rM@;+2SL9?gKax1?d*?WO!=@iQ{@f& zn_~{ROo)hy4I6_P2U0#Q=sNgTZl%94fRrL!z&b@v&MN;XdUR*hRRdyCw7TOGkML?0 z*52`8aixFah$#Npie(EUfic?%hg|>OOK~RJsvtbD8*+n!EjZ z_wygM=#1K9h;lA0)N&}PeSFXIslD3l=;mc6tv5fWqend|eOAI*ZMTyScgr+ML}rAN z+BVkA_7?4h9yvP;Q;9geU0gHCvxbo8lzCcl@uhp8&jVj?QSxGT^wF9~Evd`lUVfS-Yc5-EZmwi& zmAj|sYtAEH^bHMfJ$gg{_T%$JL{qXFuG822LMB}2Pl-Jq)x64GCs}N}LiJ;C z(ERAYK0ZFai1nW9%mZJIGCK^Pd#{gQSzthi6xK7W3)YCKdsU)kjFmAtCz`{rgT zZeHFWEQ6=G6qbL2v_K;)ObMak@wJPJii%)A_YeJH_$xR8#1el$2XHZ(@*^qnP&I-jn6(1e)dvXAs4PIu`aEnAuofUS6iY{m1a_4*ffhj)JXGf_hz4WMuDP*9O#D zrO$nZg{ArM=Be|xJrv)vv$Hwtr!2&2F9d{WZ!7TMw9Aq;MPL&Hnubo@rlTl9f(g@q(sTwJ{D_cph-UQY1U>*Ubw zn^Sng+?cfzDAmr?sC*;Kl<(MaMe$5a_V?9Qj_svEPpD;^7|7KFelZ$n*s6SSG%&HYA;={a~&@DQHnld7?Y(G=Ns7gdF@0m zbJxK}^twT(y1M$+Fz4rv)42nIG1(~Wv^6hRyCDP>6CwE~HZ~y= z2{IvJVRO4?|xCUU#amU%zg=J-|bXYYh9oG1TaeTyl#s&m&lzoTP=RsY4&w z@MBu4YCA0s30z&j3x*Ka>~&=|14dNKv11tFIM zQoXk5yM~5LK@aGx1SlOG9I~JY8u3;c)X!Irxb!M4?2?Y2UX3P4Y(W8WTwI*Ksp&~= zTr4as2%AflWC3mG!?nYt-oU_o&d9)r^2zMt=P>EO>9){g{_A=izxvs>4L;{RZYcAZ z_Gv$S)qc?0(xL@X`q#lOS4wK?hoGPn!4EtNUyOcAO~tV&KG48xm^s~I{`4s!85tQ( zV8df|b^O2v_rby!+WPvZAwweC4i4c^ zEBxwvZEa3{y?B9M8`bu73wh~AKww}P%=jar>({I8qK_w%a&kH&YJSa3&&(*PsZmOb zi)&u_@D@lXA8T9N!N|avn6_KDZY}OfOG#yQcFGhyZy|#le+TAn>3N6ODurOlBY1?R zmdn3{6qYAO*M!=j&B8<}8Wwbw8kH`(kioFf)4%b$b0s7q!l*B_C}aZord&v>a%X4t ziO-+^YGG*^cJK3={H)7We*Rs*Hrb|mrMq{ran78{R(Ysm+H$_xeY)>SB@~|sP~NX< zUc7MY^kw&R>!o9kSl;K*zn{QkvN;1E-4d_*^eptxM=bxZe6&Zs+)d)Z|EskdQX@p- MuJoOZTbi%_7b3Xb=>Px# From c60a321528b3cb103d6f4d549512a830b25cfdfb Mon Sep 17 00:00:00 2001 From: Zhizhong Su Date: Wed, 4 Jan 2017 12:09:31 +0000 Subject: [PATCH 21/51] revise new_layer_cn and fix a few problems in new_layer_en --- doc/howto/dev/new_layer_cn.rst | 61 +++++++++++++++++----------------- doc/howto/dev/new_layer_en.rst | 5 ++- 2 files changed, 32 insertions(+), 34 deletions(-) diff --git a/doc/howto/dev/new_layer_cn.rst b/doc/howto/dev/new_layer_cn.rst index 8f5df0b36a..d5d37e83d0 100644 --- a/doc/howto/dev/new_layer_cn.rst +++ b/doc/howto/dev/new_layer_cn.rst @@ -2,26 +2,26 @@ 实现新的网络层 ================ -这份教程指导你在PaddlePaddle中实现一个自定义的网络层。在这里我们使用全连接层作为例子来指导你完成实现新网络层需要的几个步骤。 +这份教程展示了如何在PaddlePaddle中实现一个自定义的网络层。在这里我们使用全连接层作为例子来展示实现新网络层所需要的四个步骤。 -- 推导该层前向和后向传递的方程。 -- 实现该层的C++类。 -- 写梯度检测的测试单元,以保证梯度的正确计算。 -- 实现该层的python封装。 +1. 推导该层前向和后向传递的方程。 +2. 实现该层的C++类。 +3. 增加梯度检测的单元测试,以保证梯度的正确计算。 +4. 封装该层的Python接口。 推导方程 ================ 首先我们需要推导该网络层的*前向传播*和*后向传播*的方程。前向传播给定输入,计算输出。后向传播给定输出的梯度,计算输入和参数的梯度。 -下图是一个全链接层的示意图。在全连接层中,每个输出节点都连接到所有的输入节点上。 +下图是一个全连接层的示意图。在全连接层中,每个输出节点都连接到所有的输入节点上。 .. 
image:: FullyConnected.jpg :align: center :scale: 60 % 一个网络层的前向传播部分把输入转化为相应的输出。 -全连接层以一个维度为 :math:`D_i` 稠密的向量作为输入。其用一个尺度为 :math:`D_i \times D_o` 的变换矩阵 :math:`W` 把 :math:`x` 映射到一个维度为 :math:`D_o` 的向量,并在其上再加上维度为 :math:`D_o` 的偏置向量 :math:`b` 。 +全连接层以一个维度为 :math:`D_i` 的稠密向量作为输入,使用一个尺度为 :math:`D_i \times D_o` 的变换矩阵 :math:`W` 把 :math:`x` 映射到一个维度为 :math:`D_o` 的向量,并在乘积结果上再加上维度为 :math:`D_o` 的偏置向量 :math:`b` 。 .. math:: @@ -29,9 +29,9 @@ 其中 :math:`f(.)` 是一个非线性的*激活方程*,例如sigmoid, tanh,以及Relu。 -变换矩阵 :math:`W` 和偏置向量 :math:`b` 是该网络层的*参数*。一个网络层的参数是在*反向传播*时被训练的。反向传播对所有的参数和输入都计算输出函数的梯度。优化器则用链式法则来对每个参数计算损失函数的梯度。 +变换矩阵 :math:`W` 和偏置向量 :math:`b` 是该网络层的*参数*。一个网络层的参数是在*反向传播*时被训练的。反向传根据输出的梯度,分别计算每个参数的梯度,以及输入的梯度。优化器则用链式法则来对每个参数计算损失函数的梯度。 -假设我们的损失函数是 :math:`c(y)` ,那么 +假设损失函数是 :math:`c(y)` ,那么 .. math:: @@ -43,9 +43,9 @@ \frac{\partial y}{\partial z} = \frac{\partial f(z)}{\partial z} -我们的base layer类可以自动计算上面的导数。 +PaddlePaddle的base layer类可以自动计算上面的导数。 -因而,对全连接层来说,我们需要计算: +因此,对全连接层来说,我们需要计算: .. math:: @@ -60,23 +60,23 @@ 一个网络层的C++类需要实现初始化,前向和后向。全连接层的实现位于:code:`paddle/gserver/layers/FullyConnectedLayer.h`及:code:`paddle/gserver/layers/FullyConnectedLayer.cpp`。这里我们展示一份简化过的代码。 -这个类需要继承 :code:`paddle::Layer` 这个基类,并且需要重写以下基类中的虚函数: +这个类需要继承 :code:`paddle::Layer` 这个基类,并且需要重写基类中的以下几个虚函数: -- 类的构造函数和析构析构函数。 +- 类的构造函数和析构函数。 - :code:`init` 函数。用于初始化参数和设置。 - :code:`forward` 。实现网络层的前向传播。 - :code:`backward` 。实现网络层的后向传播。 -- :code:`prefetch` 。用于确定由参数服务器预取的行相关的参数矩阵。如果该网络层不需要远程稀疏更新的话,你不需要重写该函数。(大多数网络层不需要支持远程稀疏更新) +- :code:`prefetch` 。用来从参数服务器预取参数矩阵相应的行。如果网络层不需要远程稀疏更新,则不需要重写该函数。(大多数网络层不需要支持远程稀疏更新) -头文件在下面列出: +头文件如下: .. code-block:: c++ namespace paddle { /** * 全连接层的每个输出都连接到上一层的所有的神经元上。 - * 其用一些学习过的参数做内积并加上偏置(可选)。 + * 它的输入与经过学习的参数做内积并加上偏置(可选)。 * * 配置文件接口是fc_layer。 */ @@ -101,9 +101,9 @@ }; } // namespace paddle -头文件中把参数定位为类的成员变量。我们使用 :code:`Weight` 类作为参数的抽象,它支持多线程更新。该类的实现细节在“实现细节”中由详细介绍。 +头文件中把参数定义为类的成员变量。我们使用 :code:`Weight` 类作为参数的抽象,它支持多线程更新。该类的实现细节在“实现细节”中详细介绍。 -- :code:`weights_` 是存有变换矩阵的一系列权重。在当前的实现方式下,网络层可以有多个输入。因此,它可能有不止一个权重。每个权重对应一个输入。 +- :code:`weights_` 是存有一系列变换矩阵的权重。在当前的实现方式下,网络层可以有多个输入。因此,它可能有不止一个权重。每个权重对应一个输入。 - :code:`biases_` 是存有偏置向量的权重。 全连接层没有网络层配置的超参数。如果一个网络层需要配置的话,通常的做法是将配置存于 :code:`LayerConfig& config` 中,并在类构建函数中把它放入一个类成员变量里。 @@ -173,7 +173,7 @@ MatrixPtr outV = getOutputValue(); - // 对每个输入乘上转化矩阵 + // 对每个输入乘上变换矩阵 for (size_t i = 0; i != inputLayers_.size(); ++i) { auto input = getInput(i); CHECK(input.value) << "The input of 'fc' layer must be matrix"; @@ -193,9 +193,9 @@ 实现后向传播的部分有下面几个步骤。 -- :code:`backwardActivation()` 计算激活函数的梯度。梯度会就地(不使用额外空间)乘上输出的梯度,并可以通过 :code:`getOutputGrad()` 来获得。 -- 计算偏置的梯度。注意,我们使用 :code:`biases_->getWGrad()` 来得到某个特定参数的梯度矩阵。在一个参数的梯度被更新后,**必须**要调用 :code:`getParameterPtr()->incUpdate(callback);` 。这是用来在多线程和多机上更新参数的。 -- 之后,计算转换矩阵和输入的梯度,并对相应的参数调用 :code:`incUpdate` 。这给了框架一个机会去了解自己是否已经把所有的梯度收集到一个参数中,使得框架可以进行有时间重叠的工作。(例如,网络通信) +- :code:`backwardActivation()` 计算激活函数的梯度。通过 :code:`getOutputGrad()` 来获得输出的梯度,调用该函数后,梯度会就地(不使用额外空间)乘上输出的梯度。 +- 计算偏置的梯度。注意,我们使用 :code:`biases_->getWGrad()` 来得到某个特定参数的梯度矩阵。在一个参数的梯度被更新后,**必须**要调用 :code:`getParameterPtr()->incUpdate(callback);` 。这用于在多线程和多机上更新参数。 +- 最后,计算转换矩阵和输入的梯度,并对相应的参数调用 :code:`incUpdate` 。PaddlePaddle可以通过该机制判断是否已经收集齐所有的梯度,从而可以做一些与计算重叠的工作(例如,网络通信)。 .. 
code-block:: c++ @@ -208,7 +208,6 @@ if (biases_ && biases_->getWGrad()) { biases_->getWGrad()->collectBias(*getOutputGrad(), 1); - /* 加上偏置的梯度 */ biases_->getParameterPtr()->incUpdate(callback); } @@ -238,7 +237,7 @@ } } - :code:`prefetch` 函数指出了在训练时需要从参数服务器取出的行。仅在远程稀疏训练时有效。在远程稀疏训练时,完整的参数矩阵被分布式的保存在参数服务器上。当网络层用一个批次做训练时,该批次中,输入仅有一个子集是非零的。因此,该层仅需要这些非零样本位置所对应的转换矩阵的那些行。 :code:`prefetch` 表明了这些行的标号。 + :code:`prefetch` 函数指出了在训练时需要从参数服务器取出的行。仅在远程稀疏训练时有效。使用远程稀疏方式训练时,完整的参数矩阵被分布在不同的参数服务器上。当网络层用一个批次做训练时,该批次的输入中仅有一个子集是非零的。因此,该层仅需要这些非零样本位置所对应的变换矩阵的那些行。 :code:`prefetch` 表明了这些行的标号。 大多数层不需要远程稀疏训练函数。这种情况下不需要重写该函数。 @@ -271,7 +270,7 @@ 写梯度检查单元测试是一个验证新实现的层是否正确的相对简单的办法。梯度检查单元测试通过有限差分法来验证一个层的梯度。首先对输入做一个小的扰动 :math:`\Delta x` ,然后观察到输出的变化为 :math:`\Delta y` ,那么,梯度就可以通过这个方程计算得到 :math:`\frac{\Delta y}{\Delta x }` 。之后,再用这个梯度去和 :code:`backward` 函数得到的梯度去对比,以保证梯度计算的正确性。需要注意的是梯度检查仅仅验证了梯度的计算,并不保证 :code:`forward` 和 :code:`backward` 函数的实现是正确的。你需要一些更复杂的单元测试来保证你实现的网络层是正确的。 -所有的梯度检测单侧都位于 :code:`paddle/gserver/tests/test_LayerGrad.cpp` 。我们建议你在写新网络层时把测试代码放入新的文件中。下面列出了全连接层的梯度检查单元测试。它包含以下几步: +所有网络层的梯度检查单测都位于 :code:`paddle/gserver/tests/test_LayerGrad.cpp` 。我们建议你在写新网络层时把测试代码放入新的文件中。下面列出了全连接层的梯度检查单元测试。它包含以下几步: + 生成网络层配置。网络层配置包含以下几项: - 偏置参数的大小。(例子中是4096) @@ -294,10 +293,10 @@ - 非零数字的个数,仅对稀疏数据有效。 - 稀疏数据的格式,仅对稀疏数据有效。 + 对每个输入,都需要调用一次 :code:`config.layerConfig.add_inputs();` 。 -+ 调用 :code:`testLayerGrad` 来做梯度检查。它包含下面的参数。 ++ 调用 :code:`testLayerGrad` 来做梯度检查。它包含以下参数。 - 层和输入的配置。(例子中是 :code:`config` ) - - 输入的类型。(例子中是 :code:`fc` ) - - 梯度检查的批次大小。(例子中是100) + - 网络层的类型。(例子中是 :code:`fc` ) + - 梯度检查的输入数据的批次大小。(例子中是100) - 输入是否是转置的。大多数层需要设置为 :code:`false` 。(例子中是 :code:`false` ) - 是否使用权重。有些层或者激活需要做归一化以保证它们的输出的和是一个常数。例如,softmax激活的输出的和总是1。在这种情况下,我们不能通过常规的梯度检查的方式来计算梯度。因此我们采用输出的加权和(非常数)来计算梯度。(例子中是 :code:`true` ,因为全连接层的激活可以是softmax) @@ -309,7 +308,7 @@ config.biasSize = 4096; config.layerConfig.set_type("fc"); config.layerConfig.set_size(4096); - config.layerConfig.set_active_type("sigmoid"); + config.layerConfig.set_active_type("softmax"); config.layerConfig.set_drop_rate(0.1); // Setup inputs. config.inputDefs.push_back( @@ -323,7 +322,7 @@ } } -如果你要为了测试而增加新的文件,例如 :code:`paddle/gserver/tests/testFCGrad.cpp` ,你需要把该文件加入 :code:`paddle/gserver/tests/CMakeLists.txt` 中。下面给出了一个例子。当你执行命令 :code:`make tests` 时,所有的单侧都会被执行一次。注意,有些层可能需要高精度来保证梯度检查单侧正确执行。你需要在配置cmake时将 :code:`WITH_DOUBLE` 设置为 `ON` 。 +如果你要为了测试而增加新的文件,例如 :code:`paddle/gserver/tests/testFCGrad.cpp` ,你需要把该文件加入 :code:`paddle/gserver/tests/CMakeLists.txt` 中。下面给出了一个例子。当你执行命令 :code:`make tests` 时,所有的单测都会被执行一次。注意,有些层可能需要高精度来保证梯度检查单测正确执行。你需要在配置cmake时将 :code:`WITH_DOUBLE` 设置为 `ON` 。 .. code-block:: bash @@ -344,7 +343,7 @@ python封装的实现使得我们可以在配置文件中使用新实现的网 - 所有的Python封装都使用 :code:`@config_layer('fc')` 这样的装饰器。网络层的标识符为 :code:`fc` 。 - 实现构造函数 :code:`__init__` 。 - 它首先调用基构造函数 :code:`super(FCLayer, self).__init__(name, 'fc', size, inputs=inputs, **xargs)` 。 :code:`FCLayer` 是Python封装的类名。 :code:`fc` 是网络层的标识符。为了封装能够正确工作,这些名字必须要写对。 - - 之后,计算转换矩阵的大小和格式(是否稀疏)。 + - 之后,计算变换矩阵的大小和格式(是否稀疏)。 .. code-block:: python diff --git a/doc/howto/dev/new_layer_en.rst b/doc/howto/dev/new_layer_en.rst index 0513f068f3..46481f5ead 100644 --- a/doc/howto/dev/new_layer_en.rst +++ b/doc/howto/dev/new_layer_en.rst @@ -209,7 +209,6 @@ The implementation of the backward part has the following steps. 
if (biases_ && biases_->getWGrad()) { biases_->getWGrad()->collectBias(*getOutputGrad(), 1); - /* Increasing the number of gradient */ biases_->getParameterPtr()->incUpdate(callback); } @@ -297,7 +296,7 @@ All the gradient check unit tests are located in :code:`paddle/gserver/tests/tes + each inputs needs to call :code:`config.layerConfig.add_inputs();` once. + call :code:`testLayerGrad` to perform gradient checks. It has the following arguments. - layer and input configurations. (:code:`config` in our example) - - type of the input. (:code:`fc` in our example) + - type of the layer. (:code:`fc` in our example) - batch size of the gradient check. (100 in our example) - whether the input is transpose. Most layers need to set it to :code:`false`. (:code:`false` in our example) - whether to use weights. Some layers or activations perform normalization so that the sum of their output is a constant. For example, the sum of output of a softmax activation is one. In this case, we cannot correctly compute the gradients using regular gradient check techniques. A weighted sum of the output, which is not a constant, is utilized to compute the gradients. (:code:`true` in our example, because the activation of a fully connected layer can be softmax) @@ -310,7 +309,7 @@ All the gradient check unit tests are located in :code:`paddle/gserver/tests/tes config.biasSize = 4096; config.layerConfig.set_type("fc"); config.layerConfig.set_size(4096); - config.layerConfig.set_active_type("sigmoid"); + config.layerConfig.set_active_type("softmax"); config.layerConfig.set_drop_rate(0.1); // Setup inputs. config.inputDefs.push_back( From 0e7d77f325a38531a9a84e6cdcde81355ffcc754 Mon Sep 17 00:00:00 2001 From: Zhizhong Su Date: Wed, 4 Jan 2017 12:52:15 +0000 Subject: [PATCH 22/51] change format --- doc/howto/dev/new_layer_cn.rst | 778 ++++++++++++++++----------------- 1 file changed, 389 insertions(+), 389 deletions(-) diff --git a/doc/howto/dev/new_layer_cn.rst b/doc/howto/dev/new_layer_cn.rst index d5d37e83d0..897a8be5b3 100644 --- a/doc/howto/dev/new_layer_cn.rst +++ b/doc/howto/dev/new_layer_cn.rst @@ -1,389 +1,389 @@ -================ -实现新的网络层 -================ - -这份教程展示了如何在PaddlePaddle中实现一个自定义的网络层。在这里我们使用全连接层作为例子来展示实现新网络层所需要的四个步骤。 - -1. 推导该层前向和后向传递的方程。 -2. 实现该层的C++类。 -3. 增加梯度检测的单元测试,以保证梯度的正确计算。 -4. 封装该层的Python接口。 - -推导方程 -================ - -首先我们需要推导该网络层的*前向传播*和*后向传播*的方程。前向传播给定输入,计算输出。后向传播给定输出的梯度,计算输入和参数的梯度。 - -下图是一个全连接层的示意图。在全连接层中,每个输出节点都连接到所有的输入节点上。 - -.. image:: FullyConnected.jpg - :align: center - :scale: 60 % - -一个网络层的前向传播部分把输入转化为相应的输出。 -全连接层以一个维度为 :math:`D_i` 的稠密向量作为输入,使用一个尺度为 :math:`D_i \times D_o` 的变换矩阵 :math:`W` 把 :math:`x` 映射到一个维度为 :math:`D_o` 的向量,并在乘积结果上再加上维度为 :math:`D_o` 的偏置向量 :math:`b` 。 - -.. math:: - - y = f(W^T x + b) - -其中 :math:`f(.)` 是一个非线性的*激活方程*,例如sigmoid, tanh,以及Relu。 - -变换矩阵 :math:`W` 和偏置向量 :math:`b` 是该网络层的*参数*。一个网络层的参数是在*反向传播*时被训练的。反向传根据输出的梯度,分别计算每个参数的梯度,以及输入的梯度。优化器则用链式法则来对每个参数计算损失函数的梯度。 - -假设损失函数是 :math:`c(y)` ,那么 - -.. math:: - - \frac{\partial c(y)}{\partial x} = \frac{\partial c(y)}{\partial y} \frac{\partial y}{\partial x} - -假设 :math:`z = f(W^T x + b)` ,那么 - -.. math:: - - \frac{\partial y}{\partial z} = \frac{\partial f(z)}{\partial z} - -PaddlePaddle的base layer类可以自动计算上面的导数。 - -因此,对全连接层来说,我们需要计算: - -.. 
math:: - - \frac{\partial z}{\partial x} = W, \frac{\partial z_j}{\partial W_{ij}} = x_i, \frac{\partial z}{\partial b} = \mathbf 1 - -其中 :math:`\mathbf 1` 是一个全1的向量, :math:`W_{ij}` 是矩阵 :math:`W` 第i行第j列的数值, :math:`z_j` 是向量 :math:`z` 的第j个值, :math:`x_i` 是向量 :math:`x` 的第i个值。 - -最后我们使用链式法则计算 :math:`\frac{\partial z}{\partial x}` 以及 :math:`\frac{\partial z}{\partial W}` 。计算的细节将在下面的小节给出。 - -实现C++类 -=================== - -一个网络层的C++类需要实现初始化,前向和后向。全连接层的实现位于:code:`paddle/gserver/layers/FullyConnectedLayer.h`及:code:`paddle/gserver/layers/FullyConnectedLayer.cpp`。这里我们展示一份简化过的代码。 - -这个类需要继承 :code:`paddle::Layer` 这个基类,并且需要重写基类中的以下几个虚函数: - -- 类的构造函数和析构函数。 -- :code:`init` 函数。用于初始化参数和设置。 -- :code:`forward` 。实现网络层的前向传播。 -- :code:`backward` 。实现网络层的后向传播。 -- :code:`prefetch` 。用来从参数服务器预取参数矩阵相应的行。如果网络层不需要远程稀疏更新,则不需要重写该函数。(大多数网络层不需要支持远程稀疏更新) - - -头文件如下: - -.. code-block:: c++ - - namespace paddle { - /** - * 全连接层的每个输出都连接到上一层的所有的神经元上。 - * 它的输入与经过学习的参数做内积并加上偏置(可选)。 - * - * 配置文件接口是fc_layer。 - */ - - class FullyConnectedLayer : public Layer { - protected: - WeightList weights_; - std::unique_ptr biases_; - - public: - explicit FullyConnectedLayer(const LayerConfig& config) - : Layer(config) {} - ~FullyConnectedLayer() {} - - bool init(const LayerMap& layerMap, const ParameterMap& parameterMap); - - Weight& getWeight(int idx) { return *weights_[idx]; } - - void prefetch(); - void forward(PassType passType); - void backward(const UpdateCallback& callback = nullptr); - }; - } // namespace paddle - -头文件中把参数定义为类的成员变量。我们使用 :code:`Weight` 类作为参数的抽象,它支持多线程更新。该类的实现细节在“实现细节”中详细介绍。 - -- :code:`weights_` 是存有一系列变换矩阵的权重。在当前的实现方式下,网络层可以有多个输入。因此,它可能有不止一个权重。每个权重对应一个输入。 -- :code:`biases_` 是存有偏置向量的权重。 - -全连接层没有网络层配置的超参数。如果一个网络层需要配置的话,通常的做法是将配置存于 :code:`LayerConfig& config` 中,并在类构建函数中把它放入一个类成员变量里。 - -下面的代码片段实现了 :code:`init` 函数。 - -- 首先,所有的 :code:`init` 函数必须先调用基类中的函数 :code:`Layer::init(layerMap, parameterMap);` 。该语句会为每个层初始化其所需要的变量和连接。 -- 之后初始化所有的权重矩阵 :math:`W` 。当前的实现方式下,网络层可以有多个输入。因此,它可能有不止一个权重。 -- 最后,初始化偏置向量。 - - -.. code-block:: c++ - - bool FullyConnectedLayer::init(const LayerMap& layerMap, - const ParameterMap& parameterMap) { - /* 初始化父类 */ - Layer::init(layerMap, parameterMap); - - /* 初始化权重表 */ - CHECK(inputLayers_.size() == parameters_.size()); - for (size_t i = 0; i < inputLayers_.size(); i++) { - // 获得参数尺寸 - size_t height = inputLayers_[i]->getSize(); - size_t width = getSize(); - - // 新建一个权重 - if (parameters_[i]->isSparse()) { - CHECK_LE(parameters_[i]->getSize(), width * height); - } else { - CHECK_EQ(parameters_[i]->getSize(), width * height); - } - Weight* w = new Weight(height, width, parameters_[i]); - - // 将新建的权重加入权重表 - weights_.emplace_back(w); - } - - /* 初始化biases_ */ - if (biasParameter_.get() != NULL) { - biases_ = std::unique_ptr(new Weight(1, getSize(), biasParameter_)); - } - - return true; - } - -实现前向传播的部分有下面几个步骤。 - -- 每个层在其 :code:`forward` 函数的开头必须调用 :code:`Layer::forward(passType);` 。 -- 之后使用 :code:`reserveOutput(batchSize, size);` 为输出分配内存。由于我们支持训练数据有不同的批次大小,所以这一步是必要的。 :code:`reserveOutput` 会相应地改变输出的尺寸。为了保证效率,如果需要扩大矩阵,我们会重新分配内存;如果需要缩减矩阵,我们会继续使用现有的内存块。 -- 之后使用矩阵运算函数来计算 :math:`\sum_i W_i x + b`。:code:`getInput(i).value` 返回第i个输入矩阵。每个输入都是一个 :math:`batchSize \times dim` 的矩阵,每行表示一个批次中的单个输入。对于我们支持的全部矩阵操作,请参考 :code:`paddle/math/Matrix.h`和:code:`paddle/math/BaseMatrix.h` 。 -- 最终,使用 :code:`forwardActivation();` 进行激活操作。这会自动进行网络配置中声明的激活操作。 - - -.. 
code-block:: c++ - - void FullyConnectedLayer::forward(PassType passType) { - Layer::forward(passType); - - /* 若有必要,为output_申请内存 */ - int batchSize = getInput(0).getBatchSize(); - int size = getSize(); - - { - // 设置输出的尺寸 - reserveOutput(batchSize, size); - } - - MatrixPtr outV = getOutputValue(); - - // 对每个输入乘上变换矩阵 - for (size_t i = 0; i != inputLayers_.size(); ++i) { - auto input = getInput(i); - CHECK(input.value) << "The input of 'fc' layer must be matrix"; - i == 0 ? outV->mul(input.value, weights_[i]->getW(), 1, 0) - : outV->mul(input.value, weights_[i]->getW(), 1, 1); - } - - /* 加上偏置向量 */ - if (biases_.get() != NULL) { - outV->addBias(*(biases_->getW()), 1); - } - - /* 激活 */ { - forwardActivation(); - } - } - -实现后向传播的部分有下面几个步骤。 - -- :code:`backwardActivation()` 计算激活函数的梯度。通过 :code:`getOutputGrad()` 来获得输出的梯度,调用该函数后,梯度会就地(不使用额外空间)乘上输出的梯度。 -- 计算偏置的梯度。注意,我们使用 :code:`biases_->getWGrad()` 来得到某个特定参数的梯度矩阵。在一个参数的梯度被更新后,**必须**要调用 :code:`getParameterPtr()->incUpdate(callback);` 。这用于在多线程和多机上更新参数。 -- 最后,计算转换矩阵和输入的梯度,并对相应的参数调用 :code:`incUpdate` 。PaddlePaddle可以通过该机制判断是否已经收集齐所有的梯度,从而可以做一些与计算重叠的工作(例如,网络通信)。 - - -.. code-block:: c++ - - void FullyConnectedLayer::backward(const UpdateCallback& callback) { - /* 对激活求导 */ { - backwardActivation(); - } - - if (biases_ && biases_->getWGrad()) { - biases_->getWGrad()->collectBias(*getOutputGrad(), 1); - - biases_->getParameterPtr()->incUpdate(callback); - } - - bool syncFlag = hl_get_sync_flag(); - - for (size_t i = 0; i != inputLayers_.size(); ++i) { - /* 计算当前层权重的梯度 */ - if (weights_[i]->getWGrad()) { - MatrixPtr input_T = getInputValue(i)->getTranspose(); - MatrixPtr oGrad = getOutputGrad(); - { - weights_[i]->getWGrad()->mul(input_T, oGrad, 1, 1); - } - } - - - /* 计算输入层的偏差 */ - MatrixPtr preGrad = getInputGrad(i); - if (NULL != preGrad) { - MatrixPtr weights_T = weights_[i]->getW()->getTranspose(); - preGrad->mul(getOutputGrad(), weights_T, 1, 1); - } - - { - weights_[i]->getParameterPtr()->incUpdate(callback); - } - } - } - - :code:`prefetch` 函数指出了在训练时需要从参数服务器取出的行。仅在远程稀疏训练时有效。使用远程稀疏方式训练时,完整的参数矩阵被分布在不同的参数服务器上。当网络层用一个批次做训练时,该批次的输入中仅有一个子集是非零的。因此,该层仅需要这些非零样本位置所对应的变换矩阵的那些行。 :code:`prefetch` 表明了这些行的标号。 - -大多数层不需要远程稀疏训练函数。这种情况下不需要重写该函数。 - -.. code-block:: c++ - - void FullyConnectedLayer::prefetch() { - for (size_t i = 0; i != inputLayers_.size(); ++i) { - auto* sparseParam = - dynamic_cast(weights_[i]->getW().get()); - if (sparseParam) { - MatrixPtr input = getInputValue(i); - sparseParam->addRows(input); - } - } - } - -最后,使用 :code:`REGISTER_LAYER(fc, FullyConnectedLayer);` 来注册该层。 :code:`fc` 是该层的标识符, :code:`FullyConnectedLayer` 是该层的类名。 - -.. 
code-block:: c++ - - namespace paddle { - REGISTER_LAYER(fc, FullyConnectedLayer); - } - -若 :code:`cpp` 被放在 :code:`paddle/gserver/layers` 目录下,其会自动被加入编译列表。 - - -写梯度检查单元测试 -=============================== - -写梯度检查单元测试是一个验证新实现的层是否正确的相对简单的办法。梯度检查单元测试通过有限差分法来验证一个层的梯度。首先对输入做一个小的扰动 :math:`\Delta x` ,然后观察到输出的变化为 :math:`\Delta y` ,那么,梯度就可以通过这个方程计算得到 :math:`\frac{\Delta y}{\Delta x }` 。之后,再用这个梯度去和 :code:`backward` 函数得到的梯度去对比,以保证梯度计算的正确性。需要注意的是梯度检查仅仅验证了梯度的计算,并不保证 :code:`forward` 和 :code:`backward` 函数的实现是正确的。你需要一些更复杂的单元测试来保证你实现的网络层是正确的。 - -所有网络层的梯度检查单测都位于 :code:`paddle/gserver/tests/test_LayerGrad.cpp` 。我们建议你在写新网络层时把测试代码放入新的文件中。下面列出了全连接层的梯度检查单元测试。它包含以下几步: - -+ 生成网络层配置。网络层配置包含以下几项: - - 偏置参数的大小。(例子中是4096) - - 层的类型。(例子中是fc) - - 层的大小。(例子中是4096) - - 激活的类型。(例子中是softmax) - - dropout的比例。(例子中是0.1) -+ 配置网络层的输入。在这个例子里,我们仅有一个输入。 - - 输入的类型( :code:`INPUT_DATA` ),可以是以下几种: - - :code:`INPUT_DATA` :稠密向量。 - - :code:`INPUT_LABEL` :整数。 - - :code:`INPUT_DATA_TARGET` :稠密向量,但不用于计算梯度。 - - :code:`INPUT_SEQUENCE_DATA` :含有序列信息的稠密向量。 - - :code:`INPUT_HASSUB_SEQUENCE_DATA` :含有序列信息和子序列信息的稠密向量。 - - :code:`INPUT_SEQUENCE_LABEL` :含有序列信息的整数。 - - :code:`INPUT_SPARSE_NON_VALUE_DATA` :0-1稀疏数据。 - - :code:`INPUT_SPARSE_FLOAT_VALUE_DATA` :浮点稀疏数据。 - - 输入的名字。(例子中是 :code:`layer_0` ) - - 输入的大小。(例子中是8192) - - 非零数字的个数,仅对稀疏数据有效。 - - 稀疏数据的格式,仅对稀疏数据有效。 -+ 对每个输入,都需要调用一次 :code:`config.layerConfig.add_inputs();` 。 -+ 调用 :code:`testLayerGrad` 来做梯度检查。它包含以下参数。 - - 层和输入的配置。(例子中是 :code:`config` ) - - 网络层的类型。(例子中是 :code:`fc` ) - - 梯度检查的输入数据的批次大小。(例子中是100) - - 输入是否是转置的。大多数层需要设置为 :code:`false` 。(例子中是 :code:`false` ) - - 是否使用权重。有些层或者激活需要做归一化以保证它们的输出的和是一个常数。例如,softmax激活的输出的和总是1。在这种情况下,我们不能通过常规的梯度检查的方式来计算梯度。因此我们采用输出的加权和(非常数)来计算梯度。(例子中是 :code:`true` ,因为全连接层的激活可以是softmax) - -.. code-block:: c++ - - void testFcLayer(string format, size_t nnz) { - // Create layer configuration. - TestConfig config; - config.biasSize = 4096; - config.layerConfig.set_type("fc"); - config.layerConfig.set_size(4096); - config.layerConfig.set_active_type("softmax"); - config.layerConfig.set_drop_rate(0.1); - // Setup inputs. - config.inputDefs.push_back( - {INPUT_DATA, "layer_0", 8192, nnz, ParaSparse(format)}); - config.layerConfig.add_inputs(); - LOG(INFO) << config.inputDefs[0].sparse.sparse << " " - << config.inputDefs[0].sparse.format; - for (auto useGpu : {false, true}) { - testLayerGrad(config, "fc", 100, /* trans */ false, useGpu, - /* weight */ true); - } - } - -如果你要为了测试而增加新的文件,例如 :code:`paddle/gserver/tests/testFCGrad.cpp` ,你需要把该文件加入 :code:`paddle/gserver/tests/CMakeLists.txt` 中。下面给出了一个例子。当你执行命令 :code:`make tests` 时,所有的单测都会被执行一次。注意,有些层可能需要高精度来保证梯度检查单测正确执行。你需要在配置cmake时将 :code:`WITH_DOUBLE` 设置为 `ON` 。 - -.. code-block:: bash - - add_unittest_without_exec(test_FCGrad - test_FCGrad.cpp - LayerGradUtil.cpp - TestUtil.cpp) - - add_test(NAME test_FCGrad - COMMAND test_FCGrad) - - -实现python封装 -======================== - -python封装的实现使得我们可以在配置文件中使用新实现的网络层。所有的python封装都在 :code:`python/paddle/trainer/config_parser.py` 中。全连接层python封装的例子中包含下面几步: - -- 所有的Python封装都使用 :code:`@config_layer('fc')` 这样的装饰器。网络层的标识符为 :code:`fc` 。 -- 实现构造函数 :code:`__init__` 。 - - 它首先调用基构造函数 :code:`super(FCLayer, self).__init__(name, 'fc', size, inputs=inputs, **xargs)` 。 :code:`FCLayer` 是Python封装的类名。 :code:`fc` 是网络层的标识符。为了封装能够正确工作,这些名字必须要写对。 - - 之后,计算变换矩阵的大小和格式(是否稀疏)。 - -.. 
code-block:: python - - @config_layer('fc') - class FCLayer(LayerBase): - def __init__( - self, - name, - size, - inputs, - bias=True, - **xargs): - super(FCLayer, self).__init__(name, 'fc', size, inputs=inputs, **xargs) - for input_index in xrange(len(self.inputs)): - input_layer = self.get_input_layer(input_index) - psize = self.config.size * input_layer.size - dims = [input_layer.size, self.config.size] - format = self.inputs[input_index].format - sparse = format == "csr" or format == "csc" - if sparse: - psize = self.inputs[input_index].nnz - self.create_input_parameter(input_index, psize, dims, sparse, format) - self.create_bias_parameter(bias, self.config.size) - -在网络配置中,网络层的细节可以通过下面这些代码片段来指定。这个类的参数包括: - -- :code:`name` 是网络层实例的名字标识符。 -- :code:`type` 是网络层的类型,通过网络层的标识符来指定。 -- :code:`size` 是网络层输出的大小。 -- :code:`bias` 表明这个层的一个实例是否需要偏置。 -- :code:`inputs` 说明这个层的输入,输入是由一个list中的网络层实例的名字组成的。 - -.. code-block:: python - - Layer( - name = "fc1", - type = "fc", - size = 64, - bias = True, - inputs = [Input("pool3")] - ) - -我们建议你为你的Python封装实现一个“助手”,使得搭模型时更方便。具体可以参考 :code:`python/paddle/trainer_config_helpers/layers.py` 。 +================ +实现新的网络层 +================ + +这份教程展示了如何在PaddlePaddle中实现一个自定义的网络层。在这里我们使用全连接层作为例子来展示实现新网络层所需要的四个步骤。 + +1. 推导该层前向和后向传递的方程。 +2. 实现该层的C++类。 +3. 增加梯度检测的单元测试,以保证梯度的正确计算。 +4. 封装该层的Python接口。 + +推导方程 +================ + +首先我们需要推导该网络层的*前向传播*和*后向传播*的方程。前向传播给定输入,计算输出。后向传播给定输出的梯度,计算输入和参数的梯度。 + +下图是一个全连接层的示意图。在全连接层中,每个输出节点都连接到所有的输入节点上。 + +.. image:: FullyConnected.jpg + :align: center + :scale: 60 % + +一个网络层的前向传播部分把输入转化为相应的输出。 +全连接层以一个维度为 :math:`D_i` 的稠密向量作为输入,使用一个尺度为 :math:`D_i \times D_o` 的变换矩阵 :math:`W` 把 :math:`x` 映射到一个维度为 :math:`D_o` 的向量,并在乘积结果上再加上维度为 :math:`D_o` 的偏置向量 :math:`b` 。 + +.. math:: + + y = f(W^T x + b) + +其中 :math:`f(.)` 是一个非线性的*激活方程*,例如sigmoid, tanh,以及Relu。 + +变换矩阵 :math:`W` 和偏置向量 :math:`b` 是该网络层的*参数*。一个网络层的参数是在*反向传播*时被训练的。反向传根据输出的梯度,分别计算每个参数的梯度,以及输入的梯度。优化器则用链式法则来对每个参数计算损失函数的梯度。 + +假设损失函数是 :math:`c(y)` ,那么 + +.. math:: + + \frac{\partial c(y)}{\partial x} = \frac{\partial c(y)}{\partial y} \frac{\partial y}{\partial x} + +假设 :math:`z = f(W^T x + b)` ,那么 + +.. math:: + + \frac{\partial y}{\partial z} = \frac{\partial f(z)}{\partial z} + +PaddlePaddle的base layer类可以自动计算上面的导数。 + +因此,对全连接层来说,我们需要计算: + +.. math:: + + \frac{\partial z}{\partial x} = W, \frac{\partial z_j}{\partial W_{ij}} = x_i, \frac{\partial z}{\partial b} = \mathbf 1 + +其中 :math:`\mathbf 1` 是一个全1的向量, :math:`W_{ij}` 是矩阵 :math:`W` 第i行第j列的数值, :math:`z_j` 是向量 :math:`z` 的第j个值, :math:`x_i` 是向量 :math:`x` 的第i个值。 + +最后我们使用链式法则计算 :math:`\frac{\partial z}{\partial x}` 以及 :math:`\frac{\partial z}{\partial W}` 。计算的细节将在下面的小节给出。 + +实现C++类 +=================== + +一个网络层的C++类需要实现初始化,前向和后向。全连接层的实现位于:code:`paddle/gserver/layers/FullyConnectedLayer.h`及:code:`paddle/gserver/layers/FullyConnectedLayer.cpp`。这里我们展示一份简化过的代码。 + +这个类需要继承 :code:`paddle::Layer` 这个基类,并且需要重写基类中的以下几个虚函数: + +- 类的构造函数和析构函数。 +- :code:`init` 函数。用于初始化参数和设置。 +- :code:`forward` 。实现网络层的前向传播。 +- :code:`backward` 。实现网络层的后向传播。 +- :code:`prefetch` 。用来从参数服务器预取参数矩阵相应的行。如果网络层不需要远程稀疏更新,则不需要重写该函数。(大多数网络层不需要支持远程稀疏更新) + + +头文件如下: + +.. 
code-block:: c++ + + namespace paddle { + /** + * 全连接层的每个输出都连接到上一层的所有的神经元上。 + * 它的输入与经过学习的参数做内积并加上偏置(可选)。 + * + * 配置文件接口是fc_layer。 + */ + + class FullyConnectedLayer : public Layer { + protected: + WeightList weights_; + std::unique_ptr biases_; + + public: + explicit FullyConnectedLayer(const LayerConfig& config) + : Layer(config) {} + ~FullyConnectedLayer() {} + + bool init(const LayerMap& layerMap, const ParameterMap& parameterMap); + + Weight& getWeight(int idx) { return *weights_[idx]; } + + void prefetch(); + void forward(PassType passType); + void backward(const UpdateCallback& callback = nullptr); + }; + } // namespace paddle + +头文件中把参数定义为类的成员变量。我们使用 :code:`Weight` 类作为参数的抽象,它支持多线程更新。该类的实现细节在“实现细节”中详细介绍。 + +- :code:`weights_` 是存有一系列变换矩阵的权重。在当前的实现方式下,网络层可以有多个输入。因此,它可能有不止一个权重。每个权重对应一个输入。 +- :code:`biases_` 是存有偏置向量的权重。 + +全连接层没有网络层配置的超参数。如果一个网络层需要配置的话,通常的做法是将配置存于 :code:`LayerConfig& config` 中,并在类构建函数中把它放入一个类成员变量里。 + +下面的代码片段实现了 :code:`init` 函数。 + +- 首先,所有的 :code:`init` 函数必须先调用基类中的函数 :code:`Layer::init(layerMap, parameterMap);` 。该语句会为每个层初始化其所需要的变量和连接。 +- 之后初始化所有的权重矩阵 :math:`W` 。当前的实现方式下,网络层可以有多个输入。因此,它可能有不止一个权重。 +- 最后,初始化偏置向量。 + + +.. code-block:: c++ + + bool FullyConnectedLayer::init(const LayerMap& layerMap, + const ParameterMap& parameterMap) { + /* 初始化父类 */ + Layer::init(layerMap, parameterMap); + + /* 初始化权重表 */ + CHECK(inputLayers_.size() == parameters_.size()); + for (size_t i = 0; i < inputLayers_.size(); i++) { + // 获得参数尺寸 + size_t height = inputLayers_[i]->getSize(); + size_t width = getSize(); + + // 新建一个权重 + if (parameters_[i]->isSparse()) { + CHECK_LE(parameters_[i]->getSize(), width * height); + } else { + CHECK_EQ(parameters_[i]->getSize(), width * height); + } + Weight* w = new Weight(height, width, parameters_[i]); + + // 将新建的权重加入权重表 + weights_.emplace_back(w); + } + + /* 初始化biases_ */ + if (biasParameter_.get() != NULL) { + biases_ = std::unique_ptr(new Weight(1, getSize(), biasParameter_)); + } + + return true; + } + +实现前向传播的部分有下面几个步骤。 + +- 每个层在其 :code:`forward` 函数的开头必须调用 :code:`Layer::forward(passType);` 。 +- 之后使用 :code:`reserveOutput(batchSize, size);` 为输出分配内存。由于我们支持训练数据有不同的批次大小,所以这一步是必要的。 :code:`reserveOutput` 会相应地改变输出的尺寸。为了保证效率,如果需要扩大矩阵,我们会重新分配内存;如果需要缩减矩阵,我们会继续使用现有的内存块。 +- 之后使用矩阵运算函数来计算 :math:`\sum_i W_i x + b`。:code:`getInput(i).value` 返回第i个输入矩阵。每个输入都是一个 :math:`batchSize \times dim` 的矩阵,每行表示一个批次中的单个输入。对于我们支持的全部矩阵操作,请参考 :code:`paddle/math/Matrix.h`和:code:`paddle/math/BaseMatrix.h` 。 +- 最终,使用 :code:`forwardActivation();` 进行激活操作。这会自动进行网络配置中声明的激活操作。 + + +.. code-block:: c++ + + void FullyConnectedLayer::forward(PassType passType) { + Layer::forward(passType); + + /* 若有必要,为output_申请内存 */ + int batchSize = getInput(0).getBatchSize(); + int size = getSize(); + + { + // 设置输出的尺寸 + reserveOutput(batchSize, size); + } + + MatrixPtr outV = getOutputValue(); + + // 对每个输入乘上变换矩阵 + for (size_t i = 0; i != inputLayers_.size(); ++i) { + auto input = getInput(i); + CHECK(input.value) << "The input of 'fc' layer must be matrix"; + i == 0 ? 
outV->mul(input.value, weights_[i]->getW(), 1, 0) + : outV->mul(input.value, weights_[i]->getW(), 1, 1); + } + + /* 加上偏置向量 */ + if (biases_.get() != NULL) { + outV->addBias(*(biases_->getW()), 1); + } + + /* 激活 */ { + forwardActivation(); + } + } + +实现后向传播的部分有下面几个步骤。 + +- :code:`backwardActivation()` 计算激活函数的梯度。通过 :code:`getOutputGrad()` 来获得输出的梯度,调用该函数后,梯度会就地(不使用额外空间)乘上输出的梯度。 +- 计算偏置的梯度。注意,我们使用 :code:`biases_->getWGrad()` 来得到某个特定参数的梯度矩阵。在一个参数的梯度被更新后,**必须**要调用 :code:`getParameterPtr()->incUpdate(callback);` 。这用于在多线程和多机上更新参数。 +- 最后,计算转换矩阵和输入的梯度,并对相应的参数调用 :code:`incUpdate` 。PaddlePaddle可以通过该机制判断是否已经收集齐所有的梯度,从而可以做一些与计算重叠的工作(例如,网络通信)。 + + +.. code-block:: c++ + + void FullyConnectedLayer::backward(const UpdateCallback& callback) { + /* 对激活求导 */ { + backwardActivation(); + } + + if (biases_ && biases_->getWGrad()) { + biases_->getWGrad()->collectBias(*getOutputGrad(), 1); + + biases_->getParameterPtr()->incUpdate(callback); + } + + bool syncFlag = hl_get_sync_flag(); + + for (size_t i = 0; i != inputLayers_.size(); ++i) { + /* 计算当前层权重的梯度 */ + if (weights_[i]->getWGrad()) { + MatrixPtr input_T = getInputValue(i)->getTranspose(); + MatrixPtr oGrad = getOutputGrad(); + { + weights_[i]->getWGrad()->mul(input_T, oGrad, 1, 1); + } + } + + + /* 计算输入层的偏差 */ + MatrixPtr preGrad = getInputGrad(i); + if (NULL != preGrad) { + MatrixPtr weights_T = weights_[i]->getW()->getTranspose(); + preGrad->mul(getOutputGrad(), weights_T, 1, 1); + } + + { + weights_[i]->getParameterPtr()->incUpdate(callback); + } + } + } + + :code:`prefetch` 函数指出了在训练时需要从参数服务器取出的行。仅在远程稀疏训练时有效。使用远程稀疏方式训练时,完整的参数矩阵被分布在不同的参数服务器上。当网络层用一个批次做训练时,该批次的输入中仅有一个子集是非零的。因此,该层仅需要这些非零样本位置所对应的变换矩阵的那些行。 :code:`prefetch` 表明了这些行的标号。 + +大多数层不需要远程稀疏训练函数。这种情况下不需要重写该函数。 + +.. code-block:: c++ + + void FullyConnectedLayer::prefetch() { + for (size_t i = 0; i != inputLayers_.size(); ++i) { + auto* sparseParam = + dynamic_cast(weights_[i]->getW().get()); + if (sparseParam) { + MatrixPtr input = getInputValue(i); + sparseParam->addRows(input); + } + } + } + +最后,使用 :code:`REGISTER_LAYER(fc, FullyConnectedLayer);` 来注册该层。 :code:`fc` 是该层的标识符, :code:`FullyConnectedLayer` 是该层的类名。 + +.. 
code-block:: c++ + + namespace paddle { + REGISTER_LAYER(fc, FullyConnectedLayer); + } + +若 :code:`cpp` 被放在 :code:`paddle/gserver/layers` 目录下,其会自动被加入编译列表。 + + +写梯度检查单元测试 +=============================== + +写梯度检查单元测试是一个验证新实现的层是否正确的相对简单的办法。梯度检查单元测试通过有限差分法来验证一个层的梯度。首先对输入做一个小的扰动 :math:`\Delta x` ,然后观察到输出的变化为 :math:`\Delta y` ,那么,梯度就可以通过这个方程计算得到 :math:`\frac{\Delta y}{\Delta x }` 。之后,再用这个梯度去和 :code:`backward` 函数得到的梯度去对比,以保证梯度计算的正确性。需要注意的是梯度检查仅仅验证了梯度的计算,并不保证 :code:`forward` 和 :code:`backward` 函数的实现是正确的。你需要一些更复杂的单元测试来保证你实现的网络层是正确的。 + +所有网络层的梯度检查单测都位于 :code:`paddle/gserver/tests/test_LayerGrad.cpp` 。我们建议你在写新网络层时把测试代码放入新的文件中。下面列出了全连接层的梯度检查单元测试。它包含以下几步: + ++ 生成网络层配置。网络层配置包含以下几项: + - 偏置参数的大小。(例子中是4096) + - 层的类型。(例子中是fc) + - 层的大小。(例子中是4096) + - 激活的类型。(例子中是softmax) + - dropout的比例。(例子中是0.1) ++ 配置网络层的输入。在这个例子里,我们仅有一个输入。 + - 输入的类型( :code:`INPUT_DATA` ),可以是以下几种: + - :code:`INPUT_DATA` :稠密向量。 + - :code:`INPUT_LABEL` :整数。 + - :code:`INPUT_DATA_TARGET` :稠密向量,但不用于计算梯度。 + - :code:`INPUT_SEQUENCE_DATA` :含有序列信息的稠密向量。 + - :code:`INPUT_HASSUB_SEQUENCE_DATA` :含有序列信息和子序列信息的稠密向量。 + - :code:`INPUT_SEQUENCE_LABEL` :含有序列信息的整数。 + - :code:`INPUT_SPARSE_NON_VALUE_DATA` :0-1稀疏数据。 + - :code:`INPUT_SPARSE_FLOAT_VALUE_DATA` :浮点稀疏数据。 + - 输入的名字。(例子中是 :code:`layer_0` ) + - 输入的大小。(例子中是8192) + - 非零数字的个数,仅对稀疏数据有效。 + - 稀疏数据的格式,仅对稀疏数据有效。 ++ 对每个输入,都需要调用一次 :code:`config.layerConfig.add_inputs();` 。 ++ 调用 :code:`testLayerGrad` 来做梯度检查。它包含以下参数。 + - 层和输入的配置。(例子中是 :code:`config` ) + - 网络层的类型。(例子中是 :code:`fc` ) + - 梯度检查的输入数据的批次大小。(例子中是100) + - 输入是否是转置的。大多数层需要设置为 :code:`false` 。(例子中是 :code:`false` ) + - 是否使用权重。有些层或者激活需要做归一化以保证它们的输出的和是一个常数。例如,softmax激活的输出的和总是1。在这种情况下,我们不能通过常规的梯度检查的方式来计算梯度。因此我们采用输出的加权和(非常数)来计算梯度。(例子中是 :code:`true` ,因为全连接层的激活可以是softmax) + +.. code-block:: c++ + + void testFcLayer(string format, size_t nnz) { + // Create layer configuration. + TestConfig config; + config.biasSize = 4096; + config.layerConfig.set_type("fc"); + config.layerConfig.set_size(4096); + config.layerConfig.set_active_type("softmax"); + config.layerConfig.set_drop_rate(0.1); + // Setup inputs. + config.inputDefs.push_back( + {INPUT_DATA, "layer_0", 8192, nnz, ParaSparse(format)}); + config.layerConfig.add_inputs(); + LOG(INFO) << config.inputDefs[0].sparse.sparse << " " + << config.inputDefs[0].sparse.format; + for (auto useGpu : {false, true}) { + testLayerGrad(config, "fc", 100, /* trans */ false, useGpu, + /* weight */ true); + } + } + +如果你要为了测试而增加新的文件,例如 :code:`paddle/gserver/tests/testFCGrad.cpp` ,你需要把该文件加入 :code:`paddle/gserver/tests/CMakeLists.txt` 中。下面给出了一个例子。当你执行命令 :code:`make tests` 时,所有的单测都会被执行一次。注意,有些层可能需要高精度来保证梯度检查单测正确执行。你需要在配置cmake时将 :code:`WITH_DOUBLE` 设置为 `ON` 。 + +.. code-block:: bash + + add_unittest_without_exec(test_FCGrad + test_FCGrad.cpp + LayerGradUtil.cpp + TestUtil.cpp) + + add_test(NAME test_FCGrad + COMMAND test_FCGrad) + + +实现python封装 +======================== + +python封装的实现使得我们可以在配置文件中使用新实现的网络层。所有的python封装都在 :code:`python/paddle/trainer/config_parser.py` 中。全连接层python封装的例子中包含下面几步: + +- 所有的Python封装都使用 :code:`@config_layer('fc')` 这样的装饰器。网络层的标识符为 :code:`fc` 。 +- 实现构造函数 :code:`__init__` 。 + - 它首先调用基构造函数 :code:`super(FCLayer, self).__init__(name, 'fc', size, inputs=inputs, **xargs)` 。 :code:`FCLayer` 是Python封装的类名。 :code:`fc` 是网络层的标识符。为了封装能够正确工作,这些名字必须要写对。 + - 之后,计算变换矩阵的大小和格式(是否稀疏)。 + +.. 
code-block:: python + + @config_layer('fc') + class FCLayer(LayerBase): + def __init__( + self, + name, + size, + inputs, + bias=True, + **xargs): + super(FCLayer, self).__init__(name, 'fc', size, inputs=inputs, **xargs) + for input_index in xrange(len(self.inputs)): + input_layer = self.get_input_layer(input_index) + psize = self.config.size * input_layer.size + dims = [input_layer.size, self.config.size] + format = self.inputs[input_index].format + sparse = format == "csr" or format == "csc" + if sparse: + psize = self.inputs[input_index].nnz + self.create_input_parameter(input_index, psize, dims, sparse, format) + self.create_bias_parameter(bias, self.config.size) + +在网络配置中,网络层的细节可以通过下面这些代码片段来指定。这个类的参数包括: + +- :code:`name` 是网络层实例的名字标识符。 +- :code:`type` 是网络层的类型,通过网络层的标识符来指定。 +- :code:`size` 是网络层输出的大小。 +- :code:`bias` 表明这个层的一个实例是否需要偏置。 +- :code:`inputs` 说明这个层的输入,输入是由一个list中的网络层实例的名字组成的。 + +.. code-block:: python + + Layer( + name = "fc1", + type = "fc", + size = 64, + bias = True, + inputs = [Input("pool3")] + ) + +我们建议你为你的Python封装实现一个“助手”,使得搭模型时更方便。具体可以参考 :code:`python/paddle/trainer_config_helpers/layers.py` 。 From 41d1765db88de67039ea3226a812b4f207673ac0 Mon Sep 17 00:00:00 2001 From: Zhizhong Su Date: Wed, 4 Jan 2017 13:35:01 +0000 Subject: [PATCH 23/51] a missing character in line 32 --- doc/howto/dev/new_layer_cn.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/howto/dev/new_layer_cn.rst b/doc/howto/dev/new_layer_cn.rst index 897a8be5b3..9489a921c7 100644 --- a/doc/howto/dev/new_layer_cn.rst +++ b/doc/howto/dev/new_layer_cn.rst @@ -29,7 +29,7 @@ 其中 :math:`f(.)` 是一个非线性的*激活方程*,例如sigmoid, tanh,以及Relu。 -变换矩阵 :math:`W` 和偏置向量 :math:`b` 是该网络层的*参数*。一个网络层的参数是在*反向传播*时被训练的。反向传根据输出的梯度,分别计算每个参数的梯度,以及输入的梯度。优化器则用链式法则来对每个参数计算损失函数的梯度。 +变换矩阵 :math:`W` 和偏置向量 :math:`b` 是该网络层的*参数*。一个网络层的参数是在*反向传播*时被训练的。反向传播根据输出的梯度,分别计算每个参数的梯度,以及输入的梯度。优化器则用链式法则来对每个参数计算损失函数的梯度。 假设损失函数是 :math:`c(y)` ,那么 From 8acd1ac31a741558725bebe1c97fcce7e28bdef5 Mon Sep 17 00:00:00 2001 From: liaogang Date: Thu, 5 Jan 2017 00:23:23 +0800 Subject: [PATCH 24/51] Add extern python interp --- paddle/api/CMakeLists.txt | 32 ++++++++++++++++++- paddle/api/test/CMakeLists.txt | 2 +- paddle/api/test/run_tests.sh | 10 +++--- paddle/scripts/travis/build_and_test.sh | 9 ++---- paddle/setup.py.in | 2 ++ paddle/trainer/tests/CMakeLists.txt | 5 +-- paddle/trainer/tests/test_Trainer.cpp | 5 --- paddle/utils/.gitignore | 1 + paddle/utils/CMakeLists.txt | 2 ++ .../{PythonUtil.cpp => PythonUtil.cpp.in} | 2 ++ proto/CMakeLists.txt | 4 +-- python/CMakeLists.txt | 6 +--- .../tests/CMakeLists.txt | 8 ++--- .../tests/configs/generate_protostr.sh | 8 ++--- .../tests/configs/run_tests.sh | 2 +- 15 files changed, 62 insertions(+), 36 deletions(-) rename paddle/utils/{PythonUtil.cpp => PythonUtil.cpp.in} (98%) diff --git a/paddle/api/CMakeLists.txt b/paddle/api/CMakeLists.txt index da6dad10cd..dd617e3268 100644 --- a/paddle/api/CMakeLists.txt +++ b/paddle/api/CMakeLists.txt @@ -1,3 +1,21 @@ +FUNCTION(generate_python_api target_name) + ADD_CUSTOM_COMMAND(OUTPUT ${PROJ_ROOT}/paddle/py_paddle/swig_paddle.py + ${PROJ_ROOT}/paddle/Paddle_wrap.cxx + ${PROJ_ROOT}/paddle/Paddle_wrap.h + COMMAND ${SWIG_EXECUTABLE} -python -c++ -outcurrentdir -I../ api/Paddle.swig + && mv ${PROJ_ROOT}/paddle/swig_paddle.py ${PROJ_ROOT}/paddle/py_paddle/swig_paddle.py + DEPENDS ${PROJ_ROOT}/paddle/api/Paddle.swig + ${PROJ_ROOT}/paddle/api/PaddleAPI.h + ${external_project_dependencies} + WORKING_DIRECTORY ${PROJ_ROOT}/paddle + COMMENT "Generate Python API from swig") 
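+
+  # ALL makes this wrapper target part of the default build; its DEPENDS
+  # on the generated files triggers the SWIG command above whenever those
+  # outputs are missing or out of date.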
+ ADD_CUSTOM_TARGET(${target_name} ALL DEPENDS + ${PROJ_ROOT}/paddle/Paddle_wrap.cxx + ${PROJ_ROOT}/paddle/Paddle_wrap.h + ${PROJ_ROOT}/paddle/py_paddle/swig_paddle.py + ${external_project_dependencies}) +ENDFUNCTION(generate_python_api) + set(API_SOURCES Arguments.cpp ConfigParser.cpp @@ -42,7 +60,7 @@ file(GLOB PY_PADDLE_PYTHON_FILES ${PROJ_ROOT}/paddle/py_paddle/*.py) # TODO(yuyang18) : make wheel name calculated by cmake add_custom_command(OUTPUT ${PROJ_ROOT}/paddle/dist/.timestamp - COMMAND ${PYTHON_EXECUTABLE} setup.py bdist_wheel + COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py bdist_wheel COMMAND ${CMAKE_COMMAND} -E touch dist/.timestamp COMMAND rm -rf py_paddle.egg-info build WORKING_DIRECTORY ${PROJ_ROOT}/paddle @@ -76,5 +94,17 @@ add_dependencies(python_api_wheel python_swig_sources paddle_cuda) if(WITH_TESTING) + SET(PIP_SOURCES_DIR ${PYTHON_SOURCES_DIR}/pip) + ExternalProject_Add(pip + ${EXTERNAL_PROJECT_LOG_ARGS} + GIT_REPOSITORY https://github.com/pypa/pip.git + GIT_TAG 9.0.1 + PREFIX ${PIP_SOURCES_DIR} + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py install + BUILD_IN_SOURCE 1 + DEPENDS python setuptools python_api_wheel + ) add_subdirectory(test) endif() diff --git a/paddle/api/test/CMakeLists.txt b/paddle/api/test/CMakeLists.txt index 08a0fe96a0..985df6f56e 100644 --- a/paddle/api/test/CMakeLists.txt +++ b/paddle/api/test/CMakeLists.txt @@ -1,2 +1,2 @@ add_test(NAME test_swig_api - COMMAND bash ${PROJ_ROOT}/paddle/api/test/run_tests.sh) + COMMAND bash ${PROJ_ROOT}/paddle/api/test/run_tests.sh ${PYTHON_EXECUTABLE} ${PYTHON_INSTALL_DIR}/bin/pip) diff --git a/paddle/api/test/run_tests.sh b/paddle/api/test/run_tests.sh index 2f12ba0264..f00ec2c967 100755 --- a/paddle/api/test/run_tests.sh +++ b/paddle/api/test/run_tests.sh @@ -20,11 +20,11 @@ popd > /dev/null cd $SCRIPTPATH -rm -rf .test_env -virtualenv .test_env -source .test_env/bin/activate +# rm -rf .test_env +# virtualenv .test_env +# source .test_env/bin/activate -pip --timeout 600 install ../../dist/*.whl +$1 -m pip install ../../dist/*.whl test_list="testArguments.py testGradientMachine.py testMatrix.py testVector.py testTrain.py testTrainer.py" @@ -33,7 +33,7 @@ export PYTHONPATH=$PWD/../../../python/ for fn in $test_list do echo "test $fn" - python $fn + $1 $fn if [ $? -ne 0 ]; then exit 1 fi diff --git a/paddle/scripts/travis/build_and_test.sh b/paddle/scripts/travis/build_and_test.sh index 9caeb21beb..fb21712188 100755 --- a/paddle/scripts/travis/build_and_test.sh +++ b/paddle/scripts/travis/build_and_test.sh @@ -1,15 +1,13 @@ #!/bin/bash -./build_submodules.sh source ./common.sh -CMAKE_EXTRA="" + if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then - CMAKE_EXTRA="-DPYTHON_LIBRARY=/usr/local/Cellar/python/2.7.12_1/Frameworks/Python.framework/Versions/2.7/lib/python2.7/config/libpython2.7.dylib" + CMAKE_EXTRA="-DWITH_SWIG_PY=OFF" else CMAKE_EXTRA="-DWITH_SWIG_PY=ON" fi - -cmake .. -DCMAKE_BUILD_TYPE=Debug -DWITH_GPU=OFF -DWITH_DOC=OFF -DWITH_TESTING=ON -DON_TRAVIS=ON -DON_COVERALLS=ON ${CMAKE_EXTRA} +cmake .. 
-DWITH_GPU=OFF -DWITH_DOC=OFF -DWITH_TESTING=ON -DON_TRAVIS=ON -DON_COVERALLS=ON ${CMAKE_EXTRA} NPROC=1 if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then @@ -22,6 +20,5 @@ elif [[ "$TRAVIS_OS_NAME" == "osx" ]]; then env CTEST_OUTPUT_ON_FAILURE=1 make test ARGS="-j $NPROC" fi - sudo make install sudo paddle version diff --git a/paddle/setup.py.in b/paddle/setup.py.in index 464ad63286..e3650bf1c0 100644 --- a/paddle/setup.py.in +++ b/paddle/setup.py.in @@ -14,7 +14,9 @@ # This file is used to build paddle python binding package. # It will be invoked by Makefile that generated by COMAKE + from setuptools import setup, Extension + import numpy as np import api.paddle_ld_flags import platform diff --git a/paddle/trainer/tests/CMakeLists.txt b/paddle/trainer/tests/CMakeLists.txt index 28c3d6f263..22e07bd0e9 100644 --- a/paddle/trainer/tests/CMakeLists.txt +++ b/paddle/trainer/tests/CMakeLists.txt @@ -17,9 +17,10 @@ add_test(NAME test_Compare ################# test_Trainer ########################### add_unittest_without_exec(test_Trainer test_Trainer.cpp) -set(diy_dll_dir ${CMAKE_CURRENT_BINARY_DIR}/../../gserver/tests) add_test(NAME test_Trainer COMMAND ${PROJ_ROOT}/paddle/.set_python_path.sh -d ${PROJ_ROOT}/python/ + ${PYTHON_EXECUTABLE} ${PROJ_ROOT}/paddle/trainer/tests/gen_proto_data.py && + ${PROJ_ROOT}/paddle/.set_python_path.sh -d ${PROJ_ROOT}/python/ ${CMAKE_CURRENT_BINARY_DIR}/test_Trainer WORKING_DIRECTORY ${PROJ_ROOT}/paddle/) @@ -82,5 +83,5 @@ add_test(NAME test_PyDataProviderWrapper #################### test_config_parser ######################### add_test(NAME test_config_parser COMMAND ${PROJ_ROOT}/paddle/.set_python_path.sh -d ${PROJ_ROOT}/python/ - python ${PROJ_ROOT}/paddle/trainer/tests/config_parser_test.py + ${PYTHON_EXECUTABLE} ${PROJ_ROOT}/paddle/trainer/tests/config_parser_test.py WORKING_DIRECTORY ${PROJ_ROOT}/paddle/) diff --git a/paddle/trainer/tests/test_Trainer.cpp b/paddle/trainer/tests/test_Trainer.cpp index 371282dd6b..264bc46ebc 100644 --- a/paddle/trainer/tests/test_Trainer.cpp +++ b/paddle/trainer/tests/test_Trainer.cpp @@ -96,11 +96,6 @@ TEST(checkGradient, multi) { TEST(checkGradient, hsigmoid) { checkGradientTest(configFile2, false, false); } TEST(checkGradient, chunk) { -#if defined(__APPLE__) || defined(__OSX__) - EXPECT_EQ(0, system("python trainer/tests/gen_proto_data.py")); -#else - EXPECT_EQ(0, system("python2 trainer/tests/gen_proto_data.py")); -#endif checkGradientTest(configFile3, false, false); #ifndef PADDLE_ONLY_CPU checkGradientTest(configFile3, true, true); diff --git a/paddle/utils/.gitignore b/paddle/utils/.gitignore index f2cfd74094..956b606a18 100644 --- a/paddle/utils/.gitignore +++ b/paddle/utils/.gitignore @@ -1 +1,2 @@ enable_virtualenv.c +PythonUtil.cpp diff --git a/paddle/utils/CMakeLists.txt b/paddle/utils/CMakeLists.txt index 45240b5002..10d906ee16 100644 --- a/paddle/utils/CMakeLists.txt +++ b/paddle/utils/CMakeLists.txt @@ -1,5 +1,7 @@ # The utilities for paddle +configure_file(PythonUtil.cpp.in ${PROJ_ROOT}/paddle/utils/PythonUtil.cpp) + file(GLOB UTIL_HEADERS . *.h) file(GLOB UTIL_SOURCES . 
*.cpp) create_resources(enable_virtualenv.py enable_virtualenv.c) diff --git a/paddle/utils/PythonUtil.cpp b/paddle/utils/PythonUtil.cpp.in similarity index 98% rename from paddle/utils/PythonUtil.cpp rename to paddle/utils/PythonUtil.cpp.in index 7faeff55c2..e0caaf4cd6 100644 --- a/paddle/utils/PythonUtil.cpp +++ b/paddle/utils/PythonUtil.cpp.in @@ -195,6 +195,8 @@ extern const char enable_virtualenv_py[]; } void initPython(int argc, char** argv) { #ifndef PADDLE_NO_PYTHON + char PythonHome[] = "@PYTHON_INSTALL_DIR@"; // NOLINT + Py_SetPythonHome(PythonHome); Py_SetProgramName(argv[0]); Py_Initialize(); PySys_SetArgv(argc, argv); diff --git a/proto/CMakeLists.txt b/proto/CMakeLists.txt index c4e170b10f..e854b2b427 100644 --- a/proto/CMakeLists.txt +++ b/proto/CMakeLists.txt @@ -18,7 +18,7 @@ foreach(filename ${proto_filenames}) ${PROTO_GEN} ${CUR_PROTO_GEN}) add_custom_command(OUTPUT ${CUR_PROTO_GEN} - COMMAND ${PROTOBUF_PROTOC_EXECUTABLE} + COMMAND env ${py_env} ${PROTOBUF_PROTOC_EXECUTABLE} --cpp_out ${CMAKE_CURRENT_BINARY_DIR} --proto_path ${PROJ_ROOT}/proto ${PROJ_ROOT}/proto/${filename} DEPENDS ${filename} ${external_project_dependencies}) @@ -29,7 +29,7 @@ foreach(filename ${proto_filenames}) ${CUR_PROTO_GEN_PY} ${PROTO_GEN_PY}) add_custom_command(OUTPUT ${CUR_PROTO_GEN_PY} - COMMAND ${PROTOBUF_PROTOC_EXECUTABLE} --python_out ${PROJ_ROOT}/python/paddle/proto + COMMAND env ${py_env} ${PROTOBUF_PROTOC_EXECUTABLE} --python_out ${PROJ_ROOT}/python/paddle/proto --proto_path ${PROJ_ROOT}/proto ${PROJ_ROOT}/proto/${filename} DEPENDS ${filename} ${external_project_dependencies}) endforeach() diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index 6b80e4d58e..0a3599a47a 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -14,17 +14,13 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/setup.py.in ${CMAKE_CURRENT_BINARY_DIR}/setup.py) add_custom_command(OUTPUT ${OUTPUT_DIR}/.timestamp - COMMAND ${PYTHON_EXECUTABLE} setup.py bdist_wheel + COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py bdist_wheel COMMAND ${CMAKE_COMMAND} -E touch ${OUTPUT_DIR}/.timestamp DEPENDS gen_proto_py ${PY_FILES}) add_custom_target(paddle_python ALL DEPENDS ${OUTPUT_DIR}/.timestamp) -find_python_module(pip REQUIRED) -find_python_module(wheel REQUIRED) -find_python_module(google.protobuf REQUIRED) - add_subdirectory(paddle/trainer_config_helpers/tests) install(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/dist/ diff --git a/python/paddle/trainer_config_helpers/tests/CMakeLists.txt b/python/paddle/trainer_config_helpers/tests/CMakeLists.txt index d1a9843d32..403aafabe9 100644 --- a/python/paddle/trainer_config_helpers/tests/CMakeLists.txt +++ b/python/paddle/trainer_config_helpers/tests/CMakeLists.txt @@ -1,12 +1,12 @@ #################### test_config_parser ######################### add_test(NAME layers_test COMMAND ${PROJ_ROOT}/paddle/.set_python_path.sh -d ${PROJ_ROOT}/python/ - python ${PROJ_ROOT}/python/paddle/trainer_config_helpers/tests/layers_test.py + ${PYTHON_EXECUTABLE} ${PROJ_ROOT}/python/paddle/trainer_config_helpers/tests/layers_test.py WORKING_DIRECTORY ${PROJ_ROOT}/python/paddle) add_test(NAME test_reset_hook COMMAND ${PROJ_ROOT}/paddle/.set_python_path.sh -d ${PROJ_ROOT}/python/ - python ${PROJ_ROOT}/python/paddle/trainer_config_helpers/tests/test_reset_hook.py + ${PYTHON_EXECUTABLE} ${PROJ_ROOT}/python/paddle/trainer_config_helpers/tests/test_reset_hook.py WORKING_DIRECTORY ${PROJ_ROOT}/python/paddle) if (PROTOBUF_3) @@ -14,12 +14,12 @@ if (PROTOBUF_3) ProtobufEqualMain.cpp) add_test(NAME 
test_layerHelpers COMMAND - ${PROJ_ROOT}/python/paddle/trainer_config_helpers/tests/configs/run_tests.sh + ${PROJ_ROOT}/python/paddle/trainer_config_helpers/tests/configs/run_tests.sh ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/protobuf_equal ) else() add_test(NAME test_layerHelpers COMMAND - ${PROJ_ROOT}/python/paddle/trainer_config_helpers/tests/configs/run_tests.sh + ${PROJ_ROOT}/python/paddle/trainer_config_helpers/tests/configs/run_tests.sh ${PYTHON_EXECUTABLE} ) endif() diff --git a/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh b/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh index a54af94ce3..ee5961af75 100755 --- a/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh +++ b/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh @@ -10,13 +10,13 @@ protostr=$PWD/protostr for conf in ${configs[*]} do echo "Generating " $conf - python -m paddle.utils.dump_config $conf.py > $protostr/$conf.protostr.unittest - cat ${conf}.py |python test_config_parser_for_non_file_config.py > $protostr/$conf.protostr.non_file_config.unittest + $1 -m paddle.utils.dump_config $conf.py > $protostr/$conf.protostr.unittest + cat ${conf}.py |$1 test_config_parser_for_non_file_config.py > $protostr/$conf.protostr.non_file_config.unittest done for conf in ${whole_configs[*]} do echo "Generating " $conf - python -m paddle.utils.dump_config $conf.py "" --whole > $protostr/$conf.protostr.unittest - cat ${conf}.py |python test_config_parser_for_non_file_config.py --whole > $protostr/$conf.protostr.non_file_config.unittest + $1 -m paddle.utils.dump_config $conf.py "" --whole > $protostr/$conf.protostr.unittest + cat ${conf}.py |$1 test_config_parser_for_non_file_config.py --whole > $protostr/$conf.protostr.non_file_config.unittest done diff --git a/python/paddle/trainer_config_helpers/tests/configs/run_tests.sh b/python/paddle/trainer_config_helpers/tests/configs/run_tests.sh index e984ee7062..a37eb6439e 100755 --- a/python/paddle/trainer_config_helpers/tests/configs/run_tests.sh +++ b/python/paddle/trainer_config_helpers/tests/configs/run_tests.sh @@ -7,7 +7,7 @@ protostr=`dirname $0`/protostr files=`ls $protostr | grep -v "unittest"` -./generate_protostr.sh +./generate_protostr.sh $1 . ./file_list.sh From 9e7f2b8de830335cb2d645cf993c8dd82b6450cc Mon Sep 17 00:00:00 2001 From: liaogang Date: Thu, 5 Jan 2017 00:24:08 +0800 Subject: [PATCH 25/51] Add system configure --- cmake/configure.cmake | 64 +++++++++++++++++++++++++++++++++++++++++++ cmake/system.cmake | 53 +++++++++++++++++++++++++++++++++++ 2 files changed, 117 insertions(+) create mode 100644 cmake/configure.cmake create mode 100644 cmake/system.cmake diff --git a/cmake/configure.cmake b/cmake/configure.cmake new file mode 100644 index 0000000000..ae0ec01d94 --- /dev/null +++ b/cmake/configure.cmake @@ -0,0 +1,64 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
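+
+# Map the user-facing WITH_* build options onto compile definitions and
+# compiler/NVCC flags used by the rest of the build.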
+ +if(WITH_DSO) + add_definitions(-DPADDLE_USE_DSO) +endif(WITH_DSO) + +if(WITH_DOUBLE) + add_definitions(-DPADDLE_TYPE_DOUBLE) +endif(WITH_DOUBLE) + +if(NOT WITH_TIMER) + add_definitions(-DPADDLE_DISABLE_TIMER) +endif(NOT WITH_TIMER) + +if(NOT WITH_PROFILER) + add_definitions(-DPADDLE_DISABLE_PROFILER) +endif(NOT WITH_PROFILER) + +if(NOT WITH_GPU) + add_definitions(-DPADDLE_ONLY_CPU) + add_definitions(-DHPPL_STUB_FUNC) + + list(APPEND CMAKE_CXX_SOURCE_FILE_EXTENSIONS cu) +else() + FIND_PACKAGE(CUDA REQUIRED) + + if(${CUDA_VERSION_MAJOR} VERSION_LESS 7) + message(FATAL_ERROR "Paddle need CUDA >= 7.0 to compile") + endif() + + if(NOT CUDNN_FOUND) + message(FATAL_ERROR "Paddle need cudnn to compile") + endif() + + if(WITH_AVX) + set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS} "-Xcompiler ${AVX_FLAG}") + else(WITH_AVX) + set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS} "-Xcompiler ${SSE3_FLAG}") + endif(WITH_AVX) + + # Include cuda and cudnn + include_directories(${CUDNN_INCLUDE_DIR}) + include_directories(${CUDA_TOOLKIT_INCLUDE}) +endif(NOT WITH_GPU) + +if(WITH_AVX) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${AVX_FLAG}") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${AVX_FLAG}") +else(WITH_AVX) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SSE3_FLAG}") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SSE3_FLAG}") +endif(WITH_AVX) diff --git a/cmake/system.cmake b/cmake/system.cmake new file mode 100644 index 0000000000..788db404eb --- /dev/null +++ b/cmake/system.cmake @@ -0,0 +1,53 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
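Besides probing the host OS, the new system.cmake defines EXTERNAL_PROJECT_LOG_ARGS, which the external/*.cmake recipes updated in the next patch splice into their ExternalProject_Add calls so the noisy update/configure/build/install steps are redirected into per-step log files. A sketch of how the variable is consumed (the target name and URL here are placeholders, not real dependencies):

    INCLUDE(ExternalProject)
    ExternalProject_Add(
        foo                                  # placeholder target
        ${EXTERNAL_PROJECT_LOG_ARGS}         # expands to LOG_UPDATE 1, LOG_CONFIGURE 1, ...
        GIT_REPOSITORY "https://example.com/foo.git"
        PREFIX ${CMAKE_CURRENT_SOURCE_DIR}/third_party/foo
        UPDATE_COMMAND ""
    )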
+ +IF(WIN32) + SET(HOST_SYSTEM "win32") +ELSE(WIN32) + IF(APPLE) + EXEC_PROGRAM (sw_vers ARGS -productVersion OUTPUT_VARIABLE MACOSX_VERSION) + STRING(REGEX MATCH "[0-9]+.[0-9]+" VERSION "${MACOSX_VERSION}") + SET(MACOS_VERSION ${VERSION}) + SET(HOST_SYSTEM "macosx") + ELSE(APPLE) + IF(EXISTS "/etc/issue") + FILE(READ "/etc/issue" LINUX_ISSUE) + IF(LINUX_ISSUE MATCHES "CentOS") + SET(HOST_SYSTEM "centos") + ELSEIF(LINUX_ISSUE MATCHES "Debian") + SET(HOST_SYSTEM "debian") + ELSEIF(LINUX_ISSUE MATCHES "Ubuntu") + SET(HOST_SYSTEM "ubuntu") + ENDIF() + ENDIF(EXISTS "/etc/issue") + ENDIF(APPLE) +ENDIF(WIN32) + +# query number of logical cores +CMAKE_HOST_SYSTEM_INFORMATION(RESULT CPU_CORES QUERY NUMBER_OF_LOGICAL_CORES) + +MARK_AS_ADVANCED(HOST_SYSTEM CPU_CORES) + +MESSAGE(STATUS "Found Paddle host system: ${HOST_SYSTEM}") +MESSAGE(STATUS "Found Paddle host system's CPU: ${CPU_CORES} cores") + +# external dependencies log output +SET(EXTERNAL_PROJECT_LOG_ARGS + LOG_DOWNLOAD 0 # Wrap download in script to log output + LOG_UPDATE 1 # Wrap update in script to log output + LOG_CONFIGURE 1 # Wrap configure in script to log output + LOG_BUILD 1 # Wrap build in script to log output + LOG_TEST 1 # Wrap test in script to log output + LOG_INSTALL 1 # Wrap install in script to log output +) From e2d0e09a8c92de7faf7d36bfcc0bdbeeda9184e0 Mon Sep 17 00:00:00 2001 From: liaogang Date: Thu, 5 Jan 2017 00:25:04 +0800 Subject: [PATCH 26/51] Update external dependencies --- cmake/external/gflags.cmake | 23 ++-- cmake/external/glog.cmake | 24 ++-- cmake/external/gtest.cmake | 64 ++++----- cmake/external/numpy.cmake | 78 ----------- cmake/external/openblas.cmake | 50 +++---- cmake/external/protobuf.cmake | 51 +++---- cmake/external/python.cmake | 242 ++++++++++++++++++++++++++-------- cmake/external/swig.cmake | 124 +++++++---------- cmake/external/warpctc.cmake | 40 +++--- cmake/external/zlib.cmake | 27 ++-- 10 files changed, 379 insertions(+), 344 deletions(-) delete mode 100644 cmake/external/numpy.cmake diff --git a/cmake/external/gflags.cmake b/cmake/external/gflags.cmake index 55f9a4c3e6..d38b7d1ba2 100644 --- a/cmake/external/gflags.cmake +++ b/cmake/external/gflags.cmake @@ -15,26 +15,25 @@ INCLUDE(ExternalProject) SET(GFLAGS_SOURCES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/gflags) -SET(GFLAGS_INSTALL_DIR ${PROJECT_BINARY_DIR}/gflags) +SET(GFLAGS_INSTALL_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/install/gflags) +SET(GFLAGS_INCLUDE_DIR "${GFLAGS_INSTALL_DIR}/include" CACHE PATH "gflags include directory." FORCE) +IF(WIN32) + set(GFLAGS_LIBRARIES "${GFLAGS_INSTALL_DIR}/lib/gflags.lib" CACHE FILEPATH "GFLAGS_LIBRARIES" FORCE) +ELSE(WIN32) + set(GFLAGS_LIBRARIES "${GFLAGS_INSTALL_DIR}/lib/libgflags.a" CACHE FILEPATH "GFLAGS_LIBRARIES" FORCE) +ENDIF(WIN32) + +INCLUDE_DIRECTORIES(${GFLAGS_INCLUDE_DIR}) ExternalProject_Add( gflags + ${EXTERNAL_PROJECT_LOG_ARGS} GIT_REPOSITORY "https://github.com/gflags/gflags.git" PREFIX ${GFLAGS_SOURCES_DIR} + UPDATE_COMMAND "" CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${GFLAGS_INSTALL_DIR} CMAKE_ARGS -DCMAKE_POSITION_INDEPENDENT_CODE=ON CMAKE_ARGS -DBUILD_TESTING=OFF - LOG_DOWNLOAD =ON - UPDATE_COMMAND "" ) -SET(GFLAGS_INCLUDE_DIR "${GFLAGS_INSTALL_DIR}/include" CACHE PATH "gflags include directory." 
FORCE) -INCLUDE_DIRECTORIES(${GFLAGS_INCLUDE_DIR}) - -IF(WIN32) - set(GFLAGS_LIBRARIES "${GFLAGS_INSTALL_DIR}/lib/gflags.lib" CACHE FILEPATH "GFLAGS_LIBRARIES" FORCE) -ELSE(WIN32) - set(GFLAGS_LIBRARIES "${GFLAGS_INSTALL_DIR}/lib/libgflags.a" CACHE FILEPATH "GFLAGS_LIBRARIES" FORCE) -ENDIF(WIN32) - LIST(APPEND external_project_dependencies gflags) diff --git a/cmake/external/glog.cmake b/cmake/external/glog.cmake index 473071a72a..bec69f3ddf 100644 --- a/cmake/external/glog.cmake +++ b/cmake/external/glog.cmake @@ -15,27 +15,27 @@ INCLUDE(ExternalProject) SET(GLOG_SOURCES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/glog) -SET(GLOG_INSTALL_DIR ${PROJECT_BINARY_DIR}/glog) +SET(GLOG_INSTALL_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/install/glog) +SET(GLOG_INCLUDE_DIR "${GLOG_INSTALL_DIR}/include" CACHE PATH "glog include directory." FORCE) + +IF(WIN32) + SET(GLOG_LIBRARIES "${GLOG_INSTALL_DIR}/lib/libglog.lib" CACHE FILEPATH "glog library." FORCE) +ELSE(WIN32) + SET(GLOG_LIBRARIES "${GLOG_INSTALL_DIR}/lib/libglog.a" CACHE FILEPATH "glog library." FORCE) +ENDIF(WIN32) + +INCLUDE_DIRECTORIES(${GLOG_INCLUDE_DIR}) ExternalProject_Add( glog + ${EXTERNAL_PROJECT_LOG_ARGS} GIT_REPOSITORY "https://github.com/google/glog.git" PREFIX ${GLOG_SOURCES_DIR} + UPDATE_COMMAND "" CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${GLOG_INSTALL_DIR} CMAKE_ARGS -DCMAKE_POSITION_INDEPENDENT_CODE=ON CMAKE_ARGS -DWITH_GFLAGS=OFF CMAKE_ARGS -DBUILD_TESTING=OFF - LOG_DOWNLOAD =ON - UPDATE_COMMAND "" ) -SET(GLOG_INCLUDE_DIR "${GLOG_INSTALL_DIR}/include" CACHE PATH "glog include directory." FORCE) -INCLUDE_DIRECTORIES(${GLOG_INCLUDE_DIR}) - -IF(WIN32) - SET(GLOG_LIBRARIES "${GLOG_INSTALL_DIR}/lib/libglog.lib" CACHE FILEPATH "glog library." FORCE) -ELSE(WIN32) - SET(GLOG_LIBRARIES "${GLOG_INSTALL_DIR}/lib/libglog.a" CACHE FILEPATH "glog library." FORCE) -ENDIF(WIN32) - LIST(APPEND external_project_dependencies glog) diff --git a/cmake/external/gtest.cmake b/cmake/external/gtest.cmake index a6ed9e9b9f..2fcb7893fa 100644 --- a/cmake/external/gtest.cmake +++ b/cmake/external/gtest.cmake @@ -12,38 +12,40 @@ # See the License for the specific language governing permissions and # limitations under the License. -INCLUDE(ExternalProject) +IF(WITH_TESTING) + ENABLE_TESTING() + INCLUDE(ExternalProject) -SET(GTEST_SOURCES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/gtest) -SET(GTEST_INSTALL_DIR ${PROJECT_BINARY_DIR}/gtest) + SET(GTEST_SOURCES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/gtest) + SET(GTEST_INSTALL_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/install/gtest) + SET(GTEST_INCLUDE_DIR "${GTEST_INSTALL_DIR}/include" CACHE PATH "gtest include directory." FORCE) -ExternalProject_Add( - gtest - GIT_REPOSITORY "https://github.com/google/googletest.git" - GIT_TAG "release-1.8.0" - PREFIX ${GTEST_SOURCES_DIR} - CMAKE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${GTEST_INSTALL_DIR} - CMAKE_ARGS -DCMAKE_POSITION_INDEPENDENT_CODE=ON - CMAKE_ARGS -DBUILD_GMOCK=ON - CMAKE_ARGS -Dgtest_disable_pthreads=ON - CMAKE_ARGS -Dgtest_force_shared_crt=ON - LOG_DOWNLOAD =ON - UPDATE_COMMAND "" -) + INCLUDE_DIRECTORIES(${GTEST_INCLUDE_DIR}) -SET(GTEST_INCLUDE_DIR "${GTEST_INSTALL_DIR}/include" CACHE PATH "gtest include directory." FORCE) -INCLUDE_DIRECTORIES(${GTEST_INCLUDE_DIR}) + IF(WIN32) + set(GTEST_LIBRARIES + "${GTEST_INSTALL_DIR}/lib/gtest.lib" CACHE FILEPATH "gtest libraries." FORCE) + set(GTEST_MAIN_LIBRARIES + "${GTEST_INSTALL_DIR}/lib/gtest_main.lib" CACHE FILEPATH "gtest main libraries." 
FORCE) + ELSE(WIN32) + set(GTEST_LIBRARIES + "${GTEST_INSTALL_DIR}/lib/libgtest.a" CACHE FILEPATH "gtest libraries." FORCE) + set(GTEST_MAIN_LIBRARIES + "${GTEST_INSTALL_DIR}/lib/libgtest_main.a" CACHE FILEPATH "gtest main libraries." FORCE) + ENDIF(WIN32) -IF(WIN32) - set(GTEST_LIBRARIES - "${GTEST_INSTALL_DIR}/lib/gtest.lib" - "${GTEST_INSTALL_DIR}/lib/gtest_main.lib" CACHE FILEPATH "gtest libraries." FORCE) -ELSE(WIN32) - set(GTEST_LIBRARIES - "${GTEST_INSTALL_DIR}/lib/libgtest.a" - "${GTEST_INSTALL_DIR}/lib/libgtest_main.a" CACHE FILEPATH "gtest libraries." FORCE) -ENDIF(WIN32) - -ENABLE_TESTING() - -LIST(APPEND external_project_dependencies gtest) + ExternalProject_Add( + gtest + ${EXTERNAL_PROJECT_LOG_ARGS} + GIT_REPOSITORY "https://github.com/google/googletest.git" + GIT_TAG "release-1.8.0" + PREFIX ${GTEST_SOURCES_DIR} + UPDATE_COMMAND "" + CMAKE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${GTEST_INSTALL_DIR} + CMAKE_ARGS -DCMAKE_POSITION_INDEPENDENT_CODE=ON + CMAKE_ARGS -DBUILD_GMOCK=ON + CMAKE_ARGS -Dgtest_disable_pthreads=ON + CMAKE_ARGS -Dgtest_force_shared_crt=ON + ) + LIST(APPEND external_project_dependencies gtest) +ENDIF(WITH_TESTING) diff --git a/cmake/external/numpy.cmake b/cmake/external/numpy.cmake deleted file mode 100644 index d01cff9722..0000000000 --- a/cmake/external/numpy.cmake +++ /dev/null @@ -1,78 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
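numpy.cmake is deleted outright rather than updated: its setuptools, cython, and numpy ExternalProject targets reappear inside cmake/external/python.cmake later in this patch, where every setup.py step is prefixed with env ${py_env} so the modules install into the self-built interpreter rather than whatever python happens to be first on PATH. Condensed from the relocated recipe (see python.cmake below):

    ExternalProject_Add(numpy
        GIT_REPOSITORY https://github.com/numpy/numpy.git
        GIT_TAG v1.11.3
        PREFIX ${NUMPY_SOURCES_DIR}
        UPDATE_COMMAND ""
        CONFIGURE_COMMAND ""
        BUILD_IN_SOURCE 1
        # env ${py_env} pins PYTHONHOME/PYTHONPATH to the bundled Python
        BUILD_COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py build
        INSTALL_COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py install
        DEPENDS python setuptools cython)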
- -FIND_PACKAGE(NumPy) - -IF(NOT ${NUMPY_FOUND}) - - INCLUDE(ExternalProject) - - SET(NUMPY_SOURCES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/numpy) - SET(NUMPY_INSTALL_DIR ${PROJECT_BINARY_DIR}/numpy) - set(NUMPY_VERSION "v1.11.3") - - ExternalProject_Add(setuptools - PREFIX ${PYTHON_SOURCES_DIR}/setuptools - URL http://pypi.python.org/packages/source/s/setuptools/setuptools-0.6c11.tar.gz - URL_MD5 7df2a529a074f613b509fb44feefe74e - BUILD_IN_SOURCE 1 - UPDATE_COMMAND "" - PATCH_COMMAND "" - CONFIGURE_COMMAND "" - INSTALL_COMMAND "" - BUILD_COMMAND ${PYTHON_EXECUTABLE} setup.py install - DEPENDS python zlib - ) - - ExternalProject_Add(cython - PREFIX ${PYTHON_SOURCES_DIR}/cython - GIT_REPOSITORY https://github.com/cython/cython.git - BUILD_IN_SOURCE 1 - CONFIGURE_COMMAND "" - UPDATE_COMMAND "" - PATCH_COMMAND "" - INSTALL_COMMAND "" - BUILD_COMMAND ${PYTHON_EXECUTABLE} setup.py install - DEPENDS python - ) - - ExternalProject_Add(numpy - GIT_REPOSITORY https://github.com/numpy/numpy.git - GIT_TAG ${NUMPY_VERSION} - CONFIGURE_COMMAND "" - UPDATE_COMMAND "" - PREFIX ${NUMPY_SOURCES_DIR} - BUILD_COMMAND ${PYTHON_EXECUTABLE} setup.py build - INSTALL_COMMAND ${PYTHON_EXECUTABLE} setup.py install - BUILD_IN_SOURCE 1 - DEPENDS python setuptools cython - ) - - LIST(APPEND external_project_dependencies numpy) - - # find numpy include directory - FILE(WRITE ${PROJECT_BINARY_DIR}/FindNumpyPath.py - "try: import numpy; print(numpy.get_include())\nexcept:pass\n") - - EXEC_PROGRAM("${PYTHON_EXECUTABLE}" ${PROJECT_BINARY_DIR} - ARGS "FindNumpyPath.py" - OUTPUT_VARIABLE NUMPY_PATH) - - FIND_PATH(PYTHON_NUMPY_INCLUDE_DIR numpy/arrayobject.h - HINTS "${NUMPY_PATH}" "${PYTHON_INCLUDE_PATH}") - -ENDIF() - -INCLUDE_DIRECTORIES(${PYTHON_NUMPY_INCLUDE_DIR}) - diff --git a/cmake/external/openblas.cmake b/cmake/external/openblas.cmake index 2683153b49..677999cc9f 100644 --- a/cmake/external/openblas.cmake +++ b/cmake/external/openblas.cmake @@ -12,31 +12,35 @@ # See the License for the specific language governing permissions and # limitations under the License. -INCLUDE(ExternalProject) +INCLUDE(cblas) -SET(CBLAS_SOURCES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/openblas) -SET(CBLAS_INSTALL_DIR ${PROJECT_BINARY_DIR}/openblas) +IF(NOT ${CBLAS_FOUND}) + INCLUDE(ExternalProject) -ExternalProject_Add( - openblas - GIT_REPOSITORY "https://github.com/xianyi/OpenBLAS.git" - GIT_TAG v0.2.19 - PREFIX ${CBLAS_SOURCES_DIR} - INSTALL_DIR ${CBLAS_INSTALL_DIR} - BUILD_IN_SOURCE 1 - UPDATE_COMMAND "" - CONFIGURE_COMMAND "" - BUILD_COMMAND cd ${CBLAS_SOURCES_DIR}/src/openblas && make -j4 - INSTALL_COMMAND cd ${CBLAS_SOURCES_DIR}/src/openblas && make install PREFIX= -) + SET(CBLAS_SOURCES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/openblas) + SET(CBLAS_INSTALL_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/install/openblas) + SET(CBLAS_INC_DIR "${CBLAS_INSTALL_DIR}/include" CACHE PATH "openblas include directory." FORCE) -SET(CBLAS_INCLUDE_DIR "${CBLAS_INSTALL_DIR}/include" CACHE PATH "openblas include directory." FORCE) -INCLUDE_DIRECTORIES(${CBLAS_INCLUDE_DIR}) + IF(WIN32) + SET(CBLAS_LIBRARIES "${CBLAS_INSTALL_DIR}/lib/openblas.lib" CACHE FILEPATH "openblas library." FORCE) + ELSE(WIN32) + SET(CBLAS_LIBRARIES "${CBLAS_INSTALL_DIR}/lib/libopenblas.a" CACHE FILEPATH "openblas library" FORCE) + ENDIF(WIN32) -IF(WIN32) - set(CBLAS_LIBRARIES "${CBLAS_INSTALL_DIR}/lib/openblas.lib" CACHE FILEPATH "openblas library." 
FORCE) -ELSE(WIN32) - set(CBLAS_LIBRARIES "${CBLAS_INSTALL_DIR}/lib/libopenblas.a" CACHE FILEPATH "openblas library" FORCE) -ENDIF(WIN32) + ExternalProject_Add( + openblas + ${EXTERNAL_PROJECT_LOG_ARGS} + URL "https://github.com/xianyi/OpenBLAS/archive/v0.2.19.tar.gz" + PREFIX ${CBLAS_SOURCES_DIR} + INSTALL_DIR ${CBLAS_INSTALL_DIR} + BUILD_IN_SOURCE 1 + CONFIGURE_COMMAND "" + BUILD_COMMAND make CC=${CMAKE_C_COMPILER} FC=${CMAKE_Fortran_COMPILER} + INSTALL_COMMAND make install PREFIX= + UPDATE_COMMAND "" + ) -LIST(APPEND external_project_dependencies openblas) + LIST(APPEND external_project_dependencies openblas) +ENDIF() + +INCLUDE_DIRECTORIES(${CBLAS_INC_DIR}) diff --git a/cmake/external/protobuf.cmake b/cmake/external/protobuf.cmake index f42e42ef68..2f2769b4c6 100644 --- a/cmake/external/protobuf.cmake +++ b/cmake/external/protobuf.cmake @@ -15,24 +15,9 @@ INCLUDE(ExternalProject) SET(PROTOBUF_SOURCES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/protobuf) -SET(PROTOBUF_INSTALL_DIR ${PROJECT_BINARY_DIR}/protobuf) - -ExternalProject_Add( - protobuf - PREFIX ${PROTOBUF_SOURCES_DIR} - DEPENDS zlib - GIT_REPOSITORY "https://github.com/google/protobuf.git" -# GIT_TAG "v3.1.0" - CONFIGURE_COMMAND - ${CMAKE_COMMAND} ${PROTOBUF_SOURCES_DIR}/src/protobuf/cmake - -Dprotobuf_BUILD_TESTS=OFF - -DCMAKE_POSITION_INDEPENDENT_CODE=ON - -DCMAKE_BUILD_TYPE=Release - -DCMAKE_INSTALL_PREFIX=${PROTOBUF_INSTALL_DIR} - UPDATE_COMMAND "" -) - +SET(PROTOBUF_INSTALL_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/install/protobuf) SET(PROTOBUF_INCLUDE_DIR "${PROTOBUF_INSTALL_DIR}/include" CACHE PATH "protobuf include directory." FORCE) + INCLUDE_DIRECTORIES(${PROTOBUF_INCLUDE_DIR}) IF(WIN32) @@ -44,18 +29,34 @@ IF(WIN32) "${PROTOBUF_INSTALL_DIR}/lib/libprotoc.lib" CACHE FILEPATH "protoc library." FORCE) SET(PROTOBUF_PROTOC_EXECUTABLE "${PROTOBUF_INSTALL_DIR}/bin/protoc.exe" CACHE FILEPATH "protobuf executable." FORCE) ELSE(WIN32) - FIND_PATH(PROTOBUF_LIBS_DIR libprotoc.a - ${PROTOBUF_INSTALL_DIR}/lib - ${PROTOBUF_INSTALL_DIR}/lib64 - NO_DEFAULT_PATH - ) + IF(${HOST_SYSTEM} STREQUAL "centos") + SET(LIB "lib64") + ELSE() + SET(LIB "lib") + ENDIF() SET(PROTOBUF_LITE_LIBRARY - "${PROTOBUF_LIBS_DIR}/libprotobuf-lite.a" CACHE FILEPATH "protobuf lite library." FORCE) + "${PROTOBUF_INSTALL_DIR}/${LIB}/libprotobuf-lite.a" CACHE FILEPATH "protobuf lite library." FORCE) SET(PROTOBUF_LIBRARY - "${PROTOBUF_LIBS_DIR}/libprotobuf.a" CACHE FILEPATH "protobuf library." FORCE) + "${PROTOBUF_INSTALL_DIR}/${LIB}/libprotobuf.a" CACHE FILEPATH "protobuf library." FORCE) SET(PROTOBUF_PROTOC_LIBRARY - "${PROTOBUF_LIBS_DIR}/libprotoc.a" CACHE FILEPATH "protoc library." FORCE) + "${PROTOBUF_INSTALL_DIR}/${LIB}/libprotoc.a" CACHE FILEPATH "protoc library." FORCE) SET(PROTOBUF_PROTOC_EXECUTABLE "${PROTOBUF_INSTALL_DIR}/bin/protoc" CACHE FILEPATH "protobuf executable." 
FORCE) ENDIF(WIN32) +ExternalProject_Add( + protobuf + ${EXTERNAL_PROJECT_LOG_ARGS} + PREFIX ${PROTOBUF_SOURCES_DIR} + UPDATE_COMMAND "" + DEPENDS zlib + GIT_REPOSITORY "https://github.com/google/protobuf.git" + GIT_TAG "9f75c5aa851cd877fb0d93ccc31b8567a6706546" + CONFIGURE_COMMAND + ${CMAKE_COMMAND} ${PROTOBUF_SOURCES_DIR}/src/protobuf/cmake + -Dprotobuf_BUILD_TESTS=OFF + -DCMAKE_POSITION_INDEPENDENT_CODE=ON + -DCMAKE_BUILD_TYPE=Release + -DCMAKE_INSTALL_PREFIX=${PROTOBUF_INSTALL_DIR} +) + LIST(APPEND external_project_dependencies protobuf) diff --git a/cmake/external/python.cmake b/cmake/external/python.cmake index d6cdf535fe..479ec301b7 100644 --- a/cmake/external/python.cmake +++ b/cmake/external/python.cmake @@ -12,84 +12,212 @@ # See the License for the specific language governing permissions and # limitations under the License. -FIND_PACKAGE(PythonLibs 2.7) -FIND_PACKAGE(PythonInterp 2.7) +INCLUDE(ExternalProject) -IF((NOT ${PYTHONINTERP_FOUND}) OR (NOT ${PYTHONLIBS_FOUND})) - INCLUDE(ExternalProject) +##################################### PYTHON ######################################## +SET(PYTHON_SOURCES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/python) +SET(PYTHON_INSTALL_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/install/python) +SET(_python_DIR ${PYTHON_INSTALL_DIR}) - SET(PYTHON_SOURCES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/Python) - SET(PYTHON_INSTALL_DIR ${PROJECT_BINARY_DIR}/Python) +IF(UNIX) + SET(PYTHON_FOUND ON) + SET(PYTHON_INCLUDE_DIR "${PYTHON_INSTALL_DIR}/include/python2.7" CACHE PATH "Python include dir" FORCE) + SET(PYTHON_LIBRARIES "${PYTHON_INSTALL_DIR}/lib/libpython2.7.a" CACHE FILEPATH "Python library" FORCE) + SET(PYTHON_EXECUTABLE ${PYTHON_INSTALL_DIR}/bin/python CACHE FILEPATH "Python executable" FORCE) + SET(PY_SITE_PACKAGES_PATH "${PYTHON_INSTALL_DIR}/lib/python2.7/site-packages" CACHE PATH "Python site-packages path" FORCE) +ELSEIF(WIN32) + SET(PYTHON_FOUND ON) + SET(PYTHON_INCLUDE_DIR "${PYTHON_INSTALL_DIR}/include" CACHE PATH "Python include dir" FORCE) + SET(PYTHON_LIBRARIES "${PYTHON_INSTALL_DIR}/libs/python27.lib" CACHE FILEPATH "Python library" FORCE) + SET(PYTHON_EXECUTABLE "${PYTHON_INSTALL_DIR}/bin/python.exe" CACHE FILEPATH "Python executable" FORCE) + SET(PY_SITE_PACKAGES_PATH "${PYTHON_INSTALL_DIR}/Lib/site-packages" CACHE PATH "Python site-packages path" FORCE) +ELSE() + MESSAGE(FATAL_ERROR "Unknown system !") +ENDIF() + +SET(py_env + PATH=${PYTHON_INSTALL_DIR}/bin/:$ENV{PATH} + PYTHONHOME=${PYTHON_INSTALL_DIR} + PYTHONPATH=${PYTHON_INSTALL_DIR}/lib:${PYTHON_INSTALL_DIR}/lib/python2.7:${PY_SITE_PACKAGES_PATH}) - IF(MSVC) - LIST(APPEND EXTERNAL_PROJECT_OPTIONAL_ARGS - PATCH_COMMAND ${CMAKE_COMMAND} - -DPYTHON_SRC_DIR:PATH=${_python_SOURCE_DIR} - -P ${CMAKE_CURRENT_LIST_DIR}/PythonPatch.cmake - ) - ENDIF() +INCLUDE_DIRECTORIES(${PYTHON_INCLUDE_DIR}) - IF(APPLE) +IF(APPLE) LIST(APPEND EXTERNAL_PROJECT_OPTIONAL_CMAKE_ARGS - -DCMAKE_BUILD_WITH_INSTALL_RPATH:BOOL=ON - ) - ENDIF() + -DCMAKE_BUILD_WITH_INSTALL_RPATH:BOOL=ON + ) +ENDIF() - SET(EXTERNAL_PROJECT_OPTIONAL_CMAKE_CACHE_ARGS) +SET(EXTERNAL_PROJECT_OPTIONAL_CMAKE_CACHE_ARGS) - # Force Python build to "Release". - IF(CMAKE_CONFIGURATION_TYPES) +# Force Python build to "Release". 
+IF(CMAKE_CONFIGURATION_TYPES) SET(SAVED_CMAKE_CFG_INTDIR ${CMAKE_CFG_INTDIR}) SET(CMAKE_CFG_INTDIR "Release") - ELSE() +ELSE() LIST(APPEND EXTERNAL_PROJECT_OPTIONAL_CMAKE_CACHE_ARGS - -DCMAKE_BUILD_TYPE:STRING=Release) - ENDIF() + -DCMAKE_BUILD_TYPE:STRING=Release + ) +ENDIF() - ExternalProject_Add(python +ExternalProject_Add(python + ${EXTERNAL_PROJECT_LOG_ARGS} GIT_REPOSITORY "https://github.com/python-cmake-buildsystem/python-cmake-buildsystem.git" - GIT_TAG "ed5f9bcee540e47f82fa17f8360b820591aa6d66" PREFIX ${PYTHON_SOURCES_DIR} UPDATE_COMMAND "" + CMAKE_ARGS -DPYTHON_VERSION=2.7.12 + CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} + CMAKE_ARGS -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} CMAKE_CACHE_ARGS - -DCMAKE_INSTALL_PREFIX:PATH=${PYTHON_INSTALL_DIR} - -DBUILD_SHARED:BOOL=OFF - -DBUILD_STATIC:BOOL=ON - -DUSE_SYSTEM_LIBRARIES:BOOL=OFF - -DZLIB_ROOT:FILEPATH=${ZLIB_ROOT} - -DZLIB_INCLUDE_DIR:PATH=${ZLIB_INCLUDE_DIR} - -DZLIB_LIBRARY:FILEPATH=${ZLIB_LIBRARIES} - -DDOWNLOAD_SOURCES:BOOL=ON - -DINSTALL_WINDOWS_TRADITIONAL:BOOL=OFF - ${EXTERNAL_PROJECT_OPTIONAL_CMAKE_CACHE_ARGS} - ${EXTERNAL_PROJECT_OPTIONAL_CMAKE_ARGS} + -DCMAKE_INSTALL_PREFIX:PATH=${PYTHON_INSTALL_DIR} + -DBUILD_LIBPYTHON_SHARED:BOOL=OFF + -DUSE_SYSTEM_LIBRARIES:BOOL=OFF + -DZLIB_ROOT:FILEPATH=${ZLIB_ROOT} + -DZLIB_INCLUDE_DIR:PATH=${ZLIB_INCLUDE_DIR} + -DZLIB_LIBRARY:FILEPATH=${ZLIB_LIBRARIES} + -DDOWNLOAD_SOURCES:BOOL=ON + -DINSTALL_WINDOWS_TRADITIONAL:BOOL=OFF + ${EXTERNAL_PROJECT_OPTIONAL_CMAKE_CACHE_ARGS} + ${EXTERNAL_PROJECT_OPTIONAL_CMAKE_ARGS} DEPENDS zlib - ) +) +#################################################################################### + +##################################### SETUPTOOLS ################################### +SET(SETUPTOOLS_SOURCES_DIR ${PYTHON_SOURCES_DIR}/setuptools) +ExternalProject_Add(setuptools + ${EXTERNAL_PROJECT_LOG_ARGS} + PREFIX ${SETUPTOOLS_SOURCES_DIR} + URL "https://pypi.python.org/packages/source/s/setuptools/setuptools-18.3.2.tar.gz" + BUILD_IN_SOURCE 1 + PATCH_COMMAND "" + UPDATE_COMMAND "" + CONFIGURE_COMMAND "" + INSTALL_COMMAND "" + BUILD_COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py install + DEPENDS python zlib +) +##################################################################################### + +##################################### SIX ########################################### +SET(SIX_SOURCES_DIR ${PYTHON_SOURCES_DIR}/six) +ExternalProject_Add(six + ${EXTERNAL_PROJECT_LOG_ARGS} + PREFIX ${SIX_SOURCES_DIR} + URL https://pypi.python.org/packages/source/s/six/six-1.10.0.tar.gz + BUILD_IN_SOURCE 1 + PATCH_COMMAND "" + UPDATE_COMMAND "" + CONFIGURE_COMMAND "" + INSTALL_COMMAND "" + BUILD_COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py install + DEPENDS python setuptools +) +##################################################################################### - SET(_python_DIR ${PYTHON_INSTALL_DIR}) +##################################### CYTHON ######################################## +SET(CYTHON_SOURCES_DIR ${PYTHON_SOURCES_DIR}/cython) +ExternalProject_Add(cython + ${EXTERNAL_PROJECT_LOG_ARGS} + PREFIX ${CYTHON_SOURCES_DIR} + URL https://github.com/cython/cython/archive/0.25.2.tar.gz + GIT_TAG 0.25.2 + BUILD_IN_SOURCE 1 + CONFIGURE_COMMAND "" + PATCH_COMMAND "" + UPDATE_COMMAND "" + INSTALL_COMMAND "" + BUILD_COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py install + DEPENDS python +) +#################################################################################### - IF(UNIX) - SET(_python_IMPORT_SUFFIX a) 
+##################################### NUMPY ######################################## +SET(NUMPY_SOURCES_DIR ${PYTHON_SOURCES_DIR}/numpy) +SET(NUMPY_TAG_VERSION "v1.11.3") +SET(NUMPY_VERSION "1.11.3") + +IF(WIN32) + SET(EGG_NAME "numpy-${NUMPY_VERSION}-py2.7-${HOST_SYSTEM}.egg") +ELSE(WIN32) IF(APPLE) - SET(_python_IMPORT_SUFFIX lib) - ENDIF() - SET(PYTHON_INCLUDE_DIR "${PYTHON_INSTALL_DIR}/include/python2.7" CACHE PATH "Python include dir" FORCE) - SET(PYTHON_LIBRARIES "${PYTHON_INSTALL_DIR}/lib/libpython2.7.${_python_IMPORT_SUFFIX}" CACHE FILEPATH "Python library" FORCE) - SET(PYTHON_EXECUTABLE ${PYTHON_INSTALL_DIR}/bin/python CACHE FILEPATH "Python executable" FORCE) - SET(PY_SITE_PACKAGES_PATH "${PYTHON_INSTALL_DIR}/lib/python2.7/site-packages" CACHE PATH "Python site-packages path" FORCE) - ELSEIF(WIN32) - SET(PYTHON_INCLUDE_DIR "${PYTHON_INSTALL_DIR}/include" CACHE PATH "Python include dir" FORCE) - SET(PYTHON_LIBRARIES "${PYTHON_INSTALL_DIR}/libs/python27.lib" CACHE FILEPATH "Python library" FORCE) - SET(PYTHON_EXECUTABLE "${PYTHON_INSTALL_DIR}/bin/python.exe" CACHE FILEPATH "Python executable" FORCE) - SET(PY_SITE_PACKAGES_PATH "${PYTHON_INSTALL_DIR}/Lib/site-packages" CACHE PATH "Python site-packages path" FORCE) - ELSE() - MESSAGE(FATAL_ERROR "Unknown system !") - ENDIF() + SET(EGG_NAME "numpy-${NUMPY_VERSION}-py2.7-${HOST_SYSTEM}-${MACOS_VERSION}-x86_64.egg") + ELSE(APPLE) + SET(EGG_NAME "numpy-${NUMPY_VERSION}-py2.7-linux-x86_64.egg") + ENDIF(APPLE) +ENDIF(WIN32) -LIST(APPEND external_project_dependencies python) +SET(PYTHON_NUMPY_INCLUDE_DIR "${PY_SITE_PACKAGES_PATH}/${EGG_NAME}/numpy/core/include") +IF(${PYTHON_FOUND}) # local python + SET(PYTHON_NUMPY_INCLUDE_DIR + "${PY_SITE_PACKAGES_PATH}/${EGG_NAME}/numpy/core/include") +ELSE(${PYTHON_FOUND}) # global python + SET(PYTHON_NUMPY_INCLUDE_DIR "") + SET(PY_SITE_PACKAGES_DIR "") + FILE(WRITE ${PROJECT_BINARY_DIR}/FindNumpyPath.py + "try: import site; print(site.getsitepackages())\nexcept:pass\n") + EXEC_PROGRAM("env ${py_env} ${PYTHON_EXECUTABLE}" ${PROJECT_BINARY_DIR} + ARGS "FindNumpyPath.py" OUTPUT_VARIABLE NUMPY_PATH) -ENDIF() + STRING(REPLACE "[" "" NUMPY_PATH "${NUMPY_PATH}") + STRING(REPLACE "]" "" NUMPY_PATH "${NUMPY_PATH}") + STRING(REPLACE "'" "" NUMPY_PATH "${NUMPY_PATH}") + STRING(REPLACE ", " ";" SITE_DIRS "${NUMPY_PATH}") -INCLUDE_DIRECTORIES(${PYTHON_INCLUDE_DIR}) + FOREACH(SITE_DIR ${SITE_DIRS}) + IF(EXISTS ${SITE_DIR}) + LIST(APPEND PYTHON_NUMPY_INCLUDE_DIR + "${SITE_DIR}/${EGG_NAME}/numpy/core/include") + SET(PY_SITE_PACKAGES_DIR "${SITE_DIR}") + ENDIF() + ENDFOREACH() +ENDIF(${PYTHON_FOUND}) + +INCLUDE_DIRECTORIES(${PYTHON_NUMPY_INCLUDE_DIR}) + +ExternalProject_Add(numpy + ${EXTERNAL_PROJECT_LOG_ARGS} + GIT_REPOSITORY https://github.com/numpy/numpy.git + GIT_TAG ${NUMPY_TAG_VERSION} + CONFIGURE_COMMAND "" + UPDATE_COMMAND "" + PREFIX ${NUMPY_SOURCES_DIR} + BUILD_COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py build + INSTALL_COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py install + BUILD_IN_SOURCE 1 + DEPENDS python setuptools cython +) +#################################################################################### + +##################################### WHEEL ######################################## +SET(WHEEL_SOURCES_DIR ${PYTHON_SOURCES_DIR}/wheel) +ExternalProject_Add(wheel + ${EXTERNAL_PROJECT_LOG_ARGS} + URL https://pypi.python.org/packages/source/w/wheel/wheel-0.29.0.tar.gz + PREFIX ${WHEEL_SOURCES_DIR} + CONFIGURE_COMMAND "" + UPDATE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND env 
${py_env} ${PYTHON_EXECUTABLE} setup.py install + BUILD_IN_SOURCE 1 + DEPENDS python setuptools +) +#################################################################################### + +################################### PROTOBUF ####################################### +SET(PY_PROTOBUF_SOURCES_DIR ${PYTHON_SOURCES_DIR}/protobuf) +ExternalProject_Add(python-protobuf + ${EXTERNAL_PROJECT_LOG_ARGS} + URL https://pypi.python.org/packages/e0/b0/0a1b364fe8a7d177b4b7d4dca5b798500dc57a7273b93cca73931b305a6a/protobuf-3.1.0.post1.tar.gz + URL_MD5 38b5fb160c768d2f8444d0c6d637ff91 + PREFIX ${PY_PROTOBUF_SOURCES_DIR} + BUILD_IN_SOURCE 1 + PATCH_COMMAND "" + CONFIGURE_COMMAND "" + BUILD_COMMAND env PATH=${PROTOBUF_INSTALL_DIR}/bin:$ENV{PATH} ${py_env} ${PYTHON_EXECUTABLE} setup.py build + INSTALL_COMMAND env PATH=${PROTOBUF_INSTALL_DIR}/bin:$ENV{PATH} ${py_env} ${PYTHON_EXECUTABLE} setup.py install + DEPENDS python setuptools six +) +LIST(APPEND external_project_dependencies python setuptools six cython numpy wheel python-protobuf) diff --git a/cmake/external/swig.cmake b/cmake/external/swig.cmake index 2da826d375..5460b02c37 100644 --- a/cmake/external/swig.cmake +++ b/cmake/external/swig.cmake @@ -12,83 +12,59 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Look for system swig -FIND_PACKAGE(SWIG) +# build swig as an external project +INCLUDE(ExternalProject) -IF(NOT ${SWIG_FOUND}) - # build swig as an external project - INCLUDE(ExternalProject) - SET(SWIG_SOURCES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/swig) - SET(SWIG_INSTALL_DIR ${PROJECT_BINARY_DIR}/swig) - SET(SWIG_TARGET_VERSION "3.0.2") - SET(SWIG_DOWNLOAD_SRC_MD5 "62f9b0d010cef36a13a010dc530d0d41") - SET(SWIG_DOWNLOAD_WIN_MD5 "3f18de4fc09ab9abb0d3be37c11fbc8f") +SET(SWIG_SOURCES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/swig) +SET(SWIG_INSTALL_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/install/swig) +SET(SWIG_TARGET_VERSION "3.0.2") +SET(SWIG_DOWNLOAD_SRC_MD5 "62f9b0d010cef36a13a010dc530d0d41") +SET(SWIG_DOWNLOAD_WIN_MD5 "3f18de4fc09ab9abb0d3be37c11fbc8f") - IF(WIN32) - # swig.exe available as pre-built binary on Windows: - ExternalProject_Add(swig - URL http://prdownloads.sourceforge.net/swig/swigwin-${SWIG_TARGET_VERSION}.zip - URL_MD5 ${SWIG_DOWNLOAD_WIN_MD5} - SOURCE_DIR ${SWIG_SOURCES_DIR} - CONFIGURE_COMMAND "" - BUILD_COMMAND "" - INSTALL_COMMAND "" - ) - SET(SWIG_DIR ${SWIG_SOURCES_DIR} CACHE FILEPATH "SWIG Directory" FORCE) - SET(SWIG_EXECUTABLE ${SWIG_SOURCES_DIR}/swig.exe CACHE FILEPATH "SWIG Executable" FORCE) +IF(WIN32) + # swig.exe available as pre-built binary on Windows: + ExternalProject_Add(swig + URL http://prdownloads.sourceforge.net/swig/swigwin-${SWIG_TARGET_VERSION}.zip + URL_MD5 ${SWIG_DOWNLOAD_WIN_MD5} + SOURCE_DIR ${SWIG_SOURCES_DIR} + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND "" + UPDATE_COMMAND "" + ) + SET(SWIG_DIR ${SWIG_SOURCES_DIR} CACHE FILEPATH "SWIG Directory" FORCE) + SET(SWIG_EXECUTABLE ${SWIG_SOURCES_DIR}/swig.exe CACHE FILEPATH "SWIG Executable" FORCE) +ELSE(WIN32) + # From PCRE configure + ExternalProject_Add(pcre + ${EXTERNAL_PROJECT_LOG_ARGS} + GIT_REPOSITORY https://github.com/svn2github/pcre.git + PREFIX ${SWIG_SOURCES_DIR}/pcre + CMAKE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${SWIG_INSTALL_DIR}/pcre + ) - ELSE(WIN32) - # From PCRE configure - ExternalProject_Add(pcre - GIT_REPOSITORY https://github.com/svn2github/pcre.git - PREFIX ${SWIG_SOURCES_DIR}/pcre - UPDATE_COMMAND "" - CMAKE_ARGS 
-DCMAKE_INSTALL_PREFIX:PATH=${SWIG_INSTALL_DIR}/pcre - ) + # swig uses bison find it by cmake and pass it down + FIND_PACKAGE(BISON) - # swig uses bison find it by cmake and pass it down - FIND_PACKAGE(BISON) + # From SWIG configure + ExternalProject_Add(swig + GIT_REPOSITORY https://github.com/swig/swig.git + GIT_TAG rel-3.0.10 + PREFIX ${SWIG_SOURCES_DIR} + CONFIGURE_COMMAND cd ${SWIG_SOURCES_DIR}/src/swig && ./autogen.sh + CONFIGURE_COMMAND cd ${SWIG_SOURCES_DIR}/src/swig && + env "PCRE_LIBS=${SWIG_INSTALL_DIR}/pcre/lib/libpcre.a ${SWIG_INSTALL_DIR}/pcre/lib/libpcrecpp.a ${SWIG_INSTALL_DIR}/pcre/lib/libpcreposix.a" + ./configure + --prefix=${SWIG_INSTALL_DIR} + --with-pcre-prefix=${SWIG_INSTALL_DIR}/pcre + BUILD_COMMAND cd ${SWIG_SOURCES_DIR}/src/swig && make + INSTALL_COMMAND cd ${SWIG_SOURCES_DIR}/src/swig && make install + UPDATE_COMMAND "" + DEPENDS pcre + ) - # From SWIG configure - ExternalProject_Add(swig - URL https://github.com/swig/swig/archive/rel-3.0.10.tar.gz - PREFIX ${SWIG_SOURCES_DIR} - UPDATE_COMMAND "" - CONFIGURE_COMMAND cd ${SWIG_SOURCES_DIR}/src/swig && ./autogen.sh - CONFIGURE_COMMAND cd ${SWIG_SOURCES_DIR}/src/swig && - env "PCRE_LIBS=${SWIG_INSTALL_DIR}/pcre/lib/libpcre.a \ - ${SWIG_INSTALL_DIR}/pcre/lib/libpcrecpp.a \ - ${SWIG_INSTALL_DIR}/pcre/lib/libpcreposix.a" - ./configure - --prefix=${SWIG_INSTALL_DIR} - --with-pcre-prefix=${SWIG_INSTALL_DIR}/pcre - BUILD_COMMAND cd ${SWIG_SOURCES_DIR}/src/swig && make - INSTALL_COMMAND cd ${SWIG_SOURCES_DIR}/src/swig && make install - DEPENDS pcre - ) + SET(SWIG_DIR ${SWIG_INSTALL_DIR}/share/swig/${SWIG_TARGET_VERSION}) + SET(SWIG_EXECUTABLE ${SWIG_INSTALL_DIR}/bin/swig) +ENDIF(WIN32) - set(SWIG_DIR ${SWIG_INSTALL_DIR}/share/swig/${SWIG_TARGET_VERSION} CACHE FILEPATH "SWIG Directory" FORCE) - set(SWIG_EXECUTABLE ${SWIG_INSTALL_DIR}/bin/swig CACHE FILEPATH "SWIG Executable" FORCE) - ENDIF(WIN32) - - LIST(APPEND external_project_dependencies swig) - -ENDIF() - -FUNCTION(generate_python_api target_name) - ADD_CUSTOM_COMMAND(OUTPUT ${PROJ_ROOT}/paddle/py_paddle/swig_paddle.py - ${PROJ_ROOT}/paddle/Paddle_wrap.cxx - ${PROJ_ROOT}/paddle/Paddle_wrap.h - COMMAND ${SWIG_EXECUTABLE} -python -c++ -outcurrentdir -I../ api/Paddle.swig - && mv ${PROJ_ROOT}/paddle/swig_paddle.py ${PROJ_ROOT}/paddle/py_paddle/swig_paddle.py - DEPENDS ${PROJ_ROOT}/paddle/api/Paddle.swig - ${PROJ_ROOT}/paddle/api/PaddleAPI.h - ${external_project_dependencies} - WORKING_DIRECTORY ${PROJ_ROOT}/paddle - COMMENT "Generate Python API from swig") - ADD_CUSTOM_TARGET(${target_name} ALL DEPENDS - ${PROJ_ROOT}/paddle/Paddle_wrap.cxx - ${PROJ_ROOT}/paddle/Paddle_wrap.h - ${PROJ_ROOT}/paddle/py_paddle/swig_paddle.py - ${external_project_dependencies}) -ENDFUNCTION(generate_python_api) +LIST(APPEND external_project_dependencies swig) diff --git a/cmake/external/warpctc.cmake b/cmake/external/warpctc.cmake index 6a88c87df6..d90768b6f1 100644 --- a/cmake/external/warpctc.cmake +++ b/cmake/external/warpctc.cmake @@ -15,28 +15,13 @@ INCLUDE(ExternalProject) SET(WARPCTC_SOURCES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/warpctc) -SET(WARPCTC_INSTALL_DIR ${PROJECT_BINARY_DIR}/warpctc) - -IF(CMAKE_CXX_COMPILER_ID STREQUAL "Clang" OR CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang" ) - SET(USE_OMP ON) -ELSE() - SET(USE_OMP OFF) -ENDIF() - -ExternalProject_Add( - warpctc - GIT_REPOSITORY "https://github.com/gangliao/warp-ctc.git" - PREFIX ${WARPCTC_SOURCES_DIR} - CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${WARPCTC_INSTALL_DIR} - CMAKE_ARGS -DWITH_GPU=${WITH_GPU} - CMAKE_ARGS 
-DWITH_OMP=${USE_OMP} - UPDATE_COMMAND "" -) - +SET(WARPCTC_INSTALL_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/install/warpctc) SET(WARPCTC_INCLUDE_DIR "${WARPCTC_INSTALL_DIR}/include" CACHE PATH "Warp-ctc Directory" FORCE) + INCLUDE_DIRECTORIES(${WARPCTC_INCLUDE_DIR}) SET(WARPCTC_LIB_DIR "${WARPCTC_INSTALL_DIR}/lib" CACHE PATH "Warp-ctc Library Directory" FORCE) + IF(WIN32) SET(WARPCTC_LIBRARIES "${WARPCTC_INSTALL_DIR}/lib/warpctc.dll" CACHE FILEPATH "Warp-ctc Library" FORCE) @@ -51,4 +36,23 @@ ELSE(WIN32) "${WARPCTC_INSTALL_DIR}/lib/libwarpctc.${_warpctc_SHARED_SUFFIX}" CACHE FILEPATH "Warp-ctc Library" FORCE) ENDIF(WIN32) +IF(CMAKE_CXX_COMPILER_ID STREQUAL "Clang" OR CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang" ) + SET(USE_OMP OFF) +ELSE() + SET(USE_OMP ON) +ENDIF() + +ExternalProject_Add( + warpctc + ${EXTERNAL_PROJECT_LOG_ARGS} + GIT_REPOSITORY "https://github.com/gangliao/warp-ctc.git" + PREFIX ${WARPCTC_SOURCES_DIR} + UPDATE_COMMAND "" + CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} + CMAKE_ARGS -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} + CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${WARPCTC_INSTALL_DIR} + CMAKE_ARGS -DWITH_GPU=${WITH_GPU} + CMAKE_ARGS -DWITH_OMP=${USE_OMP} +) + LIST(APPEND external_project_dependencies warpctc) diff --git a/cmake/external/zlib.cmake b/cmake/external/zlib.cmake index ec44467aa7..916f6816aa 100644 --- a/cmake/external/zlib.cmake +++ b/cmake/external/zlib.cmake @@ -15,30 +15,29 @@ INCLUDE(ExternalProject) SET(ZLIB_SOURCES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/zlib) -SET(ZLIB_INSTALL_DIR ${PROJECT_BINARY_DIR}/zlib) +SET(ZLIB_INSTALL_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/install/zlib) +SET(ZLIB_ROOT ${ZLIB_INSTALL_DIR} CACHE FILEPATH "zlib root directory." FORCE) +SET(ZLIB_INCLUDE_DIR "${ZLIB_INSTALL_DIR}/include" CACHE PATH "zlib include directory." FORCE) + +IF(WIN32) + SET(ZLIB_LIBRARIES "${ZLIB_INSTALL_DIR}/lib/zlibstatic.lib" CACHE FILEPATH "zlib library." FORCE) +ELSE(WIN32) + set(ZLIB_LIBRARIES "${ZLIB_INSTALL_DIR}/lib/libz.a" CACHE FILEPATH "zlib library." FORCE) +ENDIF(WIN32) + +INCLUDE_DIRECTORIES(${ZLIB_INCLUDE_DIR}) ExternalProject_Add( zlib + ${EXTERNAL_PROJECT_LOG_ARGS} GIT_REPOSITORY "https://github.com/madler/zlib.git" GIT_TAG "v1.2.8" PREFIX ${ZLIB_SOURCES_DIR} + UPDATE_COMMAND "" CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${ZLIB_INSTALL_DIR} CMAKE_ARGS -DBUILD_SHARED_LIBS=OFF CMAKE_ARGS -DCMAKE_POSITION_INDEPENDENT_CODE=ON CMAKE_ARGS -DCMAKE_MACOSX_RPATH=ON - LOG_DOWNLOAD =ON - UPDATE_COMMAND "" ) -SET(ZLIB_ROOT ${ZLIB_INSTALL_DIR} CACHE PATH "zlib root directory." FORCE) - -SET(ZLIB_INCLUDE_DIR "${ZLIB_INSTALL_DIR}/include" CACHE PATH "zlib include directory." FORCE) -INCLUDE_DIRECTORIES(${ZLIB_INCLUDE_DIR}) - -IF(WIN32) - SET(ZLIB_LIBRARIES "${ZLIB_INSTALL_DIR}/lib/zlibstatic.lib" CACHE FILEPATH "zlib library." FORCE) -ELSE(WIN32) - set(ZLIB_LIBRARIES "${ZLIB_INSTALL_DIR}/lib/libz.a" CACHE FILEPATH "zlib library." 
FORCE) -ENDIF(WIN32) - LIST(APPEND external_project_dependencies zlib) From 642dc356aef25b13c08b5fc2d64c9bbe600f707d Mon Sep 17 00:00:00 2001 From: liaogang Date: Thu, 5 Jan 2017 00:25:46 +0800 Subject: [PATCH 27/51] clean travis ci --- .travis.yml | 9 ++------- paddle/scripts/travis/before_install.osx.sh | 4 +--- 2 files changed, 3 insertions(+), 10 deletions(-) diff --git a/.travis.yml b/.travis.yml index b49d4638d7..61d31132b7 100644 --- a/.travis.yml +++ b/.travis.yml @@ -24,15 +24,10 @@ addons: - wget - git - build-essential - - python + - libatlas-base-dev - python-pip - - python2.7-dev - - python-numpy - - python-wheel - curl - - lcov - graphviz - - swig - clang-format-3.8 - automake - libtool @@ -50,7 +45,7 @@ before_install: fi - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then paddle/scripts/travis/before_install.osx.sh; fi - if [[ "$JOB" == "PRE_COMMIT" ]]; then sudo ln -s /usr/bin/clang-format-3.8 /usr/bin/clang-format; fi - - pip install wheel protobuf sphinx recommonmark virtualenv numpy sphinx_rtd_theme pre-commit requests==2.9.2 LinkChecker + - pip install protobuf sphinx recommonmark sphinx_rtd_theme virtualenv pre-commit requests==2.9.2 LinkChecker script: - paddle/scripts/travis/main.sh notifications: diff --git a/paddle/scripts/travis/before_install.osx.sh b/paddle/scripts/travis/before_install.osx.sh index 89742d67f5..fd113d313e 100755 --- a/paddle/scripts/travis/before_install.osx.sh +++ b/paddle/scripts/travis/before_install.osx.sh @@ -1,6 +1,4 @@ #!/bin/bash brew update brew tap homebrew/science -brew install python -sudo pip install --upgrade protobuf -brew install cmake python wget md5sha1sum +brew install openblas md5sha1sum From 572d8254ea63a88b9c382ba2e91e53e90994e620 Mon Sep 17 00:00:00 2001 From: liaogang Date: Thu, 5 Jan 2017 00:25:58 +0800 Subject: [PATCH 28/51] Clean cmake --- CMakeLists.txt | 26 +++++++++++----- cmake/cblas.cmake | 24 ++++++++------- cmake/check_packages.cmake | 14 --------- cmake/cpplint.cmake | 2 +- cmake/definitions.cmake | 62 -------------------------------------- cmake/util.cmake | 4 +-- paddle/cuda/CMakeLists.txt | 2 +- 7 files changed, 36 insertions(+), 98 deletions(-) delete mode 100644 cmake/check_packages.cmake delete mode 100644 cmake/definitions.cmake diff --git a/CMakeLists.txt b/CMakeLists.txt index 784876f089..9ed757bd1b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,4 +1,18 @@ -cmake_minimum_required(VERSION 2.8) +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License + +cmake_minimum_required(VERSION 3.0) project(paddle CXX C) @@ -6,11 +20,11 @@ set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake") set(PROJ_ROOT ${CMAKE_SOURCE_DIR}) find_package(Sphinx) -find_package(Doxygen) find_package(CUDA QUIET) find_package(Git REQUIRED) find_package(Threads REQUIRED) +include(system) include(simd) ###################### Configurations ############################ @@ -35,9 +49,8 @@ include(external/gflags) # download, build, install gflags include(external/glog) # download, build, install glog include(external/gtest) # download, build, install gtest include(external/protobuf) # download, build, install protobuf -include(external/openblas) # download, build, install openblas include(external/python) # download, build, install python -include(external/numpy) # download, build, install numpy +include(external/openblas) # download, build, install openblas include(external/swig) # download, build, install swig include(external/warpctc) # download, build, install warpctc @@ -52,8 +65,7 @@ include(version) # set PADDLE_VERSION include(coveralls) # set code coverage include(python_module) # set python module -include(check_packages) # check configuration -include(definitions) # add paddle definitions +include(configure) # add paddle env configuration include_directories("${PROJ_ROOT}") include_directories("${PROJ_ROOT}/paddle/cuda/include") @@ -64,7 +76,7 @@ set(EXTERNAL_LIBS ${GFLAGS_LIBRARIES} ${GLOG_LIBRARIES} ${CBLAS_LIBRARIES} - ${PROTOBUF_LIBRARIES} + ${PROTOBUF_LIBRARY} ${ZLIB_LIBRARIES} ) diff --git a/cmake/cblas.cmake b/cmake/cblas.cmake index 685334c658..4e1ae7dc81 100644 --- a/cmake/cblas.cmake +++ b/cmake/cblas.cmake @@ -13,6 +13,7 @@ # system paths. # +set(CBLAS_FOUND OFF) ## Find MKL First. set(MKL_ROOT $ENV{MKL_ROOT} CACHE PATH "Folder contains MKL") @@ -35,11 +36,12 @@ find_library(MKL_INTEL_LP64 NAMES mkl_intel_lp64 PATHS if(MKL_INCLUDE_DIR AND MKL_CORE_LIB AND MKL_SEQUENTIAL_LIB AND MKL_INTEL_LP64) set(CBLAS_PROVIDER MKL) set(CBLAS_INC_DIR ${MKL_INCLUDE_DIR}) - set(CBLAS_LIBS ${MKL_INTEL_LP64} + set(CBLAS_LIBRARIES ${MKL_INTEL_LP64} ${MKL_SEQUENTIAL_LIB} ${MKL_CORE_LIB}) add_definitions(-DPADDLE_USE_MKL) - message(STATUS "Found MKL (include: ${CBLAS_INC_DIR}, library: ${CBLAS_LIBS})") + message(STATUS "Found MKL (include: ${CBLAS_INC_DIR}, library: ${CBLAS_LIBRARIES})") + set(CBLAS_FOUND ON) return() # return file. 
 endif()
@@ -68,9 +70,10 @@ find_library(ATLAS_LIB NAMES lapack_atlas liblapack_atlas.so.3
 if(ATLAS_INC_DIR AND ATLAS_CBLAS_LIB AND ATLAS_LIB)
   set(CBLAS_PROVIDER ATLAS)
   set(CBLAS_INC_DIR ${ATLAS_INC_DIR} ${ATLAS_CLAPACK_INC_DIR})
-  set(CBLAS_LIBS ${ATLAS_LIB} ${ATLAS_CBLAS_LIB})
+  set(CBLAS_LIBRARIES ${ATLAS_LIB} ${ATLAS_CBLAS_LIB})
   add_definitions(-DPADDLE_USE_ATLAS)
-  message(STATUS "Found Atlas (include: ${CBLAS_INC_DIR}, library: ${CBLAS_LIBS})")
+  message(STATUS "Found Atlas (include: ${CBLAS_INC_DIR}, library: ${CBLAS_LIBRARIES})")
+  set(CBLAS_FOUND ON)
   return()
 endif()
 
@@ -98,8 +101,9 @@ find_library(OPENBLAS_LIB NAMES openblas
 if(OPENBLAS_INC_DIR AND OPENBLAS_LIB)
   set(CBLAS_PROVIDER OPENBLAS)
   set(CBLAS_INC_DIR ${OPENBLAS_INC_DIR})
-  set(CBLAS_LIBS ${OPENBLAS_LIB})
-  message(STATUS "Found OpenBlas (include: ${CBLAS_INC_DIR}, library: ${CBLAS_LIBS})")
+  set(CBLAS_LIBRARIES ${OPENBLAS_LIB})
+  message(STATUS "Found OpenBlas (include: ${CBLAS_INC_DIR}, library: ${CBLAS_LIBRARIES})")
+  set(CBLAS_FOUND ON)
   return()
 endif()
 
@@ -130,9 +134,7 @@ find_library(REFERENCE_CBLAS_LIBRARY NAMES cblas PATHS
 if (REFERENCE_CBLAS_INCLUDE_DIR AND REFERENCE_CBLAS_LIBRARY)
   set(CBLAS_PROVIDER REFERENCE)
   set(CBLAS_INC_DIR ${REFERENCE_CBLAS_INCLUDE_DIR})
-  set(CBLAS_LIBS ${REFERENCE_CBLAS_LIBRARY})
-  return()
+  set(CBLAS_LIBRARIES ${REFERENCE_CBLAS_LIBRARY})
+  message(STATUS "Found reference-cblas (include: ${CBLAS_INC_DIR}, library: ${CBLAS_LIBRARIES})")
+  set(CBLAS_FOUND ON)
 endif()
-
-message(FATAL_ERROR "CBlas must be set. Paddle support MKL, ATLAS, OpenBlas, reference-cblas."
-        " Try set MKL_ROOT, ATLAS_ROOT, OPENBLAS_ROOT or REFERENCE_CBLAS_ROOT.")
diff --git a/cmake/check_packages.cmake b/cmake/check_packages.cmake
deleted file mode 100644
index 8f0ed26256..0000000000
--- a/cmake/check_packages.cmake
+++ /dev/null
@@ -1,14 +0,0 @@
-# Check package for each cmake option
-
-if(WITH_GPU)
-  find_package(CUDA REQUIRED) # CUDA is required when use gpu
-endif(WITH_GPU)
-
-if(WITH_DOC)
-  find_package(Sphinx REQUIRED)
-  find_python_module(recommonmark REQUIRED)
-endif(WITH_DOC)
-
-if(WITH_SWIG_PY)
-  find_python_module(wheel REQUIRED) # package wheel
-endif(WITH_SWIG_PY)
diff --git a/cmake/cpplint.cmake b/cmake/cpplint.cmake
index 241af9a083..38c636b30e 100644
--- a/cmake/cpplint.cmake
+++ b/cmake/cpplint.cmake
@@ -53,7 +53,7 @@ macro(add_style_check_target TARGET_NAME)
         if(LINT MATCHES ON)
             add_custom_command(TARGET ${TARGET_NAME}
                                PRE_BUILD
-                               COMMAND "${PYTHON_EXECUTABLE}" "${PROJ_ROOT}/paddle/scripts/cpplint.py"
+                               COMMAND env ${py_env} "${PYTHON_EXECUTABLE}" "${PROJ_ROOT}/paddle/scripts/cpplint.py"
                                        "--filter=${STYLE_FILTER}" ${filename}
                                WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR})
         endif()
diff --git a/cmake/definitions.cmake b/cmake/definitions.cmake
deleted file mode 100644
index 99a52ad764..0000000000
--- a/cmake/definitions.cmake
+++ /dev/null
@@ -1,62 +0,0 @@
-# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
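definitions.cmake disappears because the previous patch in this series recreated the same logic as cmake/configure.cmake; the only functional difference is that configure.cmake also calls FIND_PACKAGE(CUDA REQUIRED) in its GPU branch, absorbing the check from the likewise-deleted check_packages.cmake. The top-level CMakeLists.txt diff above swaps the includes to match:

    include(configure)   # replaces include(check_packages) and include(definitions)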
- -if(WITH_DSO) - add_definitions(-DPADDLE_USE_DSO) -endif(WITH_DSO) - -if(WITH_DOUBLE) - add_definitions(-DPADDLE_TYPE_DOUBLE) -endif(WITH_DOUBLE) - -if(NOT WITH_TIMER) - add_definitions(-DPADDLE_DISABLE_TIMER) -endif(NOT WITH_TIMER) - -if(NOT WITH_PROFILER) - add_definitions(-DPADDLE_DISABLE_PROFILER) -endif(NOT WITH_PROFILER) - -if(NOT WITH_GPU) - add_definitions(-DPADDLE_ONLY_CPU) - add_definitions(-DHPPL_STUB_FUNC) - - list(APPEND CMAKE_CXX_SOURCE_FILE_EXTENSIONS cu) -else() - if(${CUDA_VERSION_MAJOR} VERSION_LESS 7) - message(FATAL_ERROR "Paddle need CUDA >= 7.0 to compile") - endif() - - if(NOT CUDNN_FOUND) - message(FATAL_ERROR "Paddle need cudnn to compile") - endif() - - if(WITH_AVX) - set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS} "-Xcompiler ${AVX_FLAG}") - else(WITH_AVX) - set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS} "-Xcompiler ${SSE3_FLAG}") - endif(WITH_AVX) - - # Include cuda and cudnn - include_directories(${CUDNN_INCLUDE_DIR}) - include_directories(${CUDA_TOOLKIT_INCLUDE}) -endif(NOT WITH_GPU) - -if(WITH_AVX) - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${AVX_FLAG}") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${AVX_FLAG}") -else(WITH_AVX) - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SSE3_FLAG}") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SSE3_FLAG}") -endif(WITH_AVX) diff --git a/cmake/util.cmake b/cmake/util.cmake index b8d20266f4..c9b48e5f8f 100644 --- a/cmake/util.cmake +++ b/cmake/util.cmake @@ -24,7 +24,7 @@ function(target_circle_link_libraries TARGET_NAME) list(APPEND libsInArgn ${arg}) endif() endforeach() - if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang") + if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang" OR "${CMAKE_CXX_COMPILER_ID}" STREQUAL "AppleClang") list(APPEND LIBS "-undefined dynamic_lookup") endif() list(REVERSE libsInArgn) @@ -105,7 +105,7 @@ function(link_paddle_exe TARGET_NAME) if(WITH_PYTHON) target_link_libraries(${TARGET_NAME} - ${PYTHON_LIBRARIES}) + ${PYTHON_LIBRARIES} util) endif() if(WITH_GPU) diff --git a/paddle/cuda/CMakeLists.txt b/paddle/cuda/CMakeLists.txt index 0a05897854..57fb89608f 100755 --- a/paddle/cuda/CMakeLists.txt +++ b/paddle/cuda/CMakeLists.txt @@ -88,7 +88,7 @@ else() ${CUDA_CXX_SOURCES}) endif() -add_dependencies(paddle_cuda warpctc) +add_dependencies(paddle_cuda ${external_project_dependencies}) add_style_check_target(paddle_cuda ${CUDA_SOURCES} From fc47492f4189ead6be33b6512a46663dc2541b69 Mon Sep 17 00:00:00 2001 From: liaogang Date: Thu, 5 Jan 2017 01:06:16 +0800 Subject: [PATCH 29/51] Fix merge conflict bug and glog --- paddle/function/CMakeLists.txt | 2 - paddle/function/ContextProjectionOp.cpp | 52 ++++++++++----------- paddle/function/Function.cpp | 12 +++-- paddle/gserver/layers/ContextProjection.cpp | 6 ++- warp-ctc | 1 - 5 files changed, 38 insertions(+), 35 deletions(-) delete mode 160000 warp-ctc diff --git a/paddle/function/CMakeLists.txt b/paddle/function/CMakeLists.txt index cfa45e117c..b133d2419a 100644 --- a/paddle/function/CMakeLists.txt +++ b/paddle/function/CMakeLists.txt @@ -12,8 +12,6 @@ endif() add_library(paddle_function STATIC ${cpp_files} ${cu_objs}) add_dependencies(paddle_function ${external_project_dependencies}) -add_library(paddle_test_main STATIC TestMain.cpp) -add_dependencies(paddle_test_main ${external_project_dependencies}) if(WITH_GPU) if(WITH_TESTING) diff --git a/paddle/function/ContextProjectionOp.cpp b/paddle/function/ContextProjectionOp.cpp index bd367a859e..07907fc1ba 100644 --- a/paddle/function/ContextProjectionOp.cpp +++ b/paddle/function/ContextProjectionOp.cpp @@ -85,15 +85,15 @@ public: void calc(const 
Arguments& inputs, const Arguments& outputs, const Arguments& inouts) override { - CHECK_EQ(3, inputs.size()); - CHECK_EQ(1, outputs.size()); - CHECK_EQ(0, inouts.size()); + CHECK_EQ(3, static_cast(inputs.size())); + CHECK_EQ(1, static_cast(outputs.size())); + CHECK_EQ(0, static_cast(inouts.size())); CHECK(outputs[0].getData() && inputs[0].getData() && inputs[2].getData()); - CHECK_EQ(outputs[0].dims_.size(), 2); - CHECK_EQ(inputs[0].dims_.size(), 2); - CHECK_EQ(inputs[1].dims_.size(), 2); - CHECK_EQ(inputs[2].dims_.size(), 1); + CHECK_EQ(static_cast(outputs[0].dims_.size()), 2); + CHECK_EQ(static_cast(inputs[0].dims_.size()), 2); + CHECK_EQ(static_cast(inputs[1].dims_.size()), 2); + CHECK_EQ(static_cast(inputs[2].dims_.size()), 1); /// dim of output = dim of input * context_length CHECK_EQ(outputs[0].dims_[1], inputs[0].dims_[1] * context_length_); /// dim of input == dim of weight @@ -202,15 +202,15 @@ public: void calc(const Arguments& inputs, const Arguments& outputs, const Arguments& inouts) override { - CHECK_EQ(3, inputs.size()); - CHECK_EQ(1, outputs.size()); - CHECK_EQ(0, inouts.size()); + CHECK_EQ(3, static_cast(inputs.size())); + CHECK_EQ(1, static_cast(outputs.size())); + CHECK_EQ(0, static_cast(inouts.size())); CHECK(outputs[0].getData() && inputs[2].getData()); - CHECK_EQ(outputs[0].dims_.size(), 2); - CHECK_EQ(inputs[0].dims_.size(), 2); - CHECK_EQ(inputs[1].dims_.size(), 2); - CHECK_EQ(inputs[2].dims_.size(), 1); + CHECK_EQ(static_cast(outputs[0].dims_.size()), 2); + CHECK_EQ(static_cast(inputs[0].dims_.size()), 2); + CHECK_EQ(static_cast(inputs[1].dims_.size()), 2); + CHECK_EQ(static_cast(inputs[2].dims_.size()), 1); /// dim of input == dim of weight CHECK_EQ(inputs[0].dims_[1], inputs[1].dims_[1]); @@ -269,13 +269,13 @@ public: void calc(const Arguments& inputs, const Arguments& outputs, const Arguments& inouts) override { - CHECK_EQ(2, inputs.size()); - CHECK_EQ(1, outputs.size()); - CHECK_EQ(0, inouts.size()); + CHECK_EQ(2, static_cast(inputs.size())); + CHECK_EQ(1, static_cast(outputs.size())); + CHECK_EQ(0, static_cast(inouts.size())); CHECK(inputs[0].getData() && outputs[0].getData() && inputs[1].getData()); - CHECK_EQ(outputs[0].dims_.size(), 2); - CHECK_EQ(inputs[0].dims_.size(), 2); - CHECK_EQ(inputs[1].dims_.size(), 1); + CHECK_EQ(static_cast(outputs[0].dims_.size()), 2); + CHECK_EQ(static_cast(inputs[0].dims_.size()), 2); + CHECK_EQ(static_cast(inputs[1].dims_.size()), 1); CHECK_EQ(outputs[0].dims_[1], inputs[0].dims_[1] * context_length_); /// input and output has the same batch_size CHECK_EQ(inputs[0].dims_[0], outputs[0].dims_[0]); @@ -317,14 +317,14 @@ public: void calc(const Arguments& inputs, const Arguments& outputs, const Arguments& inouts) override { - CHECK_EQ(2, inputs.size()); - CHECK_EQ(1, outputs.size()); - CHECK_EQ(0, inouts.size()); + CHECK_EQ(2, static_cast(inputs.size())); + CHECK_EQ(1, static_cast(outputs.size())); + CHECK_EQ(0, static_cast(inouts.size())); CHECK(inputs[0].getData() && outputs[0].getData() && inputs[1].getData()); - CHECK_EQ(outputs[0].dims_.size(), 2); - CHECK_EQ(inputs[0].dims_.size(), 2); - CHECK_EQ(inputs[1].dims_.size(), 1); + CHECK_EQ(static_cast(outputs[0].dims_.size()), 2); + CHECK_EQ(static_cast(inputs[0].dims_.size()), 2); + CHECK_EQ(static_cast(inputs[1].dims_.size()), 1); CHECK_EQ(outputs[0].dims_[1], inputs[0].dims_[1] * context_length_); auto out_grad_mat = std::make_shared::type>( diff --git a/paddle/function/Function.cpp b/paddle/function/Function.cpp index 6f82a8d053..614e76b8ac 100644 --- 
a/paddle/function/Function.cpp +++ b/paddle/function/Function.cpp @@ -46,28 +46,32 @@ bool FuncConfig::get(const std::string& key) const { template <> FuncConfig& FuncConfig::set(const std::string& key, size_t v) { - CHECK_EQ(valueMap_.count(key), 0) << "Duplicated value: " << key; + CHECK_EQ(static_cast(valueMap_.count(key)), 0) << "Duplicated value: " + << key; valueMap_[key].s = v; return *this; } template <> FuncConfig& FuncConfig::set(const std::string& key, real v) { - CHECK_EQ(valueMap_.count(key), 0) << "Duplicated value: " << key; + CHECK_EQ(static_cast(valueMap_.count(key)), 0) << "Duplicated value: " + << key; valueMap_[key].r = v; return *this; } template <> FuncConfig& FuncConfig::set(const std::string& key, int v) { - CHECK_EQ(valueMap_.count(key), 0) << "Duplicated value: " << key; + CHECK_EQ(static_cast(valueMap_.count(key)), 0) << "Duplicated value: " + << key; valueMap_[key].i = v; return *this; } template <> FuncConfig& FuncConfig::set(const std::string& key, bool v) { - CHECK_EQ(valueMap_.count(key), 0) << "Duplicated value: " << key; + CHECK_EQ(static_cast(valueMap_.count(key)), 0) << "Duplicated value: " + << key; valueMap_[key].b = v; return *this; } diff --git a/paddle/gserver/layers/ContextProjection.cpp b/paddle/gserver/layers/ContextProjection.cpp index e947b2b9ec..ee4db21989 100644 --- a/paddle/gserver/layers/ContextProjection.cpp +++ b/paddle/gserver/layers/ContextProjection.cpp @@ -111,7 +111,8 @@ void ContextProjection::forward() { size_t dim = out_->value->getWidth(); CHECK_EQ(dim, input_dim * config_.context_length()); size_t batch_size = in_->value->getHeight(); - CHECK_EQ(forward_.size(), 1) << "Only one forward function here"; + CHECK_EQ(static_cast(forward_.size()), 1) + << "Only one forward function here"; REGISTER_TIMER_INFO("ContextProjectionForward", getName().c_str()); bool is_padding = config_.trainable_padding(); @@ -154,7 +155,8 @@ void ContextProjection::backward(const UpdateCallback& callback) { CHECK_EQ(dim, input_dim * config_.context_length()); size_t batch_size = in_->value->getHeight(); CHECK_EQ(batch_size, out_->value->getHeight()); - CHECK_EQ(backward_.size(), 1) << "Only one backward function here"; + CHECK_EQ(static_cast(backward_.size()), 1) + << "Only one backward function here"; REGISTER_TIMER_INFO("ContextProjectionBackward", getName().c_str()); bool is_padding = config_.trainable_padding(); diff --git a/warp-ctc b/warp-ctc deleted file mode 160000 index bd535c8d44..0000000000 --- a/warp-ctc +++ /dev/null @@ -1 +0,0 @@ -Subproject commit bd535c8d44e03c8ebd2d768e06c8c05fdccd11d2 From 3ecc63ad165913b4b62f41a009c9e1c2406a3441 Mon Sep 17 00:00:00 2001 From: liaogang Date: Thu, 5 Jan 2017 11:08:22 +0800 Subject: [PATCH 30/51] Add python in travis ci for paddle version command --- .travis.yml | 2 ++ paddle/api/CMakeLists.txt | 24 ++++++++++++------------ paddle/api/test/CMakeLists.txt | 2 +- paddle/api/test/run_tests.sh | 4 ---- 4 files changed, 15 insertions(+), 17 deletions(-) diff --git a/.travis.yml b/.travis.yml index 61d31132b7..27a4be38f5 100644 --- a/.travis.yml +++ b/.travis.yml @@ -25,7 +25,9 @@ addons: - git - build-essential - libatlas-base-dev + - python - python-pip + - python2.7-dev - curl - graphviz - clang-format-3.8 diff --git a/paddle/api/CMakeLists.txt b/paddle/api/CMakeLists.txt index dd617e3268..3ac50e34bb 100644 --- a/paddle/api/CMakeLists.txt +++ b/paddle/api/CMakeLists.txt @@ -94,17 +94,17 @@ add_dependencies(python_api_wheel python_swig_sources paddle_cuda) if(WITH_TESTING) - SET(PIP_SOURCES_DIR 
${PYTHON_SOURCES_DIR}/pip) - ExternalProject_Add(pip - ${EXTERNAL_PROJECT_LOG_ARGS} - GIT_REPOSITORY https://github.com/pypa/pip.git - GIT_TAG 9.0.1 - PREFIX ${PIP_SOURCES_DIR} - CONFIGURE_COMMAND "" - BUILD_COMMAND "" - INSTALL_COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py install - BUILD_IN_SOURCE 1 - DEPENDS python setuptools python_api_wheel - ) + SET(PIP_SOURCES_DIR ${PYTHON_SOURCES_DIR}/pip) + ExternalProject_Add(pip + ${EXTERNAL_PROJECT_LOG_ARGS} + GIT_REPOSITORY https://github.com/pypa/pip.git + GIT_TAG 9.0.1 + PREFIX ${PIP_SOURCES_DIR} + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py install + BUILD_IN_SOURCE 1 + DEPENDS python setuptools python_api_wheel + ) add_subdirectory(test) endif() diff --git a/paddle/api/test/CMakeLists.txt b/paddle/api/test/CMakeLists.txt index 985df6f56e..a2fa623c80 100644 --- a/paddle/api/test/CMakeLists.txt +++ b/paddle/api/test/CMakeLists.txt @@ -1,2 +1,2 @@ add_test(NAME test_swig_api - COMMAND bash ${PROJ_ROOT}/paddle/api/test/run_tests.sh ${PYTHON_EXECUTABLE} ${PYTHON_INSTALL_DIR}/bin/pip) + COMMAND bash ${PROJ_ROOT}/paddle/api/test/run_tests.sh ${PYTHON_EXECUTABLE}) diff --git a/paddle/api/test/run_tests.sh b/paddle/api/test/run_tests.sh index f00ec2c967..bcf06afa86 100755 --- a/paddle/api/test/run_tests.sh +++ b/paddle/api/test/run_tests.sh @@ -20,10 +20,6 @@ popd > /dev/null cd $SCRIPTPATH -# rm -rf .test_env -# virtualenv .test_env -# source .test_env/bin/activate - $1 -m pip install ../../dist/*.whl test_list="testArguments.py testGradientMachine.py testMatrix.py testVector.py testTrain.py testTrainer.py" From 0407902592d61addceeb5b82313e63225911e6df Mon Sep 17 00:00:00 2001 From: liaogang Date: Thu, 5 Jan 2017 11:59:09 +0800 Subject: [PATCH 31/51] Add wheel for paddle version command --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 27a4be38f5..4c65b8c7e9 100644 --- a/.travis.yml +++ b/.travis.yml @@ -47,7 +47,7 @@ before_install: fi - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then paddle/scripts/travis/before_install.osx.sh; fi - if [[ "$JOB" == "PRE_COMMIT" ]]; then sudo ln -s /usr/bin/clang-format-3.8 /usr/bin/clang-format; fi - - pip install protobuf sphinx recommonmark sphinx_rtd_theme virtualenv pre-commit requests==2.9.2 LinkChecker + - pip install wheel protobuf sphinx recommonmark sphinx_rtd_theme virtualenv pre-commit requests==2.9.2 LinkChecker script: - paddle/scripts/travis/main.sh notifications: From 7b9c9696fc007115dd6c8dfba843fa9220a98937 Mon Sep 17 00:00:00 2001 From: Helin Wang Date: Wed, 4 Jan 2017 21:00:32 -0800 Subject: [PATCH 32/51] =?UTF-8?q?remove=20double=20spaces,=20remove=20chin?= =?UTF-8?q?ese=20character=20"=EF=BC=8C"?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- doc/howto/usage/k8s/k8s_aws_en.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/howto/usage/k8s/k8s_aws_en.md b/doc/howto/usage/k8s/k8s_aws_en.md index 422dc3bd81..b04bfba590 100644 --- a/doc/howto/usage/k8s/k8s_aws_en.md +++ b/doc/howto/usage/k8s/k8s_aws_en.md @@ -4,10 +4,10 @@ To use AWS, we need to sign up an AWS account on Amazon's Web site. An AWS account allows us to login to the AWS Console Web interface to -create IAM users and user groups. Usually, we create a user group with +create IAM users and user groups. 
Usually, we create a user group with privileges required to run PaddlePaddle, and we create users for those who are going to run PaddlePaddle and add these users into the -group. IAM users can identify themselves using password and tokens, +group. IAM users can identify themselves using password and tokens, where passwords allows users to log in to the AWS Console, and tokens make it easy for users to submit and inspect jobs from the command line. @@ -360,7 +360,7 @@ In one time of distributed training, user will confirm the PaddlePaddle node num ####Create PaddlePaddle Node -After Kubernetes master gets the request, it will parse the yaml file and create several pods (defined by PaddlePaddle's node number), Kubernetes will allocate these pods onto cluster's node. A pod represents a PaddlePaddle node, when pod is successfully allocated onto one physical/virtual machine, Kubernetes will startup the container in the pod, and this container will use the environment variables in yaml file and start up `paddle pserver` and `paddle trainer` processes. +After Kubernetes master gets the request, it will parse the yaml file and create several pods (defined by PaddlePaddle's node number), Kubernetes will allocate these pods onto cluster's node. A pod represents a PaddlePaddle node, when pod is successfully allocated onto one physical/virtual machine, Kubernetes will startup the container in the pod, and this container will use the environment variables in yaml file and start up `paddle pserver` and `paddle trainer` processes. ####Start up Training @@ -661,6 +661,6 @@ Sometimes we might need to create or manage the cluster on AWS manually with lim ### Some Presumptions * Instances run on CoreOS, the official IAM. -* Kubernetes node use instance storage, no EBS get mounted. Etcd is running on additional node. +* Kubernetes node use instance storage, no EBS get mounted. Etcd is running on additional node. * For networking, we use Flannel network at this moment, we will use Calico solution later on. * When you create a service with Type=LoadBalancer, Kubernetes will create and ELB, and create a security group for the ELB. 
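The test-harness changes in PATCH 30 boil down to this: CMake bootstraps pip as an external project, and run_tests.sh installs the freshly built wheel with the same interpreter that built it before running the swig API tests. A minimal shell sketch of the equivalent manual flow, assuming the wheel has already been produced under paddle/dist and that python2.7 is the interpreter CMake passes to the script as $1:

#!/bin/bash
# Sketch only: mirrors what run_tests.sh does after PATCH 30.
# Assumptions: the paddle wheel already exists under ../../dist, and
# $PY matches the PYTHON_EXECUTABLE that CMake hands to the script.
set -e
PY=python2.7
$PY -m pip install --upgrade pip       # PATCH 31/33 add wheel support and a newer pip
$PY -m pip install ../../dist/*.whl    # install the wheel built by python_api_wheel
$PY testMatrix.py                      # run one of the swig API tests from test_list

Installing the wheel into the interpreter, rather than into a throwaway virtualenv, is why the commented-out virtualenv block could be dropped from run_tests.sh.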
From 19dc8df41d69396c9810402f830cbc01dd49f19e Mon Sep 17 00:00:00 2001 From: liaogang Date: Thu, 5 Jan 2017 14:47:21 +0800 Subject: [PATCH 33/51] Update travis ci pip --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index 4c65b8c7e9..ba2f8482b5 100644 --- a/.travis.yml +++ b/.travis.yml @@ -47,6 +47,7 @@ before_install: fi - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then paddle/scripts/travis/before_install.osx.sh; fi - if [[ "$JOB" == "PRE_COMMIT" ]]; then sudo ln -s /usr/bin/clang-format-3.8 /usr/bin/clang-format; fi + - pip install --upgrade pip - pip install wheel protobuf sphinx recommonmark sphinx_rtd_theme virtualenv pre-commit requests==2.9.2 LinkChecker script: - paddle/scripts/travis/main.sh From be8b12684508396987a972eb1188e38a9253b10f Mon Sep 17 00:00:00 2001 From: liaogang Date: Thu, 5 Jan 2017 16:56:49 +0800 Subject: [PATCH 34/51] Move Execepts into arch/osx dir --- .gitignore | 1 + paddle/api/PaddleAPI.h | 2 +- paddle/api/Util.cpp | 3 +-- paddle/gserver/dataproviders/DataProvider.h | 2 +- paddle/gserver/dataproviders/PyDataProvider.cpp | 3 +-- paddle/gserver/layers/GruCompute.h | 2 +- paddle/gserver/layers/LstmCompute.h | 2 +- paddle/gserver/layers/MultinomialSampler.h | 2 +- paddle/math/BaseMatrix.h | 2 +- paddle/math/Matrix.h | 2 +- paddle/math/TensorExpression.h | 2 +- paddle/math/Vector.h | 2 +- paddle/math/tests/test_FPException.cpp | 4 ++-- paddle/parameter/ParallelParameter.h | 2 +- paddle/parameter/Parameter.h | 2 +- paddle/parameter/ParameterUpdateFunctions.h | 2 +- paddle/pserver/BaseClient.h | 2 +- paddle/pserver/ParameterClient2.h | 2 +- paddle/pserver/ParameterServer2.h | 2 +- paddle/trainer/Trainer.cpp | 3 +-- paddle/trainer/TrainerMain.cpp | 3 +-- paddle/utils/CpuId.h | 2 +- paddle/utils/Excepts.h | 2 ++ paddle/utils/Locks.h | 2 +- paddle/utils/Util.h | 2 +- paddle/utils/Version.h | 2 +- paddle/utils/{ => arch/osx}/Excepts.cpp | 4 +--- paddle/utils/common.h | 2 ++ 28 files changed, 31 insertions(+), 32 deletions(-) rename paddle/utils/{ => arch/osx}/Excepts.cpp (97%) diff --git a/.gitignore b/.gitignore index 1c9730a5ad..f963c2660d 100644 --- a/.gitignore +++ b/.gitignore @@ -12,3 +12,4 @@ Makefile *~ bazel-* +third_party/ diff --git a/paddle/api/PaddleAPI.h b/paddle/api/PaddleAPI.h index 81c9eed0bc..364d19f941 100644 --- a/paddle/api/PaddleAPI.h +++ b/paddle/api/PaddleAPI.h @@ -19,8 +19,8 @@ limitations under the License. */ #include #include #include +#include "paddle/utils/Common.h" #include "paddle/utils/GlobalConstants.h" -#include "paddle/utils/common.h" /// Import PaddlePaddle's enumeration into global namespace. using namespace paddle::enumeration_wrapper; // NOLINT diff --git a/paddle/api/Util.cpp b/paddle/api/Util.cpp index c3f739568f..54d67aa62f 100644 --- a/paddle/api/Util.cpp +++ b/paddle/api/Util.cpp @@ -15,12 +15,11 @@ limitations under the License. */ #include "PaddleAPI.h" #include "paddle/parameter/Parameter.h" -#include "paddle/utils/Excepts.h" +#include "paddle/utils/Common.h" #include "paddle/utils/Flags.h" #include "paddle/utils/PythonUtil.h" #include "paddle/utils/Util.h" -#include #include #include #include diff --git a/paddle/gserver/dataproviders/DataProvider.h b/paddle/gserver/dataproviders/DataProvider.h index 5f031fc7c0..9a2ad7567f 100644 --- a/paddle/gserver/dataproviders/DataProvider.h +++ b/paddle/gserver/dataproviders/DataProvider.h @@ -30,12 +30,12 @@ limitations under the License. 
*/ #include "paddle/math/Vector.h" #include "paddle/parameter/Argument.h" #include "paddle/utils/ClassRegistrar.h" +#include "paddle/utils/Common.h" #include "paddle/utils/Locks.h" #include "paddle/utils/Logging.h" #include "paddle/utils/Queue.h" #include "paddle/utils/ThreadLocal.h" #include "paddle/utils/Util.h" -#include "paddle/utils/common.h" namespace paddle { /** diff --git a/paddle/gserver/dataproviders/PyDataProvider.cpp b/paddle/gserver/dataproviders/PyDataProvider.cpp index 5bdd55309c..b53790e764 100644 --- a/paddle/gserver/dataproviders/PyDataProvider.cpp +++ b/paddle/gserver/dataproviders/PyDataProvider.cpp @@ -13,8 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "PyDataProvider.h" -#include -#include "paddle/utils/Excepts.h" +#include "paddle/utils/Common.h" #include "paddle/utils/PythonUtil.h" #include "paddle/utils/Util.h" diff --git a/paddle/gserver/layers/GruCompute.h b/paddle/gserver/layers/GruCompute.h index a56af21317..3340e38e62 100644 --- a/paddle/gserver/layers/GruCompute.h +++ b/paddle/gserver/layers/GruCompute.h @@ -16,7 +16,7 @@ limitations under the License. */ #include "ModelConfig.pb.h" #include "hl_gpu.h" -#include "paddle/utils/common.h" +#include "paddle/utils/Common.h" namespace paddle { diff --git a/paddle/gserver/layers/LstmCompute.h b/paddle/gserver/layers/LstmCompute.h index 0d65b4158e..2588fad279 100644 --- a/paddle/gserver/layers/LstmCompute.h +++ b/paddle/gserver/layers/LstmCompute.h @@ -16,7 +16,7 @@ limitations under the License. */ #include "ModelConfig.pb.h" #include "hl_gpu.h" -#include "paddle/utils/common.h" +#include "paddle/utils/Common.h" namespace paddle { diff --git a/paddle/gserver/layers/MultinomialSampler.h b/paddle/gserver/layers/MultinomialSampler.h index b48073c80b..546ef9c1f2 100644 --- a/paddle/gserver/layers/MultinomialSampler.h +++ b/paddle/gserver/layers/MultinomialSampler.h @@ -16,7 +16,7 @@ limitations under the License. */ #include #include -#include "paddle/utils/common.h" +#include "paddle/utils/Common.h" namespace paddle { diff --git a/paddle/math/BaseMatrix.h b/paddle/math/BaseMatrix.h index 8f9bc9e823..8691c87ac3 100644 --- a/paddle/math/BaseMatrix.h +++ b/paddle/math/BaseMatrix.h @@ -16,7 +16,7 @@ limitations under the License. */ #include #include #include "TensorExpression.h" -#include "paddle/utils/common.h" +#include "paddle/utils/Common.h" namespace paddle { diff --git a/paddle/math/Matrix.h b/paddle/math/Matrix.h index 4865a081a5..ceac0212d2 100644 --- a/paddle/math/Matrix.h +++ b/paddle/math/Matrix.h @@ -26,8 +26,8 @@ limitations under the License. */ #include "BaseMatrix.h" #include "MemoryHandle.h" #include "Vector.h" +#include "paddle/utils/Common.h" #include "paddle/utils/ThreadLocal.h" -#include "paddle/utils/common.h" namespace paddle { diff --git a/paddle/math/TensorExpression.h b/paddle/math/TensorExpression.h index f3d60e4003..6fd60e7f3c 100644 --- a/paddle/math/TensorExpression.h +++ b/paddle/math/TensorExpression.h @@ -16,8 +16,8 @@ limitations under the License. */ #include #include #include "hl_tensor_ops.h" +#include "paddle/utils/Common.h" #include "paddle/utils/Logging.h" -#include "paddle/utils/common.h" namespace paddle { diff --git a/paddle/math/Vector.h b/paddle/math/Vector.h index b4347a70f8..9af6e30c9e 100644 --- a/paddle/math/Vector.h +++ b/paddle/math/Vector.h @@ -21,8 +21,8 @@ limitations under the License. 
*/ #include "BaseMatrix.h" #include "MemoryHandle.h" +#include "paddle/utils/Common.h" #include "paddle/utils/Thread.h" -#include "paddle/utils/common.h" namespace paddle { diff --git a/paddle/math/tests/test_FPException.cpp b/paddle/math/tests/test_FPException.cpp index 6aa5891bce..3836f7fc0f 100644 --- a/paddle/math/tests/test_FPException.cpp +++ b/paddle/math/tests/test_FPException.cpp @@ -28,10 +28,10 @@ limitations under the License. */ * so we can add some tricks to prevent exp calculate an excessive value. * */ -#include + #include #include "paddle/math/Matrix.h" -#include "paddle/utils/Excepts.h" +#include "paddle/utils/Common.h" using namespace paddle; // NOLINT diff --git a/paddle/parameter/ParallelParameter.h b/paddle/parameter/ParallelParameter.h index 1ee220d2dc..2e7c18b808 100644 --- a/paddle/parameter/ParallelParameter.h +++ b/paddle/parameter/ParallelParameter.h @@ -26,9 +26,9 @@ limitations under the License. */ #include "paddle/math/Vector.h" #include "paddle/parameter/Parameter.h" #include "paddle/parameter/ParameterUpdateFunctions.h" +#include "paddle/utils/Common.h" #include "paddle/utils/Flags.h" #include "paddle/utils/Locks.h" -#include "paddle/utils/common.h" #include "ParameterConfig.pb.h" diff --git a/paddle/parameter/Parameter.h b/paddle/parameter/Parameter.h index e05137b315..72c8336799 100644 --- a/paddle/parameter/Parameter.h +++ b/paddle/parameter/Parameter.h @@ -26,11 +26,11 @@ limitations under the License. */ #include "ParameterUpdaterHook.h" #include "paddle/math/Matrix.h" #include "paddle/math/Vector.h" +#include "paddle/utils/Common.h" #include "paddle/utils/GlobalConstants.h" #include "paddle/utils/Locks.h" #include "paddle/utils/ThreadLocal.h" #include "paddle/utils/Util.h" -#include "paddle/utils/common.h" namespace paddle { diff --git a/paddle/parameter/ParameterUpdateFunctions.h b/paddle/parameter/ParameterUpdateFunctions.h index 2cb3798717..0fca280149 100644 --- a/paddle/parameter/ParameterUpdateFunctions.h +++ b/paddle/parameter/ParameterUpdateFunctions.h @@ -15,7 +15,7 @@ limitations under the License. */ #pragma once #include "paddle/math/Vector.h" -#include "paddle/utils/common.h" +#include "paddle/utils/Common.h" namespace paddle { diff --git a/paddle/pserver/BaseClient.h b/paddle/pserver/BaseClient.h index ccf05ae1ca..11d7a147bf 100644 --- a/paddle/pserver/BaseClient.h +++ b/paddle/pserver/BaseClient.h @@ -17,8 +17,8 @@ limitations under the License. */ #include "ParameterService.pb.h" #include "paddle/math/Matrix.h" #include "paddle/pserver/ProtoServer.h" +#include "paddle/utils/Common.h" #include "paddle/utils/Queue.h" -#include "paddle/utils/common.h" namespace paddle { diff --git a/paddle/pserver/ParameterClient2.h b/paddle/pserver/ParameterClient2.h index 70cfc6d700..89b3ddd502 100644 --- a/paddle/pserver/ParameterClient2.h +++ b/paddle/pserver/ParameterClient2.h @@ -23,11 +23,11 @@ limitations under the License. */ #include "paddle/math/Vector.h" #include "paddle/parameter/Parameter.h" #include "paddle/pserver/BaseClient.h" +#include "paddle/utils/Common.h" #include "paddle/utils/Flags.h" #include "paddle/utils/Locks.h" #include "paddle/utils/Queue.h" #include "paddle/utils/Util.h" -#include "paddle/utils/common.h" #include "ParameterService.pb.h" diff --git a/paddle/pserver/ParameterServer2.h b/paddle/pserver/ParameterServer2.h index 79d1eb97ff..0f5a589590 100644 --- a/paddle/pserver/ParameterServer2.h +++ b/paddle/pserver/ParameterServer2.h @@ -29,10 +29,10 @@ limitations under the License. 
*/ #include "paddle/math/Vector.h" #include "paddle/parameter/Parameter.h" #include "paddle/parameter/ParameterOptimizer.h" +#include "paddle/utils/Common.h" #include "paddle/utils/Locks.h" #include "paddle/utils/Stat.h" #include "paddle/utils/ThreadLocal.h" -#include "paddle/utils/common.h" #include "ParameterService.pb.h" diff --git a/paddle/trainer/Trainer.cpp b/paddle/trainer/Trainer.cpp index 09e0a213ab..8465addaf9 100644 --- a/paddle/trainer/Trainer.cpp +++ b/paddle/trainer/Trainer.cpp @@ -14,7 +14,6 @@ limitations under the License. */ #include "Trainer.h" -#include #include #include @@ -24,7 +23,7 @@ limitations under the License. */ #include -#include "paddle/utils/Excepts.h" +#include "paddle/utils/Common.h" #include "paddle/utils/GlobalConstants.h" #include "paddle/utils/PythonUtil.h" #include "paddle/utils/Stat.h" diff --git a/paddle/trainer/TrainerMain.cpp b/paddle/trainer/TrainerMain.cpp index 947f9cadcc..e2fbd21e14 100644 --- a/paddle/trainer/TrainerMain.cpp +++ b/paddle/trainer/TrainerMain.cpp @@ -12,9 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include #include "paddle/pserver/ParameterServer2.h" -#include "paddle/utils/Excepts.h" +#include "paddle/utils/Common.h" #include "paddle/utils/PythonUtil.h" #include "paddle/utils/StringUtil.h" diff --git a/paddle/utils/CpuId.h b/paddle/utils/CpuId.h index 1218e8194c..0f3985cc7b 100644 --- a/paddle/utils/CpuId.h +++ b/paddle/utils/CpuId.h @@ -11,7 +11,7 @@ limitations under the License. */ #pragma once -#include "common.h" +#include "Common.h" namespace paddle { diff --git a/paddle/utils/Excepts.h b/paddle/utils/Excepts.h index dc3369b7e8..5c2c504f53 100644 --- a/paddle/utils/Excepts.h +++ b/paddle/utils/Excepts.h @@ -15,6 +15,8 @@ limitations under the License. */ #ifndef EXCEPTS_H_ #define EXCEPTS_H_ +#include + #if defined(__APPLE__) || defined(__OSX__) int fegetexcept(void); diff --git a/paddle/utils/Locks.h b/paddle/utils/Locks.h index a21872e89e..e87abb9139 100644 --- a/paddle/utils/Locks.h +++ b/paddle/utils/Locks.h @@ -19,7 +19,7 @@ limitations under the License. */ #include #include -#include "common.h" +#include "Common.h" namespace paddle { diff --git a/paddle/utils/Util.h b/paddle/utils/Util.h index dc15ada586..613844669d 100644 --- a/paddle/utils/Util.h +++ b/paddle/utils/Util.h @@ -26,9 +26,9 @@ limitations under the License. */ #include #include +#include "Common.h" #include "Logging.h" #include "TrainerConfig.pb.h" -#include "common.h" #include "Flags.h" #include "hl_gpu.h" diff --git a/paddle/utils/Version.h b/paddle/utils/Version.h index aa5df32438..f53d6420bb 100644 --- a/paddle/utils/Version.h +++ b/paddle/utils/Version.h @@ -15,7 +15,7 @@ limitations under the License. */ #pragma once #include #include -#include "common.h" +#include "Common.h" namespace paddle { diff --git a/paddle/utils/Excepts.cpp b/paddle/utils/arch/osx/Excepts.cpp similarity index 97% rename from paddle/utils/Excepts.cpp rename to paddle/utils/arch/osx/Excepts.cpp index 4ddce35ed3..c8e904d8f9 100644 --- a/paddle/utils/Excepts.cpp +++ b/paddle/utils/arch/osx/Excepts.cpp @@ -12,12 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#include "Excepts.h" +#include "paddle/utils/Excepts.h" #if defined(__APPLE__) || defined(__OSX__) -#include - int fegetexcept(void) { static fenv_t fenv; return fegetenv(&fenv) ? -1 : (fenv.__control & FE_ALL_EXCEPT); diff --git a/paddle/utils/common.h b/paddle/utils/common.h index 202a9d980d..1f1d0255a5 100644 --- a/paddle/utils/common.h +++ b/paddle/utils/common.h @@ -14,6 +14,8 @@ limitations under the License. */ #pragma once +#include "Excepts.h" + /** * Disable copy macro. */ From 72b95533a120080908bec3fbc9f3c2607a4f6004 Mon Sep 17 00:00:00 2001 From: gangliao Date: Thu, 5 Jan 2017 17:17:18 +0800 Subject: [PATCH 35/51] Revise common to Common --- paddle/utils/{common.h => Common.h} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename paddle/utils/{common.h => Common.h} (100%) diff --git a/paddle/utils/common.h b/paddle/utils/Common.h similarity index 100% rename from paddle/utils/common.h rename to paddle/utils/Common.h From d0a5ce290a4f9ecdb984a465ed90a790e30d47a5 Mon Sep 17 00:00:00 2001 From: liaogang Date: Thu, 5 Jan 2017 18:06:00 +0800 Subject: [PATCH 36/51] Clean travis ci --- .travis.yml | 2 +- cmake/external/python.cmake | 35 ++++++------------------- paddle/scripts/travis/build_and_test.sh | 8 +++--- python/CMakeLists.txt | 2 +- 4 files changed, 15 insertions(+), 32 deletions(-) diff --git a/.travis.yml b/.travis.yml index ba2f8482b5..426f0eb746 100644 --- a/.travis.yml +++ b/.travis.yml @@ -47,7 +47,7 @@ before_install: fi - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then paddle/scripts/travis/before_install.osx.sh; fi - if [[ "$JOB" == "PRE_COMMIT" ]]; then sudo ln -s /usr/bin/clang-format-3.8 /usr/bin/clang-format; fi - - pip install --upgrade pip + - pip install --upgrade pip - pip install wheel protobuf sphinx recommonmark sphinx_rtd_theme virtualenv pre-commit requests==2.9.2 LinkChecker script: - paddle/scripts/travis/main.sh diff --git a/cmake/external/python.cmake b/cmake/external/python.cmake index 479ec301b7..e4c570479f 100644 --- a/cmake/external/python.cmake +++ b/cmake/external/python.cmake @@ -138,41 +138,22 @@ SET(NUMPY_SOURCES_DIR ${PYTHON_SOURCES_DIR}/numpy) SET(NUMPY_TAG_VERSION "v1.11.3") SET(NUMPY_VERSION "1.11.3") +SET(EGG_NAME "") +SET(PYTHON_NUMPY_INCLUDE_DIR "") IF(WIN32) SET(EGG_NAME "numpy-${NUMPY_VERSION}-py2.7-${HOST_SYSTEM}.egg") ELSE(WIN32) IF(APPLE) - SET(EGG_NAME "numpy-${NUMPY_VERSION}-py2.7-${HOST_SYSTEM}-${MACOS_VERSION}-x86_64.egg") + SET(EGG_NAME "numpy-${NUMPY_VERSION}-py2.7-${HOST_SYSTEM}-${MACOS_VERSION}") ELSE(APPLE) - SET(EGG_NAME "numpy-${NUMPY_VERSION}-py2.7-linux-x86_64.egg") + SET(EGG_NAME "numpy-${NUMPY_VERSION}-py2.7-linux") + SET(EGG_NAME "numpy-${NUMPY_VERSION}-py2.7-linux") ENDIF(APPLE) -ENDIF(WIN32) -SET(PYTHON_NUMPY_INCLUDE_DIR "${PY_SITE_PACKAGES_PATH}/${EGG_NAME}/numpy/core/include") -IF(${PYTHON_FOUND}) # local python - SET(PYTHON_NUMPY_INCLUDE_DIR - "${PY_SITE_PACKAGES_PATH}/${EGG_NAME}/numpy/core/include") -ELSE(${PYTHON_FOUND}) # global python - SET(PYTHON_NUMPY_INCLUDE_DIR "") - SET(PY_SITE_PACKAGES_DIR "") - FILE(WRITE ${PROJECT_BINARY_DIR}/FindNumpyPath.py - "try: import site; print(site.getsitepackages())\nexcept:pass\n") - EXEC_PROGRAM("env ${py_env} ${PYTHON_EXECUTABLE}" ${PROJECT_BINARY_DIR} - ARGS "FindNumpyPath.py" OUTPUT_VARIABLE NUMPY_PATH) - - STRING(REPLACE "[" "" NUMPY_PATH "${NUMPY_PATH}") - STRING(REPLACE "]" "" NUMPY_PATH "${NUMPY_PATH}") - STRING(REPLACE "'" "" NUMPY_PATH "${NUMPY_PATH}") - STRING(REPLACE ", " ";" SITE_DIRS "${NUMPY_PATH}") - - FOREACH(SITE_DIR ${SITE_DIRS}) - IF(EXISTS 
${SITE_DIR}) - LIST(APPEND PYTHON_NUMPY_INCLUDE_DIR - "${SITE_DIR}/${EGG_NAME}/numpy/core/include") - SET(PY_SITE_PACKAGES_DIR "${SITE_DIR}") - ENDIF() + FOREACH(suffix x86_64 intel fat64 fat32 universal) + LIST(APPEND PYTHON_NUMPY_INCLUDE_DIR ${PY_SITE_PACKAGES_PATH}/${EGG_NAME}-${suffix}.egg/numpy/core/include) ENDFOREACH() -ENDIF(${PYTHON_FOUND}) +ENDIF(WIN32) INCLUDE_DIRECTORIES(${PYTHON_NUMPY_INCLUDE_DIR}) diff --git a/paddle/scripts/travis/build_and_test.sh b/paddle/scripts/travis/build_and_test.sh index fb21712188..ffc48eae66 100755 --- a/paddle/scripts/travis/build_and_test.sh +++ b/paddle/scripts/travis/build_and_test.sh @@ -1,6 +1,8 @@ #!/bin/bash source ./common.sh +python -c 'import pip; print(pip.pep425tags.get_supported())' + if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then CMAKE_EXTRA="-DWITH_SWIG_PY=OFF" else @@ -14,11 +16,11 @@ if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then NRPOC=`nproc` make -j $NPROC make coveralls + sudo make install elif [[ "$TRAVIS_OS_NAME" == "osx" ]]; then NPROC=`sysctl -n hw.ncpu` make -j $NPROC env CTEST_OUTPUT_ON_FAILURE=1 make test ARGS="-j $NPROC" + sudo make install + sudo paddle version fi - -sudo make install -sudo paddle version diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index 0a3599a47a..1cda4762eb 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -16,7 +16,7 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/setup.py.in add_custom_command(OUTPUT ${OUTPUT_DIR}/.timestamp COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py bdist_wheel COMMAND ${CMAKE_COMMAND} -E touch ${OUTPUT_DIR}/.timestamp - DEPENDS gen_proto_py ${PY_FILES}) + DEPENDS gen_proto_py ${PY_FILES} ${external_project_dependencies}) add_custom_target(paddle_python ALL DEPENDS ${OUTPUT_DIR}/.timestamp) From b49745cd56eb882ad990c626c6c44846a8dbf247 Mon Sep 17 00:00:00 2001 From: liaogang Date: Fri, 6 Jan 2017 13:52:24 +0800 Subject: [PATCH 37/51] Add find system's swig --- .travis.yml | 1 + cmake/external/swig.cmake | 104 ++++++++++++++++++++------------------ 2 files changed, 55 insertions(+), 50 deletions(-) diff --git a/.travis.yml b/.travis.yml index 426f0eb746..bc91855a85 100644 --- a/.travis.yml +++ b/.travis.yml @@ -29,6 +29,7 @@ addons: - python-pip - python2.7-dev - curl + - swig - graphviz - clang-format-3.8 - automake diff --git a/cmake/external/swig.cmake b/cmake/external/swig.cmake index 5460b02c37..40088c65ef 100644 --- a/cmake/external/swig.cmake +++ b/cmake/external/swig.cmake @@ -12,59 +12,63 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-# build swig as an external project -INCLUDE(ExternalProject) +FIND_PACKAGE(SWIG) -SET(SWIG_SOURCES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/swig) -SET(SWIG_INSTALL_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/install/swig) -SET(SWIG_TARGET_VERSION "3.0.2") -SET(SWIG_DOWNLOAD_SRC_MD5 "62f9b0d010cef36a13a010dc530d0d41") -SET(SWIG_DOWNLOAD_WIN_MD5 "3f18de4fc09ab9abb0d3be37c11fbc8f") +IF(NOT SWIG_FOUND) + # build swig as an external project + INCLUDE(ExternalProject) -IF(WIN32) - # swig.exe available as pre-built binary on Windows: - ExternalProject_Add(swig - URL http://prdownloads.sourceforge.net/swig/swigwin-${SWIG_TARGET_VERSION}.zip - URL_MD5 ${SWIG_DOWNLOAD_WIN_MD5} - SOURCE_DIR ${SWIG_SOURCES_DIR} - CONFIGURE_COMMAND "" - BUILD_COMMAND "" - INSTALL_COMMAND "" - UPDATE_COMMAND "" - ) - SET(SWIG_DIR ${SWIG_SOURCES_DIR} CACHE FILEPATH "SWIG Directory" FORCE) - SET(SWIG_EXECUTABLE ${SWIG_SOURCES_DIR}/swig.exe CACHE FILEPATH "SWIG Executable" FORCE) -ELSE(WIN32) - # From PCRE configure - ExternalProject_Add(pcre - ${EXTERNAL_PROJECT_LOG_ARGS} - GIT_REPOSITORY https://github.com/svn2github/pcre.git - PREFIX ${SWIG_SOURCES_DIR}/pcre - CMAKE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${SWIG_INSTALL_DIR}/pcre - ) + SET(SWIG_SOURCES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/swig) + SET(SWIG_INSTALL_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/install/swig) + SET(SWIG_TARGET_VERSION "3.0.2") + SET(SWIG_DOWNLOAD_SRC_MD5 "62f9b0d010cef36a13a010dc530d0d41") + SET(SWIG_DOWNLOAD_WIN_MD5 "3f18de4fc09ab9abb0d3be37c11fbc8f") - # swig uses bison find it by cmake and pass it down - FIND_PACKAGE(BISON) + IF(WIN32) + # swig.exe available as pre-built binary on Windows: + ExternalProject_Add(swig + URL http://prdownloads.sourceforge.net/swig/swigwin-${SWIG_TARGET_VERSION}.zip + URL_MD5 ${SWIG_DOWNLOAD_WIN_MD5} + SOURCE_DIR ${SWIG_SOURCES_DIR} + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND "" + UPDATE_COMMAND "" + ) + SET(SWIG_DIR ${SWIG_SOURCES_DIR} CACHE FILEPATH "SWIG Directory" FORCE) + SET(SWIG_EXECUTABLE ${SWIG_SOURCES_DIR}/swig.exe CACHE FILEPATH "SWIG Executable" FORCE) + ELSE(WIN32) + # From PCRE configure + ExternalProject_Add(pcre + ${EXTERNAL_PROJECT_LOG_ARGS} + GIT_REPOSITORY https://github.com/svn2github/pcre.git + PREFIX ${SWIG_SOURCES_DIR}/pcre + CMAKE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${SWIG_INSTALL_DIR}/pcre + ) - # From SWIG configure - ExternalProject_Add(swig - GIT_REPOSITORY https://github.com/swig/swig.git - GIT_TAG rel-3.0.10 - PREFIX ${SWIG_SOURCES_DIR} - CONFIGURE_COMMAND cd ${SWIG_SOURCES_DIR}/src/swig && ./autogen.sh - CONFIGURE_COMMAND cd ${SWIG_SOURCES_DIR}/src/swig && - env "PCRE_LIBS=${SWIG_INSTALL_DIR}/pcre/lib/libpcre.a ${SWIG_INSTALL_DIR}/pcre/lib/libpcrecpp.a ${SWIG_INSTALL_DIR}/pcre/lib/libpcreposix.a" - ./configure - --prefix=${SWIG_INSTALL_DIR} - --with-pcre-prefix=${SWIG_INSTALL_DIR}/pcre - BUILD_COMMAND cd ${SWIG_SOURCES_DIR}/src/swig && make - INSTALL_COMMAND cd ${SWIG_SOURCES_DIR}/src/swig && make install - UPDATE_COMMAND "" - DEPENDS pcre - ) + # swig uses bison find it by cmake and pass it down + FIND_PACKAGE(BISON) - SET(SWIG_DIR ${SWIG_INSTALL_DIR}/share/swig/${SWIG_TARGET_VERSION}) - SET(SWIG_EXECUTABLE ${SWIG_INSTALL_DIR}/bin/swig) -ENDIF(WIN32) + # From SWIG configure + ExternalProject_Add(swig + GIT_REPOSITORY https://github.com/swig/swig.git + GIT_TAG rel-3.0.10 + PREFIX ${SWIG_SOURCES_DIR} + CONFIGURE_COMMAND cd ${SWIG_SOURCES_DIR}/src/swig && ./autogen.sh + CONFIGURE_COMMAND cd ${SWIG_SOURCES_DIR}/src/swig && + env 
"PCRE_LIBS=${SWIG_INSTALL_DIR}/pcre/lib/libpcre.a ${SWIG_INSTALL_DIR}/pcre/lib/libpcrecpp.a ${SWIG_INSTALL_DIR}/pcre/lib/libpcreposix.a" + ./configure + --prefix=${SWIG_INSTALL_DIR} + --with-pcre-prefix=${SWIG_INSTALL_DIR}/pcre + BUILD_COMMAND cd ${SWIG_SOURCES_DIR}/src/swig && make + INSTALL_COMMAND cd ${SWIG_SOURCES_DIR}/src/swig && make install + UPDATE_COMMAND "" + DEPENDS pcre + ) -LIST(APPEND external_project_dependencies swig) + SET(SWIG_DIR ${SWIG_INSTALL_DIR}/share/swig/${SWIG_TARGET_VERSION}) + SET(SWIG_EXECUTABLE ${SWIG_INSTALL_DIR}/bin/swig) + ENDIF(WIN32) + + LIST(APPEND external_project_dependencies swig) +ENDIF(NOT SWIG_FOUND) From 411e234808ff9d0e23d2a65faf8a63cab71f3b52 Mon Sep 17 00:00:00 2001 From: chengxingyi Date: Fri, 6 Jan 2017 16:22:32 +0800 Subject: [PATCH 38/51] A traffic demo for ASC17 --- demo/traffic_prediction/README | 7 +++ demo/traffic_prediction/data/get_data.sh | 34 ++++++++++ demo/traffic_prediction/dataprovider.py | 77 +++++++++++++++++++++++ demo/traffic_prediction/gen_result.py | 47 ++++++++++++++ demo/traffic_prediction/predict.sh | 30 +++++++++ demo/traffic_prediction/train.sh | 30 +++++++++ demo/traffic_prediction/trainer_config.py | 43 +++++++++++++ 7 files changed, 268 insertions(+) create mode 100644 demo/traffic_prediction/README create mode 100755 demo/traffic_prediction/data/get_data.sh create mode 100644 demo/traffic_prediction/dataprovider.py create mode 100644 demo/traffic_prediction/gen_result.py create mode 100755 demo/traffic_prediction/predict.sh create mode 100755 demo/traffic_prediction/train.sh create mode 100755 demo/traffic_prediction/trainer_config.py diff --git a/demo/traffic_prediction/README b/demo/traffic_prediction/README new file mode 100644 index 0000000000..4c95188583 --- /dev/null +++ b/demo/traffic_prediction/README @@ -0,0 +1,7 @@ +run by: +cd ./data +sh get_data.sh +cd .. +sh train.sh +sh predict.sh + diff --git a/demo/traffic_prediction/data/get_data.sh b/demo/traffic_prediction/data/get_data.sh new file mode 100755 index 0000000000..52cf6608df --- /dev/null +++ b/demo/traffic_prediction/data/get_data.sh @@ -0,0 +1,34 @@ +#!/bin/bash +# Copyright (c) 2016 Baidu, Inc. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e +set -x + +DIR="$( cd "$(dirname "$0")" ; pwd -P )" +cd $DIR + +#download the dataset +echo "Downloading traffic data..." +wget http://paddlepaddle.bj.bcebos.com/demo/traffic/traffic_data.tar.gz + +#extract package +echo "Unzipping..." +tar -zxvf traffic_data.tar.gz + +echo "data/speeds.csv" >> train.list +echo "data/speeds.csv" >> test.list +echo "data/speeds.csv" >> pred.list + +echo "Done." diff --git a/demo/traffic_prediction/dataprovider.py b/demo/traffic_prediction/dataprovider.py new file mode 100644 index 0000000000..bea0259b03 --- /dev/null +++ b/demo/traffic_prediction/dataprovider.py @@ -0,0 +1,77 @@ +# Copyright (c) 2016 Baidu, Inc. 
All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from paddle.trainer.PyDataProvider2 import * +import sys +import numpy as np +TERM_NUM = 24 +FORECASTING_NUM = 25 +LABEL_VALUE_NUM = 4 +def initHook(settings, file_list, **kwargs): + """ + Init hook is invoked before process data. It will set obj.slots and store data meta. + + :param settings: global object. It will passed to process routine. + :type obj: object + :param file_list: the meta file object, which passed from trainer_config.py,but unused in this function. + :param kwargs: unused other arguments. + """ + del kwargs #unused + + settings.pool_size = sys.maxint + #Use a time seires of the past as feature. + #Dense_vector's expression form is [float,float,...,float] + settings.slots = [dense_vector(TERM_NUM)] + #There are next FORECASTING_NUM fragments you need predict. + #Every predicted condition at time point has four states. + for i in range(FORECASTING_NUM): + settings.slots.append(integer_value(LABEL_VALUE_NUM)) + +@provider(init_hook=initHook, cache=CacheType.CACHE_PASS_IN_MEM, should_shuffle=True) +def process(settings, file_name): + with open(file_name) as f: + #abandon fields name + f.next() + for row_num, line in enumerate(f): + speeds = map(int,line.rstrip('\r\n').split(",")[1:]) + # Get the max index. + end_time = len(speeds) + # Scanning and generating samples + for i in range(TERM_NUM,end_time - FORECASTING_NUM): + # For dense slot + pre_spd = map(float,speeds[i-TERM_NUM:i]) + + # Integer value need predicting, values start from 0, so every one minus 1. + fol_spd = [i-1 for i in speeds[i:i + FORECASTING_NUM]] + + # Predicting label is missing, abandon the sample. 
+ if -1 in fol_spd: + continue + yield [pre_spd] + fol_spd + +def predict_initHook(settings, file_list, **kwargs): + settings.pool_size = sys.maxint + settings.slots = [dense_vector(TERM_NUM)] + +@provider(init_hook=predict_initHook,should_shuffle=False) +def process_predict(settings, file_name): + with open(file_name) as f: + #abandon fields name + f.next() + for row_num, line in enumerate(f): + speeds = map(int,line.rstrip('\r\n').split(",")) + end_time = len(speeds) + pre_spd = map(float,speeds[end_time-TERM_NUM:end_time]) + yield pre_spd + diff --git a/demo/traffic_prediction/gen_result.py b/demo/traffic_prediction/gen_result.py new file mode 100644 index 0000000000..78e5bd7003 --- /dev/null +++ b/demo/traffic_prediction/gen_result.py @@ -0,0 +1,47 @@ +res = [] +with open('./rank-00000') as f: + for line in f: + pred = map(int,line.strip('\r\n;').split(";")) + #raw prediction range from 0 to 3 + res.append([i+1 for i in pred]) + +file_name = open('./data/pred.list').read().strip('\r\n') + +FORECASTING_NUM=24 +header=['id', + '201604200805', + '201604200810', + '201604200815', + '201604200820', + '201604200825', + '201604200830', + '201604200835', + '201604200840', + '201604200845', + '201604200850', + '201604200855', + '201604200900', + '201604200905', + '201604200910', + '201604200915', + '201604200920', + '201604200925', + '201604200930', + '201604200935', + '201604200940', + '201604200945', + '201604200950', + '201604200955', + '201604201000', + ] +################### +## To CSV format ## +################### +with open(file_name) as f: + f.next() + print ','.join(header) + for row_num, line in enumerate(f): + fields = line.rstrip('\r\n').split(',') + linkid = fields[0] + print linkid+','+','.join(map(str,res[row_num])) + diff --git a/demo/traffic_prediction/predict.sh b/demo/traffic_prediction/predict.sh new file mode 100755 index 0000000000..2cc709f109 --- /dev/null +++ b/demo/traffic_prediction/predict.sh @@ -0,0 +1,30 @@ +#!/bin/bash +# Copyright (c) 2016 Baidu, Inc. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +set -e + +cfg=trainer_config.py +# pass choice +model="output/pass-00000" +paddle train \ + --config=$cfg \ + --use_gpu=false \ + --job=test \ + --init_model_path=$model \ + --config_args=is_predict=1 \ + --predict_output_dir=. + +python gen_result.py > result.txt + +rm -rf rank-00000 diff --git a/demo/traffic_prediction/train.sh b/demo/traffic_prediction/train.sh new file mode 100755 index 0000000000..bd1a1036b8 --- /dev/null +++ b/demo/traffic_prediction/train.sh @@ -0,0 +1,30 @@ +#!/bin/bash +# Copyright (c) 2016 Baidu, Inc. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +set -e + +cfg=trainer_config.py +#TRAINER_BIN="./paddle_trainer" +paddle train \ + --config=$cfg \ + --save_dir=./output \ + --trainer_count=4 \ + --log_period=1000 \ + --dot_period=10 \ + --num_passes=10 \ + --use_gpu=false \ + --show_parameter_stats_period=3000 \ + --test_wait=1 + #--test_all_data_in_one_period=1 \ + 2>&1 | tee 'train.log' diff --git a/demo/traffic_prediction/trainer_config.py b/demo/traffic_prediction/trainer_config.py new file mode 100755 index 0000000000..835b1d688c --- /dev/null +++ b/demo/traffic_prediction/trainer_config.py @@ -0,0 +1,43 @@ +#!/usr/bin/env/python +#-*python-*- +from paddle.trainer_config_helpers import * + + +################################### DATA Configuration ############################################# +is_predict = get_config_arg('is_predict', bool, False) +trn = './data/train.list' if not is_predict else None +tst = './data/test.list' if not is_predict else './data/pred.list' +process = 'process' if not is_predict else 'process_predict' +define_py_data_sources2(train_list=trn, + test_list=tst, + module="dataprovider", + obj=process) +################################### Parameter Configuaration ####################################### +TERM_NUM=24 +FORECASTING_NUM= 25 +emb_size=16 +batch_size=128 if not is_predict else 1 +settings( + batch_size = batch_size, + learning_rate = 1e-3, + learning_method = RMSPropOptimizer() +) +################################### Algorithm Configuration ######################################## + +output_label = [] + +link_encode = data_layer(name='link_encode', size=TERM_NUM) +for i in xrange(FORECASTING_NUM): + # Each task share same weight. + link_param = ParamAttr(name='_link_vec.w', initial_max=1.0, initial_min=-1.0) + link_vec = fc_layer(input=link_encode,size=emb_size, param_attr=link_param) + score = fc_layer(input=link_vec, size=4, act=SoftmaxActivation()) + if is_predict: + maxid = maxid_layer(score) + output_label.append(maxid) + else: + # Multi-task training. + label = data_layer(name='label_%dmin'%((i+1)*5), size=4) + cls = classification_cost(input=score,name="cost_%dmin"%((i+1)*5), label=label) + output_label.append(cls) +outputs(output_label) From 82bee14dec310426d553c634bcfffc6293ad2f05 Mon Sep 17 00:00:00 2001 From: chengxingyi Date: Fri, 6 Jan 2017 16:46:04 +0800 Subject: [PATCH 39/51] A traffic prediction demo for ASC17 --- demo/traffic_prediction/data/get_data.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/demo/traffic_prediction/data/get_data.sh b/demo/traffic_prediction/data/get_data.sh index 52cf6608df..716faac76f 100755 --- a/demo/traffic_prediction/data/get_data.sh +++ b/demo/traffic_prediction/data/get_data.sh @@ -27,8 +27,8 @@ wget http://paddlepaddle.bj.bcebos.com/demo/traffic/traffic_data.tar.gz echo "Unzipping..." tar -zxvf traffic_data.tar.gz -echo "data/speeds.csv" >> train.list -echo "data/speeds.csv" >> test.list -echo "data/speeds.csv" >> pred.list +echo "data/speeds.csv" > train.list +echo "data/speeds.csv" > test.list +echo "data/speeds.csv" > pred.list echo "Done." 
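The one-character change above, ">>" to ">", is what makes get_data.sh safe to re-run: in append mode the three .list files grow on every invocation, so a second run would register speeds.csv twice and the data provider would then iterate the same samples twice per pass. A small shell sketch of the difference, using the file names from the demo:

# Append mode: each rerun adds a duplicate entry.
echo "data/speeds.csv" >> train.list   # first run  -> 1 line
echo "data/speeds.csv" >> train.list   # second run -> 2 lines, dataset read twice

# Truncate mode (the fix): reruns always leave exactly one entry.
echo "data/speeds.csv" > train.list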
From a74f53651e133e79fdae5b290cc1a14c851c576f Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Fri, 6 Jan 2017 17:45:51 +0800 Subject: [PATCH 40/51] Format code --- demo/traffic_prediction/dataprovider.py | 31 +++++++----- demo/traffic_prediction/gen_result.py | 62 +++++++++++------------ demo/traffic_prediction/trainer_config.py | 32 ++++++------ 3 files changed, 64 insertions(+), 61 deletions(-) diff --git a/demo/traffic_prediction/dataprovider.py b/demo/traffic_prediction/dataprovider.py index bea0259b03..b915067260 100644 --- a/demo/traffic_prediction/dataprovider.py +++ b/demo/traffic_prediction/dataprovider.py @@ -18,6 +18,8 @@ import numpy as np TERM_NUM = 24 FORECASTING_NUM = 25 LABEL_VALUE_NUM = 4 + + def initHook(settings, file_list, **kwargs): """ Init hook is invoked before process data. It will set obj.slots and store data meta. @@ -27,8 +29,8 @@ def initHook(settings, file_list, **kwargs): :param file_list: the meta file object, which passed from trainer_config.py,but unused in this function. :param kwargs: unused other arguments. """ - del kwargs #unused - + del kwargs #unused + settings.pool_size = sys.maxint #Use a time seires of the past as feature. #Dense_vector's expression form is [float,float,...,float] @@ -38,40 +40,43 @@ def initHook(settings, file_list, **kwargs): for i in range(FORECASTING_NUM): settings.slots.append(integer_value(LABEL_VALUE_NUM)) -@provider(init_hook=initHook, cache=CacheType.CACHE_PASS_IN_MEM, should_shuffle=True) + +@provider( + init_hook=initHook, cache=CacheType.CACHE_PASS_IN_MEM, should_shuffle=True) def process(settings, file_name): with open(file_name) as f: #abandon fields name f.next() - for row_num, line in enumerate(f): - speeds = map(int,line.rstrip('\r\n').split(",")[1:]) + for row_num, line in enumerate(f): + speeds = map(int, line.rstrip('\r\n').split(",")[1:]) # Get the max index. end_time = len(speeds) # Scanning and generating samples - for i in range(TERM_NUM,end_time - FORECASTING_NUM): + for i in range(TERM_NUM, end_time - FORECASTING_NUM): # For dense slot - pre_spd = map(float,speeds[i-TERM_NUM:i]) + pre_spd = map(float, speeds[i - TERM_NUM:i]) # Integer value need predicting, values start from 0, so every one minus 1. - fol_spd = [i-1 for i in speeds[i:i + FORECASTING_NUM]] - + fol_spd = [i - 1 for i in speeds[i:i + FORECASTING_NUM]] + # Predicting label is missing, abandon the sample. 
if -1 in fol_spd: continue yield [pre_spd] + fol_spd + def predict_initHook(settings, file_list, **kwargs): settings.pool_size = sys.maxint settings.slots = [dense_vector(TERM_NUM)] -@provider(init_hook=predict_initHook,should_shuffle=False) + +@provider(init_hook=predict_initHook, should_shuffle=False) def process_predict(settings, file_name): with open(file_name) as f: #abandon fields name f.next() for row_num, line in enumerate(f): - speeds = map(int,line.rstrip('\r\n').split(",")) + speeds = map(int, line.rstrip('\r\n').split(",")) end_time = len(speeds) - pre_spd = map(float,speeds[end_time-TERM_NUM:end_time]) + pre_spd = map(float, speeds[end_time - TERM_NUM:end_time]) yield pre_spd - diff --git a/demo/traffic_prediction/gen_result.py b/demo/traffic_prediction/gen_result.py index 78e5bd7003..cb8f6e6832 100644 --- a/demo/traffic_prediction/gen_result.py +++ b/demo/traffic_prediction/gen_result.py @@ -1,39 +1,40 @@ res = [] with open('./rank-00000') as f: for line in f: - pred = map(int,line.strip('\r\n;').split(";")) + pred = map(int, line.strip('\r\n;').split(";")) #raw prediction range from 0 to 3 - res.append([i+1 for i in pred]) + res.append([i + 1 for i in pred]) file_name = open('./data/pred.list').read().strip('\r\n') -FORECASTING_NUM=24 -header=['id', - '201604200805', - '201604200810', - '201604200815', - '201604200820', - '201604200825', - '201604200830', - '201604200835', - '201604200840', - '201604200845', - '201604200850', - '201604200855', - '201604200900', - '201604200905', - '201604200910', - '201604200915', - '201604200920', - '201604200925', - '201604200930', - '201604200935', - '201604200940', - '201604200945', - '201604200950', - '201604200955', - '201604201000', - ] +FORECASTING_NUM = 24 +header = [ + 'id', + '201604200805', + '201604200810', + '201604200815', + '201604200820', + '201604200825', + '201604200830', + '201604200835', + '201604200840', + '201604200845', + '201604200850', + '201604200855', + '201604200900', + '201604200905', + '201604200910', + '201604200915', + '201604200920', + '201604200925', + '201604200930', + '201604200935', + '201604200940', + '201604200945', + '201604200950', + '201604200955', + '201604201000', +] ################### ## To CSV format ## ################### @@ -43,5 +44,4 @@ with open(file_name) as f: for row_num, line in enumerate(f): fields = line.rstrip('\r\n').split(',') linkid = fields[0] - print linkid+','+','.join(map(str,res[row_num])) - + print linkid + ',' + ','.join(map(str, res[row_num])) diff --git a/demo/traffic_prediction/trainer_config.py b/demo/traffic_prediction/trainer_config.py index 835b1d688c..c8755f7f3c 100755 --- a/demo/traffic_prediction/trainer_config.py +++ b/demo/traffic_prediction/trainer_config.py @@ -2,26 +2,22 @@ #-*python-*- from paddle.trainer_config_helpers import * - ################################### DATA Configuration ############################################# is_predict = get_config_arg('is_predict', bool, False) trn = './data/train.list' if not is_predict else None tst = './data/test.list' if not is_predict else './data/pred.list' process = 'process' if not is_predict else 'process_predict' -define_py_data_sources2(train_list=trn, - test_list=tst, - module="dataprovider", - obj=process) +define_py_data_sources2( + train_list=trn, test_list=tst, module="dataprovider", obj=process) ################################### Parameter Configuaration ####################################### -TERM_NUM=24 -FORECASTING_NUM= 25 -emb_size=16 -batch_size=128 if not is_predict else 1 +TERM_NUM = 24 
+FORECASTING_NUM = 25 +emb_size = 16 +batch_size = 128 if not is_predict else 1 settings( - batch_size = batch_size, - learning_rate = 1e-3, - learning_method = RMSPropOptimizer() -) + batch_size=batch_size, + learning_rate=1e-3, + learning_method=RMSPropOptimizer()) ################################### Algorithm Configuration ######################################## output_label = [] @@ -29,15 +25,17 @@ output_label = [] link_encode = data_layer(name='link_encode', size=TERM_NUM) for i in xrange(FORECASTING_NUM): # Each task share same weight. - link_param = ParamAttr(name='_link_vec.w', initial_max=1.0, initial_min=-1.0) - link_vec = fc_layer(input=link_encode,size=emb_size, param_attr=link_param) + link_param = ParamAttr( + name='_link_vec.w', initial_max=1.0, initial_min=-1.0) + link_vec = fc_layer(input=link_encode, size=emb_size, param_attr=link_param) score = fc_layer(input=link_vec, size=4, act=SoftmaxActivation()) if is_predict: maxid = maxid_layer(score) output_label.append(maxid) else: # Multi-task training. - label = data_layer(name='label_%dmin'%((i+1)*5), size=4) - cls = classification_cost(input=score,name="cost_%dmin"%((i+1)*5), label=label) + label = data_layer(name='label_%dmin' % ((i + 1) * 5), size=4) + cls = classification_cost( + input=score, name="cost_%dmin" % ((i + 1) * 5), label=label) output_label.append(cls) outputs(output_label) From 3403c0068aeecd9a136c15081e5cb45cf50b64cd Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Fri, 6 Jan 2017 17:50:53 +0800 Subject: [PATCH 41/51] tiny fixes --- demo/traffic_prediction/dataprovider.py | 8 ++++---- demo/traffic_prediction/gen_result.py | 14 ++++++++++++++ demo/traffic_prediction/trainer_config.py | 15 +++++++++++++-- 3 files changed, 31 insertions(+), 6 deletions(-) diff --git a/demo/traffic_prediction/dataprovider.py b/demo/traffic_prediction/dataprovider.py index b915067260..19719350f2 100644 --- a/demo/traffic_prediction/dataprovider.py +++ b/demo/traffic_prediction/dataprovider.py @@ -34,11 +34,11 @@ def initHook(settings, file_list, **kwargs): settings.pool_size = sys.maxint #Use a time seires of the past as feature. #Dense_vector's expression form is [float,float,...,float] - settings.slots = [dense_vector(TERM_NUM)] + settings.input_types = [dense_vector(TERM_NUM)] #There are next FORECASTING_NUM fragments you need predict. #Every predicted condition at time point has four states. for i in range(FORECASTING_NUM): - settings.slots.append(integer_value(LABEL_VALUE_NUM)) + settings.input_types.append(integer_value(LABEL_VALUE_NUM)) @provider( @@ -57,7 +57,7 @@ def process(settings, file_name): pre_spd = map(float, speeds[i - TERM_NUM:i]) # Integer value need predicting, values start from 0, so every one minus 1. - fol_spd = [i - 1 for i in speeds[i:i + FORECASTING_NUM]] + fol_spd = [j - 1 for j in speeds[i:i + FORECASTING_NUM]] # Predicting label is missing, abandon the sample. if -1 in fol_spd: @@ -67,7 +67,7 @@ def process(settings, file_name): def predict_initHook(settings, file_list, **kwargs): settings.pool_size = sys.maxint - settings.slots = [dense_vector(TERM_NUM)] + settings.input_types = [dense_vector(TERM_NUM)] @provider(init_hook=predict_initHook, should_shuffle=False) diff --git a/demo/traffic_prediction/gen_result.py b/demo/traffic_prediction/gen_result.py index cb8f6e6832..d6c1b03370 100644 --- a/demo/traffic_prediction/gen_result.py +++ b/demo/traffic_prediction/gen_result.py @@ -1,3 +1,17 @@ +# Copyright (c) 2016 Baidu, Inc. 
All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + res = [] with open('./rank-00000') as f: for line in f: diff --git a/demo/traffic_prediction/trainer_config.py b/demo/traffic_prediction/trainer_config.py index c8755f7f3c..bb6a4ac987 100755 --- a/demo/traffic_prediction/trainer_config.py +++ b/demo/traffic_prediction/trainer_config.py @@ -1,5 +1,16 @@ -#!/usr/bin/env/python -#-*python-*- +# Copyright (c) 2016 Baidu, Inc. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from paddle.trainer_config_helpers import * ################################### DATA Configuration ############################################# From f45b45e2441854f1d026b0e1b030ff6b057cb70b Mon Sep 17 00:00:00 2001 From: chengxingyi Date: Fri, 6 Jan 2017 19:46:49 +0800 Subject: [PATCH 42/51] A traffic prediction demo for ASC17 --- demo/traffic_prediction/data/get_data.sh | 4 ++-- demo/traffic_prediction/dataprovider.py | 4 ++-- demo/traffic_prediction/gen_result.py | 2 +- demo/traffic_prediction/predict.sh | 2 +- demo/traffic_prediction/train.sh | 5 +---- demo/traffic_prediction/trainer_config.py | 4 ++-- 6 files changed, 9 insertions(+), 12 deletions(-) diff --git a/demo/traffic_prediction/data/get_data.sh b/demo/traffic_prediction/data/get_data.sh index 716faac76f..f2fa548d47 100755 --- a/demo/traffic_prediction/data/get_data.sh +++ b/demo/traffic_prediction/data/get_data.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright (c) 2016 Baidu, Inc. All Rights Reserved +# Copyright (c) 2016 PaddlePaddle Authors, Inc. All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -21,7 +21,7 @@ cd $DIR #download the dataset echo "Downloading traffic data..." -wget http://paddlepaddle.bj.bcebos.com/demo/traffic/traffic_data.tar.gz +wget http://paddlepaddle.cdn.bcebos.com/demo/traffic/traffic_data.tar.gz #extract package echo "Unzipping..." diff --git a/demo/traffic_prediction/dataprovider.py b/demo/traffic_prediction/dataprovider.py index 19719350f2..c7883b6950 100644 --- a/demo/traffic_prediction/dataprovider.py +++ b/demo/traffic_prediction/dataprovider.py @@ -1,4 +1,4 @@ -# Copyright (c) 2016 Baidu, Inc. All Rights Reserved +# Copyright (c) 2016 PaddlePaddle Authors, Inc. All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -16,7 +16,7 @@ from paddle.trainer.PyDataProvider2 import * import sys import numpy as np TERM_NUM = 24 -FORECASTING_NUM = 25 +FORECASTING_NUM = 24 LABEL_VALUE_NUM = 4 diff --git a/demo/traffic_prediction/gen_result.py b/demo/traffic_prediction/gen_result.py index d6c1b03370..3da70b3031 100644 --- a/demo/traffic_prediction/gen_result.py +++ b/demo/traffic_prediction/gen_result.py @@ -1,4 +1,4 @@ -# Copyright (c) 2016 Baidu, Inc. All Rights Reserved +# Copyright (c) 2016 PaddlePaddle Authors, Inc. All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/demo/traffic_prediction/predict.sh b/demo/traffic_prediction/predict.sh index 2cc709f109..cec35dce11 100755 --- a/demo/traffic_prediction/predict.sh +++ b/demo/traffic_prediction/predict.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright (c) 2016 Baidu, Inc. All Rights Reserved +# Copyright (c) 2016 PaddlePaddle Authors, Inc. All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/demo/traffic_prediction/train.sh b/demo/traffic_prediction/train.sh index bd1a1036b8..48dfc5604f 100755 --- a/demo/traffic_prediction/train.sh +++ b/demo/traffic_prediction/train.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright (c) 2016 Baidu, Inc. All Rights Reserved +# Copyright (c) 2016 PaddlePaddle Authors, Inc. All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,7 +15,6 @@ set -e cfg=trainer_config.py -#TRAINER_BIN="./paddle_trainer" paddle train \ --config=$cfg \ --save_dir=./output \ @@ -25,6 +24,4 @@ paddle train \ --num_passes=10 \ --use_gpu=false \ --show_parameter_stats_period=3000 \ - --test_wait=1 - #--test_all_data_in_one_period=1 \ 2>&1 | tee 'train.log' diff --git a/demo/traffic_prediction/trainer_config.py b/demo/traffic_prediction/trainer_config.py index bb6a4ac987..52d678624a 100755 --- a/demo/traffic_prediction/trainer_config.py +++ b/demo/traffic_prediction/trainer_config.py @@ -1,4 +1,4 @@ -# Copyright (c) 2016 Baidu, Inc. All Rights Reserved +# Copyright (c) 2016 PaddlePaddle Authors, Inc. All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -22,7 +22,7 @@ define_py_data_sources2( train_list=trn, test_list=tst, module="dataprovider", obj=process) ################################### Parameter Configuaration ####################################### TERM_NUM = 24 -FORECASTING_NUM = 25 +FORECASTING_NUM = 24 emb_size = 16 batch_size = 128 if not is_predict else 1 settings( From c24e94c8a4c29f35019fad353760926c291ab3d6 Mon Sep 17 00:00:00 2001 From: liaogang Date: Sun, 8 Jan 2017 09:25:38 +0800 Subject: [PATCH 43/51] Check python if system already equipped one --- .travis.yml | 6 +- cmake/configure.cmake | 4 + cmake/external/python.cmake | 386 ++++++++++---------- paddle/api/CMakeLists.txt | 26 +- paddle/scripts/travis/before_install.osx.sh | 4 +- paddle/scripts/travis/build_and_test.sh | 16 +- paddle/utils/PythonUtil.cpp.in | 6 +- 7 files changed, 236 insertions(+), 212 deletions(-) diff --git a/.travis.yml b/.travis.yml index bc91855a85..eecf5e81f0 100644 --- a/.travis.yml +++ b/.travis.yml @@ -21,13 +21,14 @@ addons: packages: - gcc-4.8 - g++-4.8 - - wget - git - build-essential - libatlas-base-dev - python - python-pip - python2.7-dev + - python-numpy + - python-wheel - curl - swig - graphviz @@ -48,8 +49,7 @@ before_install: fi - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then paddle/scripts/travis/before_install.osx.sh; fi - if [[ "$JOB" == "PRE_COMMIT" ]]; then sudo ln -s /usr/bin/clang-format-3.8 /usr/bin/clang-format; fi - - pip install --upgrade pip - - pip install wheel protobuf sphinx recommonmark sphinx_rtd_theme virtualenv pre-commit requests==2.9.2 LinkChecker + - pip install numpy wheel protobuf sphinx recommonmark sphinx_rtd_theme virtualenv pre-commit requests==2.9.2 LinkChecker script: - paddle/scripts/travis/main.sh notifications: diff --git a/cmake/configure.cmake b/cmake/configure.cmake index ae0ec01d94..0bb016201d 100644 --- a/cmake/configure.cmake +++ b/cmake/configure.cmake @@ -12,6 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. +if(NOT WITH_PYTHON) + add_definitions(-DPADDLE_NO_PYTHON) +endif(NOT WITH_PYTHON) + if(WITH_DSO) add_definitions(-DPADDLE_USE_DSO) endif(WITH_DSO) diff --git a/cmake/external/python.cmake b/cmake/external/python.cmake index e4c570479f..357ee901ce 100644 --- a/cmake/external/python.cmake +++ b/cmake/external/python.cmake @@ -13,192 +13,210 @@ # limitations under the License. 
 INCLUDE(ExternalProject)
+INCLUDE(python_module)
+
+FIND_PACKAGE(PythonInterp 2.7)
+FIND_PACKAGE(PythonLibs 2.7)
+
+SET(py_env PATH=${PATH} PYTHONHOME=${PYTHONHOME} PYTHONPATH=${PYTHONPATH})
+
+IF(PYTHONLIBS_FOUND AND PYTHONINTERP_FOUND)
+  find_python_module(pip REQUIRED)
+  find_python_module(numpy REQUIRED)
+  find_python_module(wheel REQUIRED)
+  find_python_module(google.protobuf REQUIRED)
+  FIND_PACKAGE(NumPy REQUIRED)
+ELSE(PYTHONLIBS_FOUND AND PYTHONINTERP_FOUND)
+  ##################################### PYTHON ########################################
+  SET(PYTHON_SOURCES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/python)
+  SET(PYTHON_INSTALL_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/install/python)
+  SET(_python_DIR ${PYTHON_INSTALL_DIR})
+
+  IF(UNIX)
+    SET(PYTHON_FOUND ON)
+    SET(PYTHON_INCLUDE_DIR "${PYTHON_INSTALL_DIR}/include/python2.7" CACHE PATH "Python include dir" FORCE)
+    SET(PYTHON_LIBRARIES "${PYTHON_INSTALL_DIR}/lib/libpython2.7.a" CACHE FILEPATH "Python library" FORCE)
+    SET(PYTHON_EXECUTABLE ${PYTHON_INSTALL_DIR}/bin/python CACHE FILEPATH "Python executable" FORCE)
+    SET(PY_SITE_PACKAGES_PATH "${PYTHON_INSTALL_DIR}/lib/python2.7/site-packages" CACHE PATH "Python site-packages path" FORCE)
+  ELSEIF(WIN32)
+    SET(PYTHON_FOUND ON)
+    SET(PYTHON_INCLUDE_DIR "${PYTHON_INSTALL_DIR}/include" CACHE PATH "Python include dir" FORCE)
+    SET(PYTHON_LIBRARIES "${PYTHON_INSTALL_DIR}/libs/python27.lib" CACHE FILEPATH "Python library" FORCE)
+    SET(PYTHON_EXECUTABLE "${PYTHON_INSTALL_DIR}/bin/python.exe" CACHE FILEPATH "Python executable" FORCE)
+    SET(PY_SITE_PACKAGES_PATH "${PYTHON_INSTALL_DIR}/Lib/site-packages" CACHE PATH "Python site-packages path" FORCE)
+  ELSE()
+    MESSAGE(FATAL_ERROR "Unknown system!")
+  ENDIF()
-
-##################################### PYTHON ########################################
-SET(PYTHON_SOURCES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/python)
-SET(PYTHON_INSTALL_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/install/python)
-SET(_python_DIR ${PYTHON_INSTALL_DIR})
-
-IF(UNIX)
-    SET(PYTHON_FOUND ON)
-    SET(PYTHON_INCLUDE_DIR "${PYTHON_INSTALL_DIR}/include/python2.7" CACHE PATH "Python include dir" FORCE)
-    SET(PYTHON_LIBRARIES "${PYTHON_INSTALL_DIR}/lib/libpython2.7.a" CACHE FILEPATH "Python library" FORCE)
-    SET(PYTHON_EXECUTABLE ${PYTHON_INSTALL_DIR}/bin/python CACHE FILEPATH "Python executable" FORCE)
-    SET(PY_SITE_PACKAGES_PATH "${PYTHON_INSTALL_DIR}/lib/python2.7/site-packages" CACHE PATH "Python site-packages path" FORCE)
-ELSEIF(WIN32)
-    SET(PYTHON_FOUND ON)
-    SET(PYTHON_INCLUDE_DIR "${PYTHON_INSTALL_DIR}/include" CACHE PATH "Python include dir" FORCE)
-    SET(PYTHON_LIBRARIES "${PYTHON_INSTALL_DIR}/libs/python27.lib" CACHE FILEPATH "Python library" FORCE)
-    SET(PYTHON_EXECUTABLE "${PYTHON_INSTALL_DIR}/bin/python.exe" CACHE FILEPATH "Python executable" FORCE)
-    SET(PY_SITE_PACKAGES_PATH "${PYTHON_INSTALL_DIR}/Lib/site-packages" CACHE PATH "Python site-packages path" FORCE)
-ELSE()
-    MESSAGE(FATAL_ERROR "Unknown system!")
-ENDIF()
-
-SET(py_env
-    PATH=${PYTHON_INSTALL_DIR}/bin/:$ENV{PATH}
-    PYTHONHOME=${PYTHON_INSTALL_DIR}
-    PYTHONPATH=${PYTHON_INSTALL_DIR}/lib:${PYTHON_INSTALL_DIR}/lib/python2.7:${PY_SITE_PACKAGES_PATH})
-
-INCLUDE_DIRECTORIES(${PYTHON_INCLUDE_DIR})
-
-IF(APPLE)
-    LIST(APPEND EXTERNAL_PROJECT_OPTIONAL_CMAKE_ARGS
-        -DCMAKE_BUILD_WITH_INSTALL_RPATH:BOOL=ON
-    )
-ENDIF()
-
-SET(EXTERNAL_PROJECT_OPTIONAL_CMAKE_CACHE_ARGS)
-
-# Force Python build to "Release".
-IF(CMAKE_CONFIGURATION_TYPES)
-    SET(SAVED_CMAKE_CFG_INTDIR ${CMAKE_CFG_INTDIR})
-    SET(CMAKE_CFG_INTDIR "Release")
-ELSE()
-    LIST(APPEND EXTERNAL_PROJECT_OPTIONAL_CMAKE_CACHE_ARGS
-        -DCMAKE_BUILD_TYPE:STRING=Release
-    )
-ENDIF()
-
-ExternalProject_Add(python
-    ${EXTERNAL_PROJECT_LOG_ARGS}
-    GIT_REPOSITORY "https://github.com/python-cmake-buildsystem/python-cmake-buildsystem.git"
-    PREFIX ${PYTHON_SOURCES_DIR}
-    UPDATE_COMMAND ""
-    CMAKE_ARGS -DPYTHON_VERSION=2.7.12
-    CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
-    CMAKE_ARGS -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
-    CMAKE_CACHE_ARGS
-        -DCMAKE_INSTALL_PREFIX:PATH=${PYTHON_INSTALL_DIR}
-        -DBUILD_LIBPYTHON_SHARED:BOOL=OFF
-        -DUSE_SYSTEM_LIBRARIES:BOOL=OFF
-        -DZLIB_ROOT:FILEPATH=${ZLIB_ROOT}
-        -DZLIB_INCLUDE_DIR:PATH=${ZLIB_INCLUDE_DIR}
-        -DZLIB_LIBRARY:FILEPATH=${ZLIB_LIBRARIES}
-        -DDOWNLOAD_SOURCES:BOOL=ON
-        -DINSTALL_WINDOWS_TRADITIONAL:BOOL=OFF
-        ${EXTERNAL_PROJECT_OPTIONAL_CMAKE_CACHE_ARGS}
-        ${EXTERNAL_PROJECT_OPTIONAL_CMAKE_ARGS}
-    DEPENDS zlib
-)
-####################################################################################
-
-##################################### SETUPTOOLS ###################################
-SET(SETUPTOOLS_SOURCES_DIR ${PYTHON_SOURCES_DIR}/setuptools)
-ExternalProject_Add(setuptools
-    ${EXTERNAL_PROJECT_LOG_ARGS}
-    PREFIX ${SETUPTOOLS_SOURCES_DIR}
-    URL "https://pypi.python.org/packages/source/s/setuptools/setuptools-18.3.2.tar.gz"
-    BUILD_IN_SOURCE 1
-    PATCH_COMMAND ""
-    UPDATE_COMMAND ""
-    CONFIGURE_COMMAND ""
-    INSTALL_COMMAND ""
-    BUILD_COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py install
-    DEPENDS python zlib
-)
-#####################################################################################
-
-##################################### SIX ###########################################
-SET(SIX_SOURCES_DIR ${PYTHON_SOURCES_DIR}/six)
-ExternalProject_Add(six
-    ${EXTERNAL_PROJECT_LOG_ARGS}
-    PREFIX ${SIX_SOURCES_DIR}
-    URL https://pypi.python.org/packages/source/s/six/six-1.10.0.tar.gz
-    BUILD_IN_SOURCE 1
-    PATCH_COMMAND ""
-    UPDATE_COMMAND ""
-    CONFIGURE_COMMAND ""
-    INSTALL_COMMAND ""
-    BUILD_COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py install
-    DEPENDS python setuptools
-)
-#####################################################################################
-
-##################################### CYTHON ########################################
-SET(CYTHON_SOURCES_DIR ${PYTHON_SOURCES_DIR}/cython)
-ExternalProject_Add(cython
-    ${EXTERNAL_PROJECT_LOG_ARGS}
-    PREFIX ${CYTHON_SOURCES_DIR}
-    URL https://github.com/cython/cython/archive/0.25.2.tar.gz
-    GIT_TAG 0.25.2
-    BUILD_IN_SOURCE 1
-    CONFIGURE_COMMAND ""
-    PATCH_COMMAND ""
-    UPDATE_COMMAND ""
-    INSTALL_COMMAND ""
-    BUILD_COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py install
-    DEPENDS python
-)
-####################################################################################
-
-##################################### NUMPY ########################################
-SET(NUMPY_SOURCES_DIR ${PYTHON_SOURCES_DIR}/numpy)
-SET(NUMPY_TAG_VERSION "v1.11.3")
-SET(NUMPY_VERSION "1.11.3")
-
-SET(EGG_NAME "")
-SET(PYTHON_NUMPY_INCLUDE_DIR "")
-IF(WIN32)
-    SET(EGG_NAME "numpy-${NUMPY_VERSION}-py2.7-${HOST_SYSTEM}.egg")
-ELSE(WIN32)
     IF(APPLE)
-        SET(EGG_NAME "numpy-${NUMPY_VERSION}-py2.7-${HOST_SYSTEM}-${MACOS_VERSION}")
-    ELSE(APPLE)
-        SET(EGG_NAME "numpy-${NUMPY_VERSION}-py2.7-linux")
-        SET(EGG_NAME "numpy-${NUMPY_VERSION}-py2.7-linux")
-    ENDIF(APPLE)
-
-    FOREACH(suffix x86_64 intel fat64 fat32 universal)
-        LIST(APPEND PYTHON_NUMPY_INCLUDE_DIR ${PY_SITE_PACKAGES_PATH}/${EGG_NAME}-${suffix}.egg/numpy/core/include)
-    ENDFOREACH()
-ENDIF(WIN32)
+  IF(APPLE)
+    LIST(APPEND EXTERNAL_PROJECT_OPTIONAL_CMAKE_ARGS
+      -DCMAKE_BUILD_WITH_INSTALL_RPATH:BOOL=ON
+    )
+  ENDIF()
+
+  SET(EXTERNAL_PROJECT_OPTIONAL_CMAKE_CACHE_ARGS)
+
+  # Force Python build to "Release".
+  IF(CMAKE_CONFIGURATION_TYPES)
+    SET(SAVED_CMAKE_CFG_INTDIR ${CMAKE_CFG_INTDIR})
+    SET(CMAKE_CFG_INTDIR "Release")
+  ELSE()
+    LIST(APPEND EXTERNAL_PROJECT_OPTIONAL_CMAKE_CACHE_ARGS
+      -DCMAKE_BUILD_TYPE:STRING=Release
+    )
+  ENDIF()
+
+  ExternalProject_Add(python
+    ${EXTERNAL_PROJECT_LOG_ARGS}
+    GIT_REPOSITORY "https://github.com/python-cmake-buildsystem/python-cmake-buildsystem.git"
+    PREFIX ${PYTHON_SOURCES_DIR}
+    UPDATE_COMMAND ""
+    CMAKE_ARGS -DPYTHON_VERSION=2.7.12
+    CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
+    CMAKE_ARGS -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
+    CMAKE_CACHE_ARGS
+      -DCMAKE_INSTALL_PREFIX:PATH=${PYTHON_INSTALL_DIR}
+      -DBUILD_LIBPYTHON_SHARED:BOOL=OFF
+      -DUSE_SYSTEM_LIBRARIES:BOOL=OFF
+      -DZLIB_ROOT:FILEPATH=${ZLIB_ROOT}
+      -DZLIB_INCLUDE_DIR:PATH=${ZLIB_INCLUDE_DIR}
+      -DZLIB_LIBRARY:FILEPATH=${ZLIB_LIBRARIES}
+      -DDOWNLOAD_SOURCES:BOOL=ON
+      -DINSTALL_WINDOWS_TRADITIONAL:BOOL=OFF
+      ${EXTERNAL_PROJECT_OPTIONAL_CMAKE_CACHE_ARGS}
+      ${EXTERNAL_PROJECT_OPTIONAL_CMAKE_ARGS}
+    DEPENDS zlib
+  )
+
+  SET(py_env
+    PATH=${PYTHON_INSTALL_DIR}/bin
+    PYTHONHOME=${PYTHON_INSTALL_DIR}
+    PYTHONPATH=${PYTHON_INSTALL_DIR}/lib:${PYTHON_INSTALL_DIR}/lib/python2.7:${PY_SITE_PACKAGES_PATH})
+  ####################################################################################
+
+  ##################################### SETUPTOOLS ###################################
+  SET(SETUPTOOLS_SOURCES_DIR ${PYTHON_SOURCES_DIR}/setuptools)
+  ExternalProject_Add(setuptools
+    ${EXTERNAL_PROJECT_LOG_ARGS}
+    PREFIX ${SETUPTOOLS_SOURCES_DIR}
+    URL "https://pypi.python.org/packages/source/s/setuptools/setuptools-18.3.2.tar.gz"
+    BUILD_IN_SOURCE 1
+    PATCH_COMMAND ""
+    UPDATE_COMMAND ""
+    CONFIGURE_COMMAND ""
+    INSTALL_COMMAND ""
+    BUILD_COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py install
+    DEPENDS python zlib
+  )
+  #####################################################################################
+
+  ##################################### SIX ###########################################
+  SET(SIX_SOURCES_DIR ${PYTHON_SOURCES_DIR}/six)
+  ExternalProject_Add(six
+    ${EXTERNAL_PROJECT_LOG_ARGS}
+    PREFIX ${SIX_SOURCES_DIR}
+    URL https://pypi.python.org/packages/source/s/six/six-1.10.0.tar.gz
+    BUILD_IN_SOURCE 1
+    PATCH_COMMAND ""
+    UPDATE_COMMAND ""
+    CONFIGURE_COMMAND ""
+    INSTALL_COMMAND ""
+    BUILD_COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py install
+    DEPENDS python setuptools
+  )
+  #####################################################################################
+
+  ##################################### CYTHON ########################################
+  SET(CYTHON_SOURCES_DIR ${PYTHON_SOURCES_DIR}/cython)
+  ExternalProject_Add(cython
+    ${EXTERNAL_PROJECT_LOG_ARGS}
+    PREFIX ${CYTHON_SOURCES_DIR}
+    URL https://github.com/cython/cython/archive/0.25.2.tar.gz
+    GIT_TAG 0.25.2
+    BUILD_IN_SOURCE 1
+    CONFIGURE_COMMAND ""
+    PATCH_COMMAND ""
+    UPDATE_COMMAND ""
+    INSTALL_COMMAND ""
+    BUILD_COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py install
+    DEPENDS python
+  )
+  ####################################################################################
+
+  ##################################### NUMPY ########################################
+  SET(NUMPY_SOURCES_DIR ${PYTHON_SOURCES_DIR}/numpy)
+  SET(NUMPY_TAG_VERSION "v1.11.3")
+  SET(NUMPY_VERSION "1.11.3")
+
+  SET(EGG_NAME "")
+  SET(PYTHON_NUMPY_INCLUDE_DIR "")
+  IF(WIN32)
+    SET(EGG_NAME "numpy-${NUMPY_VERSION}-py2.7-${HOST_SYSTEM}.egg")
+  ELSE(WIN32)
+    IF(APPLE)
+      SET(EGG_NAME "numpy-${NUMPY_VERSION}-py2.7-${HOST_SYSTEM}-${MACOS_VERSION}")
+    ELSE(APPLE)
+      SET(EGG_NAME "numpy-${NUMPY_VERSION}-py2.7-linux")
+      SET(EGG_NAME "numpy-${NUMPY_VERSION}-py2.7-linux")
+    ENDIF(APPLE)
+
+    FOREACH(suffix x86_64 intel fat64 fat32 universal)
+      LIST(APPEND PYTHON_NUMPY_INCLUDE_DIR ${PY_SITE_PACKAGES_PATH}/${EGG_NAME}-${suffix}.egg/numpy/core/include)
+    ENDFOREACH()
+  ENDIF(WIN32)
+
+  ExternalProject_Add(numpy
+    ${EXTERNAL_PROJECT_LOG_ARGS}
+    GIT_REPOSITORY https://github.com/numpy/numpy.git
+    GIT_TAG ${NUMPY_TAG_VERSION}
+    CONFIGURE_COMMAND ""
+    UPDATE_COMMAND ""
+    PREFIX ${NUMPY_SOURCES_DIR}
+    BUILD_COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py build
+    INSTALL_COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py install
+    BUILD_IN_SOURCE 1
+    DEPENDS python setuptools cython
+  )
+  ####################################################################################
+
+  ##################################### WHEEL ########################################
+  SET(WHEEL_SOURCES_DIR ${PYTHON_SOURCES_DIR}/wheel)
+  ExternalProject_Add(wheel
+    ${EXTERNAL_PROJECT_LOG_ARGS}
+    URL https://pypi.python.org/packages/source/w/wheel/wheel-0.29.0.tar.gz
+    PREFIX ${WHEEL_SOURCES_DIR}
+    CONFIGURE_COMMAND ""
+    UPDATE_COMMAND ""
+    BUILD_COMMAND ""
+    INSTALL_COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py install
+    BUILD_IN_SOURCE 1
+    DEPENDS python setuptools
+  )
+  ####################################################################################
+
+  ################################### PROTOBUF #######################################
+  SET(PY_PROTOBUF_SOURCES_DIR ${PYTHON_SOURCES_DIR}/protobuf)
+  ExternalProject_Add(python-protobuf
+    ${EXTERNAL_PROJECT_LOG_ARGS}
+    URL https://pypi.python.org/packages/e0/b0/0a1b364fe8a7d177b4b7d4dca5b798500dc57a7273b93cca73931b305a6a/protobuf-3.1.0.post1.tar.gz
+    URL_MD5 38b5fb160c768d2f8444d0c6d637ff91
+    PREFIX ${PY_PROTOBUF_SOURCES_DIR}
+    BUILD_IN_SOURCE 1
+    PATCH_COMMAND ""
+    CONFIGURE_COMMAND ""
+    BUILD_COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py build
+    INSTALL_COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py install
+    DEPENDS python setuptools six
+  )
+  ####################################################################################
+
+  LIST(APPEND external_project_dependencies python setuptools six cython wheel python-protobuf numpy)
+
+ENDIF(PYTHONLIBS_FOUND AND PYTHONINTERP_FOUND)
+INCLUDE_DIRECTORIES(${PYTHON_INCLUDE_DIR})
 INCLUDE_DIRECTORIES(${PYTHON_NUMPY_INCLUDE_DIR})
 
-ExternalProject_Add(numpy
-    ${EXTERNAL_PROJECT_LOG_ARGS}
-    GIT_REPOSITORY https://github.com/numpy/numpy.git
-    GIT_TAG ${NUMPY_TAG_VERSION}
-    CONFIGURE_COMMAND ""
-    UPDATE_COMMAND ""
-    PREFIX ${NUMPY_SOURCES_DIR}
-    BUILD_COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py build
-    INSTALL_COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py install
-    BUILD_IN_SOURCE 1
-    DEPENDS python setuptools cython
-)
-####################################################################################
-
-##################################### WHEEL ########################################
-SET(WHEEL_SOURCES_DIR ${PYTHON_SOURCES_DIR}/wheel)
-ExternalProject_Add(wheel
-    ${EXTERNAL_PROJECT_LOG_ARGS}
-    URL https://pypi.python.org/packages/source/w/wheel/wheel-0.29.0.tar.gz
-    PREFIX ${WHEEL_SOURCES_DIR}
-    CONFIGURE_COMMAND ""
"" - UPDATE_COMMAND "" - BUILD_COMMAND "" - INSTALL_COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py install - BUILD_IN_SOURCE 1 - DEPENDS python setuptools -) -#################################################################################### - -################################### PROTOBUF ####################################### -SET(PY_PROTOBUF_SOURCES_DIR ${PYTHON_SOURCES_DIR}/protobuf) -ExternalProject_Add(python-protobuf - ${EXTERNAL_PROJECT_LOG_ARGS} - URL https://pypi.python.org/packages/e0/b0/0a1b364fe8a7d177b4b7d4dca5b798500dc57a7273b93cca73931b305a6a/protobuf-3.1.0.post1.tar.gz - URL_MD5 38b5fb160c768d2f8444d0c6d637ff91 - PREFIX ${PY_PROTOBUF_SOURCES_DIR} - BUILD_IN_SOURCE 1 - PATCH_COMMAND "" - CONFIGURE_COMMAND "" - BUILD_COMMAND env PATH=${PROTOBUF_INSTALL_DIR}/bin:$ENV{PATH} ${py_env} ${PYTHON_EXECUTABLE} setup.py build - INSTALL_COMMAND env PATH=${PROTOBUF_INSTALL_DIR}/bin:$ENV{PATH} ${py_env} ${PYTHON_EXECUTABLE} setup.py install - DEPENDS python setuptools six -) - -LIST(APPEND external_project_dependencies python setuptools six cython numpy wheel python-protobuf) +MESSAGE("[Paddle] Python Executable: ${PYTHON_EXECUTABLE}") +MESSAGE("[Paddle] Python Include: ${PYTHON_INCLUDE_DIRS}") +MESSAGE("[Paddle] Python Libraries: ${PYTHON_LIBRARIES}") diff --git a/paddle/api/CMakeLists.txt b/paddle/api/CMakeLists.txt index 3ac50e34bb..6e8fcd114d 100644 --- a/paddle/api/CMakeLists.txt +++ b/paddle/api/CMakeLists.txt @@ -94,17 +94,19 @@ add_dependencies(python_api_wheel python_swig_sources paddle_cuda) if(WITH_TESTING) - SET(PIP_SOURCES_DIR ${PYTHON_SOURCES_DIR}/pip) - ExternalProject_Add(pip - ${EXTERNAL_PROJECT_LOG_ARGS} - GIT_REPOSITORY https://github.com/pypa/pip.git - GIT_TAG 9.0.1 - PREFIX ${PIP_SOURCES_DIR} - CONFIGURE_COMMAND "" - BUILD_COMMAND "" - INSTALL_COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py install - BUILD_IN_SOURCE 1 - DEPENDS python setuptools python_api_wheel - ) + IF(NOT PY_PIP_FOUND) + SET(PIP_SOURCES_DIR ${PYTHON_SOURCES_DIR}/pip) + ExternalProject_Add(pip + ${EXTERNAL_PROJECT_LOG_ARGS} + GIT_REPOSITORY https://github.com/pypa/pip.git + GIT_TAG 9.0.1 + PREFIX ${PIP_SOURCES_DIR} + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py install + BUILD_IN_SOURCE 1 + DEPENDS python setuptools python_api_wheel + ) + ENDIF() add_subdirectory(test) endif() diff --git a/paddle/scripts/travis/before_install.osx.sh b/paddle/scripts/travis/before_install.osx.sh index fd113d313e..7036f971fd 100755 --- a/paddle/scripts/travis/before_install.osx.sh +++ b/paddle/scripts/travis/before_install.osx.sh @@ -1,4 +1,6 @@ #!/bin/bash brew update brew tap homebrew/science -brew install openblas md5sha1sum +brew install python +sudo pip install --upgrade protobuf +brew install swig openblas md5sha1sum protobuf diff --git a/paddle/scripts/travis/build_and_test.sh b/paddle/scripts/travis/build_and_test.sh index ffc48eae66..07624ec719 100755 --- a/paddle/scripts/travis/build_and_test.sh +++ b/paddle/scripts/travis/build_and_test.sh @@ -1,23 +1,19 @@ #!/bin/bash source ./common.sh -python -c 'import pip; print(pip.pep425tags.get_supported())' - -if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then - CMAKE_EXTRA="-DWITH_SWIG_PY=OFF" -else - CMAKE_EXTRA="-DWITH_SWIG_PY=ON" -fi - -cmake .. 
-
 NPROC=1
 if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then
+  export PYTHONPATH=/opt/python/2.7.12/lib/python2.7/site-packages
+  export PYTHONHOME=/opt/python/2.7.12
+  export PATH=/opt/python/2.7.12/bin:${PATH}
+  cmake .. -DON_TRAVIS=ON -DON_COVERALLS=ON -DCOVERALLS_UPLOAD=ON
   NPROC=`nproc`
   make -j $NPROC
   make coveralls
   sudo make install
 elif [[ "$TRAVIS_OS_NAME" == "osx" ]]; then
+  export PYTHONPATH=/usr/local/lib/python2.7/site-packages
+  cmake .. -DON_TRAVIS=ON -DON_COVERALLS=ON -DCOVERALLS_UPLOAD=ON -DWITH_SWIG_PY=ON
   NPROC=`sysctl -n hw.ncpu`
   make -j $NPROC
   env CTEST_OUTPUT_ON_FAILURE=1 make test ARGS="-j $NPROC"
diff --git a/paddle/utils/PythonUtil.cpp.in b/paddle/utils/PythonUtil.cpp.in
index e0caaf4cd6..66b5795e29 100644
--- a/paddle/utils/PythonUtil.cpp.in
+++ b/paddle/utils/PythonUtil.cpp.in
@@ -195,8 +195,10 @@ extern const char enable_virtualenv_py[];
 }
 
 void initPython(int argc, char** argv) {
 #ifndef PADDLE_NO_PYTHON
-  char PythonHome[] = "@PYTHON_INSTALL_DIR@";  // NOLINT
-  Py_SetPythonHome(PythonHome);
+  char pyHome[] = "@PYTHON_INSTALL_DIR@";  // NOLINT
+  if (strlen(pyHome)) {
+    Py_SetPythonHome(pyHome);
+  }
   Py_SetProgramName(argv[0]);
   Py_Initialize();
   PySys_SetArgv(argc, argv);
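The PythonUtil.cpp.in hunk above works through configure-time substitution: the @PYTHON_INSTALL_DIR@ placeholder is replaced when CMake expands the template, and once the system interpreter is in use the variable is simply empty, so the new strlen() guard skips Py_SetPythonHome() instead of pointing Python at a non-existent home. A rough sketch of the expansion step; the real configure_file() call lives elsewhere in the Paddle build and may differ in its exact arguments:

    # Minimal sketch of how a .cpp.in template is expanded (an illustration,
    # not the call shipped in this patch).
    SET(PYTHON_INSTALL_DIR "")    # empty when the system Python is used
    CONFIGURE_FILE(
        ${PROJ_ROOT}/paddle/utils/PythonUtil.cpp.in       # contains @PYTHON_INSTALL_DIR@
        ${CMAKE_CURRENT_BINARY_DIR}/PythonUtil.cpp)       # placeholder becomes ""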
From 5788a879d4480a010810cd0d6a6900a1e50c9853 Mon Sep 17 00:00:00 2001
From: liaogang
Date: Sun, 8 Jan 2017 09:25:55 +0800
Subject: [PATCH 44/51] Polish cmake configuration

---
 CMakeLists.txt | 31 +++++++++++++++----------------
 1 file changed, 15 insertions(+), 16 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 9ed757bd1b..ede4af3e3f 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -28,21 +28,21 @@ include(system)
 include(simd)
 
 ###################### Configurations ############################
-option(WITH_DSO "Compile PaddlePaddle with dynamic linked libraries" ON)
-option(WITH_GPU "Compile PaddlePaddle with gpu" ${CUDA_FOUND})
-option(WITH_DOUBLE "Compile PaddlePaddle with double precision, otherwise use single precision" OFF)
-option(WITH_AVX "Compile PaddlePaddle with avx intrinsics" ${AVX_FOUND})
-option(WITH_PYTHON "Compile PaddlePaddle with python interpreter" ON)
-option(WITH_STYLE_CHECK "Style Check for PaddlePaddle" ON)
-option(WITH_RDMA "Compile PaddlePaddle with rdma support" OFF)
-option(WITH_TIMER "Compile PaddlePaddle use timer" OFF)
-option(WITH_PROFILER "Compile PaddlePaddle use gpu profiler" OFF)
-option(WITH_TESTING "Compile and run unittest for PaddlePaddle" ON)
-option(WITH_DOC "Compile PaddlePaddle with documentation" OFF)
-option(WITH_SWIG_PY "Compile PaddlePaddle with py PaddlePaddle prediction api" ON)
-option(ON_TRAVIS "Running test on travis-ci or not." OFF)
-option(ON_COVERALLS "Generating code coverage data on coveralls or not." OFF)
-option(COVERALLS_UPLOAD "Uploading the generated coveralls json." ON)
+option(WITH_GPU "Compile PaddlePaddle with NVIDIA GPU" ${CUDA_FOUND})
+option(WITH_AVX "Compile PaddlePaddle with AVX intrinsics" ${AVX_FOUND})
+option(WITH_DSO "Compile PaddlePaddle with dynamic linked CUDA" ON)
+option(WITH_TESTING "Compile PaddlePaddle with unit testing" ON)
+option(WITH_SWIG_PY "Compile PaddlePaddle with inference api" ON)
+option(WITH_STYLE_CHECK "Compile PaddlePaddle with style check" ON)
+option(WITH_PYTHON "Compile PaddlePaddle with python interpreter" ON)
+option(WITH_DOUBLE "Compile PaddlePaddle with double precision" OFF)
+option(WITH_RDMA "Compile PaddlePaddle with RDMA support" OFF)
+option(WITH_TIMER "Compile PaddlePaddle with stats timer" OFF)
+option(WITH_PROFILER "Compile PaddlePaddle with GPU profiler" OFF)
+option(WITH_DOC "Compile PaddlePaddle with documentation" OFF)
+option(ON_COVERALLS "Compile PaddlePaddle with code coverage" OFF)
+option(COVERALLS_UPLOAD "Package code coverage data to coveralls" OFF)
+option(ON_TRAVIS "Exclude special unit test on Travis CI" OFF)
 
 include(external/zlib)      # download, build, install zlib
 include(external/gflags)    # download, build, install gflags
@@ -63,7 +63,6 @@ include(flags)           # set paddle compile flags
 include(cudnn)           # set cudnn libraries
 include(version)         # set PADDLE_VERSION
 include(coveralls)       # set code coverage
-include(python_module)   # set python module
 include(configure)       # add paddle env configuration

From 635d4622a7750a7d5640c1539c79dc8c0a1b1f73 Mon Sep 17 00:00:00 2001
From: liaogang
Date: Sun, 8 Jan 2017 09:33:11 +0800
Subject: [PATCH 45/51] Clean py_env

---
 cmake/external/python.cmake | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cmake/external/python.cmake b/cmake/external/python.cmake
index 357ee901ce..cbb6940221 100644
--- a/cmake/external/python.cmake
+++ b/cmake/external/python.cmake
@@ -18,7 +18,7 @@ INCLUDE(python_module)
 FIND_PACKAGE(PythonInterp 2.7)
 FIND_PACKAGE(PythonLibs 2.7)
 
-SET(py_env PATH=${PATH} PYTHONHOME=${PYTHONHOME} PYTHONPATH=${PYTHONPATH})
+SET(py_env "")
 
 IF(PYTHONLIBS_FOUND AND PYTHONINTERP_FOUND)
   find_python_module(pip REQUIRED)
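Patch 45's one-line cleanup is safe because py_env only ever appears in commands of the shape `env ${py_env} ${PYTHON_EXECUTABLE} ...`: when the bundled interpreter is built, it carries the PATH/PYTHONHOME/PYTHONPATH pins, and when the system interpreter is used it is empty, so the expanded command degrades to a plain `env python ...`. A standalone sketch of the two shapes (the target name is illustrative, not part of the patch):

    SET(py_env "")    # system Python: `env` runs the command unchanged
    # SET(py_env PYTHONHOME=${PYTHON_INSTALL_DIR})    # bundled Python: pin its home
    ADD_CUSTOM_TARGET(py_probe
        COMMAND env ${py_env} ${PYTHON_EXECUTABLE} -c "import sys; print(sys.prefix)"
        COMMENT "Show which interpreter the build will drive"
        VERBATIM)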
From 9a7df696d97b9e1acdd5d41ad6175e2cb21757fd Mon Sep 17 00:00:00 2001
From: liaogang
Date: Sun, 8 Jan 2017 11:53:05 +0800
Subject: [PATCH 46/51] Clean travis ci

---
 paddle/scripts/travis/build_and_test.sh | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/paddle/scripts/travis/build_and_test.sh b/paddle/scripts/travis/build_and_test.sh
index 07624ec719..33eb0207ea 100755
--- a/paddle/scripts/travis/build_and_test.sh
+++ b/paddle/scripts/travis/build_and_test.sh
@@ -16,7 +16,4 @@ elif [[ "$TRAVIS_OS_NAME" == "osx" ]]; then
   cmake .. -DON_TRAVIS=ON -DON_COVERALLS=ON -DCOVERALLS_UPLOAD=ON -DWITH_SWIG_PY=ON
   NPROC=`sysctl -n hw.ncpu`
   make -j $NPROC
-  env CTEST_OUTPUT_ON_FAILURE=1 make test ARGS="-j $NPROC"
-  sudo make install
-  sudo paddle version
 fi

From 934ba0bf3985472135df219bfbd13783d161411d Mon Sep 17 00:00:00 2001
From: liaogang
Date: Sun, 8 Jan 2017 12:59:33 +0800
Subject: [PATCH 47/51] Disable dynamic linked cuda libs default

---
 CMakeLists.txt   | 2 +-
 cmake/util.cmake | 5 +++++
 2 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 9ed757bd1b..804fe43592 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -28,7 +28,7 @@ include(system)
 include(simd)
 
 ###################### Configurations ############################
-option(WITH_DSO "Compile PaddlePaddle with dynamic linked libraries" ON)
+option(WITH_DSO "Compile PaddlePaddle with dynamic linked libraries" OFF)
 option(WITH_GPU "Compile PaddlePaddle with gpu" ${CUDA_FOUND})
 option(WITH_DOUBLE "Compile PaddlePaddle with double precision, otherwise use single precision" OFF)
 option(WITH_AVX "Compile PaddlePaddle with avx intrinsics" ${AVX_FOUND})
diff --git a/cmake/util.cmake b/cmake/util.cmake
index a19bf2a799..a6cb74853e 100644
--- a/cmake/util.cmake
+++ b/cmake/util.cmake
@@ -120,6 +120,11 @@ function(link_paddle_exe TARGET_NAME)
             target_link_libraries(${TARGET_NAME} rt)
         endif()
     endif()
+
+    if(NOT WITH_DSO)
+        target_link_libraries(${TARGET_NAME} ${WARPCTC_LIBRARIES})
+    endif()
+
     add_dependencies(${TARGET_NAME} ${external_project_dependencies})
 endfunction()
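Patch 47 flips WITH_DSO off by default and, in the same change, links the warp-ctc archive into every executable. That pairing reflects how the two modes split the work: with WITH_DSO the warp-ctc symbols are resolved at runtime via dlopen/dlsym, and without it the archive must appear on the link line. The dichotomy reduced to a sketch (the executable name is illustrative; note that patch 50 below deletes the static branch again and makes dynamic loading the only path for warp-ctc):

    if(WITH_DSO)
        # Symbols resolved at runtime; only the dynamic loader library is linked.
        target_link_libraries(paddle_demo_bin ${CMAKE_DL_LIBS})
    else()
        # Symbols resolved at link time; the warp-ctc archive goes on the link line.
        target_link_libraries(paddle_demo_bin ${WARPCTC_LIBRARIES})
    endif()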
From 057c216e000f3728f1fb3b585e5b2bd930e9dc77 Mon Sep 17 00:00:00 2001
From: liaogang
Date: Sun, 8 Jan 2017 14:41:11 +0800
Subject: [PATCH 48/51] Fix warpctc static libs and torch

---
 cmake/external/warpctc.cmake | 11 +++--------
 1 file changed, 3 insertions(+), 8 deletions(-)

diff --git a/cmake/external/warpctc.cmake b/cmake/external/warpctc.cmake
index 34397dca7a..f924aa193f 100644
--- a/cmake/external/warpctc.cmake
+++ b/cmake/external/warpctc.cmake
@@ -24,16 +24,10 @@ SET(WARPCTC_LIB_DIR "${WARPCTC_INSTALL_DIR}/lib" CACHE PATH "Warp-ctc Library Di
 
 IF(WIN32)
     SET(WARPCTC_LIBRARIES
-        "${WARPCTC_INSTALL_DIR}/lib/warpctc.dll" CACHE FILEPATH "Warp-ctc Library" FORCE)
+        "${WARPCTC_INSTALL_DIR}/lib/warpctc.lib" CACHE FILEPATH "Warp-ctc Library" FORCE)
 ELSE(WIN32)
-    IF(APPLE)
-        SET(_warpctc_SHARED_SUFFIX dylib)
-    ELSE(APPLE)
-        SET(_warpctc_SHARED_SUFFIX so)
-    ENDIF(APPLE)
-
     SET(WARPCTC_LIBRARIES
-        "${WARPCTC_INSTALL_DIR}/lib/libwarpctc.${_warpctc_SHARED_SUFFIX}" CACHE FILEPATH "Warp-ctc Library" FORCE)
+        "${WARPCTC_INSTALL_DIR}/lib/libwarpctc.a" CACHE FILEPATH "Warp-ctc Library" FORCE)
 ENDIF(WIN32)
 
 IF(CMAKE_CXX_COMPILER_ID STREQUAL "Clang" OR CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang" )
@@ -53,6 +47,7 @@ ExternalProject_Add(
     CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${WARPCTC_INSTALL_DIR}
     CMAKE_ARGS -DWITH_GPU=${WITH_GPU}
     CMAKE_ARGS -DWITH_OMP=${USE_OMP}
+    CMAKE_ARGS -DWITH_TORCH=OFF
     CMAKE_ARGS -DBUILD_SHARED=OFF
 )

From 425f9515f03cda2e4f44d10c55a7371fa7c08817 Mon Sep 17 00:00:00 2001
From: liaogang
Date: Sun, 8 Jan 2017 15:33:46 +0800
Subject: [PATCH 49/51] Fix warpctc bugs

---
 cmake/external/warpctc.cmake        | 12 +++++++++---
 paddle/gserver/tests/CMakeLists.txt |  2 +-
 2 files changed, 10 insertions(+), 4 deletions(-)

diff --git a/cmake/external/warpctc.cmake b/cmake/external/warpctc.cmake
index f924aa193f..7386d935b8 100644
--- a/cmake/external/warpctc.cmake
+++ b/cmake/external/warpctc.cmake
@@ -24,10 +24,16 @@ SET(WARPCTC_LIB_DIR "${WARPCTC_INSTALL_DIR}/lib" CACHE PATH "Warp-ctc Library Di
 
 IF(WIN32)
     SET(WARPCTC_LIBRARIES
-        "${WARPCTC_INSTALL_DIR}/lib/warpctc.lib" CACHE FILEPATH "Warp-ctc Library" FORCE)
+        "${WARPCTC_INSTALL_DIR}/lib/warpctc.dll" CACHE FILEPATH "Warp-ctc Library" FORCE)
 ELSE(WIN32)
+    IF(APPLE)
+        SET(_warpctc_SHARED_SUFFIX dylib)
+    ELSE(APPLE)
+        SET(_warpctc_SHARED_SUFFIX so)
+    ENDIF(APPLE)
+
     SET(WARPCTC_LIBRARIES
-        "${WARPCTC_INSTALL_DIR}/lib/libwarpctc.a" CACHE FILEPATH "Warp-ctc Library" FORCE)
+        "${WARPCTC_INSTALL_DIR}/lib/libwarpctc.${_warpctc_SHARED_SUFFIX}" CACHE FILEPATH "Warp-ctc Library" FORCE)
 ENDIF(WIN32)
 
 IF(CMAKE_CXX_COMPILER_ID STREQUAL "Clang" OR CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang" )
@@ -48,7 +54,7 @@ ExternalProject_Add(
     CMAKE_ARGS -DWITH_GPU=${WITH_GPU}
     CMAKE_ARGS -DWITH_OMP=${USE_OMP}
     CMAKE_ARGS -DWITH_TORCH=OFF
-    CMAKE_ARGS -DBUILD_SHARED=OFF
+    CMAKE_ARGS -DBUILD_SHARED=ON
 )
 
 LIST(APPEND external_project_dependencies warpctc)
diff --git a/paddle/gserver/tests/CMakeLists.txt b/paddle/gserver/tests/CMakeLists.txt
index 286c66b996..0caa5e1e11 100644
--- a/paddle/gserver/tests/CMakeLists.txt
+++ b/paddle/gserver/tests/CMakeLists.txt
@@ -80,7 +80,7 @@ if(NOT WITH_DOUBLE)
         test_WarpCTCLayer.cpp)
 
     add_test(NAME test_WarpCTCLayer
-        COMMAND ${CMAKE_CURRENT_BINARY_DIR}/test_WarpCTCLayer
+        COMMAND ${CMAKE_CURRENT_BINARY_DIR}/test_WarpCTCLayer --warpctc_dir=${WARPCTC_LIB_DIR}
        WORKING_DIRECTORY ${PROJ_ROOT}/paddle)
 endif()

From 4d6aca4b33de65615eb48b7c86070917b637ff22 Mon Sep 17 00:00:00 2001
From: liaogang
Date: Mon, 9 Jan 2017 10:50:21 +0800
Subject: [PATCH 50/51] Warpctc only support dynamic load

---
 cmake/external/openblas.cmake      |  1 +
 cmake/util.cmake                   |  4 ----
 paddle/cuda/src/hl_warpctc_wrap.cc | 10 ----------
 3 files changed, 1 insertion(+), 14 deletions(-)

diff --git a/cmake/external/openblas.cmake b/cmake/external/openblas.cmake
index 677999cc9f..66a72cd243 100644
--- a/cmake/external/openblas.cmake
+++ b/cmake/external/openblas.cmake
@@ -15,6 +15,7 @@
 INCLUDE(cblas)
 
 IF(NOT ${CBLAS_FOUND})
+    MESSAGE(FATAL_ERROR "Please install OpenBlas, MKL or ATLAS.")
    INCLUDE(ExternalProject)
 
    SET(CBLAS_SOURCES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/openblas)
diff --git a/cmake/util.cmake b/cmake/util.cmake
index a6cb74853e..7da52bb758 100644
--- a/cmake/util.cmake
+++ b/cmake/util.cmake
@@ -121,10 +121,6 @@ function(link_paddle_exe TARGET_NAME)
         endif()
     endif()
 
-    if(NOT WITH_DSO)
-        target_link_libraries(${TARGET_NAME} ${WARPCTC_LIBRARIES})
-    endif()
-
     add_dependencies(${TARGET_NAME} ${external_project_dependencies})
 endfunction()
diff --git a/paddle/cuda/src/hl_warpctc_wrap.cc b/paddle/cuda/src/hl_warpctc_wrap.cc
index 9ae8bc0f22..55b940ca67 100644
--- a/paddle/cuda/src/hl_warpctc_wrap.cc
+++ b/paddle/cuda/src/hl_warpctc_wrap.cc
@@ -29,7 +29,6 @@ void* warpctc_dso_handle = nullptr;
  *            false, you need to add the path of libwarp-ctc.so to
  *            the linked-libs of paddle or to LD_PRELOAD.
  */
-#ifdef PADDLE_USE_DSO
 #define DYNAMIC_LOAD_WARPCTC_WRAP(__name)                          \
   struct DynLoad__##__name {                                       \
     template <typename... Args>                                    \
@@ -41,15 +40,6 @@ void* warpctc_dso_handle = nullptr;
       return reinterpret_cast<warpctcFunc>(p_##_name)(args...);    \
     }                                                              \
   } __name;  // struct DynLoad__##__name
-#else
-#define DYNAMIC_LOAD_WARPCTC_WRAP(__name)                          \
-  struct DynLoad__##__name {                                       \
-    template <typename... Args>                                    \
-    auto operator()(Args... args) -> decltype(__name(args...)) {   \
-      return __name(args...);                                     \
-    }                                                              \
-  } __name;  // struct DynLoad__##__name
-#endif
 
 // include all needed warp-ctc functions
 DYNAMIC_LOAD_WARPCTC_WRAP(get_warpctc_version)

From f30c25fe2d5a3f3d86f9f6bcfadfbc446a3c9071 Mon Sep 17 00:00:00 2001
From: Yu Yang
Date: Mon, 9 Jan 2017 15:19:07 +0800
Subject: [PATCH 51/51] Add THIRD_PARTY_PATH

---
 CMakeLists.txt                | 10 ++++++++++
 cmake/external/glog.cmake     |  4 ++--
 cmake/external/gtest.cmake    |  4 ++--
 cmake/external/openblas.cmake |  4 ++--
 cmake/external/protobuf.cmake |  4 ++--
 cmake/external/python.cmake   |  4 ++--
 cmake/external/swig.cmake     |  4 ++--
 cmake/external/warpctc.cmake  |  4 ++--
 cmake/external/zlib.cmake     |  4 ++--
 cmake/flags.cmake             |  6 ------
 10 files changed, 26 insertions(+), 22 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index abe7b5228c..8f53abacb4 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -43,6 +43,16 @@ option(WITH_DOC "Compile PaddlePaddle with documentation" OFF)
 option(ON_COVERALLS "Compile PaddlePaddle with code coverage" OFF)
 option(COVERALLS_UPLOAD "Package code coverage data to coveralls" OFF)
 option(ON_TRAVIS "Exclude special unit test on Travis CI" OFF)
+
+# CMAKE_BUILD_TYPE
+if(NOT CMAKE_BUILD_TYPE)
+    set(CMAKE_BUILD_TYPE "RelWithDebInfo" CACHE STRING
+        "Choose the type of build, options are: Debug Release RelWithDebInfo MinSizeRel"
+        FORCE)
+endif()
+
+set(THIRD_PARTY_PATH "${PROJ_ROOT}/third_party" CACHE STRING
+    "A path setting third party libraries download & build directories.")
 ########################################################################################
 
 include(external/zlib)      # download, build, install zlib
diff --git a/cmake/external/glog.cmake b/cmake/external/glog.cmake
index bec69f3ddf..71e20c8527 100644
--- a/cmake/external/glog.cmake
+++ b/cmake/external/glog.cmake
@@ -14,8 +14,8 @@
 
 INCLUDE(ExternalProject)
 
-SET(GLOG_SOURCES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/glog)
-SET(GLOG_INSTALL_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/install/glog)
+SET(GLOG_SOURCES_DIR ${THIRD_PARTY_PATH}/glog)
+SET(GLOG_INSTALL_DIR ${THIRD_PARTY_PATH}/install/glog)
 SET(GLOG_INCLUDE_DIR "${GLOG_INSTALL_DIR}/include" CACHE PATH "glog include directory." FORCE)
 
 IF(WIN32)
diff --git a/cmake/external/gtest.cmake b/cmake/external/gtest.cmake
index 2fcb7893fa..11d829a9e2 100644
--- a/cmake/external/gtest.cmake
+++ b/cmake/external/gtest.cmake
@@ -16,8 +16,8 @@ IF(WITH_TESTING)
   ENABLE_TESTING()
   INCLUDE(ExternalProject)
 
-  SET(GTEST_SOURCES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/gtest)
-  SET(GTEST_INSTALL_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/install/gtest)
+  SET(GTEST_SOURCES_DIR ${THIRD_PARTY_PATH}/gtest)
+  SET(GTEST_INSTALL_DIR ${THIRD_PARTY_PATH}/install/gtest)
 
   SET(GTEST_INCLUDE_DIR "${GTEST_INSTALL_DIR}/include" CACHE PATH "gtest include directory." FORCE)
   INCLUDE_DIRECTORIES(${GTEST_INCLUDE_DIR})
diff --git a/cmake/external/openblas.cmake b/cmake/external/openblas.cmake
index 66a72cd243..0e8c29c831 100644
--- a/cmake/external/openblas.cmake
+++ b/cmake/external/openblas.cmake
@@ -18,8 +18,8 @@ IF(NOT ${CBLAS_FOUND})
   MESSAGE(FATAL_ERROR "Please install OpenBlas, MKL or ATLAS.")
   INCLUDE(ExternalProject)
 
-  SET(CBLAS_SOURCES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/openblas)
-  SET(CBLAS_INSTALL_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/install/openblas)
+  SET(CBLAS_SOURCES_DIR ${THIRD_PARTY_PATH}/openblas)
+  SET(CBLAS_INSTALL_DIR ${THIRD_PARTY_PATH}/install/openblas)
   SET(CBLAS_INC_DIR "${CBLAS_INSTALL_DIR}/include" CACHE PATH "openblas include directory." FORCE)
 
   IF(WIN32)
diff --git a/cmake/external/protobuf.cmake b/cmake/external/protobuf.cmake
index 2f2769b4c6..c0cf2719f9 100644
--- a/cmake/external/protobuf.cmake
+++ b/cmake/external/protobuf.cmake
@@ -14,8 +14,8 @@
 
 INCLUDE(ExternalProject)
 
-SET(PROTOBUF_SOURCES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/protobuf)
-SET(PROTOBUF_INSTALL_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/install/protobuf)
+SET(PROTOBUF_SOURCES_DIR ${THIRD_PARTY_PATH}/protobuf)
+SET(PROTOBUF_INSTALL_DIR ${THIRD_PARTY_PATH}/install/protobuf)
 SET(PROTOBUF_INCLUDE_DIR "${PROTOBUF_INSTALL_DIR}/include" CACHE PATH "protobuf include directory." FORCE)
 
 INCLUDE_DIRECTORIES(${PROTOBUF_INCLUDE_DIR})
diff --git a/cmake/external/python.cmake b/cmake/external/python.cmake
index cbb6940221..55787f75f8 100644
--- a/cmake/external/python.cmake
+++ b/cmake/external/python.cmake
@@ -28,8 +28,8 @@ IF(PYTHONLIBS_FOUND AND PYTHONINTERP_FOUND)
   FIND_PACKAGE(NumPy REQUIRED)
 ELSE(PYTHONLIBS_FOUND AND PYTHONINTERP_FOUND)
   ##################################### PYTHON ########################################
-  SET(PYTHON_SOURCES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/python)
-  SET(PYTHON_INSTALL_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/install/python)
+  SET(PYTHON_SOURCES_DIR ${THIRD_PARTY_PATH}/python)
+  SET(PYTHON_INSTALL_DIR ${THIRD_PARTY_PATH}/install/python)
   SET(_python_DIR ${PYTHON_INSTALL_DIR})
 
   IF(UNIX)
diff --git a/cmake/external/swig.cmake b/cmake/external/swig.cmake
index 40088c65ef..63e8bd2546 100644
--- a/cmake/external/swig.cmake
+++ b/cmake/external/swig.cmake
@@ -18,8 +18,8 @@ IF(NOT SWIG_FOUND)
   # build swig as an external project
   INCLUDE(ExternalProject)
 
-  SET(SWIG_SOURCES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/swig)
-  SET(SWIG_INSTALL_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/install/swig)
+  SET(SWIG_SOURCES_DIR ${THIRD_PARTY_PATH}/swig)
+  SET(SWIG_INSTALL_DIR ${THIRD_PARTY_PATH}/install/swig)
   SET(SWIG_TARGET_VERSION "3.0.2")
   SET(SWIG_DOWNLOAD_SRC_MD5 "62f9b0d010cef36a13a010dc530d0d41")
   SET(SWIG_DOWNLOAD_WIN_MD5 "3f18de4fc09ab9abb0d3be37c11fbc8f")
diff --git a/cmake/external/warpctc.cmake b/cmake/external/warpctc.cmake
index 7386d935b8..f5e4b3e1eb 100644
--- a/cmake/external/warpctc.cmake
+++ b/cmake/external/warpctc.cmake
@@ -14,8 +14,8 @@
 
 INCLUDE(ExternalProject)
 
-SET(WARPCTC_SOURCES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/warpctc)
-SET(WARPCTC_INSTALL_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/install/warpctc)
+SET(WARPCTC_SOURCES_DIR ${THIRD_PARTY_PATH}/warpctc)
+SET(WARPCTC_INSTALL_DIR ${THIRD_PARTY_PATH}/install/warpctc)
 SET(WARPCTC_INCLUDE_DIR "${WARPCTC_INSTALL_DIR}/include" CACHE PATH "Warp-ctc Directory" FORCE)
 
 INCLUDE_DIRECTORIES(${WARPCTC_INCLUDE_DIR})
diff --git a/cmake/external/zlib.cmake b/cmake/external/zlib.cmake
index 916f6816aa..47fa8817fb 100644
--- a/cmake/external/zlib.cmake
+++ b/cmake/external/zlib.cmake
@@ -14,8 +14,8 @@
 
 INCLUDE(ExternalProject)
 
-SET(ZLIB_SOURCES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/zlib)
-SET(ZLIB_INSTALL_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/install/zlib)
+SET(ZLIB_SOURCES_DIR ${THIRD_PARTY_PATH}/zlib)
+SET(ZLIB_INSTALL_DIR ${THIRD_PARTY_PATH}/install/zlib)
 SET(ZLIB_ROOT ${ZLIB_INSTALL_DIR} CACHE FILEPATH "zlib root directory." FORCE)
 SET(ZLIB_INCLUDE_DIR "${ZLIB_INSTALL_DIR}/include" CACHE PATH "zlib include directory." FORCE)
diff --git a/cmake/flags.cmake b/cmake/flags.cmake
index 0983d83b73..0d1ef5cd84 100644
--- a/cmake/flags.cmake
+++ b/cmake/flags.cmake
@@ -3,12 +3,6 @@ include(CheckCXXCompilerFlag)
 include(CheckCCompilerFlag)
 include(CheckCXXSymbolExists)
 
-if(NOT CMAKE_BUILD_TYPE)
-    set(CMAKE_BUILD_TYPE "RelWithDebInfo" CACHE STRING
-        "Choose the type of build, options are: Debug Release RelWithDebInfo MinSizeRel"
-        FORCE)
-endif()
-
 function(CheckCompilerCXX11Flag)
     if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
         if(${CMAKE_CXX_COMPILER_VERSION} VERSION_LESS 4.8)
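Patch 51 routes every external project through a single THIRD_PARTY_PATH cache variable, so relocating all third-party downloads and builds now takes one -DTHIRD_PARTY_PATH=... on the configure command line. A minimal sketch of what a new dependency following the same convention would look like (the `foo` project and its URL are placeholders, not part of the patch):

    INCLUDE(ExternalProject)

    SET(FOO_SOURCES_DIR ${THIRD_PARTY_PATH}/foo)
    SET(FOO_INSTALL_DIR ${THIRD_PARTY_PATH}/install/foo)
    SET(FOO_INCLUDE_DIR "${FOO_INSTALL_DIR}/include" CACHE PATH "foo include directory." FORCE)

    ExternalProject_Add(
        foo
        GIT_REPOSITORY "https://github.com/example/foo.git"   # placeholder repository
        PREFIX         ${FOO_SOURCES_DIR}
        CMAKE_ARGS     -DCMAKE_INSTALL_PREFIX=${FOO_INSTALL_DIR}
        UPDATE_COMMAND ""
    )

    INCLUDE_DIRECTORIES(${FOO_INCLUDE_DIR})
    LIST(APPEND external_project_dependencies foo)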