diff --git a/.gitignore b/.gitignore
index 9622ab78e0..351b820410 100644
--- a/.gitignore
+++ b/.gitignore
@@ -22,7 +22,8 @@ cmake-build-*
# generated while compiling
python/paddle/v2/framework/core.so
+paddle/pybind/pybind.h
CMakeFiles
cmake_install.cmake
paddle/.timestamp
python/paddlepaddle.egg-info/
diff --git a/.travis.yml b/.travis.yml
index e217c8f5a7..d0e2696f10 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -36,10 +36,6 @@ before_install:
# protobuf version.
- sudo pip install -r $TRAVIS_BUILD_DIR/python/requirements.txt
- sudo pip install wheel sphinx==1.5.6 recommonmark sphinx-rtd-theme==0.1.9 virtualenv pre-commit LinkChecker
- - curl https://glide.sh/get | bash
- - eval "$(GIMME_GO_VERSION=1.8.3 gimme)"
- - go get -u github.com/alecthomas/gometalinter
- - gometalinter --install
- |
function timeout() { perl -e 'alarm shift; exec @ARGV' "$@"; }
script:
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 08237cd850..4921226ec1 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -27,7 +27,7 @@ if(NOT CMAKE_CROSSCOMPILING)
endif(NOT CMAKE_CROSSCOMPILING)
find_package(Git REQUIRED)
find_package(Threads REQUIRED)
-if(NOT ANDROID)
+if(NOT ANDROID AND NOT IOS)
find_package(Boost QUIET)
endif()
@@ -64,24 +64,29 @@ if(NOT CMAKE_BUILD_TYPE)
FORCE)
endif()
-if(ANDROID)
- if(${CMAKE_SYSTEM_VERSION} VERSION_LESS "16")
- message(FATAL_ERROR "Unsupport standalone toolchains with Android API level lower than 16")
+if(ANDROID OR IOS)
+ if(ANDROID)
+ if(${CMAKE_SYSTEM_VERSION} VERSION_LESS "16")
+      message(FATAL_ERROR "Standalone toolchains with Android API level lower than 16 are not supported")
+ elseif(${CMAKE_SYSTEM_VERSION} VERSION_LESS "21")
+ # TODO: support glog for Android api 16 ~ 19 in the future
+ message(WARNING "Using the unofficial git repository instead")
+ endif()
endif()
set(WITH_GPU OFF CACHE STRING
- "Disable GPU when cross-compiling for Android" FORCE)
+ "Disable GPU when cross-compiling for Android and iOS" FORCE)
set(WITH_AVX OFF CACHE STRING
- "Disable AVX when cross-compiling for Android" FORCE)
+ "Disable AVX when cross-compiling for Android and iOS" FORCE)
set(WITH_PYTHON OFF CACHE STRING
- "Disable PYTHON when cross-compiling for Android" FORCE)
+ "Disable PYTHON when cross-compiling for Android and iOS" FORCE)
set(WITH_RDMA OFF CACHE STRING
- "Disable RDMA when cross-compiling for Android" FORCE)
+ "Disable RDMA when cross-compiling for Android and iOS" FORCE)
set(WITH_MKLDNN OFF CACHE STRING
- "Disable MKLDNN when cross-compiling for Android" FORCE)
+ "Disable MKLDNN when cross-compiling for Android and iOS" FORCE)
set(WITH_MKLML OFF CACHE STRING
- "Disable MKLML package when cross-compiling for Android" FORCE)
-endif(ANDROID)
+ "Disable MKLML package when cross-compiling for Android and iOS" FORCE)
+endif()
set(THIRD_PARTY_PATH "${CMAKE_BINARY_DIR}/third_party" CACHE STRING
"A path setting third party libraries download & build directories.")
diff --git a/Dockerfile.android b/Dockerfile.android
index 452aa15745..9d13a414f6 100644
--- a/Dockerfile.android
+++ b/Dockerfile.android
@@ -6,13 +6,14 @@ RUN /bin/bash -c 'if [[ -n ${UBUNTU_MIRROR} ]]; then sed -i 's#http://archive.ub
# ENV variables
ARG ANDROID_ABI
+ARG ANDROID_API
ENV ANDROID_ABI=${ANDROID_ABI:-"armeabi-v7a"}
+ENV ANDROID_API=${ANDROID_API:-21}
ENV HOME=/root \
ANDROID_NDK_HOME=/opt/android-ndk-linux \
- ANDROID_ARM_STANDALONE_TOOLCHAIN=/opt/arm-toolchain \
- ANDROID_ARM64_STANDALONE_TOOLCHAIN=/opt/arm64-toolchain
+ ANDROID_TOOLCHAINS_DIR=/opt/toolchains
RUN apt-get update && \
apt-get install -y \
@@ -42,14 +43,12 @@ RUN pip install --upgrade pip && \
pip install pre-commit
# Android NDK
-RUN mkdir /opt/android-ndk-tmp && \
+RUN mkdir -p ${ANDROID_TOOLCHAINS_DIR} && \
+ mkdir -p /opt/android-ndk-tmp && \
cd /opt/android-ndk-tmp && \
wget -q https://dl.google.com/android/repository/android-ndk-r14b-linux-x86_64.zip && \
unzip -q android-ndk-r14b-linux-x86_64.zip && \
mv android-ndk-r14b ${ANDROID_NDK_HOME} && \
- ${ANDROID_NDK_HOME}/build/tools/make-standalone-toolchain.sh --arch=arm --platform=android-23 --install-dir=${ANDROID_ARM_STANDALONE_TOOLCHAIN} && \
- ${ANDROID_NDK_HOME}/build/tools/make-standalone-toolchain.sh --arch=arm64 --platform=android-23 --install-dir=${ANDROID_ARM64_STANDALONE_TOOLCHAIN} && \
- rm -rf /opt/android-ndk-tmp && \
- rm -rf ${ANDROID_NDK_HOME}
+ rm -rf /opt/android-ndk-tmp
CMD ["bash", "/paddle/paddle/scripts/docker/build_android.sh"]
diff --git a/cmake/cblas.cmake b/cmake/cblas.cmake
index 854066fd1d..8fdc382f0c 100644
--- a/cmake/cblas.cmake
+++ b/cmake/cblas.cmake
@@ -171,3 +171,10 @@ if (REFERENCE_CBLAS_INCLUDE_DIR AND REFERENCE_CBLAS_LIBRARY)
add_definitions(-DPADDLE_USE_REFERENCE_CBLAS)
message(STATUS "Found reference-cblas (include: ${CBLAS_INC_DIR}, library: ${CBLAS_LIBRARIES})")
endif()
+
+if(IOS_USE_VECLIB_FOR_BLAS AND VECLIB_FOUND)
+ set(CBLAS_FOUND ON)
+ set(CBLAS_PROVIDER vecLib)
+ set(CBLAS_INC_DIR ${VECLIB_INC_DIR})
+ add_definitions(-DPADDLE_USE_VECLIB)
+endif()
diff --git a/cmake/cpplint.cmake b/cmake/cpplint.cmake
index 8d5d533126..4823dc3e91 100644
--- a/cmake/cpplint.cmake
+++ b/cmake/cpplint.cmake
@@ -26,9 +26,9 @@ set(IGNORE_PATTERN
.*ImportanceSampler.*
.*cblas\\.h.*
.*\\.pb\\.txt
- .*LtrDataProvider.*
.*MultiDataProvider.*
- .*pb.*)
+ .*pb.*
+ .*pybind.h)
# add_style_check_target
#
diff --git a/cmake/cross_compiling/ios.cmake b/cmake/cross_compiling/ios.cmake
new file mode 100644
index 0000000000..0b38943952
--- /dev/null
+++ b/cmake/cross_compiling/ios.cmake
@@ -0,0 +1,350 @@
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This is a toolchain file for cross-compiling for iOS, and the
+# configuration largely refers to public toolchain file:
+# https://raw.githubusercontent.com/leetal/ios-cmake/master/ios.toolchain.cmake
+# and
+# https://github.com/cristeab/ios-cmake
+#
+# Supports options:
+# IOS_PLATFORM = OS (default) or SIMULATOR
+#     This decides if SDKs will be selected from the iPhoneOS.platform or iPhoneSimulator.platform folders
+# OS - the default, used to build for iPhone and iPad physical devices, which have an arm arch.
+# SIMULATOR - used to build for the Simulator platforms, which have an x86 arch.
+# IOS_ARCH
+#     The architectures to support, such as "arm64" or "armv7;arm64"
+# IOS_DEPLOYMENT_TARGET
+# The minimum iOS deployment version, such as "7.0"
+# IOS_ENABLE_BITCODE = ON (default) or OFF
+# IOS_USE_VECLIB_FOR_BLAS = OFF (default) or ON
+# IOS_DEVELOPER_ROOT = automatic(default) or /path/to/platform/Developer folder
+#     By default this location is automatically chosen based on the IOS_PLATFORM value above.
+#     If set manually, it will override the default location and force the use of a particular Developer Platform
+# IOS_SDK_ROOT = automatic(default) or /path/to/platform/Developer/SDKs/SDK folder
+#     By default this location is automatically chosen based on the IOS_DEVELOPER_ROOT value.
+# In this case it will always be the most up-to-date SDK found in the IOS_DEVELOPER_ROOT path.
+# If set manually, this will force the use of a specific SDK version
+
+# Macros:
+# set_xcode_property (TARGET XCODE_PROPERTY XCODE_VALUE)
+# A convenience macro for setting xcode specific properties on targets
+# example: set_xcode_property (myioslib IPHONEOS_DEPLOYMENT_TARGET "3.1")
+# find_host_package (PROGRAM ARGS)
+# A macro used to find executable programs on the host system, not within the iOS environment.
+# Thanks to the android-cmake project for providing the command
+
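+# Example usage (illustrative values only; the flags below are the options
+# documented above, wired up through cmake/system.cmake):
+#   cmake .. -DCMAKE_SYSTEM_NAME=iOS \
+#            -DIOS_PLATFORM=OS \
+#            -DIOS_ARCH=arm64 \
+#            -DIOS_DEPLOYMENT_TARGET=8.0 \
+#            -DIOS_ENABLE_BITCODE=ON \
+#            -DIOS_USE_VECLIB_FOR_BLAS=ON
+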
+if(NOT IOS)
+ return()
+endif()
+
+set(CMAKE_SYSTEM_NAME Darwin)
+
+# Get the Xcode version being used.
+execute_process(COMMAND xcodebuild -version
+ OUTPUT_VARIABLE XCODE_VERSION
+ RESULT_VARIABLE XCODE_VERSION_RESULT
+ ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
+if(NOT ${XCODE_VERSION_RESULT})
+ string(REGEX MATCH "Xcode [0-9\\.]+" XCODE_VERSION "${XCODE_VERSION}")
+ string(REGEX REPLACE "Xcode ([0-9\\.]+)" "\\1" XCODE_VERSION "${XCODE_VERSION}")
+ message(STATUS "Building with Xcode version: ${XCODE_VERSION}")
+else()
+ message(FATAL_ERROR "Cannot execute xcodebuild, please check whether xcode is installed.")
+endif()
+
+# Required as of cmake 2.8.10
+set(CMAKE_OSX_DEPLOYMENT_TARGET "" CACHE STRING "Force unset of the deployment target for iOS" FORCE)
+
+# Setup iOS platform unless specified manually with IOS_PLATFORM
+if(NOT DEFINED IOS_PLATFORM)
+ set(IOS_PLATFORM "OS")
+endif()
+set(IOS_PLATFORM ${IOS_PLATFORM} CACHE STRING "Type of iOS Platform")
+
+# Set the architecture for iOS
+if(NOT DEFINED IOS_ARCH)
+ if(IOS_PLATFORM STREQUAL "OS")
+ # FIXME(liuyiqun): support "armv7;armv7s;arm64" future
+ set(IOS_ARCH "arm64")
+ elseif(IOS_PLATFORM STREQUAL "SIMULATOR")
+ set(IOS_ARCH "i386;x86_64")
+ elseif(IOS_PLATFORM STREQUAL "WATCHOS")
+ set(IOS_ARCH armv7k)
+ endif()
+endif()
+set(CMAKE_OSX_ARCHITECTURES ${IOS_ARCH} CACHE STRING "Build architecture for iOS")
+
+# Specify minimum iOS deployment version
+if(NOT DEFINED IOS_DEPLOYMENT_TARGET)
+ set(IOS_DEPLOYMENT_TARGET "7.0")
+endif()
+set(IOS_DEPLOYMENT_TARGET ${IOS_DEPLOYMENT_TARGET} CACHE STRING "Minimum iOS version")
+
+# Whether to enable bitcode
+if(NOT DEFINED IOS_ENABLE_BITCODE)
+ set(IOS_ENABLE_BITCODE ON)
+endif()
+set(IOS_ENABLE_BITCODE ${IOS_ENABLE_BITCODE} CACHE BOOL "Whether to enable bitcode")
+
+if(NOT DEFINED IOS_USE_VECLIB_FOR_BLAS)
+ set(IOS_USE_VECLIB_FOR_BLAS OFF)
+endif()
+set(IOS_USE_VECLIB_FOR_BLAS ${IOS_USE_VECLIB_FOR_BLAS} CACHE BOOL "Whether to use veclib")
+
+# Check the platform selection and setup for developer root
+if(${IOS_PLATFORM} STREQUAL "OS")
+ set(IOS_PLATFORM_LOCATION "iPhoneOS.platform")
+ set(XCODE_IOS_PLATFORM iphoneos)
+
+ # This causes the installers to properly locate the output libraries
+ set(CMAKE_XCODE_EFFECTIVE_PLATFORMS "-iphoneos")
+elseif(${IOS_PLATFORM} STREQUAL "SIMULATOR")
+ set(IOS_PLATFORM_LOCATION "iPhoneSimulator.platform")
+ set(XCODE_IOS_PLATFORM iphonesimulator)
+
+ # This causes the installers to properly locate the output libraries
+ set(CMAKE_XCODE_EFFECTIVE_PLATFORMS "-iphonesimulator")
+elseif(${IOS_PLATFORM} STREQUAL "WATCHOS")
+ set(IOS_PLATFORM_LOCATION "WatchOS.platform")
+ set(XCODE_IOS_PLATFORM watchos)
+
+ # This causes the installers to properly locate the output libraries
+ set(CMAKE_XCODE_EFFECTIVE_PLATFORMS "-watchos")
+else(${IOS_PLATFORM} STREQUAL "OS")
+ message(FATAL_ERROR "Unsupported IOS_PLATFORM value selected. Please set to\n"
+ "\t OS, SIMULATOR, or WATCHOS.")
+endif()
+
+# Check iOS developer toolchain
+if(NOT DEFINED IOS_DEVELOPER_ROOT)
+ # Setup iOS developer location
+ execute_process(COMMAND xcode-select -print-path
+ OUTPUT_VARIABLE XCODE_DEVELOPER_DIR
+ RESULT_VARIABLE XCODE_DEVELOPER_DIR_RESULT
+ ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
+ # Xcode 4.3 changed the installation location, choose the most recent one available
+ if(${XCODE_VERSION} VERSION_LESS "4.3.0")
+ set(IOS_DEVELOPER_ROOT "/Developer/Platforms/${IOS_PLATFORM_LOCATION}/Developer")
+ else()
+ set(IOS_DEVELOPER_ROOT "${XCODE_DEVELOPER_DIR}/Platforms/${IOS_PLATFORM_LOCATION}/Developer")
+ endif()
+endif()
+if(EXISTS ${IOS_DEVELOPER_ROOT})
+ set(IOS_DEVELOPER_ROOT ${IOS_DEVELOPER_ROOT} CACHE PATH "Location of iOS Platform")
+else()
+ message(FATAL_ERROR "Invalid IOS_DEVELOPER_ROOT: ${IOS_DEVELOPER_ROOT} does not exist.")
+endif()
+
+# Check iOS SDK
+if(NOT DEFINED IOS_SDK_ROOT)
+ # Find and use the most recent iOS sdk
+ file(GLOB IOS_SDK_LISTS "${IOS_DEVELOPER_ROOT}/SDKs/*")
+ if(IOS_SDK_LISTS)
+ list(SORT IOS_SDK_LISTS)
+ list(REVERSE IOS_SDK_LISTS)
+ list(GET IOS_SDK_LISTS 0 IOS_SDK_ROOT)
+ else(IOS_SDK_LISTS)
+    message(FATAL_ERROR "No iOS SDKs found in default search path ${IOS_DEVELOPER_ROOT}."
+ " Please manually set IOS_SDK_ROOT or install the iOS SDK.")
+ endif(IOS_SDK_LISTS)
+endif()
+if(EXISTS ${IOS_SDK_ROOT})
+ set(IOS_SDK_ROOT ${IOS_SDK_ROOT} CACHE PATH "Location of the selected iOS SDK")
+ message(STATUS "iOS toolchain: ${IOS_SDK_ROOT}")
+else()
+ message(FATAL_ERROR "Invalid IOS_SDK_ROOT: ${IOS_SDK_ROOT} does not exist.")
+endif()
+
+# Set the sysroot default to the most recent SDK
+set(CMAKE_OSX_SYSROOT ${IOS_SDK_ROOT} CACHE PATH "Sysroot used for iOS support")
+
+# Get version of iOS SDK
+execute_process(COMMAND xcodebuild -sdk ${CMAKE_OSX_SYSROOT} -version SDKVersion
+ OUTPUT_VARIABLE IOS_SDK_VERSION
+ RESULT_VARIABLE IOS_SDK_VERSION_RESULT
+ ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
+if(${IOS_SDK_VERSION_RESULT})
+ string(REGEX MATCH "(([0-9]+)\\.)+([0-9]+)" IOS_SDK_VERSION "${IOS_SDK_ROOT}")
+endif()
+if(NOT IOS_SDK_VERSION)
+ message(WARNING "Cannot get SDK's version.")
+ set(IOS_SDK_VERSION 1)
+endif()
+set(CMAKE_SYSTEM_VERSION ${IOS_SDK_VERSION})
+
+# Find the C & C++ compilers for the specified SDK.
+if(NOT CMAKE_C_COMPILER)
+ # Default to use clang
+ execute_process(COMMAND xcrun -sdk ${CMAKE_OSX_SYSROOT} -find clang
+ OUTPUT_VARIABLE IOS_C_COMPILER
+ RESULT_VARIABLE IOS_C_COMPILER_RESULT
+ ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
+ if(${IOS_C_COMPILER_RESULT})
+ get_filename_component(IOS_C_COMPILER clang PROGRAM)
+ endif()
+else(NOT CMAKE_C_COMPILER)
+ # User can set it in cmake command
+ get_filename_component(IOS_C_COMPILER ${CMAKE_C_COMPILER} PROGRAM)
+endif(NOT CMAKE_C_COMPILER)
+if(NOT EXISTS ${IOS_C_COMPILER})
+ message(FATAL_ERROR "Cannot find C compiler: ${IOS_C_COMPILER}")
+endif()
+
+if(NOT CMAKE_CXX_COMPILER)
+ # Default to use clang++
+ execute_process(COMMAND xcrun -sdk ${CMAKE_OSX_SYSROOT} -find clang++
+ OUTPUT_VARIABLE IOS_CXX_COMPILER
+ RESULT_VARIABLE IOS_CXX_COMPILER_RESULT
+ ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
+ if(${IOS_CXX_COMPILER_RESULT})
+ get_filename_component(IOS_CXX_COMPILER clang++ PROGRAM)
+ endif()
+else(NOT CMAKE_CXX_COMPILER)
+ # User can set it in cmake command
+ get_filename_component(IOS_CXX_COMPILER ${CMAKE_CXX_COMPILER} PROGRAM)
+endif(NOT CMAKE_CXX_COMPILER)
+if(NOT EXISTS ${IOS_CXX_COMPILER})
+ message(FATAL_ERROR "Cannot find CXX compiler: ${IOS_CXX_COMPILER}")
+endif()
+
+set(CMAKE_C_COMPILER ${IOS_C_COMPILER} CACHE PATH "C compiler" FORCE)
+set(CMAKE_CXX_COMPILER ${IOS_CXX_COMPILER} CACHE PATH "CXX compiler" FORCE)
+
+set(CMAKE_C_OSX_COMPATIBILITY_VERSION_FLAG "-compatibility_version ")
+set(CMAKE_C_OSX_CURRENT_VERSION_FLAG "-current_version ")
+set(CMAKE_CXX_OSX_COMPATIBILITY_VERSION_FLAG "${CMAKE_C_OSX_COMPATIBILITY_VERSION_FLAG}")
+set(CMAKE_CXX_OSX_CURRENT_VERSION_FLAG "${CMAKE_C_OSX_CURRENT_VERSION_FLAG}")
+
+# Set iOS specific C/C++ flags
+if(IOS_PLATFORM STREQUAL "OS")
+ if(XCODE_VERSION VERSION_LESS "7.0")
+ set(XCODE_IOS_PLATFORM_VERSION_FLAGS "-mios-version-min=${IOS_DEPLOYMENT_TARGET}")
+ else()
+ # Xcode 7.0+ uses flags we can build directly from XCODE_IOS_PLATFORM.
+ set(XCODE_IOS_PLATFORM_VERSION_FLAGS "-m${XCODE_IOS_PLATFORM}-version-min=${IOS_DEPLOYMENT_TARGET}")
+ endif()
+else()
+  set(XCODE_IOS_PLATFORM_VERSION_FLAGS "-mios-simulator-version-min=${IOS_DEPLOYMENT_TARGET}")
+endif()
+
+if(IOS_ENABLE_BITCODE)
+  set(XCODE_IOS_BITCODE_FLAGS "-fembed-bitcode")
+else()
+ set(XCODE_IOS_BITCODE_FLAGS "")
+endif()
+
+set(IOS_COMPILER_FLAGS "${XCODE_IOS_PLATFORM_VERSION_FLAGS} ${XCODE_IOS_BITCODE_FLAGS}")
+
+# Hidden visibility is required for C++ on iOS
+set(CMAKE_C_FLAGS "${IOS_COMPILER_FLAGS} ${CMAKE_C_FLAGS}" CACHE STRING "C flags")
+set(CMAKE_CXX_FLAGS "${IOS_COMPILER_FLAGS} -fvisibility-inlines-hidden ${CMAKE_CXX_FLAGS}" CACHE STRING "CXX flags")
+
+set(IOS_LINK_FLAGS "${XCODE_IOS_PLATFORM_VERSION_FLAGS} -Wl,-search_paths_first")
+
+if(IOS_USE_VECLIB_FOR_BLAS)
+ # Find vecLib for iOS
+ set(VECLIB_SEARCH_DIRS
+ ${IOS_SDK_ROOT}/System/Library/Frameworks/Accelerate.framework/Versions/Current/Frameworks
+ ${IOS_SDK_ROOT}/System/Library/Frameworks/Accelerate.framework/Frameworks
+ )
+ find_path(VECLIB_INC_DIR vecLib.h PATHS ${VECLIB_SEARCH_DIRS}/vecLib.framework/Headers)
+
+ include(FindPackageHandleStandardArgs)
+ find_package_handle_standard_args(vecLib DEFAULT_MSG VECLIB_INC_DIR)
+
+ if(VECLIB_FOUND)
+ if(VECLIB_INC_DIR MATCHES "^/System/Library/Frameworks/vecLib.framework.*")
+ set(IOS_LINK_FLAGS ${IOS_LINK_FLAGS} -lcblas "-framework vecLib")
+ message(STATUS "Found standalone vecLib.framework")
+ else()
+ set(IOS_LINK_FLAGS ${IOS_LINK_FLAGS} -lcblas "-framework Accelerate")
+ message(STATUS "Found vecLib as part of Accelerate.framework")
+ endif()
+
+ endif()
+endif()
+
+set(CMAKE_C_LINK_FLAGS "${IOS_LINK_FLAGS} ${CMAKE_C_LINK_FLAGS}")
+set(CMAKE_CXX_LINK_FLAGS "${IOS_LINK_FLAGS} ${CMAKE_CXX_LINK_FLAGS}")
+
+set(CMAKE_PLATFORM_HAS_INSTALLNAME 1)
+if(NOT IOS_ENABLE_BITCODE)
+ set(CMAKE_SHARED_LIBRARY_CREATE_C_FLAGS "-dynamiclib -headerpad_max_install_names")
+ set(CMAKE_SHARED_MODULE_CREATE_C_FLAGS "-bundle -headerpad_max_install_names")
+else()
+ set(CMAKE_SHARED_LIBRARY_CREATE_C_FLAGS "-dynamiclib")
+ set(CMAKE_SHARED_MODULE_CREATE_C_FLAGS "-bundle")
+endif()
+set(CMAKE_SHARED_MODULE_LOADER_C_FLAG "-Wl,-bundle_loader,")
+set(CMAKE_SHARED_MODULE_LOADER_CXX_FLAG "-Wl,-bundle_loader,")
+set(CMAKE_FIND_LIBRARY_SUFFIXES ".dylib" ".so" ".a")
+
+# hack: if a new cmake (which uses CMAKE_INSTALL_NAME_TOOL) runs on an old build tree
+# (where install_name_tool was hardcoded) and where CMAKE_INSTALL_NAME_TOOL isn't in the cache
+# and still cmake didn't fail in CMakeFindBinUtils.cmake (because it isn't rerun)
+# hardcode CMAKE_INSTALL_NAME_TOOL here to install_name_tool, so it behaves as it did before, Alex
+if(NOT DEFINED CMAKE_INSTALL_NAME_TOOL)
+ find_program(CMAKE_INSTALL_NAME_TOOL install_name_tool)
+endif()
+
+# Set the find root to the iOS developer roots and to user defined paths
+set(CMAKE_FIND_ROOT_PATH ${IOS_DEVELOPER_ROOT} ${IOS_SDK_ROOT} ${CMAKE_PREFIX_PATH}
+  CACHE STRING "iOS find search path root")
+
+# default to searching for frameworks first
+set(CMAKE_FIND_FRAMEWORK FIRST)
+
+# set up the default search directories for frameworks
+set(CMAKE_SYSTEM_FRAMEWORK_PATH
+ ${IOS_SDK_ROOT}/System/Library/Frameworks
+ ${IOS_SDK_ROOT}/System/Library/PrivateFrameworks
+ ${IOS_SDK_ROOT}/Developer/Library/Frameworks
+ )
+
+# only search the iOS sdks, not the remainder of the host filesystem
+set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
+set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
+set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
+
+message(STATUS "iOS: Targeting iOS '${CMAKE_SYSTEM_VERSION}', "
+ "building for '${IOS_PLATFORM}' platform, with architecture '${CMAKE_OSX_ARCHITECTURES}'")
+message(STATUS "System CMAKE_C_FLAGS: ${CMAKE_C_FLAGS}")
+message(STATUS "System CMAKE_CXX_FLAGS: ${CMAKE_CXX_FLAGS}")
+
+# Used in ExternalProject command
+string(REPLACE ";" "\\$<SEMICOLON>" EXTERNAL_IOS_ARCHITECTURES "${CMAKE_OSX_ARCHITECTURES}")
+set(EXTERNAL_OPTIONAL_ARGS
+ -DCMAKE_OSX_SYSROOT=${CMAKE_OSX_SYSROOT}
+ -DCMAKE_OSX_ARCHITECTURES=${EXTERNAL_IOS_ARCHITECTURES})
+
+# This little macro lets you set any XCode specific property
+macro(set_xcode_property TARGET XCODE_PROPERTY XCODE_VALUE)
+ set_property (TARGET ${TARGET} PROPERTY XCODE_ATTRIBUTE_${XCODE_PROPERTY} ${XCODE_VALUE})
+endmacro(set_xcode_property)
+
+# This macro lets you find executable programs on the host system
+macro(find_host_package)
+ set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
+ set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY NEVER)
+ set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE NEVER)
+ set(IOS FALSE)
+
+ find_package(${ARGN})
+
+ set(IOS TRUE)
+ set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM ONLY)
+ set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
+ set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
+endmacro(find_host_package)
diff --git a/cmake/external/gflags.cmake b/cmake/external/gflags.cmake
index 16e5bef4cd..957f8271e4 100644
--- a/cmake/external/gflags.cmake
+++ b/cmake/external/gflags.cmake
@@ -18,9 +18,9 @@ SET(GFLAGS_SOURCES_DIR ${THIRD_PARTY_PATH}/gflags)
SET(GFLAGS_INSTALL_DIR ${THIRD_PARTY_PATH}/install/gflags)
SET(GFLAGS_INCLUDE_DIR "${GFLAGS_INSTALL_DIR}/include" CACHE PATH "gflags include directory." FORCE)
IF(WIN32)
- set(GFLAGS_LIBRARIES "${GFLAGS_INSTALL_DIR}/lib/gflags.lib" CACHE FILEPATH "GFLAGS_LIBRARIES" FORCE)
+ set(GFLAGS_LIBRARIES "${GFLAGS_INSTALL_DIR}/lib/gflags.lib" CACHE FILEPATH "GFLAGS_LIBRARIES" FORCE)
ELSE(WIN32)
- set(GFLAGS_LIBRARIES "${GFLAGS_INSTALL_DIR}/lib/libgflags.a" CACHE FILEPATH "GFLAGS_LIBRARIES" FORCE)
+ set(GFLAGS_LIBRARIES "${GFLAGS_INSTALL_DIR}/lib/libgflags.a" CACHE FILEPATH "GFLAGS_LIBRARIES" FORCE)
ENDIF(WIN32)
INCLUDE_DIRECTORIES(${GFLAGS_INCLUDE_DIR})
@@ -39,13 +39,14 @@ ExternalProject_Add(
PREFIX ${GFLAGS_SOURCES_DIR}
UPDATE_COMMAND ""
CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
- CMAKE_ARGS -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
- CMAKE_ARGS -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
- CMAKE_ARGS -DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}
- CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${GFLAGS_INSTALL_DIR}
- CMAKE_ARGS -DCMAKE_POSITION_INDEPENDENT_CODE=ON
- CMAKE_ARGS -DBUILD_TESTING=OFF
- CMAKE_ARGS -DCMAKE_BUILD_TYPE=Release
+ -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
+ -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
+ -DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}
+ -DCMAKE_INSTALL_PREFIX=${GFLAGS_INSTALL_DIR}
+ -DCMAKE_POSITION_INDEPENDENT_CODE=ON
+ -DBUILD_TESTING=OFF
+ -DCMAKE_BUILD_TYPE=Release
+ ${EXTERNAL_OPTIONAL_ARGS}
CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${GFLAGS_INSTALL_DIR}
-DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON
-DCMAKE_BUILD_TYPE:STRING=Release
@@ -56,3 +57,12 @@ SET_PROPERTY(TARGET gflags PROPERTY IMPORTED_LOCATION ${GFLAGS_LIBRARIES})
ADD_DEPENDENCIES(gflags extern_gflags)
LIST(APPEND external_project_dependencies gflags)
+
+IF(WITH_C_API)
+ INSTALL(DIRECTORY ${GFLAGS_INCLUDE_DIR} DESTINATION third_party/gflags)
+ IF(ANDROID)
+ INSTALL(FILES ${GFLAGS_LIBRARIES} DESTINATION third_party/gflags/lib/${ANDROID_ABI})
+ ELSE()
+ INSTALL(FILES ${GFLAGS_LIBRARIES} DESTINATION third_party/gflags/lib)
+ ENDIF()
+ENDIF()
diff --git a/cmake/external/glog.cmake b/cmake/external/glog.cmake
index 8a594a825a..b3fef738cc 100644
--- a/cmake/external/glog.cmake
+++ b/cmake/external/glog.cmake
@@ -19,9 +19,9 @@ SET(GLOG_INSTALL_DIR ${THIRD_PARTY_PATH}/install/glog)
SET(GLOG_INCLUDE_DIR "${GLOG_INSTALL_DIR}/include" CACHE PATH "glog include directory." FORCE)
IF(WIN32)
- SET(GLOG_LIBRARIES "${GLOG_INSTALL_DIR}/lib/libglog.lib" CACHE FILEPATH "glog library." FORCE)
+ SET(GLOG_LIBRARIES "${GLOG_INSTALL_DIR}/lib/libglog.lib" CACHE FILEPATH "glog library." FORCE)
ELSE(WIN32)
- SET(GLOG_LIBRARIES "${GLOG_INSTALL_DIR}/lib/libglog.a" CACHE FILEPATH "glog library." FORCE)
+ SET(GLOG_LIBRARIES "${GLOG_INSTALL_DIR}/lib/libglog.a" CACHE FILEPATH "glog library." FORCE)
ENDIF(WIN32)
INCLUDE_DIRECTORIES(${GLOG_INCLUDE_DIR})
@@ -34,16 +34,17 @@ ExternalProject_Add(
PREFIX ${GLOG_SOURCES_DIR}
UPDATE_COMMAND ""
CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
- CMAKE_ARGS -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
- CMAKE_ARGS -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
- CMAKE_ARGS -DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}
- CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${GLOG_INSTALL_DIR}
- CMAKE_ARGS -DCMAKE_INSTALL_LIBDIR=${GLOG_INSTALL_DIR}/lib
- CMAKE_ARGS -DCMAKE_POSITION_INDEPENDENT_CODE=ON
- CMAKE_ARGS -DWITH_GFLAGS=ON
- CMAKE_ARGS -Dgflags_DIR=${GFLAGS_INSTALL_DIR}/lib/cmake/gflags
- CMAKE_ARGS -DBUILD_TESTING=OFF
- CMAKE_ARGS -DCMAKE_BUILD_TYPE=Release
+ -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
+ -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
+ -DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}
+ -DCMAKE_INSTALL_PREFIX=${GLOG_INSTALL_DIR}
+ -DCMAKE_INSTALL_LIBDIR=${GLOG_INSTALL_DIR}/lib
+ -DCMAKE_POSITION_INDEPENDENT_CODE=ON
+ -DWITH_GFLAGS=ON
+ -Dgflags_DIR=${GFLAGS_INSTALL_DIR}/lib/cmake/gflags
+ -DBUILD_TESTING=OFF
+ -DCMAKE_BUILD_TYPE=Release
+ ${EXTERNAL_OPTIONAL_ARGS}
CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${GLOG_INSTALL_DIR}
-DCMAKE_INSTALL_LIBDIR:PATH=${GLOG_INSTALL_DIR}/lib
-DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON
@@ -56,3 +57,12 @@ ADD_DEPENDENCIES(glog extern_glog gflags)
LINK_LIBRARIES(glog gflags)
LIST(APPEND external_project_dependencies glog)
+
+IF(WITH_C_API)
+ INSTALL(DIRECTORY ${GLOG_INCLUDE_DIR} DESTINATION third_party/glog)
+ IF(ANDROID)
+ INSTALL(FILES ${GLOG_LIBRARIES} DESTINATION third_party/glog/lib/${ANDROID_ABI})
+ ELSE()
+ INSTALL(FILES ${GLOG_LIBRARIES} DESTINATION third_party/glog/lib)
+ ENDIF()
+ENDIF()
diff --git a/cmake/external/gtest.cmake b/cmake/external/gtest.cmake
index e3970073a1..6a2a79b763 100644
--- a/cmake/external/gtest.cmake
+++ b/cmake/external/gtest.cmake
@@ -48,15 +48,16 @@ IF(WITH_TESTING)
PREFIX ${GTEST_SOURCES_DIR}
UPDATE_COMMAND ""
CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
- CMAKE_ARGS -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
- CMAKE_ARGS -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
- CMAKE_ARGS -DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}
- CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${GTEST_INSTALL_DIR}
- CMAKE_ARGS -DCMAKE_POSITION_INDEPENDENT_CODE=ON
- CMAKE_ARGS -DBUILD_GMOCK=ON
- CMAKE_ARGS -Dgtest_disable_pthreads=ON
- CMAKE_ARGS -Dgtest_force_shared_crt=ON
- CMAKE_ARGS -DCMAKE_BUILD_TYPE=Release
+ -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
+ -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
+ -DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}
+ -DCMAKE_INSTALL_PREFIX=${GTEST_INSTALL_DIR}
+ -DCMAKE_POSITION_INDEPENDENT_CODE=ON
+ -DBUILD_GMOCK=ON
+ -Dgtest_disable_pthreads=ON
+ -Dgtest_force_shared_crt=ON
+ -DCMAKE_BUILD_TYPE=Release
+ ${EXTERNAL_OPTIONAL_ARGS}
CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${GTEST_INSTALL_DIR}
-DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON
-DCMAKE_BUILD_TYPE:STRING=Release
diff --git a/cmake/external/openblas.cmake b/cmake/external/openblas.cmake
index f9e05af59f..143b57a954 100644
--- a/cmake/external/openblas.cmake
+++ b/cmake/external/openblas.cmake
@@ -29,30 +29,41 @@ IF(NOT ${CBLAS_FOUND})
"${CBLAS_INSTALL_DIR}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}openblas${CMAKE_STATIC_LIBRARY_SUFFIX}"
CACHE FILEPATH "openblas library." FORCE)
- IF(APPLE)
- SET(OPENBLAS_CC "${CMAKE_C_COMPILER} -isysroot ${CMAKE_OSX_SYSROOT}")
- SET(COMMON_ARGS CC=${OPENBLAS_CC} NO_SHARED=1 NO_LAPACK=1 libs)
- ELSE()
- SET(COMMON_ARGS CC=${CMAKE_C_COMPILER} NO_SHARED=1 NO_LAPACK=1 libs)
- ENDIF()
+ SET(OPENBLAS_CC "${CMAKE_C_COMPILER}")
IF(CMAKE_CROSSCOMPILING)
+ SET(OPTIONAL_ARGS HOSTCC=${HOST_C_COMPILER})
+ GET_FILENAME_COMPONENT(CROSS_SUFFIX ${CMAKE_C_COMPILER} DIRECTORY)
+ SET(CROSS_SUFFIX ${CROSS_SUFFIX}/)
IF(ANDROID)
# arm_soft_fp_abi branch of OpenBLAS to support softfp
# https://github.com/xianyi/OpenBLAS/tree/arm_soft_fp_abi
SET(OPENBLAS_COMMIT "b5c96fcfcdc82945502a2303116a64d89985daf5")
IF(ANDROID_ABI MATCHES "^armeabi(-v7a)?$")
- SET(TARGET "ARMV7")
+ SET(OPTIONAL_ARGS ${OPTIONAL_ARGS} TARGET=ARMV7 ARM_SOFTFP_ABI=1 USE_THREAD=0)
ELSEIF(ANDROID_ABI STREQUAL "arm64-v8a")
- SET(TARGET "ARMV8")
+ SET(OPTIONAL_ARGS ${OPTIONAL_ARGS} TARGET=ARMV8 BINARY=64 USE_THREAD=0)
+ ENDIF()
+ ELSEIF(IOS)
+ # FIXME(liuyiqun): support multiple architectures
+ SET(OPENBLAS_COMMIT "b5c96fcfcdc82945502a2303116a64d89985daf5")
+ SET(OPENBLAS_CC "${OPENBLAS_CC} ${CMAKE_C_FLAGS} -isysroot ${CMAKE_OSX_SYSROOT}")
+ IF(CMAKE_OSX_ARCHITECTURES MATCHES "armv7")
+ SET(OPENBLAS_CC "${OPENBLAS_CC} -arch armv7")
+ SET(OPTIONAL_ARGS ${OPTIONAL_ARGS} TARGET=ARMV7 ARM_SOFTFP_ABI=1 USE_THREAD=0)
+ ELSEIF(CMAKE_OSX_ARCHITECTURES MATCHES "arm64")
+ SET(OPENBLAS_CC "${OPENBLAS_CC} -arch arm64")
+ SET(OPTIONAL_ARGS ${OPTIONAL_ARGS} TARGET=ARMV8 BINARY=64 USE_THREAD=0 CROSS_SUFFIX=${CROSS_SUFFIX})
ENDIF()
- SET(OPTIONAL_ARGS HOSTCC=${HOST_C_COMPILER} TARGET=${TARGET} ARM_SOFTFP_ABI=1 USE_THREAD=0)
ELSEIF(RPI)
# use hardfp
SET(OPENBLAS_COMMIT "v0.2.20")
- SET(OPTIONAL_ARGS HOSTCC=${HOST_C_COMPILER} TARGET=ARMV7 USE_THREAD=0)
+ SET(OPTIONAL_ARGS ${OPTIONAL_ARGS} TARGET=ARMV7 USE_THREAD=0)
ENDIF()
ELSE()
+ IF(APPLE)
+ SET(OPENBLAS_CC "${CMAKE_C_COMPILER} -isysroot ${CMAKE_OSX_SYSROOT}")
+ ENDIF()
SET(OPENBLAS_COMMIT "v0.2.20")
SET(OPTIONAL_ARGS "")
IF(CMAKE_SYSTEM_PROCESSOR MATCHES "^x86(_64)?$")
@@ -60,6 +71,8 @@ IF(NOT ${CBLAS_FOUND})
ENDIF()
ENDIF()
+ SET(COMMON_ARGS CC=${OPENBLAS_CC} NO_SHARED=1 NO_LAPACK=1 libs)
+
ExternalProject_Add(
extern_openblas
${EXTERNAL_PROJECT_LOG_ARGS}
@@ -73,6 +86,26 @@ IF(NOT ${CBLAS_FOUND})
UPDATE_COMMAND ""
CONFIGURE_COMMAND ""
)
+
+ IF(WITH_C_API)
+ INSTALL(DIRECTORY ${CBLAS_INC_DIR} DESTINATION third_party/openblas)
+    # Because libopenblas.a is a symbolic link to another library, we need to
+    # install the whole directory.
+ IF(ANDROID)
+ SET(TMP_INSTALL_DIR third_party/openblas/lib/${ANDROID_ABI})
+ ELSE()
+ SET(TMP_INSTALL_DIR third_party/openblas/lib)
+ ENDIF()
+ INSTALL(CODE "execute_process(
+ COMMAND ${CMAKE_COMMAND} -E copy_directory ${CBLAS_INSTALL_DIR}/lib
+                ${CMAKE_INSTALL_PREFIX}/${TMP_INSTALL_DIR}
+ )"
+ )
+ INSTALL(CODE "MESSAGE(STATUS \"Installing: \"
+ \"${CBLAS_INSTALL_DIR}/lib -> ${CMAKE_INSTALL_PREFIX}/${TMP_INSTALL_DIR}\"
+ )"
+ )
+ ENDIF()
ENDIF(NOT ${CBLAS_FOUND})
MESSAGE(STATUS "BLAS library: ${CBLAS_LIBRARIES}")
diff --git a/cmake/external/protobuf.cmake b/cmake/external/protobuf.cmake
index e629d61585..7cf7ba85cc 100644
--- a/cmake/external/protobuf.cmake
+++ b/cmake/external/protobuf.cmake
@@ -173,7 +173,8 @@ FUNCTION(build_protobuf TARGET_NAME BUILD_FOR_HOST)
"-DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}"
"-DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}"
"-Dprotobuf_WITH_ZLIB=ON"
- "-DZLIB_ROOT:FILEPATH=${ZLIB_ROOT}")
+ "-DZLIB_ROOT:FILEPATH=${ZLIB_ROOT}"
+ ${EXTERNAL_OPTIONAL_ARGS})
SET(OPTIONAL_CACHE_ARGS "-DZLIB_ROOT:STRING=${ZLIB_ROOT}")
ENDIF()
@@ -223,6 +224,15 @@ IF(NOT PROTOBUF_FOUND)
SET(PROTOBUF_PROTOC_LIBRARY ${extern_protobuf_PROTOC_LIBRARY}
CACHE FILEPATH "protoc library." FORCE)
+ IF(WITH_C_API)
+ INSTALL(DIRECTORY ${PROTOBUF_INCLUDE_DIR} DESTINATION third_party/protobuf)
+ IF(ANDROID)
+ INSTALL(FILES ${PROTOBUF_LIBRARY} DESTINATION third_party/protobuf/lib/${ANDROID_ABI})
+ ELSE()
+ INSTALL(FILES ${PROTOBUF_LIBRARY} DESTINATION third_party/protobuf/lib)
+ ENDIF()
+ ENDIF()
+
IF(CMAKE_CROSSCOMPILING)
PROMPT_PROTOBUF_LIB(protobuf_host extern_protobuf)
ELSE()
diff --git a/cmake/external/python.cmake b/cmake/external/python.cmake
index 490c87d67e..46c68cce32 100644
--- a/cmake/external/python.cmake
+++ b/cmake/external/python.cmake
@@ -12,16 +12,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-INCLUDE(ExternalProject)
+IF(NOT WITH_PYTHON)
+ return()
+ENDIF()
+
INCLUDE(python_module)
FIND_PACKAGE(PythonInterp 2.7)
-IF(WITH_PYTHON)
- FIND_PACKAGE(PythonLibs 2.7)
- # Fixme: Maybe find a static library. Get SHARED/STATIC by FIND_PACKAGE.
- ADD_LIBRARY(python SHARED IMPORTED GLOBAL)
- SET_PROPERTY(TARGET python PROPERTY IMPORTED_LOCATION ${PYTHON_LIBRARIES})
-ENDIF(WITH_PYTHON)
+FIND_PACKAGE(PythonLibs 2.7)
+# Fixme: Maybe find a static library. Get SHARED/STATIC by FIND_PACKAGE.
+ADD_LIBRARY(python SHARED IMPORTED GLOBAL)
+SET_PROPERTY(TARGET python PROPERTY IMPORTED_LOCATION ${PYTHON_LIBRARIES})
SET(py_env "")
IF(PYTHONINTERP_FOUND)
@@ -36,9 +37,5 @@ IF(PYTHONINTERP_FOUND)
ENDIF()
ENDIF(PYTHONINTERP_FOUND)
-IF(WITH_PYTHON)
- INCLUDE_DIRECTORIES(${PYTHON_INCLUDE_DIR})
- INCLUDE_DIRECTORIES(${PYTHON_NUMPY_INCLUDE_DIR})
-ELSE()
- SET(PYTHON_LIBRARIES "")
-ENDIF()
+INCLUDE_DIRECTORIES(${PYTHON_INCLUDE_DIR})
+INCLUDE_DIRECTORIES(${PYTHON_NUMPY_INCLUDE_DIR})
diff --git a/cmake/external/swig.cmake b/cmake/external/swig.cmake
index 744c766ee7..ce088ae7ea 100644
--- a/cmake/external/swig.cmake
+++ b/cmake/external/swig.cmake
@@ -12,6 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+IF(NOT WITH_SWIG_PY)
+ return()
+ENDIF()
+
FIND_PACKAGE(SWIG)
IF(NOT SWIG_FOUND)
diff --git a/cmake/external/warpctc.cmake b/cmake/external/warpctc.cmake
index 2d7daed9bc..bb258c7b55 100644
--- a/cmake/external/warpctc.cmake
+++ b/cmake/external/warpctc.cmake
@@ -16,25 +16,14 @@ INCLUDE(ExternalProject)
SET(WARPCTC_SOURCES_DIR ${THIRD_PARTY_PATH}/warpctc)
SET(WARPCTC_INSTALL_DIR ${THIRD_PARTY_PATH}/install/warpctc)
-SET(WARPCTC_INCLUDE_DIR "${WARPCTC_INSTALL_DIR}/include" CACHE PATH "Warp-ctc Directory" FORCE)
-INCLUDE_DIRECTORIES(${WARPCTC_INCLUDE_DIR})
-
-SET(WARPCTC_LIB_DIR "${WARPCTC_INSTALL_DIR}/lib" CACHE PATH "Warp-ctc Library Directory" FORCE)
-
-IF(WIN32)
- SET(WARPCTC_LIBRARIES
- "${WARPCTC_INSTALL_DIR}/lib/warpctc.dll" CACHE FILEPATH "Warp-ctc Library" FORCE)
-ELSE(WIN32)
- IF(APPLE)
- SET(_warpctc_SHARED_SUFFIX dylib)
- ELSE(APPLE)
- SET(_warpctc_SHARED_SUFFIX so)
- ENDIF(APPLE)
-
- SET(WARPCTC_LIBRARIES
- "${WARPCTC_INSTALL_DIR}/lib/libwarpctc.${_warpctc_SHARED_SUFFIX}" CACHE FILEPATH "Warp-ctc Library" FORCE)
-ENDIF(WIN32)
+SET(WARPCTC_INCLUDE_DIR "${WARPCTC_INSTALL_DIR}/include"
+ CACHE PATH "Warp-ctc Directory" FORCE)
+# Used in unit test test_WarpCTCLayer
+SET(WARPCTC_LIB_DIR "${WARPCTC_INSTALL_DIR}/lib"
+ CACHE PATH "Warp-ctc Library Directory" FORCE)
+SET(WARPCTC_LIBRARIES "${WARPCTC_INSTALL_DIR}/lib/libwarpctc${CMAKE_SHARED_LIBRARY_SUFFIX}"
+ CACHE FILEPATH "Warp-ctc Library" FORCE)
IF(CMAKE_CXX_COMPILER_ID STREQUAL "Clang" OR CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang" )
SET(USE_OMP OFF)
@@ -49,22 +38,26 @@ ExternalProject_Add(
PREFIX ${WARPCTC_SOURCES_DIR}
UPDATE_COMMAND ""
CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
- CMAKE_ARGS -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
- CMAKE_ARGS -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
- CMAKE_ARGS -DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}
- CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${WARPCTC_INSTALL_DIR}
- CMAKE_ARGS -DWITH_GPU=${WITH_GPU}
- CMAKE_ARGS -DWITH_OMP=${USE_OMP}
- CMAKE_ARGS -DWITH_TORCH=OFF
- CMAKE_ARGS -DCMAKE_DISABLE_FIND_PACKAGE_Torch=ON
- CMAKE_ARGS -DBUILD_SHARED=ON
- CMAKE_ARGS -DCMAKE_POSITION_INDEPENDENT_CODE=ON
- CMAKE_ARGS -DCMAKE_BUILD_TYPE=Release
+ -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
+ -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
+ -DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}
+ -DCMAKE_INSTALL_PREFIX=${WARPCTC_INSTALL_DIR}
+ -DWITH_GPU=${WITH_GPU}
+ -DWITH_OMP=${USE_OMP}
+ -DWITH_TORCH=OFF
+ -DCMAKE_DISABLE_FIND_PACKAGE_Torch=ON
+ -DBUILD_SHARED=ON
+ -DCMAKE_POSITION_INDEPENDENT_CODE=ON
+ -DCMAKE_BUILD_TYPE=Release
+ ${EXTERNAL_OPTIONAL_ARGS}
CMAKE_CACHE_ARGS -DCMAKE_BUILD_TYPE:STRING=Release
-DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON
-DCMAKE_INSTALL_PREFIX:PATH=${WARPCTC_INSTALL_DIR}
)
+MESSAGE(STATUS "warp-ctc library: ${WARPCTC_LIBRARIES}")
+INCLUDE_DIRECTORIES(${WARPCTC_INCLUDE_DIR})
+
ADD_LIBRARY(warpctc STATIC IMPORTED GLOBAL)
SET_PROPERTY(TARGET warpctc PROPERTY IMPORTED_LOCATION ${WARPCTC_LIBRARIES})
ADD_DEPENDENCIES(warpctc extern_warpctc)
diff --git a/cmake/external/zlib.cmake b/cmake/external/zlib.cmake
index 45ca5542b7..c496a52b78 100644
--- a/cmake/external/zlib.cmake
+++ b/cmake/external/zlib.cmake
@@ -34,18 +34,28 @@ ExternalProject_Add(
GIT_TAG "v1.2.8"
PREFIX ${ZLIB_SOURCES_DIR}
UPDATE_COMMAND ""
- CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
CMAKE_ARGS -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
- CMAKE_ARGS -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
- CMAKE_ARGS -DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}
- CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${ZLIB_INSTALL_DIR}
- CMAKE_ARGS -DBUILD_SHARED_LIBS=OFF
- CMAKE_ARGS -DCMAKE_POSITION_INDEPENDENT_CODE=ON
- CMAKE_ARGS -DCMAKE_MACOSX_RPATH=ON
- CMAKE_ARGS -DCMAKE_BUILD_TYPE=Release
+ -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
+ -DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}
+ -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
+ -DCMAKE_INSTALL_PREFIX=${ZLIB_INSTALL_DIR}
+ -DBUILD_SHARED_LIBS=OFF
+ -DCMAKE_POSITION_INDEPENDENT_CODE=ON
+ -DCMAKE_MACOSX_RPATH=ON
+ -DCMAKE_BUILD_TYPE=Release
+ ${EXTERNAL_OPTIONAL_ARGS}
CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${ZLIB_INSTALL_DIR}
-DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON
-DCMAKE_BUILD_TYPE:STRING=Release
)
LIST(APPEND external_project_dependencies zlib)
+
+IF(WITH_C_API)
+ INSTALL(DIRECTORY ${ZLIB_INCLUDE_DIR} DESTINATION third_party/zlib)
+ IF(ANDROID)
+ INSTALL(FILES ${ZLIB_LIBRARIES} DESTINATION third_party/zlib/lib/${ANDROID_ABI})
+ ELSE()
+ INSTALL(FILES ${ZLIB_LIBRARIES} DESTINATION third_party/zlib/lib)
+ ENDIF()
+ENDIF()
diff --git a/cmake/flags.cmake b/cmake/flags.cmake
index ff246b2eb4..4593ae6180 100644
--- a/cmake/flags.cmake
+++ b/cmake/flags.cmake
@@ -128,8 +128,10 @@ set(GPU_COMMON_FLAGS
)
if (APPLE)
- # On Mac OS X build fat binaries with x86_64 architectures by default.
- set (CMAKE_OSX_ARCHITECTURES "x86_64" CACHE STRING "Build architectures for OSX" FORCE)
+ if(NOT CMAKE_CROSSCOMPILING)
+ # On Mac OS X build fat binaries with x86_64 architectures by default.
+ set (CMAKE_OSX_ARCHITECTURES "x86_64" CACHE STRING "Build architectures for OSX" FORCE)
+ endif()
else()
set(GPU_COMMON_FLAGS
-Wall
diff --git a/cmake/system.cmake b/cmake/system.cmake
index adf5e2c539..396bd1a079 100644
--- a/cmake/system.cmake
+++ b/cmake/system.cmake
@@ -24,11 +24,10 @@ IF(WIN32)
SET(HOST_SYSTEM "win32")
ELSE(WIN32)
IF(APPLE)
- EXEC_PROGRAM (sw_vers ARGS -productVersion OUTPUT_VARIABLE MACOSX_VERSION)
- STRING(REGEX MATCH "[0-9]+.[0-9]+" VERSION "${MACOSX_VERSION}")
- SET(MACOS_VERSION ${VERSION})
SET(HOST_SYSTEM "macosx")
- IF(NOT DEFINED ENV{MACOSX_DEPLOYMENT_TARGET})
+ EXEC_PROGRAM(sw_vers ARGS -productVersion OUTPUT_VARIABLE HOST_SYSTEM_VERSION)
+ STRING(REGEX MATCH "[0-9]+.[0-9]+" MACOS_VERSION "${HOST_SYSTEM_VERSION}")
+    IF(NOT DEFINED ENV{MACOSX_DEPLOYMENT_TARGET})
# Set cache variable - end user may change this during ccmake or cmake-gui configure.
SET(CMAKE_OSX_DEPLOYMENT_TARGET ${MACOS_VERSION} CACHE STRING
"Minimum OS X version to target for deployment (at runtime); newer APIs weak linked. Set to empty string for default value.")
@@ -49,6 +48,8 @@ ELSE(WIN32)
ELSEIF(LINUX_ISSUE MATCHES "Fedora")
SET(HOST_SYSTEM "fedora")
ENDIF()
+
+ STRING(REGEX MATCH "(([0-9]+)\\.)+([0-9]+)" HOST_SYSTEM_VERSION "${LINUX_ISSUE}")
ENDIF(EXISTS "/etc/issue")
IF(EXISTS "/etc/redhat-release")
@@ -70,7 +71,7 @@ CMAKE_HOST_SYSTEM_INFORMATION(RESULT CPU_CORES QUERY NUMBER_OF_LOGICAL_CORES)
MARK_AS_ADVANCED(HOST_SYSTEM CPU_CORES)
-MESSAGE(STATUS "Found Paddle host system: ${HOST_SYSTEM}")
+MESSAGE(STATUS "Found Paddle host system: ${HOST_SYSTEM}, version: ${HOST_SYSTEM_VERSION}")
MESSAGE(STATUS "Found Paddle host system's CPU: ${CPU_CORES} cores")
# configuration for cross-compiling
@@ -82,6 +83,9 @@ IF(DEFINED CMAKE_SYSTEM_NAME)
ELSEIF(${CMAKE_SYSTEM_NAME} STREQUAL "RPi")
SET(RPI TRUE)
INCLUDE(cross_compiling/raspberry_pi)
+ ELSEIF(${CMAKE_SYSTEM_NAME} STREQUAL "iOS")
+ SET(IOS TRUE)
+ INCLUDE(cross_compiling/ios)
ENDIF()
ENDIF()
diff --git a/cmake/util.cmake b/cmake/util.cmake
index 0da4969d31..e814cad36f 100644
--- a/cmake/util.cmake
+++ b/cmake/util.cmake
@@ -25,7 +25,9 @@ function(target_circle_link_libraries TARGET_NAME)
endif()
endforeach()
if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang" OR "${CMAKE_CXX_COMPILER_ID}" STREQUAL "AppleClang")
- list(APPEND LIBS "-undefined dynamic_lookup")
+ if(IOS AND NOT IOS_ENABLE_BITCODE)
+ list(APPEND LIBS "-undefined dynamic_lookup")
+ endif()
endif()
list(REVERSE libsInArgn)
target_link_libraries(${TARGET_NAME}
diff --git a/doc/design/api.md b/doc/design/api.md
index 8185d2af0e..e6a4638d91 100644
--- a/doc/design/api.md
+++ b/doc/design/api.md
@@ -3,7 +3,7 @@
## Ingredients
As our design principle is starting from the essence: how could we
-allow users to express and solve their problems at neural networks.
+allow users to express and solve their problems as neural networks.
Some essential concepts that our API have to provide include:
1. A *topology* is an expression of *layers*.
@@ -233,7 +233,7 @@ paddle.dist_train(model,
num_parameter_servers=15)
```
-The pseudo code if `paddle.dist_train` is as follows:
+The pseudo code of `paddle.dist_train` is as follows:
```python
def dist_train(topology, parameters, trainer, reader, ...):
diff --git a/doc/design/auto_gradient_check.md b/doc/design/auto_gradient_check.md
index 1f4d4ec16f..f9991541bc 100644
--- a/doc/design/auto_gradient_check.md
+++ b/doc/design/auto_gradient_check.md
@@ -1,17 +1,17 @@
## Auto Gradient Checker Design
## Background:
-- Operator forward computing is easy to check if the result is right because it has a clear definition. **But** backpropagation is a notoriously difficult algorithm to debug and get right:
- - 1. you should get the right backpropagation formula according to the forward computation.
- - 2. you should implement it right in CPP.
- - 3. it's difficult to prepare test data.
+- Generally, it is easy to check whether the forward computation of an Operator is correct or not. However, backpropagation is a notoriously difficult algorithm to debug and get right:
+  1. you should derive the right backpropagation formula from the forward computation.
+  2. you should implement it correctly in C++.
+  3. it's difficult to prepare test data.
-- Auto gradient check gets a numeric gradient by forward Operator and use it as a reference of the backward Operator's result. It has several advantages:
- - 1. numeric gradient checker only need forward operator.
- - 2. user only need to prepare the input data for forward Operator.
+- Auto gradient checking gets a numerical gradient by running the forward Operator and uses it as a reference for the backward Operator's result. It has several advantages:
+  1. the numerical gradient checker needs only the forward operator.
+  2. users only need to prepare the input data for the forward Operator.
## Mathematical Theory
-The following two document from stanford has a detailed explanation of how to get numeric gradient and why it's useful.
+The following two documents from Stanford give a detailed explanation of how to compute the numerical gradient and why it's useful.
- [Gradient checking and advanced optimization(en)](http://deeplearning.stanford.edu/wiki/index.php/Gradient_checking_and_advanced_optimization)
- [Gradient checking and advanced optimization(cn)](http://ufldl.stanford.edu/wiki/index.php/%E6%A2%AF%E5%BA%A6%E6%A3%80%E9%AA%8C%E4%B8%8E%E9%AB%98%E7%BA%A7%E4%BC%98%E5%8C%96)
@@ -20,7 +20,7 @@ The following two document from stanford has a detailed explanation of how to ge
## Numeric Gradient Implementation
### Python Interface
```python
-def get_numeric_gradient(op,
+def get_numerical_gradient(op,
input_values,
output_name,
input_to_check,
@@ -30,13 +30,13 @@ def get_numeric_gradient(op,
Get Numeric Gradient for an operator's input.
:param op: C++ operator instance, could be an network
- :param input_values: The input variables. Should be an dictionary, key is
- variable name. Value is numpy array.
+    :param input_values: The input variables. Should be a dictionary whose
+                         keys are variable names and values are numpy arrays.
:param output_name: The final output variable name.
- :param input_to_check: The input variable need to get gradient.
+ :param input_to_check: The input variable with respect to which to compute the gradient.
:param delta: The perturbation value for numeric gradient method. The
smaller delta is, the more accurate result will get. But if that delta is
- too small, it could occur numerical stability problem.
+    too small, it will suffer from numerical stability problems.
:param local_scope: The local scope used for get_numeric_gradient.
:return: The gradient array in numpy format.
"""
@@ -45,28 +45,28 @@ def get_numeric_gradient(op,
### Explanation:
- Why need `output_name`
- - One Operator may have multiple Output, you can get independent gradient from each Output. So user should set one output to calculate.
+  - An Operator may have multiple outputs; one can get an independent gradient from each output, so the caller should specify the name of the output variable to check.
- Why need `input_to_check`
- - One operator may have multiple inputs. Gradient Op can calculate the gradient of these Inputs at the same time. But Numeric Gradient needs to calculate them one by one. So `get_numeric_gradient` is designed to calculate the gradient for one input. If you need to compute multiple inputs, you can call `get_numeric_gradient` multiple times.
+  - One operator may have multiple inputs. The Gradient Op can calculate the gradients of these inputs at the same time, but the numerical gradient needs to calculate them one by one. So `get_numerical_gradient` is designed to calculate the gradient for one input. If you need to compute gradients for multiple inputs, you can call `get_numerical_gradient` multiple times.
### Core Algorithm Implementation
```python
- # we only compute gradient of one element each time.
- # we use a for loop to compute the gradient of every element.
+    # we only compute the gradient of one element at a time.
+ # we use a for loop to compute the gradient of each element.
for i in xrange(tensor_size):
- # get one input element throw it's index i.
+ # get one input element by its index i.
origin = tensor_to_check.get_float_element(i)
- # add delta to it, run op and then get the sum of the result tensor.
+ # add delta to it, run op and then get the new value of the result tensor.
x_pos = origin + delta
tensor_to_check.set_float_element(i, x_pos)
y_pos = get_output()
- # plus delta to this element, run op and get the sum of the result tensor.
+    # subtract delta from this element, run op and then get the new value of the result tensor.
x_neg = origin - delta
tensor_to_check.set_float_element(i, x_neg)
y_neg = get_output()
@@ -85,15 +85,15 @@ def get_numeric_gradient(op,
Each Operator Kernel has three kinds of Gradient:
-- 1. Numeric Gradient
-- 2. CPU Operator Gradient
-- 3. GPU Operator Gradient(if supported)
+1. Numerical gradient
+2. CPU kernel gradient
+3. GPU kernel gradient (if supported)
-Numeric Gradient Only relies on forward Operator. So we use Numeric Gradient as the reference value.
+The numerical gradient relies only on the forward Operator, so we use the numerical gradient as the reference value. The gradient checking is performed in the following three steps:
-- 1. calculate the numeric gradient.
-- 2. calculate CPU kernel Gradient with the backward Operator and compare it with the numeric gradient.
-- 3. calculate GPU kernel Gradient with the backward Operator and compare it with the numeric gradient.(if support GPU)
+1. calculate the numerical gradient
+2. calculate CPU kernel gradient with the backward Operator and compare it with the numerical gradient
+3. calculate GPU kernel gradient with the backward Operator and compare it with the numerical gradient (if GPU is supported)
#### Python Interface
@@ -110,8 +110,8 @@ Numeric Gradient Only relies on forward Operator. So we use Numeric Gradient as
:param forward_op: used to create backward_op
:param input_vars: numpy value of input variable. The following
computation will use these variables.
- :param inputs_to_check: inputs var names that should check gradient.
- :param output_name: output name that used to
+    :param inputs_to_check: the input variables with respect to which to compute the gradients.
+ :param output_name: The final output variable name.
:param max_relative_error: The relative tolerance parameter.
:param no_grad_set: used when create backward ops
:param only_cpu: only compute and check gradient on cpu kernel.
@@ -120,24 +120,24 @@ Numeric Gradient Only relies on forward Operator. So we use Numeric Gradient as
```
### How to check if two numpy arrays are close enough?
-if `abs_numeric_grad` is nearly zero, then use abs error for numeric_grad, not relative
+if `abs_numerical_grad` is nearly zero, then use the absolute error for `numerical_grad`, not the relative error.
```python
-numeric_grad = ...
+numerical_grad = ...
operator_grad = numpy.array(scope.find_var(grad_var_name(name)).get_tensor())
-abs_numeric_grad = numpy.abs(numeric_grad)
-# if abs_numeric_grad is nearly zero, then use abs error for numeric_grad, not relative
+abs_numerical_grad = numpy.abs(numerical_grad)
+# if abs_numerical_grad is nearly zero, then use abs error for numerical_grad, not relative
# error.
-abs_numeric_grad[abs_numeric_grad < 1e-3] = 1
+abs_numerical_grad[abs_numerical_grad < 1e-3] = 1
-diff_mat = numpy.abs(abs_numeric_grad - operator_grad) / abs_numeric_grad
+diff_mat = numpy.abs(abs_numerical_grad - operator_grad) / abs_numerical_grad
max_diff = numpy.max(diff_mat)
```
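+
+Putting the pieces together, here is a self-contained sketch (plain numpy, not PaddlePaddle's actual test helper; `f` and `grad_f` are placeholder callables standing in for the forward op and the backward op) of the whole check:
+
+```python
+import numpy
+
+
+def numerical_grad(f, x, delta=1e-5):
+    # central-difference approximation of df/dx, one element at a time
+    grad = numpy.zeros_like(x)
+    flat_x, flat_g = x.reshape(-1), grad.reshape(-1)
+    for i in range(flat_x.size):
+        origin = flat_x[i]
+        flat_x[i] = origin + delta
+        y_pos = f(x)
+        flat_x[i] = origin - delta
+        y_neg = f(x)
+        flat_x[i] = origin  # restore the element
+        flat_g[i] = (y_pos - y_neg) / (2 * delta)
+    return grad
+
+
+def check_grad(f, grad_f, x, max_relative_error=1e-3):
+    numerical = numerical_grad(f, x)
+    analytic = grad_f(x)  # the gradient computed by the backward op
+    abs_numerical = numpy.abs(numerical)
+    # fall back to absolute error where the numerical gradient is ~0
+    abs_numerical[abs_numerical < 1e-3] = 1
+    max_diff = numpy.max(numpy.abs(numerical - analytic) / abs_numerical)
+    assert max_diff <= max_relative_error, max_diff
+
+
+# example: f(x) = sum(x ** 2) has the analytic gradient 2 * x
+check_grad(lambda v: (v ** 2).sum(), lambda v: 2 * v, numpy.random.rand(4))
+```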
#### Notes:
-1,The Input data for auto gradient checker should be reasonable to avoid numeric problem.
+The input data for the auto gradient checker should be reasonable to avoid numerical stability problems.
#### Refs:
diff --git a/doc/design/block.md b/doc/design/block.md
new file mode 100644
index 0000000000..be88001220
--- /dev/null
+++ b/doc/design/block.md
@@ -0,0 +1,338 @@
+# Design Doc: Block and Scope
+
+## The Representation of Computation
+
+Both deep learning systems and programming languages help users describe computation procedures. These systems use various representations of computation:
+
+- Caffe, Torch, and Paddle: sequences of layers.
+- TensorFlow, Caffe2, Mxnet: graphs of operators.
+- PaddlePaddle: nested blocks, like C++ and Java programs.
+
+## Block in Programming Languages and Deep Learning
+
+In programming languages, a block is a pair of curly braces that includes local variable definitions and a sequence of instructions, or operators.
+
+Blocks work with control flow structures like `if`, `else`, and `for`, which have equivalents in deep learning:
+
+| programming languages | PaddlePaddle |
+|-----------------------|-----------------------|
+| for, while loop | RNN, WhileOp |
+| if, if-else, switch | IfElseOp, SwitchOp |
+| sequential execution | a sequence of layers |
+
+A key difference is that a C++ program describes a one-pass computation, whereas a deep learning program describes both the forward and backward passes.
+
+## Stack Frames and the Scope Hierarchy
+
+The existence of the backward pass makes the execution of a block in PaddlePaddle different from that in traditional programs:
+
+| programming languages | PaddlePaddle |
+|-----------------------|-------------------------------|
+| stack | scope hierarchy |
+| stack frame | scope |
+| push at entering block| push at entering block |
+| pop at leaving block  | destroy when minibatch completes |
+
+1. In traditional programs:
+
+ - When the execution enters the left curly brace of a block, the runtime pushes a frame into the stack, where it realizes local variables.
+ - After the execution leaves the right curly brace, the runtime pops the frame.
+ - The maximum number of frames in the stack is the maximum depth of nested blocks.
+
+1. In PaddlePaddle
+
+ - When the execution enters a block, PaddlePaddle adds a new scope, where it realizes variables.
+ - PaddlePaddle doesn't pop a scope after the execution of the block because variables therein are to be used by the backward pass. So it has a stack forest known as a *scope hierarchy*.
+ - The height of the highest tree is the maximum depth of nested blocks.
+ - After the process of a minibatch, PaddlePaddle destroys the scope hierarchy.
+
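+To make the analogy concrete, here is a minimal sketch (hypothetical Python, not PaddlePaddle's real `Scope` API) of a scope hierarchy with parent lookup:
+
+```python
+class Scope(object):
+    def __init__(self, parent=None):
+        self.parent = parent  # None for the global scope
+        self.vars = {}        # variables realized in this scope
+
+    def new_var(self, name, value):
+        self.vars[name] = value
+
+    def find_var(self, name):
+        # search this scope first, then walk up the hierarchy
+        if name in self.vars:
+            return self.vars[name]
+        return self.parent.find_var(name) if self.parent else None
+
+
+global_scope = Scope()
+global_scope.new_var("W", "a parameter")
+step_scope = Scope(parent=global_scope)  # pushed when entering a block
+assert step_scope.find_var("W") == "a parameter"  # found via the parent
+# step_scope is not popped after the forward pass; it is kept alive for the
+# backward pass and destroyed only after the minibatch completes.
+```
+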
+## Use Blocks in C++ and PaddlePaddle Programs
+
+Let us consolidate the discussion by presenting some examples.
+
+### Blocks with `if-else` and `IfElseOp`
+
+The following C++ program shows how blocks are used with the `if-else` structure:
+
+```c++
+int x = 10;
+int y = 20;
+int out;
+bool cond = false;
+if (cond) {
+ int z = x + y;
+ out = softmax(z);
+} else {
+ int z = fc(x);
+ out = z;
+}
+```
+
+An equivalent PaddlePaddle program from the design doc of the [IfElseOp operator](./if_else_op.md) is as follows:
+
+```python
+import paddle as pd
+
+x = var(10)
+y = var(20)
+cond = var(false)
+ie = pd.create_ifelseop(inputs=[x], output_num=1)
+with ie.true_block():
+ x = ie.inputs(true, 0)
+ z = operator.add(x, y)
+ ie.set_output(true, 0, operator.softmax(z))
+with ie.false_block():
+ x = ie.inputs(false, 0)
+ z = layer.fc(x)
+    ie.set_output(false, 0, z)
+out = ie(cond)
+```
+
+In both examples, the left branch computes `softmax(x+y)` and the right branch computes `fc(x)`.
+
+A difference is that variables in the C++ program contain scalar values, whereas those in the PaddlePaddle programs are mini-batches of instances. The `ie.inputs(true, 0)` invocation returns the instances in the 0-th input, `x`, that correspond to true values in `cond` as the local variable `x`, whereas `ie.inputs(false, 0)` returns the instances corresponding to false values.
+
+### Blocks with `for` and `RNNOp`
+
+The following RNN model from the [RNN design doc](./rnn.md)
+
+```python
+x = sequence([10, 20, 30])
+m = var(0)
+W = tensor()
+U = tensor()
+
+rnn = create_rnn(inputs=[x])
+with rnn.stepnet() as net:
+ x = net.set_inputs(0)
+ h = net.add_memory(init=m)
+ fc_out = pd.matmul(W, x)
+ hidden_out = pd.matmul(U, h.pre(n=1))
+ sum = pd.add_two(fc_out, hidden_out)
+ act = pd.sigmoid(sum)
+ h.update(act) # update memory with act
+ net.set_outputs(0, act, hidden_out) # two outputs
+
+o1, o2 = rnn()
+print o1, o2
+```
+
+has its equivalent C++ program as follows
+
+```c++
+int x[] = {10, 20, 30};
+int m = 0;
+int W = some_value();
+int U = some_other_value();
+
+int mem[sizeof(x) / sizeof(x[0]) + 1];
+int o1[sizeof(x) / sizeof(x[0]) + 1];
+int o2[sizeof(x) / sizeof(x[0]) + 1];
+for (int i = 1; i <= sizeof(x)/sizeof(x[0]); ++i) {
+  int x_i = x[i - 1];
+  if (i == 1) mem[0] = m;
+  int fc_out = W * x_i;
+  int hidden_out = U * mem[i - 1];
+ int sum = fc_out + hidden_out;
+ int act = sigmoid(sum);
+ mem[i] = act;
+ o1[i] = act;
+ o2[i] = hidden_out;
+}
+
+print_array(o1);
+print_array(o2);
+```
+
+
+## Compilation and Execution
+
+Like TensorFlow programs, a PaddlePaddle program is written in Python. The first part describes a neural network as a protobuf message, and the second part executes the message for training or inference.
+
+Generating this protobuf message is like a compiler generating a binary executable file. Executing the message is like the OS executing the binary file.
+
+## The "Binary Executable File Format"
+
+The definition of the protobuf message is as follows:
+
+```protobuf
+message BlockDesc {
+ repeated VarDesc vars = 1;
+ repeated OpDesc ops = 2;
+}
+```
+
+The step net in the above RNN example would look like
+
+```
+BlockDesc {
+ vars = {
+ VarDesc {...} // x
+ VarDesc {...} // h
+ VarDesc {...} // fc_out
+ VarDesc {...} // hidden_out
+ VarDesc {...} // sum
+ VarDesc {...} // act
+ }
+ ops = {
+ OpDesc {...} // matmul
+ OpDesc {...} // add_two
+ OpDesc {...} // sigmoid
+ }
+};
+```
+
+Also, the RNN operator in the above example is serialized into a protobuf message of type `OpDesc` and would look like:
+
+```
+OpDesc {
+ inputs = {0} // the index of x
+ outputs = {5, 3} // indices of act and hidden_out
+ attrs {
+ "memories" : {1} // the index of h
+ "step_net" :
+    "step_net" : <above step net>
+};
+```
+
+This `OpDesc` value is in the `ops` field of the `BlockDesc` value representing the global block.
+
+
+## The Compilation of Blocks
+
+During the generation of the Protobuf message, the Block should store VarDesc (the Protobuf message which describes Variable) and OpDesc (the Protobuf message which describes Operator).
+
+A VarDesc in a block should have its own name scope so that local variables do not affect the parent block's name scope.
+A child block's name scope should inherit the parent's, so that an OpDesc in a child block can reference a VarDesc stored in the parent block. For example:
+
+```python
+a = pd.Variable(shape=[20, 20])
+b = pd.fc(a, params=["fc.w", "fc.b"])
+
+rnn = pd.create_rnn()
+with rnn.stepnet() as net:
+ x = net.set_inputs(a)
+ # reuse fc's parameter
+ fc_without_b = pd.get_variable("fc.w")
+ net.set_outputs(fc_without_b)
+
+out = rnn()
+```
+The method `pd.get_variable` retrieves a Variable by name. A Variable may be stored in a parent block but retrieved in a child block, so a block should have a variable scope that supports inheritance.
+
+In compiler design, the symbol table is a data structure created and maintained by compilers to store information about the occurrence of various entities such as variable names, function names, classes, etc.
+
+To store the definition of variables and operators, we define a C++ class `SymbolTable`, like the one used in compilers.
+
+`SymbolTable` can do the following:
+
+- store the definitions (names and attributes) of variables and operators,
+- verify whether a variable was declared,
+- make it possible to implement type checking (offer Protobuf message pointers to `InferShape` handlers).
+
+
+```c++
+// Information in SymbolTable is enough to trace the dependency graph. So maybe
+// it is enough for the Eval() interface to take a SymbolTable.
+class SymbolTable {
+ public:
+ SymbolTable(SymbolTable* parent) : parent_(parent) {}
+
+ OpDesc* NewOp(const string& name="");
+
+ // TODO: determine whether the name is generated by Python or C++.
+ // Currently we assume that a unique name will be generated by C++ if the
+ // argument name is left default.
+ VarDesc* NewVar(const string& name="");
+
+ // Find a VarDesc by name; if recursive is true, also search the parent's
+ // SymbolTable recursively.
+ // This interface is introduced to support InferShape: find the protobuf
+ // messages of variables and operators, and pass pointers into InferShape.
+ //
+ // NOTE: maybe some C++ classes such as VarDescBuilder and OpDescBuilder should
+ // be proposed and embedded into pybind to enable Python to operate on C++ pointers.
+ VarDesc* FindVar(const string& name, bool recursive=true);
+
+ OpDesc* FindOp(const string& name);
+
+ BlockDesc Compile() const;
+
+ private:
+ SymbolTable* parent_;
+
+ map<string, OpDesc> ops_;
+ map<string, VarDesc> vars_;
+};
+```
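+
+A possible sketch of the recursive lookup behind `FindVar`, using the member names declared above:
+
+```c++
+// Resolve a name locally first; if not found and recursion is allowed,
+// delegate to the parent block's symbol table.
+VarDesc* SymbolTable::FindVar(const string& name, bool recursive) {
+  auto it = vars_.find(name);
+  if (it != vars_.end()) return &it->second;
+  if (recursive && parent_ != nullptr) return parent_->FindVar(name, recursive);
+  return nullptr;
+}
+```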
+
+After all the descriptions of variables and operators are added into the SymbolTable,
+the block has enough information to run.
+
+The `Block` class takes a `BlockDesc` as input, and provides `Run` and `InferShape` functions.
+
+
+```c++
+namespace {
+
+class Block : public OperatorBase {
+public:
+ explicit Block(const BlockDesc& desc) : desc_(desc) {}
+
+ void InferShape(const framework::Scope& scope) const override {
+ if (!symbols_ready_) {
+ CreateVariables(scope);
+ CreateOperators();
+ symbols_ready_ = true;
+ }
+ // should run InferShape first.
+ for (auto& op : runtime_table_.ops()) {
+ op->InferShape(scope);
+ }
+ }
+
+ void Run(const framework::Scope& scope,
+ const platform::DeviceContext& dev_ctx) const override {
+ PADDLE_ENFORCE(symbols_ready_, "operators and variables should be created first.");
+ for (auto& op : runtime_table_.ops()) {
+ op->Run(scope, dev_ctx);
+ }
+ }
+
+ void CreateVariables(const framework::Scope& scope);
+ void CreateOperators();
+
+ // some other necessary interfaces of NetOp are list below
+ // ...
+
+private:
+ BlockDesc desc_;
+ // mutable because InferShape is const but lazily creates variables/operators.
+ mutable bool symbols_ready_{false};
+ // Note: runtime_table_, which holds the operators created from desc_, is omitted here.
+};
+
+} // namespace
+```
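+
+A minimal sketch of `CreateVariables`, assuming `BlockDesc` exposes its `vars` via the generated protobuf accessor and `VarDesc` has a `name()` accessor:
+
+```c++
+// Realize every variable described in desc_ inside the given scope.
+// (Scope::NewVar mutates the scope, so a real implementation would take a
+// non-const Scope&.)
+void Block::CreateVariables(framework::Scope& scope) {
+  for (const auto& var : desc_.vars()) {
+    scope.NewVar(var.name());
+  }
+}
+```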
+
+## The Execution of Blocks
+
+Block inherits from OperatorBase, which has a Run method.
+Block's Run method will run its operators sequentially.
+
+There is another important interface called `Eval`, which takes some arguments called targets and generates a minimal graph that takes the targets as end points to create a new Block.
+After running this block, `Eval` fetches the latest values of the targets and returns them.
+
+The definition of Eval is as follows:
+
+```c++
+// Prune a block description by targets using the corresponding dependency graph,
+// and return a new BlockDesc with a minimal number of operators.
+// NOTE: this returns not a Block but the block's description, so that it can be
+// distributed to a cluster.
+BlockDesc Prune(const BlockDesc& desc, vector<string> targets);
+
+void Block::Eval(const vector<string>& targets,
+ const framework::Scope& scope,
+ const platform::DeviceContext& dev_ctx) {
+ BlockDesc min_desc = Prune(desc_, targets);
+ Block min_block(min_desc);
+ min_block.Run(scope, dev_ctx);
+}
+```
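+
+One possible strategy for `Prune` is a reverse scan over the dependency graph, keeping only the operators that (transitively) produce the targets. The sketch below assumes protobuf-style accessors on `BlockDesc` (`ops_size()`, `ops(i)`, `add_ops()`) and hypothetical `input_names()`/`output_names()` helpers on `OpDesc`:
+
+```c++
+// An op is kept iff one of its outputs is needed by a target, directly or
+// transitively. Scan ops in reverse program order, growing the needed-set
+// with the inputs of every kept op. Requires <unordered_set> and <vector>.
+BlockDesc Prune(const BlockDesc& desc, vector<string> targets) {
+  std::unordered_set<string> needed(targets.begin(), targets.end());
+  std::vector<OpDesc> kept;
+  for (int i = desc.ops_size() - 1; i >= 0; --i) {
+    const OpDesc& op = desc.ops(i);
+    bool produces_needed = false;
+    for (const string& out : op.output_names())
+      produces_needed = produces_needed || needed.count(out) > 0;
+    if (!produces_needed) continue;
+    kept.push_back(op);
+    for (const string& in : op.input_names()) needed.insert(in);
+  }
+  BlockDesc min_desc;
+  for (auto it = kept.rbegin(); it != kept.rend(); ++it) {
+    *min_desc.add_ops() = *it;
+  }
+  return min_desc;
+}
+```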
diff --git a/doc/design/functions_operators_layers.md b/doc/design/functions_operators_layers.md
index d23ba56b57..984b59f4c6 100644
--- a/doc/design/functions_operators_layers.md
+++ b/doc/design/functions_operators_layers.md
@@ -53,12 +53,12 @@ Let's explain using an example. Suppose that we are going to compose the FC usi
```python
def operator.mul(X1, X2):
O = Var()
- paddle.cpp.create_operator("mul", input={X1, Y1], output=O)
+ paddle.cpp.create_operator("mul", input={X1, Y1}, output=O)
return O
def operator.add(X1, X2):
O = Var()
- paddle.cpp.create_operator("add", input={X1, X2], output=O)
+ paddle.cpp.create_operator("add", input={X1, X2}, output=O)
return O
```
diff --git a/doc/design/graph.md b/doc/design/graph.md
index 51b7f87638..7519a65df8 100644
--- a/doc/design/graph.md
+++ b/doc/design/graph.md
@@ -56,7 +56,7 @@ For each parameter, like W and b created by `layer.fc`, marked as double circles
## Block and Graph
-The word block and graph are interchangable in the desgin of PaddlePaddle. A [Block[(https://github.com/PaddlePaddle/Paddle/pull/3708) is a metaphore of the code and local variables in a pair of curly braces in programming languages, where operators are like statements or instructions. A graph of operators and variables is a representation of the block.
+The words block and graph are interchangeable in the design of PaddlePaddle. A [Block](https://github.com/PaddlePaddle/Paddle/pull/3708) is a metaphor of the code and local variables in a pair of curly braces in programming languages, where operators are like statements or instructions. A graph of operators and variables is a representation of the block.
A Block keeps operators in an array `BlockDesc::ops`
@@ -67,4 +67,4 @@ message BlockDesc {
}
```
-in the order that there appear in user programs, like the Python program at the beginning of this article. We can imagine that in `ops`, we have some forward operators, followed by some gradient operators, and then some optimization operators.
+in the order that they appear in user programs, like the Python program at the beginning of this article. We can imagine that in `ops`, we have some forward operators, followed by some gradient operators, and then some optimization operators.
diff --git a/doc/design/if_else_op.md b/doc/design/if_else_op.md
index 7370c2a24f..954a19c073 100644
--- a/doc/design/if_else_op.md
+++ b/doc/design/if_else_op.md
@@ -1,22 +1,4 @@
-IfOp should have only one branch. An IfOp operator takes a `cond` variable whose value must be a vector of N boolean elements. Its return value has M (M<=N) instances, each corresponds to a true element in `cond`.
-
-```python
-import paddle as pd
-
-x = var()
-y = var()
-cond = var()
-
-b = pd.create_ifop(inputs=[x], output_num=1)
-with b.true_block():
- x = b.inputs(0)
- z = operator.add(x, y)
- b.set_output(0, operator.softmax(z))
-
-out = b(cond)
-```
-
-If we want the output still has N instances, we can use IfElseOp with a default value, whose minibatch size must be N:
+IfOp should have only one branch. An IfOp operator takes a `cond` variable whose value must be a vector of N boolean elements. Its return value has N instances. If cond[i] == True, input instance input[i] will go through true_block() and generate output[i]; otherwise it will produce output[i] from false_block().
```python
import paddle as pd
@@ -39,7 +21,7 @@ with b.false_block():
out = b(cond)
```
-If only true_block is set in an IfElseOp, we can have a default value for false as:
+If only true_block is set in an IfElseOp, a special case is to use a default value for the false branch:
```python
import paddle as pd
diff --git a/doc/design/ops/images/2_level_rnn.dot b/doc/design/ops/images/2_level_rnn.dot
new file mode 100644
index 0000000000..a498e882a3
--- /dev/null
+++ b/doc/design/ops/images/2_level_rnn.dot
@@ -0,0 +1,56 @@
+digraph G {
+
+ rnn [label="1-th level RNN" shape=box]
+
+ subgraph cluster0 {
+ label = "time step 0"
+
+ sent0 [label="sentence"]
+ sent1 [label="sentence"]
+
+ rnn1 [label="2-th level RNN" shape=box]
+
+ sent0 -> rnn1
+ sent1 -> rnn1
+ }
+
+ subgraph cluster1 {
+ label = "time step 1"
+
+ sent2 [label="sentence"]
+ sent3 [label="sentence"]
+
+ rnn2 [label="2-th level RNN" shape=box]
+
+ sent2 -> rnn2
+ sent3 -> rnn2
+ }
+
+ subgraph cluster2 {
+ label = "time step 2"
+
+ sent4 [label="sentence"]
+ sent5 [label="sentence"]
+
+ rnn3 [label="2-th level RNN" shape=box]
+
+ sent4 -> rnn3
+ sent5 -> rnn3
+ }
+
+
+ para0 [label="paragraph info 0"]
+ para1 [label="paragraph info 1"]
+ para2 [label="paragraph info 2"]
+
+ rnn1 -> para0
+ rnn2 -> para1
+ rnn3 -> para2
+
+ para0 -> rnn
+ para1 -> rnn
+ para2 -> rnn
+
+ chapter [label="chapter info"]
+ rnn -> chapter
+}
diff --git a/doc/design/ops/images/2_level_rnn.png b/doc/design/ops/images/2_level_rnn.png
new file mode 100644
index 0000000000..0537a75beb
Binary files /dev/null and b/doc/design/ops/images/2_level_rnn.png differ
diff --git a/doc/design/ops/images/rnn.dot b/doc/design/ops/images/rnn.dot
new file mode 100644
index 0000000000..c1141cd9c9
--- /dev/null
+++ b/doc/design/ops/images/rnn.dot
@@ -0,0 +1,87 @@
+digraph G {
+ label = "simple RNN implementation"
+
+ ranksep=2;
+
+ //graph [nodesep=1, ranksep=1];
+
+ node[nodesep=1]
+
+ subgraph cluster0 {
+ label = "global scope"
+ rankdir = TB
+ W
+ boot_memory
+ input
+ output
+ }
+
+ subgraph cluster1 {
+ label = "step-scope 0"
+ rankdir = TB
+ memory0[label="memory"]
+ prememory0[label="pre-memory"]
+ step_input0[label="step input"]
+ step_output0[label="step output"]
+ }
+
+ subgraph cluster2 {
+ label = "step-scope 1"
+ rankdir = TB
+ memory1[label="memory"]
+ prememory1[label="pre-memory"]
+ step_input1[label="step input"]
+ step_output1[label="step output"]
+ }
+
+ subgraph cluster3 {
+ label = "step-scope 2"
+ rankdir = TB
+ memory2[label="memory"]
+ prememory2[label="pre-memory"]
+ step_input2[label="step input"]
+ step_output2[label="step output"]
+ }
+
+ stepnet [shape=box]
+ stepnet0 [shape=box, style=dashed]
+ stepnet1 [shape=box, style=dashed]
+ stepnet2 [shape=box, style=dashed]
+
+
+ edge[color=blue]
+ boot_memory -> prememory0 [label="init" color="blue"]
+ memory0 -> prememory1 [label="copy/reference" color="blue"]
+ memory1 -> prememory2 [label="copy/reference" color="blue"]
+
+ edge[color=black]
+ W -> stepnet0[constraint=false, style=dashed]
+ W -> stepnet1[constraint=false, style=dashed]
+ W -> stepnet2[constraint=false, style=dashed]
+
+ memory0 -> stepnet0[style=dashed]
+ prememory0 -> stepnet0 -> step_output0[style=dashed]
+
+ memory1 -> stepnet1[style=dashed]
+ prememory1 -> stepnet1 -> step_output1[style=dashed]
+
+ memory2 -> stepnet2[style=dashed]
+ prememory2 -> stepnet2 -> step_output2[style=dashed]
+
+ input -> step_input0
+ input -> step_input1
+ input -> step_input2
+
+ step_input0 -> stepnet0 [style=dashed]
+ step_input1 -> stepnet1[style=dashed]
+ step_input2 -> stepnet2[style=dashed]
+
+ step_output0 -> output
+ step_output1 -> output
+ step_output2 -> output
+
+ stepnet0 -> stepnet[style=dashed]
+ stepnet1 -> stepnet[style=dashed]
+ stepnet2 -> stepnet[style=dashed]
+
+}
diff --git a/doc/design/ops/images/rnn.jpg b/doc/design/ops/images/rnn.jpg
new file mode 100644
index 0000000000..9867e404cf
Binary files /dev/null and b/doc/design/ops/images/rnn.jpg differ
diff --git a/doc/design/ops/images/rnn.png b/doc/design/ops/images/rnn.png
new file mode 100644
index 0000000000..e139e373fe
Binary files /dev/null and b/doc/design/ops/images/rnn.png differ
diff --git a/doc/design/ops/images/rnn_2level_data.dot b/doc/design/ops/images/rnn_2level_data.dot
new file mode 100644
index 0000000000..1d85ae2617
--- /dev/null
+++ b/doc/design/ops/images/rnn_2level_data.dot
@@ -0,0 +1,75 @@
+digraph G {
+ chapter [label="chapter"]
+
+ subgraph cluster0 {
+ label = "paragraph 0"
+
+ top_rnn0[label="top rnn step 0" shape=box]
+
+ p0 [label="paragraph 0"]
+ p1 [label="paragraph 1"]
+ }
+
+ subgraph cluster1{
+ label = "paragraph 1"
+
+ top_rnn1[label="top rnn step 1" shape=box]
+
+ p2 [label="paragraph 0"]
+ p3 [label="paragraph 1"]
+ }
+
+ subgraph cluster_p0 {
+ label = "sentence 0"
+
+ low_rnn0 [label="low rnn step 0" shape=box]
+ s00 [label="sentence 0"]
+ s01 [label="sentence 1"]
+
+ low_rnn0 -> s00
+ low_rnn0 -> s01
+ }
+
+ subgraph cluster_p1 {
+ label = "sentence 1"
+ low_rnn1 [label="low rnn step 1" shape=box]
+ s10 [label="sentence 0"]
+ s11 [label="sentence 1"]
+ low_rnn1 -> s10
+ low_rnn1 -> s11
+ }
+
+ subgraph cluster_p2 {
+ label = "sentence 1"
+ low_rnn2 [label="low rnn step 0" shape=box]
+ s20 [label="sentence 0"]
+ s21 [label="sentence 1"]
+ low_rnn2 -> s20
+ low_rnn2 -> s21
+ }
+
+ subgraph cluster_p3 {
+ label = "sentence 1"
+ low_rnn3 [label="low rnn step 1" shape=box]
+ s30 [label="sentence 0"]
+ s31 [label="sentence 1"]
+ low_rnn3 -> s30
+ low_rnn3 -> s31
+ }
+
+
+ chapter -> top_rnn0
+ chapter -> top_rnn1
+
+ top_rnn0 -> p0
+ top_rnn0 -> p1
+ top_rnn1 -> p2
+ top_rnn1 -> p3
+
+
+ p0 -> low_rnn0
+ p1 -> low_rnn1
+ p2 -> low_rnn2
+ p3 -> low_rnn3
+
+}
diff --git a/doc/design/ops/images/rnn_2level_data.png b/doc/design/ops/images/rnn_2level_data.png
new file mode 100644
index 0000000000..4be81b2430
Binary files /dev/null and b/doc/design/ops/images/rnn_2level_data.png differ
diff --git a/doc/design/ops/rnn.md b/doc/design/ops/rnn.md
new file mode 100644
index 0000000000..a78eea7d45
--- /dev/null
+++ b/doc/design/ops/rnn.md
@@ -0,0 +1,153 @@
+# RNNOp design
+
+This document is about an RNN operator which requires that the instances in a mini-batch have the same length. We will provide a more flexible RNN operator later.
+
+## RNN Algorithm Implementation
+
+
+
+
+
+The above diagram shows an RNN unrolled into a full network.
+
+There are several important concepts:
+
+- *step-net*: the sub-graph to run at each step,
+- *memory*: $h_t$, the state of the current step,
+- *ex-memory*: $h_{t-1}$, the state of the previous step,
+- *initial memory value*: the ex-memory of the first step.
+
+### Step-scope
+
+There could be local variables defined in step-nets. The PaddlePaddle runtime realizes these variables in *step-scopes* -- scopes created for each step.
+
+
+![](./images/rnn.png)
+Figure 2. The RNN's data flow
+
+
+Please be aware that all steps run the same step-net. Each step
+
+1. creates the step-scope,
+2. realizes local variables, including step-outputs, in the step-scope, and
+3. runs the step-net, which could use these variables.
+
+The RNN operator will compose its output from step outputs in step scopes.
+
+### Memory and Ex-memory
+
+Let's give more details about memory and ex-memory via a simple example:
+
+$$
+h_t = U h_{t-1} + W x_t
+$$,
+
+where $h_t$ and $h_{t-1}$ are the memory and ex-memory of step $t$, respectively.
+
+In the implementation, we can make an ex-memory variable either refer to the memory variable of the previous step,
+or copy the value of the previous step's memory to the current ex-memory variable.
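+
+As a toy illustration of this update rule, here is a framework-free sketch with scalars standing in for tensors (all names hypothetical):
+
+```c++
+#include <vector>
+
+// Compute h_t = U * h_{t-1} + W * x_t step by step. The ex-memory is a copy
+// of the previous step's memory; boot_memory initializes the first step.
+std::vector<double> RunRNN(const std::vector<double>& x, double U, double W,
+                           double boot_memory) {
+  std::vector<double> h(x.size());
+  double ex_memory = boot_memory;     // initial memory value
+  for (size_t t = 0; t < x.size(); ++t) {
+    h[t] = U * ex_memory + W * x[t];  // step-net body
+    ex_memory = h[t];                 // copy memory -> next step's ex-memory
+  }
+  return h;
+}
+```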
+
+### Usage in Python
+
+For more information on Block, please refer to the [design doc](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/block.md).
+
+We can define an RNN's step-net using Block:
+
+```python
+import paddle as pd
+
+X = some_op() # X is some operator's output and is a LoDTensor
+a = some_op()
+
+# declare parameters
+W = pd.Variable(shape=[20, 30])
+U = pd.Variable(shape=[20, 30])
+
+rnn = pd.create_rnn_op(output_num=1)
+with rnn.stepnet():
+ x = rnn.add_input(X)
+ # declare a memory (rnn's step)
+ h = rnn.add_memory(init=a)
+ # h.pre_state() means previous memory of rnn
+ new_state = pd.add_two(pd.matmul(W, x), pd.matmul(U, h.pre_state()))
+ # update current memory
+ h.update(new_state)
+ # indicate that h variables in all step scopes should be merged
+ rnn.add_outputs(h)
+
+out = rnn()
+```
+
+The Python API functions in the above example:
+
+- `rnn.add_input` indicates that the parameter is a variable that will be segmented into step-inputs.
+- `rnn.add_memory` creates a variable used as the memory.
+- `rnn.add_outputs` marks the variables that will be concatenated across steps into the RNN output.
+
+### Nested RNN and LoDTensor
+
+An RNN whose step-net includes other RNN operators is known as a *nested RNN*.
+
+For example, we could have a 2-level RNN, where the top level corresponds to paragraphs, and the lower level corresponds to sentences.
+
+The following figure illustrates the feeding of text into the lower level, one sentence per step, and the feeding of step outputs to the top level. The final top-level output is about the whole text.
+
+
+
+
+
+```python
+import paddle as pd
+
+W = pd.Variable(shape=[20, 30])
+U = pd.Variable(shape=[20, 30])
+
+W0 = pd.Variable(shape=[20, 30])
+U0 = pd.Variable(shape=[20, 30])
+
+# a is output of some op
+a = some_op()
+
+# chapter_data is a set of 128-dim word vectors
+# the first level of LoD is sentence
+# the second level of LoD is chapter
+chapter_data = pd.Variable(shape=[None, 128], type=pd.lod_tensor, level=2)
+
+def lower_level_rnn(paragraph):
+ '''
+ paragraph: the input paragraph data
+ '''
+ rnn = pd.create_rnn_op(output_num=1)
+ with rnn.stepnet():
+ sentence = rnn.add_input(paragraph, level=0)
+ h = rnn.add_memory(shape=[20, 30])
+ h.update(
+ pd.matmul(W, sentence) + pd.matmul(U, h.pre_state()))
+ # get the last state as sentence's info
+ rnn.add_outputs(h)
+ return rnn
+
+top_level_rnn = pd.create_rnn_op(output_num=1)
+with top_level_rnn.stepnet():
+ paragraph_data = top_level_rnn.add_input(chapter_data, level=1)
+ low_rnn = lower_level_rnn(paragraph_data)
+ paragraph_out = low_rnn()
+
+ h = top_level_rnn.add_memory(init=a)
+ h.update(
+ pd.matmul(W0, paragraph_data) + pd.matmul(U0, h.pre_state()))
+ top_level_rnn.add_outputs(h)
+
+# just output the last step
+chapter_out = top_level_rnn(output_all_steps=False)
+```
+
+In the above example, the construction of `top_level_rnn` calls `lower_level_rnn`. The input is a LoDTensor. The top-level RNN segments the input text data into paragraphs, and the lower-level RNN segments each paragraph into sentences.
+
+By default, the `RNNOp` concatenates the outputs from all the time steps.
+If `output_all_steps` is set to False, it only outputs the final time step.
+
+
+
+
+
diff --git a/doc/design/parameters_in_cpp.md b/doc/design/parameters_in_cpp.md
index b6f99bc7d9..a7ac3f17c4 100644
--- a/doc/design/parameters_in_cpp.md
+++ b/doc/design/parameters_in_cpp.md
@@ -1,19 +1,19 @@
# Design Doc: The C++ Class `Parameters`
-`Parameters` is a concept we designed in Paddle V2 API. `Parameters` is a container of parameters, and make Paddle can shared parameter between topologies. We described usages of `Parameter` in [api.md](./api.md).
+`Parameters` is a concept we designed in the PaddlePaddle V2 API. `Parameters` is a container of parameters, which makes PaddlePaddle capable of sharing parameters between topologies. We described the usage of `Parameter` in [api.md](./api.md).
-We used Python to implement Parameters when designing V2 API before. There are several defects for current implementation:
+We used Python to implement Parameters when designing the V2 API. There are several defects in the current implementation:
* We just use `memcpy` to share Parameters between topologies, but this is very inefficient.
-* We did not implement share Parameters while training. We just trigger `memcpy` when start training.
+* We did not support sharing Parameters while training. We just trigger `memcpy` when training starts.
-It is necessary that we implement Parameters in CPP side. However, it could be a code refactoring for Paddle, because Paddle was designed for training only one topology before, i.e., each GradientMachine contains its Parameter as a data member. In current Paddle implementation, there are three concepts associated with `Parameters`:
+It is necessary that we implement Parameters on the C++ side. However, it could result in a code refactoring for PaddlePaddle, because PaddlePaddle was designed for training only one topology before, i.e., each GradientMachine contains its Parameter as a data member. In the current PaddlePaddle implementation, there are three concepts associated with `Parameters`:
1. `paddle::Parameter`. A `Parameters` is a container for `paddle::Parameter`.
It is evident that we should use `paddle::Parameter` when developing `Parameters`.
However, the `Parameter` class contains many functions and does not have a clear interface.
It contains `create/store Parameter`, `serialize/deserialize`, `optimize(i.e SGD)`, `randomize/zero`.
When we developing `Parameters`, we only use `create/store Parameter` functionality.
-We should extract functionalities of Parameter into many classes to clean Paddle CPP implementation.
+We should extract the functionalities of Parameter into many classes to clean up the PaddlePaddle C++ implementation.
2. `paddle::GradientMachine` and its sub-classes, e.g., `paddle::MultiGradientMachine`, `paddle::NeuralNetwork`.
We should pass `Parameters` to `paddle::GradientMachine` when `forward/backward` to avoid `memcpy` between topologies.
@@ -24,7 +24,7 @@ Also, we should handle multi-GPU/CPU training, because `forward` and `backward`
So `Parameters` should be used by `paddle::ParameterUpdater`, and `paddle::ParameterUpdater` should optimize `Parameters` (by SGD).
-The step by step approach for implementation Parameters in Paddle C++ core is listed below. Each step should be a PR and could be merged into Paddle one by one.
+The step-by-step approach for implementing Parameters in the PaddlePaddle C++ core is listed below. Each step should be a PR and can be merged into PaddlePaddle one by one.
1. Clean `paddle::Parameter` interface. Extract the functionalities of `paddle::Parameter` to prepare for the implementation of Parameters.
diff --git a/doc/design/reader/README.md b/doc/design/reader/README.md
index f21f7af520..320dccec3d 100644
--- a/doc/design/reader/README.md
+++ b/doc/design/reader/README.md
@@ -52,7 +52,7 @@ Here are valid outputs:
# a mini batch of three data items, each data item is a list (single column).
[([1,1,1],),
([2,2,2],),
-([3,3,3],),
+([3,3,3],)]
```
Please note that each item inside the list must be a tuple, below is an invalid output:
diff --git a/doc/design/refactorization.md b/doc/design/refactorization.md
new file mode 100644
index 0000000000..ad801ca421
--- /dev/null
+++ b/doc/design/refactorization.md
@@ -0,0 +1,253 @@
+# Design Doc: Refactorization Overview
+
+The goals of the refactorization include:
+
+1. Make it easy for external contributors to write new elementary computation operations.
+1. Make the codebase clean and readable.
+1. Introduce a new design of computation representation -- a computation graph of operators and variables.
+1. The graph representation helps implement auto-scalable and auto-fault-recoverable distributed computing.
+
+## Computation Graphs
+
+1. PaddlePaddle represents the computation (the training and inference of DL models) by computation graphs.
+
+ 1. Please dig into [computation graphs](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/graph.md) for a solid example.
+
+1. Users write Python programs to describe the graphs and run them (locally or remotely).
+
+1. A graph is composed of *variables* and *operators*.
+
+1. The description of graphs must be able to be serialized/deserialized, so that it
+
+ 1. can be sent to the cloud for distributed execution, and
+ 1. can be sent to clients for mobile or enterprise deployment.
+
+1. The Python program does two things:
+
+ 1. *compilation*: runs a Python program to generate a protobuf message representation of the graph and send it to
+ 1. the C++ library `libpaddle.so` for local execution,
+ 1. the master process of a distributed training job for training, or
+ 1. the server process of a Kubernetes serving job for distributed serving.
+ 1. *execution*: according to the protobuf message, constructs instances of class `Variable` and `OperatorBase`, and runs them.
+
+## Description and Realization
+
+At compile time, the Python program generates a protobuf message representation of the graph, i.e., the description of the graph.
+
+At runtime, the C++ program realizes the graph and runs it.
+
+| | Representation (protobuf messages) | Realization (C++ class objects) |
+|---|---|---|
+|Data|[VarDesc](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/framework.proto#L107)|[Variable](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/variable.h#L24)|
+|Operation|[OpDesc](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/framework.proto#L35)|[Operator](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/operator.h#L64)|
+|Block|BlockDesc|Block|
+
+The word *graph* is interchangeable with *block* in this document. A graph represents computation steps and local variables just like a C++/Java program block, or a pair of { and }.
+
+## Compilation and Execution
+
+1. Run an application Python program to describe the graph. In particular,
+
+ 1. create VarDesc to represent local/intermediate variables,
+ 1. create operators and set attributes,
+ 1. validate attribute values,
+ 1. infer the type and the shape of variables,
+ 1. plan for memory-reuse for variables,
+ 1. generate the backward and optimization parts of the graph,
+ 1. possibly split the graph for distributed training.
+
+1. The invocation of `train` or `infer` in the application Python program:
+
+ 1. create a new Scope instance in the [scope hierarchy](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/scope.md) for each run of a block,
+ 1. realize local variables defined in the BlockDesc message in the new scope,
+ 1. a scope is similar to the stack frame in programming languages,
+
+ 1. create an instance of class `Block`, in which,
+ 1. realize operators in the BlockDesc message,
+
+ 1. run the Block by calling
+ 1. `Block::Eval(vector<Variable>* targets)` for forward and backward computations, or
+ 1. `Block::Eval(vector<Variable>* targets)` for optimization.
+
+
+## Intermediate Representation (IR)
+
+```text
+Compile Time -> IR -> Runtime
+```
+
+### Benefit
+
+- Optimization
+ ```text
+ Compile Time -> IR -> Optimized IR -> Runtime
+ ```
+- Send automatically partitioned IR to different nodes.
+ - Automatic data parallel
+ ```text
+ Compile Time
+ |-> Single GPU IR
+ |-> [trainer-IR-0, trainer-IR-1, pserver-IR]
+ |-> Node-0 (runs trainer-IR-0)
+ |-> Node-1 (runs trainer-IR-1)
+ |-> Node-2 (runs pserver-IR)
+ ```
+ - Automatic model parallel (planned for future)
+
+---
+
+# Operator/OpWithKernel/OpKernel
+
+![class_diagram](http://api.paddlepaddle.org/graphviz?dot=https://gist.githubusercontent.com/reyoung/53df507f6749762675dff3e7ce53372f/raw/49caf1fb70820fb4a6c217634317c9306f361f36/op_op_with_kern_class_diagram.dot)
+
+---
+
+# Operator
+![class_diagram](http://api.paddlepaddle.org/graphviz?dot=https://gist.githubusercontent.com/reyoung/53df507f6749762675dff3e7ce53372f/raw/dd598e8f1976f5759f58af5e5ef94738a6b2e661/op.dot)
+
+* `Operator` is the fundamental building block and the user interface.
+ * Operator stores input/output variable names and attributes.
+ * The `InferShape` interface is used to infer output variable shapes from input shapes.
+ * Use `Run` to compute `output variables` from `input variables`.
+
+---
+
+# OpWithKernel/Kernel
+
+![class_diagram](http://api.paddlepaddle.org/graphviz?dot=https://gist.githubusercontent.com/reyoung/53df507f6749762675dff3e7ce53372f/raw/9d7f4eba185cf41c8e2fbfb40ae21890dbddcd39/op_with_kernel.dot)
+
+* `OpWithKernel` inherits `Operator`.
+* `OpWithKernel` contains a Kernel map.
+ * `OpWithKernel::Run` gets the device's kernel and invokes `OpKernel::Compute`.
+ * `OpKernelKey` is the map key. It currently contains only the device place, but may include the data type later.
+
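+A sketch of the dispatch in `OpWithKernel::Run` (member and helper names such as `kernels_`, `OpKernelKey`, and `GetPlace()` are assumptions, not the actual API):
+
+```cpp
+// Pick the kernel registered for the current place and run it.
+void OpWithKernel::Run(const framework::Scope& scope,
+                       const platform::DeviceContext& dev_ctx) const {
+  OpKernelKey key(dev_ctx.GetPlace());        // only the device place, for now
+  kernels_.at(key)->Compute(scope, dev_ctx);  // invoke OpKernel::Compute
+}
+```
+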
+---
+
+# Why separate Kernel and Operator
+
+* Separate GPU and CPU code.
+ * Make it possible for Paddle to run without a GPU.
+* Allow one operator (which is the user interface) to have many implementations.
+ * The same mul op can have different FP16 and FP32 kernels, as well as different MKL and Eigen kernels.
+---
+
+# Libraries for Kernel development
+
+* `Eigen::Tensor` contains basic math and element-wise functions.
+ * Note that `Eigen::Tensor` has broadcast implementation.
+ * Limit the number of `tensor.device(dev) = ` statements in your code.
+* `thrust::transform` and `std::transform`.
+ * `thrust` has the same API as the C++ standard library. Using `transform`, one can quickly implement a customized element-wise kernel.
+ * `thrust` also has more complex APIs, like `scan`, `reduce`, and `reduce_by_key`.
+* Hand-written GPU kernels and CPU code.
+ * Do not put kernels in `.h` files. CPU kernels should be in `.cc` files and GPU kernels in `.cu` files. (`GCC` cannot compile GPU code.)
+---
+# Operator Register
+
+## Why is registration necessary?
+We need a method to build mappings between Op type names and Op classes.
+
+## How is the registration done?
+
+Maintain a map whose key is the type name and whose value is the corresponding Op constructor.
+
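+A minimal sketch of such a registry map (hypothetical names, not Paddle's actual macros; requires `<functional>`, `<map>`, `<memory>`, and `<string>`):
+
+```cpp
+// Map from op type name to a creator function; REGISTER_OP-style macros
+// would insert entries at static-initialization time.
+using OpCreator = std::function<std::unique_ptr<OperatorBase>()>;
+
+std::map<std::string, OpCreator>& OpRegistry() {
+  static std::map<std::string, OpCreator> registry;
+  return registry;
+}
+
+std::unique_ptr<OperatorBase> CreateOp(const std::string& type) {
+  return OpRegistry().at(type)();  // throws if the type was never registered
+}
+```
+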
+---
+# The Registry Map
+
+### `OpInfoMap`
+
+`op_type(string)` -> `OpInfo`
+
+`OpInfo`:
+
+- **`creator`**: The Op constructor.
+- **`grad_op_type`**: The type of the gradient Op.
+- **`proto`**: The Op's Protobuf, including inputs, outputs and required attributes.
+- **`checker`**: Used to check attributes.
+
+---
+# Related Concepts
+
+### Op_Maker
+Its constructor takes `proto` and `checker`. They are completed during Op_Maker's construction. ([ScaleOpMaker](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/scale_op.cc#L37))
+
+### Register Macros
+```cpp
+REGISTER_OP(op_type, op_class, op_maker_class, grad_op_type, grad_op_class)
+REGISTER_OP_WITHOUT_GRADIENT(op_type, op_class, op_maker_class)
+```
+
+### `USE` Macros
+Make sure the registration process is executed and linked.
+
+---
+# Register Process
+1. Write the Op class, as well as its gradient Op class if there is one.
+2. Write the Op maker class. In the constructor, describe the Op's inputs, outputs, and attributes.
+3. Invoke the macro `REGISTER_OP`. The macro will
+ 1. call the maker class to complete `proto` and `checker`,
+ 2. with the completed `proto` and `checker`, build a new key-value pair in the `OpInfoMap`.
+
+4. Invoke the `USE` macro wherever the Op is used, to make sure it is linked.
+
+---
+# Backward Module (1/2)
+### Create Backward Operator
+- Mapping from forward Op to backward Op
+![backward](https://gist.githubusercontent.com/dzhwinter/a6fbd4623ee76c459f7f94591fd1abf0/raw/61026ab6e518e66bde66a889bc42557a1fccff33/backward.png)
+
+---
+# Backward Module (2/2)
+### Build Backward Network
+- **Input**: graph of forward operators
+- **Output**: graph of backward operators
+- **Corner cases in construction**:
+ - shared variable => insert `Add` operator
+ - no gradient => insert `fill_zero_grad` operator
+ - recursive netOp => call `Backward` recursively
+ - RNN Op => recursively call `Backward` on stepnet
+
+
+---
+# Scope, Variable, Tensor
+
+* `Tensor` is an n-dimensional array with a type.
+ * Only dims and data pointers are stored in `Tensor`.
+ * All operations on `Tensor` are written in `Operator` or global functions.
+ * For the variable-length Tensor design, see [LoDTensor](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/lod_tensor.md).
+* `Variable` holds the inputs and outputs of an operator, and is not limited to `Tensor`.
+ * step_scopes in RNN is a variable and not a tensor.
+* `Scope` is where variables are stored.
+ * `Scope` keeps a `map<string, Variable>`.
+ * `Scope` has a hierarchical structure. A local scope can get variables from its parent scope; see the sketch after this list.
+
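+A sketch of that hierarchical lookup (assuming `Scope` keeps a pointer to its parent and a `map<string, Variable*> vars_`):
+
+```cpp
+// Look up a variable locally first; fall back to the parent scope.
+Variable* Scope::FindVar(const std::string& name) const {
+  auto it = vars_.find(name);
+  if (it != vars_.end()) return it->second;
+  return parent_ ? parent_->FindVar(name) : nullptr;
+}
+```
+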
+---
+# Block (in design)
+## The difference from the original RNNOp
+- a Block as an operator is more intuitive than `RNNOp`,
+- it offers a new interface `Eval(targets)` to deduce the minimal block to `Run`,
+- it fits the compile-time/runtime separation design:
+ - during compilation, `SymbolTable` stores `VarDesc`s and `OpDesc`s and serializes them to a `BlockDesc`;
+ - when the graph executes, a Block constructed with the `BlockDesc` creates `Op`s and `Var`s and then `Run`s them.
+
+---
+# Milestone
+- take Paddle/book as the main line; the requirements of the models motivate the framework refactoring,
+- model migration
+ - framework development gives **priority support** to model migration, for example,
+ - the MNIST demo needs a Python interface,
+ - the RNN models require the framework to support `LoDTensor`.
+ - determine some timelines,
+ - heavily-relied-on Ops need to be migrated first,
+ - different models can be migrated in parallel.
+- improve the framework at the same time,
+- accept imperfection, concentrating on solving the specific problem at the right price.
+
+---
+# Control the migration quality
+- compare the performance of migrated models with the old ones,
+- follow the Google C++ style guide,
+- build an automatic workflow for generating Python/C++ documentation:
+ - the documentation of layers and ops should be written inside the code,
+ - take the documentation quality into account when reviewing PRs,
+ - preview the documentation, and read and improve it from the users' perspective.
diff --git a/doc/design/releasing_process.md b/doc/design/releasing_process.md
index 0c10e78280..62ff8f3229 100644
--- a/doc/design/releasing_process.md
+++ b/doc/design/releasing_process.md
@@ -1,8 +1,8 @@
-# Paddle发行规范
+# PaddlePaddle发行规范
-Paddle使用git-flow branching model做分支管理,使用[Semantic Versioning](http://semver.org/)标准表示Paddle版本号。
+PaddlePaddle使用git-flow branching model做分支管理,使用[Semantic Versioning](http://semver.org/)标准表示PaddlePaddle版本号。
-Paddle每次发新的版本,遵循以下流程:
+PaddlePaddle每次发新的版本,遵循以下流程:
1. 从`develop`分支派生出新的分支,分支名为`release/版本号`。例如,`release/0.10.0`
2. 将新分支的版本打上tag,tag为`版本号rc.Patch号`。第一个tag为`0.10.0rc1`,第二个为`0.10.0rc2`,依次类推。
@@ -27,14 +27,14 @@ Paddle每次发新的版本,遵循以下流程:
需要注意的是:
-* `release/版本号`分支一旦建立,一般不允许再从`develop`分支合入`release/版本号`。这样保证`release/版本号`分支功能的封闭,方便测试人员测试Paddle的行为。
+* `release/版本号`分支一旦建立,一般不允许再从`develop`分支合入`release/版本号`。这样保证`release/版本号`分支功能的封闭,方便测试人员测试PaddlePaddle的行为。
* 在`release/版本号`分支存在的时候,如果有bugfix的行为,需要将bugfix的分支同时merge到`master`, `develop`和`release/版本号`这三个分支。
-# Paddle 分支规范
+# PaddlePaddle 分支规范
-Paddle开发过程使用[git-flow](http://nvie.com/posts/a-successful-git-branching-model/)分支规范,并适应github的特性做了一些区别。
+PaddlePaddle开发过程使用[git-flow](http://nvie.com/posts/a-successful-git-branching-model/)分支规范,并适应github的特性做了一些区别。
-* Paddle的主版本库遵循[git-flow](http://nvie.com/posts/a-successful-git-branching-model/)分支规范。其中:
+* PaddlePaddle的主版本库遵循[git-flow](http://nvie.com/posts/a-successful-git-branching-model/)分支规范。其中:
* `master`分支为稳定(stable branch)版本分支。每一个`master`分支的版本都是经过单元测试和回归测试的版本。
* `develop`分支为开发(develop branch)版本分支。每一个`develop`分支的版本都经过单元测试,但并没有经过回归测试。
* `release/版本号`分支为每一次Release时建立的临时分支。在这个阶段的代码正在经历回归测试。
@@ -42,18 +42,18 @@ Paddle开发过程使用[git-flow](http://nvie.com/posts/a-successful-git-branch
* 其他用户的fork版本库并不需要严格遵守[git-flow](http://nvie.com/posts/a-successful-git-branching-model/)分支规范,但所有fork的版本库的所有分支都相当于特性分支。
* 建议,开发者fork的版本库使用`develop`分支同步主版本库的`develop`分支
* 建议,开发者fork的版本库中,再基于`develop`版本fork出自己的功能分支。
- * 当功能分支开发完毕后,向Paddle的主版本库提交`Pull Reuqest`,进而进行代码评审。
+ * 当功能分支开发完毕后,向PaddlePaddle的主版本库提交`Pull Request`,进而进行代码评审。
* 在评审过程中,开发者修改自己的代码,可以继续在自己的功能分支提交代码。
* BugFix分支也是在开发者自己的fork版本库维护,与功能分支不同的是,BugFix分支需要分别给主版本库的`master`、`develop`与可能有的`release/版本号`分支,同时提起`Pull Request`。
-# Paddle回归测试列表
+# PaddlePaddle回归测试列表
-本列表说明Paddle发版之前需要测试的功能点。
+本列表说明PaddlePaddle发版之前需要测试的功能点。
-## Paddle Book中所有章节
+## PaddlePaddle Book中所有章节
-Paddle每次发版本首先要保证Paddle Book中所有章节功能的正确性。功能的正确性包括验证Paddle目前的`paddle_trainer`训练和纯使用`Python`训练模型正确性。
+PaddlePaddle每次发版本首先要保证PaddlePaddle Book中所有章节功能的正确性。功能的正确性包括验证PaddlePaddle目前的`paddle_trainer`训练和纯使用`Python`训练模型正确性。
| | 新手入门章节 | 识别数字 | 图像分类 | 词向量 | 情感分析 | 语意角色标注 | 机器翻译 | 个性化推荐 |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
diff --git a/doc/design/scope.md b/doc/design/scope.md
index c9e0be716b..b1f9bb4378 100644
--- a/doc/design/scope.md
+++ b/doc/design/scope.md
@@ -17,7 +17,7 @@ Scope is an association of a name to variable. All variables belong to `Scope`.
1. Scope only contains a map of a name to variable.
- All parameters, data, states in a Net should be variables and stored inside a scope. Each op should get inputs and outputs to do computation from a scope, such as data buffer, state(momentum) etc.
+ All parameters, data, and states in a Net should be variables and stored inside a scope. Each op should get inputs and outputs to do computation from a scope, such as data buffers, states (momentum), etc.
1. Variable can only be created by Scope and a variable can only be got from Scope. User cannot create or get a variable outside a scope. This is a constraints of our framework, and will keep our framework simple and clear.
@@ -32,7 +32,7 @@ Scope is an association of a name to variable. All variables belong to `Scope`.
1. Scope should destruct all Variables inside it when itself is destructed. User can never store `Variable` pointer somewhere else.
- Because Variable can only be got from Scope. When destroying Scope, we also need to destroy all the Variables in it. If user store `Variable` pointer to private data member or some global variable, the pointer will be a invalid pointer when associated `Scope` is destroyed.
+ Because a Variable can only be obtained from a Scope, when destroying a Scope we also need to destroy all the Variables in it. If a user stores a `Variable` pointer in a private data member or some global variable, that pointer will become invalid when the associated `Scope` is destroyed.
```cpp
class Scope {
@@ -50,7 +50,7 @@ class Scope {
Just like [scope](https://en.wikipedia.org/wiki/Scope_(computer_science)) in programming languages, `Scope` in the neural network can also be a local scope. There are two attributes about local scope.
-1. We can create local variables in a local scope. When that local scope are destroyed, all local variables should also be destroyed.
+1. We can create local variables in a local scope. When that local scope is destroyed, all local variables should also be destroyed.
2. Variables in a parent scope can be retrieved from local scopes of that parent scope, i.e., when user get a variable from a scope, it will try to search this variable in current scope. If there is no such variable in the local scope, `scope` will keep searching from its parent, until the variable is found or there is no parent.
```cpp
@@ -121,4 +121,4 @@ Also, as the parent scope is a `shared_ptr`, we can only `Create()` a scope shar
## Orthogonal interface
-`FindVar` will return `nullptr` when `name` is not found. It can be used as `Contains` method. `NewVar` will return a `Error` when there is a name conflict locally. Combine `FindVar` and `NewVar`, we can implement `NewVar` easily.
+`FindVar` will return `nullptr` when `name` is not found, so it can be used as a `Contains` method. `NewVar` will return an `Error` when there is a name conflict locally. Combining `FindVar` and `NewVar`, we can implement `NewVar` easily.
diff --git a/doc/design/simple_op_design.md b/doc/design/simple_op_design.md
index fded4a6861..c7aeed7f9b 100644
--- a/doc/design/simple_op_design.md
+++ b/doc/design/simple_op_design.md
@@ -6,9 +6,9 @@ The Interaction between Python and C++ can be simplified as two steps:
1. C++ tells Python how many Ops there are, and what parameter do users need to offer to initialize a new Op. Python then builds API for each Op at compile time.
-2. Users invoke APIs built by Python and provide necessary parameters. These parameters will be sent to C++ fo finish Op construction task.
+2. Users invoke APIs built by Python and provide necessary parameters. These parameters will be sent to C++ for finishing the Op construction task.
-### Message form C++ to Python
+### Message from C++ to Python
We define a Protobuf message class `OpProto` to hold message needed in the first step. What should an `OpProto` contain? This question is equivalent to “What message do we need to offer, to build a Python API which is legal and user oriented and can use to describe a whole Op.”
@@ -193,7 +193,7 @@ def fc_layer(input, size, with_bias, activation):
elif:
# ...
return act_output;
-```
+```
### Low Leval API
diff --git a/doc/design/var_desc.md b/doc/design/var_desc.md
index 86a95c10d5..bfbbdd0578 100644
--- a/doc/design/var_desc.md
+++ b/doc/design/var_desc.md
@@ -1,7 +1,7 @@
## Background
PaddlePaddle divides the description of neural network computation graph into two stages: compile time and runtime.
-PaddlePaddle use proto message to describe compile time graph for
+PaddlePaddle uses proto messages to describe the compile-time graph because
1. Computation graph should be able to be saved to a file.
1. In distributed training, the graph will be serialized and send to multiple workers.
diff --git a/doc/faq/index_cn.rst b/doc/faq/index_cn.rst
index 138efb566e..00192aa69b 100644
--- a/doc/faq/index_cn.rst
+++ b/doc/faq/index_cn.rst
@@ -321,3 +321,55 @@ pip uninstall py_paddle paddle
然后安装paddle的python环境, 在build目录下执行
pip install python/dist/paddle*.whl && pip install ../paddle/dist/py_paddle*.whl
+
+16. PaddlePaddle存储的参数格式是什么,如何和明文进行相互转化
+---------------------------------------------------------
+
+PaddlePaddle保存的模型参数文件内容由16字节头信息和网络参数两部分组成。头信息中,1~4字节表示PaddlePaddle版本信息,请直接填充0;5~8字节表示每个参数占用的字节数,当保存的网络参数为float类型时为4,double类型时为8;9~16字节表示保存的参数总个数。
+
+将PaddlePaddle保存的模型参数还原回明文时,可以使用相应数据类型的 :code:`numpy.array` 加载具体网络参数,此时可以跳过PaddlePaddle模型参数文件的头信息。若在PaddlePaddle编译时,未指定按照double精度编译,默认情况下按照float精度计算,保存的参数也是float类型。这时在使用 :code:`numpy.array` 时,一般设置 :code:`dtype=float32` 。示例如下:
+
+.. code-block:: python
+
+ def read_parameter(fname, width):
+ s = open(fname).read()
+ # skip header
+ vec = np.fromstring(s[16:], dtype=np.float32)
+ # width is the size of the corresponding layer
+ np.savetxt(fname + ".csv", vec.reshape(width, -1),
+ fmt="%.6f", delimiter=",")
+
+
+将明文参数转化为PaddlePaddle可加载的模型参数时,首先构造头信息,再写入网络参数。下面的代码将随机生成的矩阵转化为可以被PaddlePaddle加载的模型参数。
+
+.. code-block:: python
+
+ def gen_rand_param(param_file, width, height, need_trans):
+ np.random.seed()
+ header = struct.pack("iil", 0, 4, height * width)
+ param = np.float32(np.random.rand(height, width))
+ with open(param_file, "w") as fparam:
+ fparam.write(header + param.tostring())
+
+17. 如何加载预训练参数
+------------------------------
+
+* 对加载预训练参数的层,设置其参数属性 :code:`is_static=True`,使该层的参数在训练过程中保持不变。以embedding层为例,代码如下:
+
+.. code-block:: python
+
+ emb_para = paddle.attr.Param(name='emb', is_static=True)
+ paddle.layer.embedding(size=word_dim, input=x, param_attr=emb_para)
+
+
+* 从模型文件将预训练参数载入 :code:`numpy.array`,在创建parameters后,使用 :code:`parameters.set()` 加载预训练参数。PaddlePaddle保存的模型参数文件前16字节为头信息,用户将参数载入 :code:`numpy.array` 时须从第17字节开始。以embedding层为例,代码如下:
+
+.. code-block:: python
+
+ def load_parameter(file_name, h, w):
+ with open(file_name, 'rb') as f:
+ f.read(16) # skip header.
+ return np.fromfile(f, dtype=np.float32).reshape(h, w)
+
+ parameters = paddle.parameters.create(my_cost)
+ parameters.set('emb', load_parameter(emb_param_file, 30000, 256))
diff --git a/doc/howto/dev/new_op_cn.md b/doc/howto/dev/new_op_cn.md
index 58665e9f2b..c6570b89ae 100644
--- a/doc/howto/dev/new_op_cn.md
+++ b/doc/howto/dev/new_op_cn.md
@@ -34,7 +34,7 @@ Kernel实现 | CPU、GPU共享Kernel实现在`.h`文件中,否则,CPU
注册Op | Op注册实现在`.cc`文件;Kernel注册CPU实现在`.cc`文件中,GPU实现在`.cu`文件中
-实现新的op都添加至目录[paddle/operators](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/operators)下,文件命名以`*_op.h`(如有) 、 `*_op.cc` 、`*_op.cu`(如有)结尾。
+实现新的op都添加至目录[paddle/operators](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/operators)下,文件命名以`*_op.h`(如有) 、 `*_op.cc` 、`*_op.cu`(如有)结尾。**系统会根据文件名自动构建op和其对应的Python扩展。**
下面以矩阵乘操作,即[MulOp](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/mul_op.cc)为例来介绍如何写带Kernel的Operator。
@@ -224,45 +224,15 @@ MulOp(const std::string &type, const framework::VariableNameMap &inputs,
### 5. 编译
-- 简单**无特殊依赖**的OP无需修改CMakeList.txt文件。[paddle/operators/CMakeLists.txt](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/CMakeLists.txt) 会自动将 `paddle/operators` 目录下新增的 `*_op.cc` 文件加入编译。
-- 较为复杂、**有额外依赖** 的operator仍需要修改[paddle/operators/CMakeLists.txt](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/CMakeLists.txt)。如,`mul_op` 依赖 `math_function`,需要在`CMakeLists.txt`中添加如下内容:
+运行下面命令可以进行编译:
- ```
- op_library(mul_op SRCS mul_op.cc mul_op.cu DEPS math_function) +
- ```
-
-- 运行下面命令可以进行编译:
-
- ```
- make mul_op
- ```
+```
+make mul_op
+```
## 绑定Python
-- 绑定Python
-
- 在 [`paddle/pybind/pybind.cc
-`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/pybind/pybind.cc) 使用`USE_OP`告知编译器需要链接的Op,具体解释参考[代码注释](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/op_registry.h#L81)。
-
- ```
- USE_OP(mul);
- ```
- 如果只实现了CPU版本,则使用`USE_CPU_ONLY_OP`:
-
- ```
- USE_CPU_ONLY_OP(gather);
- ```
-
- 如果OP不带Kernel,则使用`USE_NO_KENREL_OP`:
-
- ```
- USE_NO_KENREL_OP(recurrent);
- ```
-
-
- - 生成库
-
- 无需修改 [`paddle/pybind/CMakeLists.txt`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/pybind/CMakeLists.txt)文件,`paddle/operators` 目录下新增的 `*_op.cc` 文件会被自动添加链接到生成的lib库中。
+系统会对新增的op自动绑定Python,并链接到生成的lib库中。
## 实现单元测试
@@ -354,11 +324,7 @@ class TestMulGradOp(GradientChecker):
### 编译和执行单元测试
-单元测试编写完成之后,在[`python/paddle/v2/framework/tests/CMakeLists.txt`](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/framework/tests/CMakeLists.txt)中添加以下内容,将单元测试加入工程:
-
-```
-py_test(test_mul_op SRCS test_mul_op.py)
-```
+`python/paddle/v2/framework/tests` 目录下新增的 `test_*.py` 单元测试会被自动加入工程进行编译。
请注意,**不同于Op的编译测试,运行单元测试测时需要编译整个工程**,并且编译时需要打开`WITH_TESTING`, 即`cmake paddle_dir -DWITH_TESTING=ON`。编译成功后,执行下面的命令来运行单元测试:
@@ -371,3 +337,10 @@ make test ARGS="-R test_mul_op -V"
```bash
ctest -R test_mul_op
```
+
+## 注意事项
+
+- 为每个Op创建单独的`*_op.h`(如有)、`*_op.cc`和`*_op.cu`(如有)。不允许一个文件中包含多个Op,这将会导致编译出错。
+- 注册Op时的类型名,需要和该Op的名字一样。即不允许在`A_op.cc`里面,注册`REGISTER_OP(B, ...)`等,这将会导致单元测试出错。
+- 如果Op没有实现GPU Kernel,请不要创建空的`*_op.cu`,这将会导致单元测试出错。
+- 如果多个Op依赖一些共用的函数,可以创建非`*_op.*`格式的文件来存放,如`gather.h`文件。
diff --git a/doc/howto/dev/write_docs_cn.rst b/doc/howto/dev/write_docs_cn.rst
index 36e5d420c9..731a63f945 100644
--- a/doc/howto/dev/write_docs_cn.rst
+++ b/doc/howto/dev/write_docs_cn.rst
@@ -5,15 +5,13 @@
PaddlePaddle的文档包括英文文档 ``doc`` 和中文文档 ``doc_cn`` 两个部分。文档都是通过 `cmake`_ 驱动 `sphinx`_ 编译生成,生成后的文档分别存储在编译目录的 ``doc`` 和 ``doc_cn`` 两个子目录下。
-如何构建PaddlePaddle的文档
-==========================
+如何构建文档
+============
-PaddlePaddle的文档构建有直接构建和基于Docker构建两种方式,我们提供了一个构建脚本build_docs.sh来进行构建。
-PaddlePaddle文档需要准备的环境相对较复杂,所以我们推荐使用基于Docker来构建PaddlePaddle的文档。
+PaddlePaddle的文档构建有两种方式。
-
-使用Docker构建PaddlePaddle的文档
---------------------------------
+使用Docker构建
+--------------
使用Docker构建PaddlePaddle的文档,需要在系统里先安装好Docker工具包。Docker安装请参考 `Docker的官网 `_ 。安装好Docker之后可以使用源码目录下的脚本构建文档,即
@@ -21,58 +19,46 @@ PaddlePaddle文档需要准备的环境相对较复杂,所以我们推荐使
cd TO_YOUR_PADDLE_CLONE_PATH
cd paddle/scripts/tools/build_docs
- bash build_docs.sh with_docker
-
-编译完成后,会在当前目录生成两个子目录\:
-
-* doc 英文文档目录
-* doc_cn 中文文档目录
+ sh build_docs.sh
+编译完成之后,会在当前目录生成两个子目录\: doc(英文文档目录)和 doc_cn(中文文档目录)。
打开浏览器访问对应目录下的index.html即可访问本地文档。
-
-
-直接构建PaddlePaddle的文档
---------------------------
-
-因为PaddlePaddle的v2 api文档生成过程依赖于py_paddle Python包,用户需要首先确认py_paddle包已经安装。
-
-.. code-block:: bash
-
- python -c "import py_paddle"
-
-如果提示错误,那么用户需要在本地编译安装PaddlePaddle,请参考 `源码编译文档 `_ 。
-注意,用户在首次编译安装PaddlePaddle时,请将WITH_DOC选项关闭。在编译安装正确之后,请再次确认py_paddle包已经安装,即可进行下一步操作。
+直接构建
+--------
如果提示正确,可以执行以下命令编译生成文档,即
.. code-block:: bash
cd TO_YOUR_PADDLE_CLONE_PATH
- cd paddle/scripts/tools/build_docs
- bash build_docs.sh local
-
-编译完成之后,会在当前目录生成两个子目录\:
-
-* doc 英文文档目录
-* doc_cn 中文文档目录
+ mkdir -p build
+ cd build
+ cmake .. -DCMAKE_BUILD_TYPE=Debug -DWITH_GPU=OFF -DWITH_MKLDNN=OFF -DWITH_MKLML=OFF -DWITH_DOC=ON
+ make gen_proto_py
+ make paddle_docs paddle_docs_cn
+编译完成之后,会在当前目录生成两个子目录\: doc(英文文档目录)和 doc_cn(中文文档目录)。
打开浏览器访问对应目录下的index.html即可访问本地文档。
-如何书写PaddlePaddle的文档
-==========================
+如何书写文档
+============
PaddlePaddle文档使用 `sphinx`_ 自动生成,用户可以参考sphinx教程进行书写。
-如何更新www.paddlepaddle.org文档
-================================
+如何更新文档主题
+================
+
+PaddlePaddle文档主题在 `TO_YOUR_PADDLE_CLONE_PATH/doc_theme` 文件夹下,包含所有和前端网页设计相关的文件。
-开发者给PaddlePaddle代码增加的注释以PR的形式提交到github中,提交方式可参见 `贡献文档 `_ 。
+如何更新doc.paddlepaddle.org
+============================
+
+更新的文档以PR的形式提交到github中,提交方式参见 `贡献文档 `_ 。
目前PaddlePaddle的develop分支的文档是自动触发更新的,用户可以分别查看最新的 `中文文档 `_ 和
`英文文档 `_ 。
-
.. _cmake: https://cmake.org/
.. _sphinx: http://www.sphinx-doc.org/en/1.4.8/
diff --git a/paddle/CMakeLists.txt b/paddle/CMakeLists.txt
index ec866b2907..b435de80a2 100644
--- a/paddle/CMakeLists.txt
+++ b/paddle/CMakeLists.txt
@@ -19,7 +19,7 @@ if(Boost_FOUND)
endif()
if(WITH_C_API)
- add_subdirectory(capi)
+ add_subdirectory(capi)
endif()
if(WITH_SWIG_PY)
diff --git a/paddle/capi/CMakeLists.txt b/paddle/capi/CMakeLists.txt
index dde99ab340..dd9e4f1cbd 100644
--- a/paddle/capi/CMakeLists.txt
+++ b/paddle/capi/CMakeLists.txt
@@ -28,48 +28,64 @@ add_style_check_target(paddle_capi ${CAPI_SOURCES} ${CAPI_HEADER}
add_dependencies(paddle_capi paddle_proto)
-
# combine all paddle static libraries together, into libpaddle_capi_whole.a
# user should use PaddleCAPI as -lpaddle_capi_whole
-set(capi_whole_library libpaddle_capi_whole.a)
-add_custom_target(paddle_capi_whole ALL
- COMMAND mkdir -p o_files/capi && cd o_files/capi/ && ar -x $
- COMMAND mkdir -p o_files/utils && cd o_files/utils/ && ar -x $
- COMMAND mkdir -p o_files/parameter && cd o_files/parameter/ && ar -x $
- COMMAND mkdir -p o_files/math && cd o_files/math/ && ar -x $
- COMMAND mkdir -p o_files/cuda && cd o_files/cuda/ && ar -x $
- COMMAND mkdir -p o_files/function && cd o_files/function/ && ar -x $
- COMMAND mkdir -p o_files/gserver && cd o_files/gserver/ && ar -x $
- COMMAND mkdir -p o_files/proto && cd o_files/proto/ && ar -x $
- COMMAND mkdir -p o_files/network && cd o_files/network/ && ar -x $
- COMMAND mkdir -p o_files/pserver && cd o_files/pserver/ && ar -x $
- COMMAND ar crs ${capi_whole_library} `find ./o_files -name '*.o'`
- COMMAND rm -rf o_files
- WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- DEPENDS paddle_capi paddle_utils paddle_parameter paddle_math
- paddle_cuda paddle_function paddle_gserver
- paddle_proto paddle_pserver paddle_network
- )
-set_target_properties(paddle_capi_whole
- PROPERTIES IMPORTED_LOCATION ${CMAKE_CURRENT_BINARY_DIR}/${capi_whole_library})
+set(PADDLE_CAPI_INFER_LIBS
+ paddle_utils
+ paddle_parameter
+ paddle_math
+ paddle_cuda
+ paddle_function
+ paddle_gserver
+ paddle_proto
+ paddle_pserver
+ paddle_network)
+
+cc_library(paddle_capi_whole DEPS paddle_capi ${PADDLE_CAPI_INFER_LIBS})
-set(LINK_FLAGS " -Wl,--retain-symbols-file ${CMAKE_CURRENT_SOURCE_DIR}/export.sym -Wl,--version-script ${CMAKE_CURRENT_SOURCE_DIR}/export.map")
-# TODO: merge mkl into paddle_capi_shared
-add_library(paddle_capi_shared SHARED ${CAPI_SOURCES})
-set_target_properties(paddle_capi_shared PROPERTIES LINK_FLAGS "${LINK_FLAGS}")
-target_include_directories(paddle_capi_shared PUBLIC ${CMAKE_CURRENT_BINARY_DIR})
-link_paddle_exe(paddle_capi_shared)
+# No shared library for iOS
+if(NOT IOS)
+ set(LINK_FLAGS " -Wl,--retain-symbols-file ${CMAKE_CURRENT_SOURCE_DIR}/export.sym -Wl,--version-script ${CMAKE_CURRENT_SOURCE_DIR}/export.map")
+ # TODO: merge mkl into paddle_capi_shared
+ add_library(paddle_capi_shared SHARED ${CAPI_SOURCES})
+ set_target_properties(paddle_capi_shared PROPERTIES LINK_FLAGS "${LINK_FLAGS}")
+ target_include_directories(paddle_capi_shared PUBLIC ${CMAKE_CURRENT_BINARY_DIR})
+ link_paddle_exe(paddle_capi_shared)
+endif()
# install library & headers.
install(FILES ${CAPI_HEADERS} DESTINATION include/paddle)
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/config.h DESTINATION include/paddle)
if(ANDROID)
- install(FILES ${CMAKE_CURRENT_BINARY_DIR}/${capi_whole_library}
- DESTINATION lib/${ANDROID_ABI})
- install(TARGETS paddle_capi_shared DESTINATION lib/${ANDROID_ABI})
+ install(TARGETS paddle_capi_whole paddle_capi_shared
+ ARCHIVE DESTINATION lib/${ANDROID_ABI}
+ LIBRARY DESTINATION lib/${ANDROID_ABI})
+ execute_process(
+ COMMAND ${GIT_EXECUTABLE} log --pretty=oneline -1
+ OUTPUT_VARIABLE GIT_COMMITS_LIST
+ RESULT_VARIABLE GIT_COMMITS_LIST_RESULT
+ ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
+ if(${GIT_COMMITS_LIST_RESULT})
+ set(GIT_COMMITS_LIST "No commits.")
+ endif()
+ install(CODE "FILE(WRITE ${CMAKE_INSTALL_PREFIX}/lib/${ANDROID_ABI}/BUILD.txt
+ \"Compiler:\n\"
+ \"\\t${CMAKE_C_COMPILER}\\n\"
+ \"\\t${CMAKE_CXX_COMPILER}\\n\"
+ \"Compiler Flags:\\n\"
+ \"\\t${CMAKE_F_FLAGS}\\n\"
+ \"\\t${CMAKE_CXX_FLAGS}\\n\"
+ \"Android API: ${CMAKE_SYSTEM_VERSION}\\n\"
+ \"Lastest commit:\\n\"
+ \"\\t${GIT_COMMITS_LIST}\\n\"
+ )"
+ )
else(ANDROID)
- install(FILES ${CMAKE_CURRENT_BINARY_DIR}/${capi_whole_library} DESTINATION lib)
- install(TARGETS paddle_capi_shared DESTINATION lib)
+ install(TARGETS paddle_capi_whole
+ ARCHIVE DESTINATION lib)
+ if(NOT IOS)
+ install(TARGETS paddle_capi_shared DESTINATION lib)
+ endif()
endif(ANDROID)
# this variable used for unittest
diff --git a/paddle/cuda/include/hl_cuda_cudnn.h b/paddle/cuda/include/hl_cuda_cudnn.h
index 3f68c62de6..b44b071bd1 100644
--- a/paddle/cuda/include/hl_cuda_cudnn.h
+++ b/paddle/cuda/include/hl_cuda_cudnn.h
@@ -22,10 +22,10 @@ limitations under the License. */
*/
typedef enum {
HL_POOLING_MAX = 0,
- // average includes padded values
- HL_POOLING_AVERAGE = 1,
// average does not include padded values
- HL_POOLING_AVERAGE_EXCLUDE_PADDING = 2,
+ HL_POOLING_AVERAGE = 1,
+ // average includes padded values
+ HL_POOLING_AVERAGE_INCLUDE_PADDING = 2,
HL_POOLING_END
} hl_pooling_mode_t;
diff --git a/paddle/cuda/include/hl_tensor_ops.h b/paddle/cuda/include/hl_tensor_ops.h
index 93d38b7d22..b2bf334dab 100644
--- a/paddle/cuda/include/hl_tensor_ops.h
+++ b/paddle/cuda/include/hl_tensor_ops.h
@@ -461,7 +461,7 @@ class add {
public:
INLINE float32x4_t operator()(const float32x4_t a,
const float32x4_t b) const {
- return vmulq_f32(a, b);
+ return vaddq_f32(a, b);
}
};
diff --git a/paddle/cuda/src/hl_cuda_cnn.cu b/paddle/cuda/src/hl_cuda_cnn.cu
index 9ba3d14261..58674febdc 100644
--- a/paddle/cuda/src/hl_cuda_cnn.cu
+++ b/paddle/cuda/src/hl_cuda_cnn.cu
@@ -211,13 +211,11 @@ __global__ void KeAvgPoolForward(const int nthreads,
int hstart = ph * strideH - padH;
int wstart = pw * strideW - padW;
- int hend = min(hstart + sizeY, height + padH);
- int wend = min(wstart + sizeX, width + padW);
- int pool_size = (hend - hstart) * (wend - wstart);
+ int hend = min(hstart + sizeY, height);
+ int wend = min(wstart + sizeX, width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
- hend = min(hend, height);
- wend = min(wend, width);
+ int pool_size = (hend - hstart) * (wend - wstart);
real aveval = 0;
inputData += (frameNum * channels + c) * height * width;
@@ -299,12 +297,14 @@ __global__ void KeAvgPoolBackward(const int nthreads,
outGrad += (frameNum * outStride + offsetC * pooledH * pooledW);
for (int ph = phstart; ph < phend; ++ph) {
+ int hstart = ph * strideH - padH;
+ int hend = min(hstart + sizeY, height);
+ hstart = max(hstart, 0);
for (int pw = pwstart; pw < pwend; ++pw) {
// figure out the pooling size
- int hstart = ph * strideH - padH;
int wstart = pw * strideW - padW;
- int hend = min(hstart + sizeY, height + padH);
- int wend = min(wstart + sizeX, width + padW);
+ int wend = min(wstart + sizeX, width);
+ wstart = max(wstart, 0);
int poolsize = (hend - hstart) * (wend - wstart);
gradient += outGrad[ph * pooledW + pw] / poolsize;
}
@@ -600,16 +600,13 @@ __global__ void KeAvgPool3DForward(const int nthreads,
int dstart = pd * strideD - padD;
int hstart = ph * strideH - padH;
int wstart = pw * strideW - padW;
- int dend = min(dstart + sizeZ, depth + padD);
- int hend = min(hstart + sizeY, height + padH);
- int wend = min(wstart + sizeX, width + padW);
- int pool_size = (dend - dstart) * (hend - hstart) * (wend - wstart);
+ int dend = min(dstart + sizeZ, depth);
+ int hend = min(hstart + sizeY, height);
+ int wend = min(wstart + sizeX, width);
dstart = max(dstart, 0);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
- dend = min(dend, depth);
- hend = min(hend, height);
- wend = min(wend, width);
+ int pool_size = (dend - dstart) * (hend - hstart) * (wend - wstart);
real aveval = 0;
inputData += (frameNum * channels + c) * depth * height * width;
@@ -712,15 +709,18 @@ __global__ void KeAvgPool3DBackward(const int nthreads,
outGrad += (frameNum * channels + offsetC) * pooledD * pooledH * pooledW;
for (int pd = pdstart; pd < pdend; ++pd) {
+ int dstart = pd * strideD - padD;
+ int dend = min(dstart + sizeZ, depth);
+ dstart = max(dstart, 0);
for (int ph = phstart; ph < phend; ++ph) {
+ int hstart = ph * strideH - padH;
+ int hend = min(hstart + sizeY, height);
+ hstart = max(hstart, 0);
for (int pw = pwstart; pw < pwend; ++pw) {
// figure out the pooling size
- int dstart = pd * strideD - padD;
- int hstart = ph * strideH - padH;
int wstart = pw * strideW - padW;
- int dend = min(dstart + sizeZ, depth + padD);
- int hend = min(hstart + sizeY, height + padH);
- int wend = min(wstart + sizeX, width + padW);
+ int wend = min(wstart + sizeX, width);
+ wstart = max(wstart, 0);
int poolsize = (dend - dstart) * (hend - hstart) * (wend - wstart);
gradient += outGrad[(pd * pooledH + ph) * pooledW + pw] / poolsize;
}
diff --git a/paddle/cuda/src/hl_cuda_cudnn.cc b/paddle/cuda/src/hl_cuda_cudnn.cc
index f38ef69255..b8caf48f9c 100644
--- a/paddle/cuda/src/hl_cuda_cudnn.cc
+++ b/paddle/cuda/src/hl_cuda_cudnn.cc
@@ -432,11 +432,11 @@ void hl_create_pooling_descriptor(hl_pooling_descriptor* pooling_desc,
cudnn_mode = CUDNN_POOLING_MAX;
break;
case HL_POOLING_AVERAGE:
- cudnn_mode = CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING;
- break;
- case HL_POOLING_AVERAGE_EXCLUDE_PADDING:
cudnn_mode = CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING;
break;
+ case HL_POOLING_AVERAGE_INCLUDE_PADDING:
+ cudnn_mode = CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING;
+ break;
default:
LOG(FATAL) << "parameter mode error";
}
diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt
index c0838d9b75..3371962c63 100644
--- a/paddle/framework/CMakeLists.txt
+++ b/paddle/framework/CMakeLists.txt
@@ -9,6 +9,7 @@ cc_test(eigen_test SRCS eigen_test.cc DEPS tensor)
cc_library(lod_tensor SRCS lod_tensor.cc DEPS ddim place tensor)
cc_test(lod_tensor_test SRCS lod_tensor_test.cc DEPS lod_tensor)
+nv_test(lod_tensor_gpu_test SRCS lod_tensor_test.cu DEPS lod_tensor)
cc_test(variable_test SRCS variable_test.cc)
diff --git a/paddle/framework/backward.md b/paddle/framework/backward.md
index c762811dfc..0a6d762bc8 100644
--- a/paddle/framework/backward.md
+++ b/paddle/framework/backward.md
@@ -2,11 +2,22 @@
## Motivation
-In Neural Network, the backpropagation algorithm follows the chain rule, so we need to compound the gradient operators/expressions together with the chain rule. Every forward network needs a backward network to construct the full computation graph, the operator/expression's backward pass will be generated respect to forward pass.
+In neural networks, many models are currently solved by the backpropagation algorithm (known as BP). Technically, it calculates the gradient of the loss function and distributes it back through the network. Since this follows the chain rule, we need a module that chains the gradient operators/expressions together to construct the backward pass. Every forward network needs a backward network to construct the full computation graph; the operator/expression's backward pass will be generated with respect to the forward pass.
-## Backward Operator Registry
+## Implementation
-A backward network is built up with several backward operators. Backward operators take forward operators' inputs outputs, and output gradients and then calculate its input gradients.
+In this design doc, we export only one API for generating the backward pass.
+
+```c++
+std::unique_ptr<OperatorBase> Backward(const OperatorBase& forwardOp,
+                                       const std::unordered_set<std::string>& no_grad_vars);
+```
+
+The implementation behind it can be divided into two parts, **Backward Operator Creating** and **Backward Operator Building**.
+
+### Backward Operator Registry
+
+A backward network is built up with several backward operators. Backward operators take forward operators' inputs, outputs, and output gradients and then calculate its input gradients.
| | forward operator | backward operator
| ---------------------- | ---------------- |------------------------- |
@@ -25,7 +36,7 @@ REGISTER_OP(mul, MulOp, MulOpMaker, mul_grad, MulOpGrad);
`mul_grad` is the type of backward operator, and `MulOpGrad` is its class name.
-## Backward Opeartor Creating
+### Backward Operator Creating
Given a certain forward operator, we can get its corresponding backward operator by calling:
@@ -43,40 +54,47 @@ The function `BuildGradOp` will sequentially execute following processes:
4. Building backward operator with `inputs`, `outputs` and forward operator's attributes.
-## Backward Network Building
-
-A backward network is a series of backward operators. The main idea of building a backward network is creating backward operators in the inverted sequence and put them together.
+### Backward Network Building
-In our design, the network itself is also a kind of operator. So the operators contained by a big network may be some small network.
-
-given a forward network, it generates the backward network. We only care about the Gradients—`OutputGradients`, `InputGradients`.
+A backward network is a series of backward operators. The main idea of building a backward network is to create backward operators in the inverted sequence and append them one by one. A few corner cases need special processing.
1. Op
- when the input forward network is an Op, return its gradient Operator Immediately.
+ When the input forward network is an Op, return its gradient Operator immediately. If all of its outputs are in the no-gradient set, return a special `NOP` instead.
2. NetOp
- when the input forward network is a NetOp, it needs to call the sub NetOp/Operators backward function recursively. During the process, we need to collect the `OutputGradients` name according to the forward NetOp.
+ In our design, the network itself is also a kind of operator (**NetOp**), so the operators contained in a big network may themselves be small networks. When the input forward network is a NetOp, it needs to call the sub-NetOps'/Operators' backward functions recursively. During the process, we need to collect the `OutputGradients` names according to the forward NetOp.
+
+3. RnnOp
+
+ RnnOp is a nested stepnet operator. The backward module needs to recursively call `Backward` for every stepnet.
+
+4. Sharing Variables
+
+ **Sharing variables**. As illustrated in the pictures below, two operators share the same variable name `W@GRAD`, which will overwrite their shared input variable.
+
+
+![](./images/duplicate_op.png)
- **shared variable**. As illustrated in the pictures, two operator's `Output` `Gradient` will overwrite their shared input variable.
+ pic 1. Sharing variables in operators.
-
- ![](./images/duplicate_op.png)
+
- 1. Shared variable in operators.
+ Sharing a variable between operators, or using the same input variable in multiple operators, leads to a duplicate gradient variable. As the demo above shows, we need to rename the gradient names recursively and add a generic add operator to replace the overwriting links.
-
+
+![](images/duplicate_op2.png)
- Share variable between operators or same input variable used in multiple operators leads to a duplicate gradient variable. As demo show above, we need to rename gradient name recursively and add a generic add operator replace the overwrite links.
+ pic 2. Replace the shared variable's gradient with an `Add` operator.
-
- ![](images/duplicate_op2.png)
+
- 2. Replace shared variable's gradient with `Add` operator.
+ Because our framework finds variables according to their names, we need to rename the output links. We append a numeric suffix to each renamed output to represent its position, counted clockwise.
-
+5. Part of the Gradient is Zero.
+ In the whole graph, there are cases where an operator's gradient is not needed, but its input's gradient is a dependency link of another operator, so we need to fill a gradient matrix of the same shape at that position. In our implementation, we insert a special `fillZeroLike` operator.
- Then collect the sub graph `OutputGradients`/`InputGradients` as the NetOp's and return it.
+Following the rules above, we then collect the sub-graph's `OutputGradients`/`InputGradients` as the NetOp's and return it.
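
For concreteness, a hedged usage sketch of the `Backward` API introduced above (it assumes a constructed forward `OperatorBase` named `forward_op`; the surrounding setup is elided):

```c++
// Illustrative only: generate the backward pass for forward_op,
// skipping gradient computation for variable "W".
std::unordered_set<std::string> no_grad_vars{"W"};
std::unique_ptr<OperatorBase> backward_op = Backward(forward_op, no_grad_vars);
```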
diff --git a/paddle/framework/images/duplicate_op2.graffle b/paddle/framework/images/duplicate_op2.graffle
index ede3bca30a..5cec3bc64d 100644
Binary files a/paddle/framework/images/duplicate_op2.graffle and b/paddle/framework/images/duplicate_op2.graffle differ
diff --git a/paddle/framework/images/duplicate_op2.png b/paddle/framework/images/duplicate_op2.png
index 4e872dc2ca..21cdd5cabf 100644
Binary files a/paddle/framework/images/duplicate_op2.png and b/paddle/framework/images/duplicate_op2.png differ
diff --git a/paddle/framework/lod_tensor.h b/paddle/framework/lod_tensor.h
index 154068fef6..fac5cd20aa 100644
--- a/paddle/framework/lod_tensor.h
+++ b/paddle/framework/lod_tensor.h
@@ -18,8 +18,10 @@
#ifndef PADDLE_ONLY_CPU
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
+#include <thrust/system/cuda/experimental/pinned_allocator.h>
#endif
+#include <memory>
#include "paddle/framework/ddim.h"
#include "paddle/framework/tensor.h"
#include "paddle/platform/enforce.h"
@@ -32,7 +34,8 @@ template <typename T>
using Vector = std::vector<T>;
#else
template <typename T>
-using Vector = thrust::host_vector<T>;
+using Vector = thrust::host_vector<
+    T, thrust::system::cuda::experimental::pinned_allocator<T>>;
#endif
using LoD = std::vector<Vector<size_t>>;
@@ -48,18 +51,15 @@ bool operator==(const LoD& a, const LoD& b);
* LoDTensor (Level of details Tensor)
* see https://en.wikipedia.org/wiki/Level_of_details for reference.
*/
-class LoDTensor {
+class LoDTensor : public Tensor {
public:
LoDTensor() {}
- LoDTensor(const LoD& lod, Tensor* t) : lod_(lod), tensor_(t) {}
- void set_lod(const LoD& lod) { lod_ = lod; }
-
- void set_tensor(Tensor* tensor) { tensor_ = tensor; }
+ explicit LoDTensor(const LoD& lod) : lod_(lod) {}
- Tensor& tensor() { return *tensor_; }
+ void set_lod(const LoD& lod) { lod_ = lod; }
- LoD lod() { return lod_; }
+ LoD lod() const { return lod_; }
/*
* Get a element from LoD.
@@ -101,7 +101,6 @@ class LoDTensor {
private:
LoD lod_;
- Tensor* tensor_; // not owned
};
} // namespace framework
} // namespace paddle
diff --git a/paddle/framework/lod_tensor.md b/paddle/framework/lod_tensor.md
index 769b61f175..07bbdf9416 100644
--- a/paddle/framework/lod_tensor.md
+++ b/paddle/framework/lod_tensor.md
@@ -4,13 +4,13 @@ PaddlePaddle's RNN doesn't require that all instances have the same length. To
## Challenge of Variable-length Inputs
-People usually represent a mini-batch by a Tensor. For example, a mini-batch of 32 images, each of size 32x32, is a 10x32x32 Tensor. So a transformation, T, of all images can be a matrix multiplication of the 32x32xO-dimensional tensor T and the 10x32x32 Tensor.
+People usually represent a mini-batch by a Tensor. For example, a mini-batch of 10 images, each of size 32x32, is a 10x32x32 Tensor. So a transformation, T, of all images can be a matrix multiplication of the 10xOx32-dimensional tensor T and the 10x32x32 Tensor.
Another example is that each mini-batch contains 32 sentences, where each word is a D-dimensional one-hot vector. If all sentences have the same length L, we can represent this mini-batch by a 32xLxD tensor. However, in most cases, sentences have variable lengths, and we will need an index data structure to record these variable lengths.
## LoD as a Solution
-### Mini-Batch of variable-length sentenses
+### Mini-Batch of variable-length sentences
Let's imagine a mini-batch of 3 variable lengths sentences, containing 3, 1, and 2 words respectively. We can represent it by a (3+1+2)xD tensor plus some index information:
@@ -51,17 +51,17 @@ The many 1's on the second level seem duplicated. For this particular case of 2
In summary, as long as that the essential elements (words or images) have the same size, we can represent mini-batches by a LoD Tensor:
- The underlying tensor has size LxD1xD2x..., where D1xD2... is the size of the essential elements, and
-- the first dimension size L has an additon property -- a LoD index as a nested vector:
+- The first dimension size L has an additional property -- a LoD index as a nested vector:
```c++
- typedef std::vector<std::vector<size_t> > LoD;
+ typedef std::vector<std::vector<size_t>> LoD;
```
-- The LoD index can is not necessary when there are only two levels and all elements of the second level have length 1.
+- The LoD index is not necessary when there are only two levels and all elements of the second level have length 1.
## Slicing of LoD Tensor
-Consider that we have a network with three levels of RNN: the top level one handles articles, the second level one handles sentences, and the basic level one handles words. This network requires that mini-batches represented by 4 level LoD Tensor, for example,
+Consider that we have a network with three levels of RNN: the top level one handles articles, the second level one handles sentences, and the basic level one handles words. This network requires that mini-batches be represented by a 3-level LoD Tensor, for example,
```
3
@@ -90,8 +90,9 @@ and the <1,2>-slice of above example is
Let's go on slicing this slice. Its <1,1>-slice is
```
-3
-|||
+1
+1
+|
```
### The Slicing Algorithm
@@ -99,7 +100,7 @@ Let's go on slicing this slice. Its <1,1>-slice is
The algorithm, with over-simplified data structure, is defined as
```c++
-typedef vector<vector<size_t> > LoD;
+typedef std::vector<std::vector<size_t>> LoD;
struct LoDTensor {
LoD lod_;
@@ -128,7 +129,7 @@ Suppose that we want to retrieve the <1,2>-slice
we will need to find out the starting position of this slice by summing over all leaf nodes in `LoD` to the left of the slice, i.e., 3 + 2 + 4 + 1 = 10.
-To avoid the traversal of the LoD tree at slcing time, we can do it at the construction time -- instead of saving the lengths of the next level in the LoD tree, we can save the starting offset of the next level. For example, above LoD Tensor can be transformed into
+To avoid the traversal of the LoD tree at slicing time, we can do it at the construction time -- instead of saving the lengths of the next level in the LoD tree, we can save the starting offset of the next level. For example, above LoD Tensor can be transformed into
```
0
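
The lengths-to-offsets transformation described above is a per-level prefix sum; a minimal sketch (a standalone helper for illustration, not the library's API):

```c++
#include <vector>

// Convert one lengths-based LoD level into starting offsets.
// Example: {3, 2, 4, 1} -> {0, 3, 5, 9, 10}; the last entry is the total.
std::vector<std::size_t> LengthsToOffsets(const std::vector<std::size_t>& lengths) {
  std::vector<std::size_t> offsets(lengths.size() + 1, 0);
  for (std::size_t i = 0; i < lengths.size(); ++i) {
    offsets[i + 1] = offsets[i] + lengths[i];  // running total
  }
  return offsets;
}
```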
diff --git a/paddle/framework/lod_tensor_test.cc b/paddle/framework/lod_tensor_test.cc
index 1da8553134..7915326b27 100644
--- a/paddle/framework/lod_tensor_test.cc
+++ b/paddle/framework/lod_tensor_test.cc
@@ -36,69 +36,64 @@ class LoDTensorTester : public ::testing::Test {
ASSERT_EQ(lod.size(), 3UL);
- tensor.Resize({20 /*batch size*/, 128 /*dim*/});
+ lod_tensor_.Resize({20 /*batch size*/, 128 /*dim*/});
// malloc memory
- tensor.mutable_data<float>(place);
+ lod_tensor_.mutable_data<float>(place);
- lod_tensor.set_lod(lod);
- lod_tensor.set_tensor(&tensor);
+ lod_tensor_.set_lod(lod);
}
protected:
platform::CPUPlace place;
- Tensor tensor;
- LoDTensor lod_tensor;
+ LoDTensor lod_tensor_;
};
-TEST_F(LoDTensorTester, NumLevels) { ASSERT_EQ(lod_tensor.NumLevels(), 3UL); }
+TEST_F(LoDTensorTester, NumLevels) { ASSERT_EQ(lod_tensor_.NumLevels(), 3UL); }
TEST_F(LoDTensorTester, NumElements) {
- ASSERT_EQ(lod_tensor.NumElements(0), 2UL);
- ASSERT_EQ(lod_tensor.NumElements(1), 4UL);
- ASSERT_EQ(lod_tensor.NumElements(2), 8UL);
+ ASSERT_EQ(lod_tensor_.NumElements(0), 2UL);
+ ASSERT_EQ(lod_tensor_.NumElements(1), 4UL);
+ ASSERT_EQ(lod_tensor_.NumElements(2), 8UL);
}
TEST_F(LoDTensorTester, SliceLevels) {
// slice 1 level
for (size_t level = 0; level < 3UL; ++level) {
- LoDTensor new_lod_tensor = lod_tensor;
+ LoDTensor new_lod_tensor = lod_tensor_;
new_lod_tensor.SliceLevels(level, level + 1);
ASSERT_EQ(new_lod_tensor.NumLevels(), 1UL);
- ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor.NumElements(level));
- ASSERT_EQ(new_lod_tensor.tensor().data<float>(),
- lod_tensor.tensor().data<float>());
+ ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor_.NumElements(level));
+ ASSERT_EQ(new_lod_tensor.data<float>(), lod_tensor_.data<float>());
}
// slice 2 level
for (size_t level = 0; level < 2UL; ++level) {
- LoDTensor new_lod_tensor = lod_tensor;
+ LoDTensor new_lod_tensor = lod_tensor_;
new_lod_tensor.SliceLevels(level, level + 2);
ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL);
- ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor.NumElements(level));
- ASSERT_EQ(new_lod_tensor.NumElements(1), lod_tensor.NumElements(level + 1));
- ASSERT_EQ(new_lod_tensor.tensor().data<float>(),
- lod_tensor.tensor().data<float>());
+ ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor_.NumElements(level));
+ ASSERT_EQ(new_lod_tensor.NumElements(1),
+ lod_tensor_.NumElements(level + 1));
+ ASSERT_EQ(new_lod_tensor.data<float>(), lod_tensor_.data<float>());
}
}
TEST_F(LoDTensorTester, SliceInLevel) {
size_t level = 0;
- LoDTensor new_lod_tensor = lod_tensor;
+ LoDTensor new_lod_tensor = lod_tensor_;
new_lod_tensor.SliceInLevel(level, 0, 2);
EXPECT_EQ(new_lod_tensor.NumLevels(), 3UL);
EXPECT_EQ(new_lod_tensor.NumElements(0), 2UL);
EXPECT_EQ(new_lod_tensor.NumElements(1), 4UL);
EXPECT_EQ(new_lod_tensor.NumElements(2), 8UL);
- ASSERT_EQ(new_lod_tensor.tensor().data<float>(),
- lod_tensor.tensor().data<float>());
+ ASSERT_EQ(new_lod_tensor.data<float>(), lod_tensor_.data<float>());
level = 1;
- new_lod_tensor = lod_tensor;
+ new_lod_tensor = lod_tensor_;
new_lod_tensor.SliceInLevel(level, 0, 2);
ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL);
ASSERT_EQ(new_lod_tensor.NumElements(0), 2UL);
ASSERT_EQ(new_lod_tensor.NumElements(1), 4UL);
- ASSERT_EQ(new_lod_tensor.tensor().data<float>(),
- lod_tensor.tensor().data<float>());
+ ASSERT_EQ(new_lod_tensor.data<float>(), lod_tensor_.data<float>());
}
} // namespace framework
diff --git a/paddle/framework/lod_tensor_test.cu b/paddle/framework/lod_tensor_test.cu
new file mode 100644
index 0000000000..97e69cdb2e
--- /dev/null
+++ b/paddle/framework/lod_tensor_test.cu
@@ -0,0 +1,50 @@
+/*
+ Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+#include <cuda.h>
+#include <cuda_runtime.h>
+#include "paddle/framework/lod_tensor.h"
+#include "paddle/platform/assert.h"
+
+#include <gtest/gtest.h>
+
+__global__ void test(size_t* a, int size) {
+ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size;
+ i += blockDim.x * gridDim.x) {
+ a[i] *= 2;
+ }
+}
+
+TEST(LoDTensor, LoDInGPU) {
+ paddle::framework::LoDTensor lod_tensor;
+ paddle::platform::GPUPlace place(0);
+
+ paddle::framework::LoD src_lod;
+ src_lod.push_back(std::vector<size_t>{0, 2, 4, 6, 8, 10, 12, 14});
+
+ lod_tensor.Resize({14, 16});
+ lod_tensor.mutable_data<float>(place);
+
+ lod_tensor.set_lod(src_lod);
+ CHECK_EQ(lod_tensor.lod_element(0, 2), 4);
+ CHECK_EQ(lod_tensor.lod_element(0, 4), 8);
+
+ auto lod = lod_tensor.lod();
+
+ test<<<1, 8>>>(lod[0].data(), lod[0].size());
+ cudaDeviceSynchronize();
+
+ for (size_t i = 0; i < src_lod[0].size(); ++i) {
+ CHECK_EQ(lod[0].data()[i], src_lod[0].data()[i] * 2);
+ }
+}
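
This test depends on the pinned-allocator change to `Vector` in `lod_tensor.h`: each LoD level now lives in page-locked host memory, so (with CUDA's unified virtual addressing) its `data()` pointer can be handed straight to a kernel, as `test<<<1, 8>>>` does above. The core type change in isolation (a sketch; the real alias is the `Vector` template shown earlier):

```c++
#include <thrust/host_vector.h>
#include <thrust/system/cuda/experimental/pinned_allocator.h>

// Host vector backed by pinned (page-locked) memory; under unified virtual
// addressing a CUDA kernel may dereference its data() pointer directly.
using PinnedSizeTVector = thrust::host_vector<
    size_t, thrust::system::cuda::experimental::pinned_allocator<size_t>>;
```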
diff --git a/paddle/framework/operator.cc b/paddle/framework/operator.cc
index e1e122091f..f8a64a7866 100644
--- a/paddle/framework/operator.cc
+++ b/paddle/framework/operator.cc
@@ -22,14 +22,14 @@ namespace framework {
template <>
Eigen::DefaultDevice& ExecutionContext::GetEigenDevice<
platform::CPUPlace, Eigen::DefaultDevice>() const {
- return *device_context_->get_eigen_device<Eigen::DefaultDevice>();
+ return *device_context_.get_eigen_device<Eigen::DefaultDevice>();
}
#ifndef PADDLE_ONLY_CPU
template <>
Eigen::GpuDevice&
ExecutionContext::GetEigenDevice<platform::GPUPlace, Eigen::GpuDevice>() const {
- return *device_context_->get_eigen_device<Eigen::GpuDevice>();
+ return *device_context_.get_eigen_device<Eigen::GpuDevice>();
}
#endif
@@ -186,6 +186,48 @@ void OperatorBase::GenerateTemporaryNames() {
}
}
+template <>
+const Tensor* InferShapeContext::Input<Tensor>(const std::string& name) const {
+ auto* var = InputVar(name);
+ return var == nullptr ? nullptr : GetTensorFromVar(var);
+}
+
+template <>
+const std::vector<const Tensor*> InferShapeContext::MultiInput<Tensor>(
+ const std::string& name) const {
+ auto names = op().Inputs(name);
+ std::vector<const Tensor*> res;
+ res.reserve(names.size());
+ std::transform(names.begin(), names.end(), std::back_inserter(res),
+ [&](const std::string& sub_name) {
+ auto var = scope_.FindVar(sub_name);
+ return var == nullptr ? nullptr : GetTensorFromVar(var);
+ });
+ return res;
+}
+
+template <>
+Tensor* ExecutionContext::Output<Tensor>(const std::string& name) const {
+ auto* var = OutputVar(name);
+ return var == nullptr ? nullptr : const_cast<Tensor*>(GetTensorFromVar(var));
+}
+
+template <>
+std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
+ const std::string& name) const {
+ auto names = op().Outputs(name);
+ std::vector<Tensor*> res;
+ res.reserve(names.size());
+ std::transform(names.begin(), names.end(), std::back_inserter(res),
+ [&](const std::string& sub_name) {
+ auto var = scope().FindVar(sub_name);
+ return var == nullptr
+ ? nullptr
+ : const_cast<Tensor*>(GetTensorFromVar(var));
+ });
+ return res;
+}
+
void OpProtoAndCheckerMaker::Validate() {
validated_ = true;
CheckNoDuplicatedInOutAttrs();
diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h
index 4600b06009..b7c9c39402 100644
--- a/paddle/framework/operator.h
+++ b/paddle/framework/operator.h
@@ -22,6 +22,7 @@ limitations under the License. */
#include "op_info.h"
#include "paddle/framework/attribute.h"
#include "paddle/framework/framework.pb.h"
+#include "paddle/framework/lod_tensor.h"
#include "paddle/framework/scope.h"
#include "paddle/framework/tensor.h"
#include "paddle/platform/device_context.h"
@@ -326,11 +327,27 @@ class InferShapeContext {
return res;
}
+ const Tensor* GetTensorFromVar(const Variable* var) const {
+ if (var->IsType<LoDTensor>()) {
+ return &var->Get<LoDTensor>();
+ }
+ PADDLE_ENFORCE(var->IsType<Tensor>(),
+ "The Input(%s) must be LoDTensor or Tensor.");
+ return &var->Get<Tensor>();
+ }
+
private:
const OperatorBase& op_;
const Scope& scope_;
};
+template <>
+const Tensor* InferShapeContext::Input<Tensor>(const std::string& name) const;
+
+template <>
+const std::vector<const Tensor*> InferShapeContext::MultiInput<Tensor>(
+ const std::string& name) const;
+
template <typename PlaceType>
struct EigenDeviceConverter;
@@ -349,7 +366,7 @@ struct EigenDeviceConverter {
class ExecutionContext : public InferShapeContext {
public:
ExecutionContext(const OperatorBase& op, const Scope& scope,
- const platform::DeviceContext* device_context)
+ const platform::DeviceContext& device_context)
: InferShapeContext(op, scope), device_context_(device_context) {}
template <typename PlaceType, typename DeviceType = typename EigenDeviceConverter<PlaceType>::EigenDeviceType>
DeviceType& GetEigenDevice() const;
- platform::Place GetPlace() const { return device_context_->GetPlace(); }
+ platform::Place GetPlace() const { return device_context_.GetPlace(); }
- const platform::DeviceContext* device_context() const {
+ const platform::DeviceContext& device_context() const {
return device_context_;
}
- const platform::DeviceContext* device_context_;
+ // redefine Output function,
+ // use Variable::Get instead of Variable::GetMutable
+ template <typename T>
+ T* Output(const std::string& name) const {
+ auto var = OutputVar(name);
+ return var == nullptr ? nullptr : const_cast<T*>(&var->Get<T>());
+ }
+
+ // redefine MultiOutput function.
+ // use Variable::Get instead of Variable::GetMutable
+ template <typename T>
+ std::vector<T*> MultiOutput(const std::string& name) const {
+ auto names = op().Outputs(name);
+ std::vector<T*> res;
+ res.reserve(names.size());
+ std::transform(
+ names.begin(), names.end(), std::back_inserter(res),
+ [&](const std::string& sub_name) { return Output<T>(sub_name); });
+ return res;
+ }
+
+ private:
+ const platform::DeviceContext& device_context_;
};
+template <>
+Tensor* ExecutionContext::Output<Tensor>(const std::string& name) const;
+
+template <>
+std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
+ const std::string& name) const;
+
class OpKernel {
public:
/**
@@ -416,7 +462,7 @@ class OperatorWithKernel : public OperatorBase {
void Run(const Scope& scope,
const platform::DeviceContext& dev_ctx) const final {
auto& opKernel = AllOpKernels().at(type_).at(OpKernelKey(dev_ctx));
- opKernel->Compute(ExecutionContext(*this, scope, &dev_ctx));
+ opKernel->Compute(ExecutionContext(*this, scope, dev_ctx));
}
static std::unordered_map<std::string /* op_type */, OpKernelMap>&
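
The net effect of these specializations: kernel code keeps requesting plain `Tensor`s, and `GetTensorFromVar` transparently unwraps variables that actually hold `LoDTensor`s (which now derive from `Tensor`). A hedged sketch of a kernel body relying on this (the input/output names are hypothetical):

```c++
// Inside some OpKernel::Compute(const ExecutionContext& ctx):
const Tensor* x = ctx.Input<Tensor>("X");  // works for Tensor or LoDTensor vars
Tensor* out = ctx.Output<Tensor>("Out");   // via the Output<Tensor> specialization
out->mutable_data<float>(ctx.GetPlace());
```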
diff --git a/paddle/framework/tensor.h b/paddle/framework/tensor.h
index ce938b2143..4b5a2ae523 100644
--- a/paddle/framework/tensor.h
+++ b/paddle/framework/tensor.h
@@ -81,6 +81,9 @@ class Tensor {
/*! Return the dimensions of the memory block. */
inline const DDim& dims() const;
+ /*! Return the numel of the memory block. */
+ inline int64_t numel() const;
+
/*! Resize the dimensions of the memory block. */
inline Tensor& Resize(const DDim& dims);
@@ -162,6 +165,12 @@ class Tensor {
/*! points to dimensions of memory block. */
DDim dims_;
+ /**
+ * A cache of the number of elements in a tensor.
+ * Would be 0 for an uninitialized tensor.
+ */
+ int64_t numel_;
+
/**
* @brief A PlaceHolder may be shared by more than one tensor.
*
diff --git a/paddle/framework/tensor_impl.h b/paddle/framework/tensor_impl.h
index 637f04ae00..ed166935f7 100644
--- a/paddle/framework/tensor_impl.h
+++ b/paddle/framework/tensor_impl.h
@@ -22,9 +22,9 @@ namespace framework {
template <typename T>
inline void Tensor::check_memory_size() const {
PADDLE_ENFORCE_NOT_NULL(
- holder_, "Tenosr holds no memory. Call Tensor::mutable_data first.");
+ holder_, "Tensor holds no memory. Call Tensor::mutable_data first.");
PADDLE_ENFORCE_GE(
- holder_->size(), product(dims_) * sizeof(T) + offset_,
+ holder_->size(), numel() * sizeof(T) + offset_,
"Tensor's dims_ is out of bound. Call Tensor::mutable_data "
"first to re-allocate memory.\n"
"or maybe the required data-type mismatches the data already stored.");
@@ -54,11 +54,11 @@ inline T* Tensor::mutable_data(DDim dims, platform::Place place) {
template <typename T>
inline T* Tensor::mutable_data(platform::Place place) {
static_assert(std::is_pod::value, "T must be POD");
- PADDLE_ENFORCE_GT(product(dims_), 0,
+ PADDLE_ENFORCE_GT(numel(), 0,
"Tensor's numel must be larger than zero to call "
"Tensor::mutable_data. Call Tensor::set_dim first.");
/* some versions of boost::variant don't have operator!= */
- int64_t size = product(dims_) * sizeof(T);
+ int64_t size = numel() * sizeof(T);
if (holder_ == nullptr || !(holder_->place() == place) ||
holder_->size() < size + offset_) {
if (platform::is_cpu_place(place)) {
@@ -97,7 +97,7 @@ inline void Tensor::CopyFrom(const Tensor& src,
auto dst_ptr = static_cast<void*>(mutable_data<T>(dst_place));
- auto size = product(src.dims_) * sizeof(T);
+ auto size = src.numel() * sizeof(T);
if (platform::is_cpu_place(src_place) && platform::is_cpu_place(dst_place)) {
memory::Copy(boost::get<platform::CPUPlace>(dst_place), dst_ptr,
@@ -131,7 +131,7 @@ inline Tensor Tensor::Slice(const int& begin_idx, const int& end_idx) const {
PADDLE_ENFORCE_LT(begin_idx, end_idx,
"Begin index must be less than end index.");
PADDLE_ENFORCE_NE(dims_[0], 1, "Can not slice a tensor with dims_[0] = 1.");
- size_t base = product(dims_) / dims_[0];
+ size_t base = numel() / dims_[0];
Tensor dst;
dst.holder_ = holder_;
DDim dst_dims = dims_;
@@ -143,11 +143,14 @@ inline Tensor Tensor::Slice(const int& begin_idx, const int& end_idx) const {
inline Tensor& Tensor::Resize(const DDim& dims) {
dims_ = dims;
+ numel_ = product(dims_);
return *this;
}
inline const DDim& Tensor::dims() const { return dims_; }
+inline int64_t Tensor::numel() const { return numel_; }
+
template <typename T>
inline Tensor ReshapeToMatrix(const Tensor& src, int num_col_dims) {
Tensor res;
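
Since `numel_` is refreshed only inside `Resize`, callers must size a tensor before relying on `numel()`; per the comment added in `tensor.h`, an uninitialized tensor reports 0. A minimal usage sketch:

```c++
paddle::framework::Tensor t;
t.Resize({20, 128});    // caches numel_ = product(dims_) = 2560
int64_t n = t.numel();  // O(1) read of the cached count; no product() recomputation
float* data = t.mutable_data<float>(paddle::platform::CPUPlace());
```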
diff --git a/paddle/framework/tensor_test.cc b/paddle/framework/tensor_test.cc
index 55302ea471..e2ec738de3 100644
--- a/paddle/framework/tensor_test.cc
+++ b/paddle/framework/tensor_test.cc
@@ -36,7 +36,7 @@ TEST(Tensor, DataAssert) {
} catch (paddle::platform::EnforceNotMet err) {
caught = true;
std::string msg =
- "holder_ should not be null\nTenosr holds no memory. Call "
+ "holder_ should not be null\nTensor holds no memory. Call "
"Tensor::mutable_data first.";
const char* what = err.what();
for (size_t i = 0; i < msg.length(); ++i) {
@@ -112,7 +112,7 @@ TEST(Tensor, ShareDataWith) {
} catch (paddle::platform::EnforceNotMet err) {
caught = true;
std::string msg =
- "holder_ should not be null\nTenosr holds no memory. Call "
+ "holder_ should not be null\nTensor holds no memory. Call "
"Tensor::mutable_data first.";
const char* what = err.what();
for (size_t i = 0; i < msg.length(); ++i) {
@@ -274,4 +274,4 @@ TEST(Tensor, ReshapeToMatrix) {
Tensor res = ReshapeToMatrix(src, 2);
ASSERT_EQ(res.dims()[0], 2 * 3);
ASSERT_EQ(res.dims()[1], 4 * 9);
-}
\ No newline at end of file
+}
diff --git a/paddle/function/neon/NeonDepthwiseConv.cpp b/paddle/function/neon/NeonDepthwiseConv.cpp
index 18126152ea..38aa667061 100644
--- a/paddle/function/neon/NeonDepthwiseConv.cpp
+++ b/paddle/function/neon/NeonDepthwiseConv.cpp
@@ -52,7 +52,7 @@ public:
int outputHeight = output[2];
int outputWidth = output[3];
int filterMultiplier = outputChannels / groups_;
- CHECK_EQ(inputChannels, groups_);
+ CHECK_EQ(static_cast<size_t>(inputChannels), groups_);
// only support strideH() == strideW() and filterHeight == filterWidth.
CHECK_EQ(strideH(), strideW());
diff --git a/paddle/function/neon/NeonDepthwiseConv.h b/paddle/function/neon/NeonDepthwiseConv.h
index aefeea78ba..33722d3cac 100644
--- a/paddle/function/neon/NeonDepthwiseConv.h
+++ b/paddle/function/neon/NeonDepthwiseConv.h
@@ -594,7 +594,7 @@ struct StridePadding {
float32x4_t s1 = vdupq_n_f32(0.f);
for (int s = 0; s < step; s++) {
float32x4_t s0 = vld1q_f32(input);
- float32x4x2_t v = {s0, s1};
+ float32x4x2_t v = {{s0, s1}};
vst2q_f32(inputPadding, v);
input += 4;
inputPadding += 8;
diff --git a/paddle/gserver/activations/ActivationFunction.cpp b/paddle/gserver/activations/ActivationFunction.cpp
index 78e958e06f..8b7b2e9b65 100644
--- a/paddle/gserver/activations/ActivationFunction.cpp
+++ b/paddle/gserver/activations/ActivationFunction.cpp
@@ -22,9 +22,12 @@ limitations under the License. */
#include
#include "paddle/parameter/Argument.h"
#include "paddle/utils/ClassRegistrar.h"
-
#include "paddle/utils/Logging.h"
+#ifdef PADDLE_USE_MKLDNN
+#include "MKLDNNActivation.h"
+#endif
+
namespace paddle {
static ClassRegistrar<ActivationFunction> gActivationRegistrar;
@@ -456,6 +459,12 @@ Error __must_check backward(Argument& act) {
END_DEFINE_ACTIVATION(log)
ActivationFunction* ActivationFunction::create(const std::string& type) {
+#ifdef PADDLE_USE_MKLDNN
+ if (!type.empty() && type.compare(0, 7, "mkldnn_") == 0) {
+ return MKLDNNActivation::create(type);
+ }
+#endif
+
return gActivationRegistrar.createByType(type);
}
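
With this hook, any activation type carrying the `mkldnn_` prefix is diverted to the MKLDNN registrar before the default registrar is consulted; everything else resolves as before. A hedged usage sketch:

```c++
// "mkldnn_tanh" resolves through MKLDNNActivation::create;
// plain "tanh" still goes through gActivationRegistrar.
ActivationFunction* act = ActivationFunction::create("mkldnn_tanh");
```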
diff --git a/paddle/gserver/activations/MKLDNNActivation.cpp b/paddle/gserver/activations/MKLDNNActivation.cpp
new file mode 100644
index 0000000000..ac50937ef3
--- /dev/null
+++ b/paddle/gserver/activations/MKLDNNActivation.cpp
@@ -0,0 +1,87 @@
+/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "MKLDNNActivation.h"
+#include "mkldnn.hpp"
+#include "paddle/utils/ClassRegistrar.h"
+
+namespace paddle {
+
+static ClassRegistrar<ActivationFunction> gMKLDNNActivationRegistrar;
+/**
+ * @def MKLDNN_ACTIVATION_CLASS_NAME
+ * @note MKLDNN_ACTIVATION_CLASS_NAME(relu) relu_;
+ * means mkldnn_reluActivation relu_;
+ */
+#define MKLDNN_ACTIVATION_CLASS_NAME(ACT_TYPE) mkldnn_##ACT_TYPE##Activation
+
+/**
+ * @def DEFINE_MKLDNN_ELTWISE_ACTIVATION
+ */
+#define DEFINE_MKLDNN_ELTWISE_ACTIVATION(ACT_TYPE, ALPHA, BWD_ALPHA) \
+ class MKLDNN_ACTIVATION_CLASS_NAME(ACT_TYPE) \
+ : public MKLDNNEltwiseActivation { \
+ private: \
+ static const std::string name; \
+ static const float alpha; \
+ static const float bwdAlpha; \
+ \
+ public: \
+ const std::string& getName() const { return name; } \
+ float getAlpha() const { return alpha; } \
+ float getBwdAlpha() const { return bwdAlpha; } \
+ }; \
+ const std::string MKLDNN_ACTIVATION_CLASS_NAME(ACT_TYPE)::name = \
+ "mkldnn_" #ACT_TYPE; \
+ const float MKLDNN_ACTIVATION_CLASS_NAME(ACT_TYPE)::alpha = ALPHA; \
+ const float MKLDNN_ACTIVATION_CLASS_NAME(ACT_TYPE)::bwdAlpha = BWD_ALPHA; \
+ static InitFunction __reg_activation__mkldnn_##ACT_TYPE([] { \
+ gMKLDNNActivationRegistrar \
+ .registerClass<MKLDNN_ACTIVATION_CLASS_NAME(ACT_TYPE)>( \
+ "mkldnn_" #ACT_TYPE); \
+ });
+
+/**
+ * @brief MKLDNN Relu Activation.
+ * Actually mkldnn_relu is Leaky Relu.
+ * f(x) = x (x >= 0)
+ * f(x) = negative_slope * x (x < 0)
+ * @note the negative_slope should be -0.f in forward
+ */
+DEFINE_MKLDNN_ELTWISE_ACTIVATION(relu, -0.f, 0.f)
+
+/**
+ * @brief MKLDNN Tanh Activation.
+ */
+DEFINE_MKLDNN_ELTWISE_ACTIVATION(tanh, 0.f, 0.f)
+
+/**
+ * @brief MKLDNN ELU(Exponential Linear Unit) Activation.
+ * f(x) = x (x >= 0)
+ * f(x) = negative_slope * (exp(x) - 1) (x < 0)
+ */
+DEFINE_MKLDNN_ELTWISE_ACTIVATION(elu, 0.f, 0.f)
+
+ActivationFunction* MKLDNNActivation::create(const std::string& type) {
+ return gMKLDNNActivationRegistrar.createByType(type);
+}
+
+std::vector<std::string> MKLDNNActivation::getAllRegisteredTypes() {
+ std::vector<std::string> types;
+ gMKLDNNActivationRegistrar.forEachType(
+ [&](const std::string& type) { types.push_back(type); });
+ return types;
+}
+
+} // namespace paddle
diff --git a/paddle/gserver/activations/MKLDNNActivation.h b/paddle/gserver/activations/MKLDNNActivation.h
new file mode 100644
index 0000000000..86ffe38736
--- /dev/null
+++ b/paddle/gserver/activations/MKLDNNActivation.h
@@ -0,0 +1,183 @@
+/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+#include "ActivationFunction.h"
+#include "mkldnn.hpp"
+#include "paddle/gserver/layers/MKLDNNBase.h"
+#include "paddle/math/MKLDNNMatrix.h"
+#include "paddle/parameter/Argument.h"
+
+namespace paddle {
+
+/**
+ * @brief Base class of MKLDNN Activation.
+ * Common activation functions are provided,
+ * including mkldnn_relu, mkldnn_elu, mkldnn_tanh, mkldnn_softmax
+ */
+class MKLDNNActivation : public ActivationFunction {
+protected:
+ // input value element count
+ size_t cnt_;
+ // should not merge the resetBwd into resetFwd,
+ // because the grad data would be changing before backward.
+ bool needResetBwd_;
+ // mkldnn matrix, primitive, stream and pipeline
+ MKLDNNMatrixPtr val_;
+ MKLDNNMatrixPtr grad_;
+ std::shared_ptr<MKLDNNStream> stream_;
+ std::shared_ptr<mkldnn::primitive> fwd_;
+ std::shared_ptr<mkldnn::primitive> bwd_;
+ std::vector<mkldnn::primitive> pipelineFwd_;
+ std::vector<mkldnn::primitive> pipelineBwd_;
+
+public:
+ MKLDNNActivation() : cnt_(0), needResetBwd_(true) {}
+ ~MKLDNNActivation() {}
+ static ActivationFunction* create(const std::string& type);
+ static std::vector<std::string> getAllRegisteredTypes();
+ virtual const std::string& getName() const = 0;
+ virtual Error __must_check forward(Argument& act) = 0;
+ virtual Error __must_check backward(Argument& act) = 0;
+};
+
+/**
+ * @brief Base class of MKLDNN Eltwise Activation,
+ * includes mkldnn_relu, mkldnn_elu and mkldnn_tanh.
+ */
+class MKLDNNEltwiseActivation : public MKLDNNActivation {
+ typedef mkldnn::eltwise_forward eltwise_fwd;
+ typedef mkldnn::eltwise_backward eltwise_bwd;
+
+protected:
+ // save the forward primitive desc, which can be used backward
+ std::shared_ptr<eltwise_fwd::primitive_desc> fwdPD_;
+ // eltwise_bwd need src input value
+ MKLDNNMatrixPtr inVal_;
+ // use for copy data
+ std::shared_ptr<mkldnn::reorder> copyInVal_;
+
+public:
+ MKLDNNEltwiseActivation() {}
+
+ ~MKLDNNEltwiseActivation() {}
+
+ virtual const std::string& getName() const = 0;
+
+ // In general, the alpha of forward and backward should be equal;
+ // but for relu, to avoid negative values, they should be opposites.
+ virtual float getAlpha() const = 0;
+ virtual float getBwdAlpha() const = 0;
+ virtual float getBeta() const { return 0.f; }
+ virtual mkldnn::algorithm getAlgo(const std::string& type) const {
+ if (type == "mkldnn_relu") {
+ return mkldnn::algorithm::eltwise_relu;
+ } else if (type == "mkldnn_tanh") {
+ return mkldnn::algorithm::eltwise_tanh;
+ } else if (type == "mkldnn_elu") {
+ return mkldnn::algorithm::eltwise_elu;
+ } else {
+ LOG(FATAL) << "Unkown eltwise activation type: " << type;
+ }
+ return (mkldnn::algorithm)0;
+ }
+
+ /**
+ * reshape and reset the forward primitives
+ */
+ void resetFwd(Argument& act) {
+ if (cnt_ == act.value->getElementCnt()) {
+ return;
+ }
+ cnt_ = act.value->getElementCnt();
+ stream_.reset(new MKLDNNStream());
+ auto eng = CPUEngine::Instance().getEngine();
+
+ // get algo setting
+ mkldnn::algorithm algo = getAlgo(this->getName());
+ // note: alpha represents the NegativeSlope when used in relu.
+ float alpha = getAlpha();
+ float beta = getBeta();
+
+ /// forward
+ pipelineFwd_.clear();
+ val_ = std::dynamic_pointer_cast<MKLDNNMatrix>(act.value);
+ if (val_ == nullptr) {
+ int bs = act.getBatchSize();
+ int ih = act.getFrameHeight() > 0 ? act.getFrameHeight() : 1;
+ int iw = act.getFrameWidth() > 0 ? act.getFrameWidth() : 1;
+ int ic = cnt_ / bs / ih / iw;
+ CHECK_EQ(cnt_, (size_t)bs * ic * ih * iw);
+ val_ = MKLDNNMatrix::create(
+ act.value, {bs, ic, ih, iw}, mkldnn::memory::format::nchw, eng);
+ CHECK(val_);
+ }
+ auto fwdDesc = eltwise_fwd::desc(mkldnn::prop_kind::forward_training,
+ algo,
+ val_->getMemoryDesc(),
+ alpha,
+ beta);
+ fwdPD_.reset(new eltwise_fwd::primitive_desc(fwdDesc, eng));
+ // use inplace for forward but save input value before submit
+ inVal_ = val_;
+ copyInVal_ = nullptr;
+ if (act.grad && algo == mkldnn::algorithm::eltwise_tanh) {
+ // tanh need save src input for backward
+ inVal_ = MKLDNNMatrix::create(nullptr, val_->getPrimitiveDesc());
+ copyInVal_ = std::make_shared<mkldnn::reorder>(*val_, *inVal_);
+ CHECK(copyInVal_) << "should not be empty";
+ pipelineFwd_.push_back(*copyInVal_);
+ }
+ fwd_.reset(new eltwise_fwd(*fwdPD_, *val_, *val_));
+ pipelineFwd_.push_back(*fwd_);
+ needResetBwd_ = true;
+ }
+
+ /**
+ * reset the backward primitives, can not merge into resetFwd as the grad data
+ * would be changing before backward.
+ */
+ void resetBwd(Argument& act) {
+ if (!needResetBwd_) {
+ return;
+ }
+ needResetBwd_ = false;
+ mkldnn::algorithm algo = getAlgo(this->getName());
+ float alpha = getBwdAlpha();
+ float beta = getBeta();
+ grad_ = MKLDNNMatrix::create(act.grad, val_->getPrimitiveDesc());
+ auto eng = CPUEngine::Instance().getEngine();
+ auto bwdDesc = eltwise_bwd::desc(
+ algo, grad_->getMemoryDesc(), val_->getMemoryDesc(), alpha, beta);
+ auto bwdPD = eltwise_bwd::primitive_desc(bwdDesc, eng, *fwdPD_);
+ CHECK(inVal_);
+ bwd_.reset(new eltwise_bwd(bwdPD, *inVal_, *grad_, *grad_));
+ pipelineBwd_.clear();
+ pipelineBwd_.push_back(*bwd_);
+ }
+
+ Error __must_check forward(Argument& act) {
+ resetFwd(act);
+ stream_->submit(pipelineFwd_);
+ return Error();
+ }
+
+ Error __must_check backward(Argument& act) {
+ resetBwd(act);
+ stream_->submit(pipelineBwd_);
+ return Error();
+ }
+};
+
+} // namespace paddle
diff --git a/paddle/gserver/layers/CudnnPoolLayer.cpp b/paddle/gserver/layers/CudnnPoolLayer.cpp
index 4adb2d4709..810a1af2d0 100644
--- a/paddle/gserver/layers/CudnnPoolLayer.cpp
+++ b/paddle/gserver/layers/CudnnPoolLayer.cpp
@@ -29,9 +29,9 @@ bool CudnnPoolLayer::typeCheck(const std::string &poolType,
if (mode) {
*mode = HL_POOLING_AVERAGE;
}
- } else if (poolType == "cudnn-avg-excl-pad-pool") {
+ } else if (poolType == "cudnn-avg-incl-pad-pool") {
if (mode) {
- *mode = HL_POOLING_AVERAGE_EXCLUDE_PADDING;
+ *mode = HL_POOLING_AVERAGE_INCLUDE_PADDING;
}
} else {
return false;
diff --git a/paddle/gserver/layers/DetectionOutputLayer.cpp b/paddle/gserver/layers/DetectionOutputLayer.cpp
index 0cf0a92bf4..f9040f7ae7 100644
--- a/paddle/gserver/layers/DetectionOutputLayer.cpp
+++ b/paddle/gserver/layers/DetectionOutputLayer.cpp
@@ -143,7 +143,7 @@ void DetectionOutputLayer::forward(PassType passType) {
resetOutput(numKept, 7);
} else {
MatrixPtr outV = getOutputValue();
- outV = NULL;
+ if (outV) outV->resize(0, 0);
return;
}
MatrixPtr outV = getOutputValue();
diff --git a/paddle/gserver/layers/ExpandConvBaseLayer.cpp b/paddle/gserver/layers/ExpandConvBaseLayer.cpp
deleted file mode 100644
index 2b7bef0a75..0000000000
--- a/paddle/gserver/layers/ExpandConvBaseLayer.cpp
+++ /dev/null
@@ -1,124 +0,0 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#include "ExpandConvBaseLayer.h"
-
-#include "paddle/utils/Logging.h"
-namespace paddle {
-
-bool ExpandConvBaseLayer::init(const LayerMap &layerMap,
- const ParameterMap ¶meterMap) {
- /* Initialize the basic convolutional parent class */
- ConvBaseLayer::init(layerMap, parameterMap);
-
- int index = 0;
- for (auto &inputConfig : config_.inputs()) {
- const ConvConfig &conf = inputConfig.conv_conf();
- /* Consistent caffe mode for multiple input */
- caffeMode_ = conf.caffe_mode();
-
- // create a new weight
- size_t height, width;
- height = filterPixels_[index] * filterChannels_[index];
- width = (!isDeconv_) ? numFilters_ : channels_[index];
- CHECK_EQ(parameters_[index]->getSize(), width * height);
- Weight *w = new Weight(height, width, parameters_[index]);
- weights_.emplace_back(w);
- index++;
- }
- if (biasParameter_.get()) {
- if (sharedBiases_) {
- CHECK_EQ((size_t)numFilters_, biasParameter_->getSize());
- biases_ =
- std::unique_ptr<Weight>(new Weight(numFilters_, 1, biasParameter_));
- } else {
- biases_ =
- std::unique_ptr<Weight>(new Weight(getSize(), 1, biasParameter_));
- }
- }
- getOutputSize();
-
- return true;
-}
-
-size_t ExpandConvBaseLayer::getOutputSize() {
- CHECK_NE(inputLayers_.size(), 0UL);
- size_t layerSize = ConvBaseLayer::calOutputSize();
- return layerSize;
-}
-
-void ExpandConvBaseLayer::addSharedBias() {
- size_t mapW = getOutputSize() / numFilters_;
- size_t mapH = getOutputValue()->getElementCnt() / mapW;
- MatrixPtr out =
- Matrix::create(getOutputValue()->getData(), mapH, mapW, false, useGpu_);
-
- Matrix::resizeOrCreate(transOutValue_, mapW, mapH, false, useGpu_);
-
- out->transpose(transOutValue_, false); // false means no memory allocation
- transOutValue_->reshape(transOutValue_->getElementCnt() / numFilters_,
- numFilters_);
-
- MatrixPtr bias = Matrix::create(biases_->getW()->getData(),
- 1,
- biases_->getW()->getElementCnt(),
- false,
- useGpu_);
- transOutValue_->addBias(*bias, 1.0f);
-
- transOutValue_->reshape(mapW, mapH);
- transOutValue_->transpose(out, false); // false means no memory allocation
-
- out->clear();
- bias->clear();
-}
-
-void ExpandConvBaseLayer::addUnsharedBias() {
- MatrixPtr outValue = getOutputValue();
- MatrixPtr bias = Matrix::create(biases_->getW()->getData(),
- 1,
- biases_->getW()->getElementCnt(),
- false,
- useGpu_);
- outValue->addBias(*bias, 1.0f);
-}
-
-void ExpandConvBaseLayer::bpropSharedBias(MatrixPtr biases, MatrixPtr v) {
- size_t mapW = getOutputSize() / numFilters_;
- size_t mapH = v->getElementCnt() / mapW;
- MatrixPtr vTmp = Matrix::create(v->getData(), mapH, mapW, false, useGpu_);
-
- Matrix::resizeOrCreate(transOutValue_, mapW, mapH, false, useGpu_);
-
- vTmp->transpose(transOutValue_, false); // false means no memory allocation
- transOutValue_->reshape(transOutValue_->getElementCnt() / numFilters_,
- numFilters_);
- biases->collectBias(*transOutValue_, 1.0f);
-}
-
-void ExpandConvBaseLayer::bpropBiases(MatrixPtr v) {
- MatrixPtr biases = Matrix::create(biases_->getWGrad()->getData(),
- 1,
- biases_->getWGrad()->getElementCnt(),
- false,
- useGpu_);
- if (sharedBiases_) {
- bpropSharedBias(biases, v);
- } else {
- biases->collectBias(*v, 1.0f);
- }
- biases->clear();
-}
-
-} // namespace paddle
diff --git a/paddle/gserver/layers/ExpandConvBaseLayer.h b/paddle/gserver/layers/ExpandConvBaseLayer.h
deleted file mode 100644
index 01c699d234..0000000000
--- a/paddle/gserver/layers/ExpandConvBaseLayer.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#pragma once
-
-#include <vector>
-#include "ConvBaseLayer.h"
-#include "paddle/math/Matrix.h"
-
-namespace paddle {
-
-/**
- * @brief A subclass of ConvBaseLayer that is a superclass of both
- * ExpandConvLayer and ExpandConvTransLayer
- */
-class ExpandConvBaseLayer : public ConvBaseLayer {
-protected:
- /// The transpose of output, which is an auxiliary matrix.
- MatrixPtr transOutValue_;
-
-public:
- explicit ExpandConvBaseLayer(const LayerConfig& config)
- : ConvBaseLayer(config) {}
-
- ~ExpandConvBaseLayer() {}
-
- bool init(const LayerMap& layerMap,
- const ParameterMap& parameterMap) override;
-
- size_t getOutputSize();
-
- /**
- * Add shared bias.
- */
- void addSharedBias();
-
- /**
- * Add unshared bias.
- */
- void addUnsharedBias();
-
- void bpropSharedBias(MatrixPtr biases, MatrixPtr v);
- void bpropBiases(MatrixPtr v);
-};
-
-} // namespace paddle
diff --git a/paddle/gserver/layers/ExpandConvLayer.cpp b/paddle/gserver/layers/ExpandConvLayer.cpp
index 20de475fc3..48dfcb49a4 100644
--- a/paddle/gserver/layers/ExpandConvLayer.cpp
+++ b/paddle/gserver/layers/ExpandConvLayer.cpp
@@ -36,7 +36,36 @@ inline bool isDepthwiseConv(int channels, int groups) {
bool ExpandConvLayer::init(const LayerMap &layerMap,
const ParameterMap ¶meterMap) {
/* Initialize the basic convolutional parent class */
- ExpandConvBaseLayer::init(layerMap, parameterMap);
+ ConvBaseLayer::init(layerMap, parameterMap);
+
+ int index = 0;
+ for (auto &inputConfig : config_.inputs()) {
+ const ConvConfig &conf = inputConfig.conv_conf();
+ /* Consistent caffe mode for multiple input */
+ caffeMode_ = conf.caffe_mode();
+
+ // create a new weight
+ size_t height, width;
+ height = filterPixels_[index] * filterChannels_[index];
+ width = (!isDeconv_) ? numFilters_ : channels_[index];
+ CHECK_EQ(parameters_[index]->getSize(), width * height);
+ Weight *w = new Weight(height, width, parameters_[index]);
+ weights_.emplace_back(w);
+ index++;
+ }
+
+ if (biasParameter_.get()) {
+ if (sharedBiases_) {
+ CHECK_EQ((size_t)numFilters_, biasParameter_->getSize());
+ biases_ = std::unique_ptr<Weight>(
+ new Weight(1, numFilters_, biasParameter_, 0));
+ } else {
+ biases_ =
+ std::unique_ptr<Weight>(new Weight(1, getSize(), biasParameter_, 0));
+ }
+ }
+
+ getOutputSize();
size_t numInputs = config_.inputs_size();
inputShape_.resize(numInputs);
@@ -108,6 +137,12 @@ bool ExpandConvLayer::init(const LayerMap &layerMap,
return true;
}
+size_t ExpandConvLayer::getOutputSize() {
+ CHECK_NE(inputLayers_.size(), 0UL);
+ size_t layerSize = ConvBaseLayer::calOutputSize();
+ return layerSize;
+}
+
// i is the index of input layers
#define BACKWARD_INPUT(i, inputs, outputs) \
backward_[2 * i]->calc(inputs, outputs)
@@ -155,11 +190,7 @@ void ExpandConvLayer::forward(PassType passType) {
/* add the bias-vector */
if (biases_.get()) {
- if (sharedBiases_) {
- addSharedBias();
- } else {
- addUnsharedBias();
- }
+ output_.value->addBias(*biases_->getW(), 1.0, sharedBiases_);
}
/* activation */
@@ -171,7 +202,7 @@ void ExpandConvLayer::backward(const UpdateCallback &callback) {
MatrixPtr outGrad = getOutputGrad();
if (biases_ && biases_->getWGrad()) {
- bpropBiases(outGrad);
+ biases_->getWGrad()->collectBias(*getOutputGrad(), 1, sharedBiases_);
/* Increasing the number of gradient */
biases_->getParameterPtr()->incUpdate(callback);
}
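
The removed helpers are replaced by `Matrix::addBias`/`Matrix::collectBias` calls that take a shared-bias flag. Conceptually, the shared path broadcasts one scalar per output channel; an illustrative loop form of that broadcast (a sketch, not the actual matrix kernel):

```c++
// What output_.value->addBias(*biases_->getW(), 1.0, /*sharedBiases=*/true)
// amounts to for NCHW-shaped output data.
void addSharedBiasSketch(float* out, const float* bias, int batch,
                         int numFilters, int mapSize /* = outH * outW */) {
  for (int n = 0; n < batch; ++n)
    for (int c = 0; c < numFilters; ++c)
      for (int i = 0; i < mapSize; ++i)
        out[(n * numFilters + c) * mapSize + i] += bias[c];
}
```

The gradient side, `collectBias(*getOutputGrad(), 1, sharedBiases_)`, performs the mirror-image reduction: each `dBias[c]` accumulates the output gradient over batch and spatial positions.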
diff --git a/paddle/gserver/layers/ExpandConvLayer.h b/paddle/gserver/layers/ExpandConvLayer.h
index a1f943d152..a0873de192 100644
--- a/paddle/gserver/layers/ExpandConvLayer.h
+++ b/paddle/gserver/layers/ExpandConvLayer.h
@@ -15,7 +15,7 @@ limitations under the License. */
#pragma once
#include <vector>
-#include "ExpandConvBaseLayer.h"
+#include "ConvBaseLayer.h"
#include "paddle/math/Matrix.h"
namespace paddle {
@@ -28,10 +28,9 @@ namespace paddle {
* The config file api is img_conv_layer.
*/
-class ExpandConvLayer : public ExpandConvBaseLayer {
+class ExpandConvLayer : public ConvBaseLayer {
public:
- explicit ExpandConvLayer(const LayerConfig& config)
- : ExpandConvBaseLayer(config) {}
+ explicit ExpandConvLayer(const LayerConfig& config) : ConvBaseLayer(config) {}
~ExpandConvLayer() {}
@@ -41,6 +40,8 @@ public:
void forward(PassType passType) override;
void backward(const UpdateCallback& callback) override;
+ size_t getOutputSize();
+
protected:
std::vector<TensorShape> inputShape_;
std::vector<TensorShape> filterShape_;
diff --git a/paddle/gserver/layers/Layer.cpp b/paddle/gserver/layers/Layer.cpp
index 2bc20eee6c..e95f42c863 100644
--- a/paddle/gserver/layers/Layer.cpp
+++ b/paddle/gserver/layers/Layer.cpp
@@ -14,26 +14,12 @@ limitations under the License. */
#include "paddle/utils/Util.h"
+#include "CostLayer.h"
+#include "ValidationLayer.h"
#include "paddle/math/SparseMatrix.h"
#include "paddle/utils/Error.h"
#include "paddle/utils/Logging.h"
-#include "AddtoLayer.h"
-#include "CRFLayer.h"
-#include "CosSimLayer.h"
-#include "CostLayer.h"
-#include "DataLayer.h"
-#include "ExpandConvLayer.h"
-#include "FullyConnectedLayer.h"
-#include "HierarchicalSigmoidLayer.h"
-#include "MaxLayer.h"
-#include "MixedLayer.h"
-#include "NormLayer.h"
-#include "PoolLayer.h"
-#include "TensorLayer.h"
-#include "TransLayer.h"
-#include "ValidationLayer.h"
-
DEFINE_bool(log_error_clipping, false, "enable log error clipping or not");
namespace paddle {
ClassRegistrar<Layer, LayerConfig> Layer::registrar_;
LayerPtr Layer::create(const LayerConfig& config) {
std::string type = config.type();
+ // NOTE: As the following types contain the illegal character '-',
+ // they cannot be registered with REGISTER_LAYER.
+ // Besides, to stay compatible with old training models,
+ // they cannot use '_' instead.
if (type == "multi-class-cross-entropy")
return LayerPtr(new MultiClassCrossEntropy(config));
else if (type == "rank-cost")
@@ -117,8 +107,6 @@ LayerPtr Layer::create(const LayerConfig& config) {
return LayerPtr(new AucValidation(config));
else if (type == "pnpair-validation")
return LayerPtr(new PnpairValidation(config));
- // NOTE: stop adding "if" statements here.
- // Instead, use REGISTER_LAYER to add more layer types
return LayerPtr(registrar_.createByType(config.type(), config));
}
diff --git a/paddle/gserver/layers/Layer.h b/paddle/gserver/layers/Layer.h
index edef36194a..4002a3d074 100644
--- a/paddle/gserver/layers/Layer.h
+++ b/paddle/gserver/layers/Layer.h
@@ -49,6 +49,12 @@ struct LayerState {
};
typedef std::shared_ptr<LayerState> LayerStatePtr;
+/// Paddle device ID, MKLDNN is -2, CPU is -1
+enum PADDLE_DEVICE_ID {
+ MKLDNN_DEVICE = -2,
+ CPU_DEVICE = -1,
+};
+
/**
* @brief Base class for layer.
* Define necessary variables and functions for every layer.
@@ -59,11 +65,6 @@ protected:
LayerConfig config_;
/// whether to use GPU
bool useGpu_;
- /// Paddle device ID, MKLDNN is -2, CPU is -1
- enum PADDLE_DEVICE_ID {
- MKLDNN_DEVICE = -2,
- CPU_DEVICE = -1,
- };
/// Device Id. MKLDNN is -2, CPU is -1, and GPU is 0, 1, 2 ...
int deviceId_;
/// Input layers
diff --git a/paddle/gserver/layers/MKLDNNConvLayer.cpp b/paddle/gserver/layers/MKLDNNConvLayer.cpp
new file mode 100644
index 0000000000..88b047c89b
--- /dev/null
+++ b/paddle/gserver/layers/MKLDNNConvLayer.cpp
@@ -0,0 +1,542 @@
+/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "MKLDNNConvLayer.h"
+#include "paddle/math/MathUtils.h"
+#include "paddle/utils/Logging.h"
+
+using namespace mkldnn; // NOLINT
+typedef memory::format format;
+
+namespace paddle {
+
+REGISTER_LAYER(mkldnn_conv, MKLDNNConvLayer);
+
+bool MKLDNNConvLayer::init(const LayerMap& layerMap,
+ const ParameterMap& parameterMap) {
+ if (!MKLDNNLayer::init(layerMap, parameterMap)) {
+ return false;
+ }
+ CHECK_EQ(inputLayers_.size(), 1) << "Only support one input layer yet";
+ CHECK_EQ(inputLayers_.size(), parameters_.size());
+ CHECK(config_.shared_biases()) << "Only support shared biases yet";
+
+ oc_ = config_.num_filters();
+ const ConvConfig& conf = config_.inputs(0).conv_conf();
+ ic_ = conf.channels();
+ fw_ = conf.filter_size();
+ fh_ = conf.filter_size_y();
+ pw_ = conf.padding();
+ ph_ = conf.padding_y();
+ dw_ = conf.dilation();
+ dh_ = conf.dilation_y();
+ sw_ = conf.stride();
+ sh_ = conf.stride_y();
+ gp_ = conf.groups();
+ oh_ = conf.output_y();
+ ow_ = conf.output_x();
+ ih_ = conf.img_size_y();
+ iw_ = conf.img_size();
+ caffeMode_ = conf.caffe_mode();
+ CHECK(caffeMode_) << "Only support caffe mode yet";
+ CHECK(dh_ == 1 && dw_ == 1) << "Only support dilation 1 yet";
+ // check group setting
+ CHECK_EQ((oc_ / gp_) * gp_, oc_) << "group is indivisible for oc";
+ CHECK_EQ((ic_ / gp_) * gp_, ic_) << "group is indivisible for ic";
+
+ // create weight
+ size_t height = oc_ / gp_;
+ size_t width = ic_ * fh_ * fw_;
+ CHECK_EQ(parameters_[0]->getSize(), height * width);
+ weight_ =
+ std::unique_ptr<Weight>(new Weight(height, width, parameters_[0], 0));
+
+ // create biases
+ if (biasParameter_.get() != NULL) {
+ biases_ = std::unique_ptr<Weight>(new Weight(1, oc_, biasParameter_));
+ }
+ return true;
+}
+
+void MKLDNNConvLayer::convertWeightsFromPaddle() {
+ if (hasInitedWgt_) {
+ return;
+ }
+
+ CHECK(wgtVal_) << "should have been initialized";
+ // the paddle weight format is oihw or goihw
+ auto targetDim = wgtVal_->getDims();
+ auto srcFmt = (gp_ == 1) ? memory::format::oihw : memory::format::goihw;
+ wgtVal_->reorderDataFrom(wgtVal_, srcFmt, targetDim);
+ hasInitedWgt_ = true;
+}
+
+void MKLDNNConvLayer::convertWeightsToPaddle() {
+ CHECK(wgtVal_) << "should have been initialized";
+ auto targetDim = wgtVal_->getDims();
+ auto dstFmt = (gp_ == 1) ? memory::format::oihw : memory::format::goihw;
+ wgtVal_->reorderDataTo(wgtVal_, dstFmt, targetDim);
+}
+
+void MKLDNNConvLayer::reshape(
+ int& bs, int& ic, int& ih, int& iw, int oc, int& oh, int& ow) {
+ reshapeInput(bs, ih, iw);
+
+ // cal output sizes
+ // oc can not be changed
+ int fh = (fh_ - 1) * dh_ + 1;
+ int fw = (fw_ - 1) * dw_ + 1;
+ oh = outputSize(ih, fh, ph_, sh_, caffeMode_);
+ ow = outputSize(iw, fw, pw_, sw_, caffeMode_);
+
+ reshapeOutput(oh, ow);
+ resizeOutput(bs, oc * oh * ow);
+
+ printSizeInfo();
+}
+
+void MKLDNNConvLayer::resetFwd(std::vector<primitive>& pipeline,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias,
+ MKLDNNMatrixPtr& out) {
+ resetFwdPD(fwdPD_);
+
+ resetFwdBuffers(fwdPD_, in, wgt, bias, out);
+
+ resetFwdPipeline(pipeline, fwdPD_, in, wgt, bias, out);
+
+ printValueFormatFlow();
+}
+
+void MKLDNNConvLayer::resetBwd(std::vector<primitive>& pipeline,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias,
+ MKLDNNMatrixPtr& out) {
+ std::shared_ptr<conv_bwdWgt::primitive_desc> bwdWgtPD;
+ std::shared_ptr<conv_bwdData::primitive_desc> bwdDataPD;
+
+ resetBwdWgtPD(bwdWgtPD);
+
+ resetBwdDataPD(bwdDataPD);
+
+ resetBwdBuffers(bwdWgtPD, bwdDataPD, in, wgt, bias, out);
+
+ resetBwdPipeline(pipeline, bwdWgtPD, bwdDataPD, in, wgt, bias, out);
+
+ printGradFormatFlow();
+}
+
+void MKLDNNConvLayer::updateInputData() {
+ cpuInVal_->setData(getInputValue(0, CPU_DEVICE)->getData());
+}
+
+void MKLDNNConvLayer::updateWeights(const UpdateCallback& callback) {
+ weight_->getParameterPtr()->incUpdate(callback);
+ if (biases_ && biases_->getWGrad()) {
+ biases_->getParameterPtr()->incUpdate(callback);
+ }
+}
+
+void MKLDNNConvLayer::loadConvSettings(memory::dims& wgt,
+ memory::dims& bias,
+ memory::dims& stride,
+ memory::dims& dilation,
+ memory::dims& padL,
+ memory::dims& padR) {
+ wgt = (gp_ == 1) ? memory::dims{oc_, ic_, fh_, fw_}
+ : memory::dims{gp_, oc_ / gp_, ic_ / gp_, fh_, fw_};
+ bias = memory::dims{oc_};
+ stride = memory::dims{sh_, sw_};
+ padL = memory::dims{ph_, pw_};
+ padR = getPaddingR();
+ // note: mkldnn dilation starts from 0
+ dilation = memory::dims{dh_ - 1, dw_ - 1};
+}
+
+void MKLDNNConvLayer::resetFwdPD(
+ std::shared_ptr<conv_fwd::primitive_desc>& pd) {
+ // dims for conv
+ memory::dims inDims = memory::dims{bs_, ic_, ih_, iw_};
+ memory::dims outDims = memory::dims{bs_, oc_, oh_, ow_};
+ memory::dims wgtDims, biasDims, strides, dilations, padL, padR;
+ loadConvSettings(wgtDims, biasDims, strides, dilations, padL, padR);
+
+ prop_kind pk = passType_ == PASS_TEST ? prop_kind::forward_scoring
+ : prop_kind::forward_training;
+ algorithm algo = algorithm::convolution_direct;
+ padding_kind padKind = padding_kind::zero;
+ conv_fwd::desc fwdDesc =
+ biases_ && biases_->getW()
+ ? conv_fwd::desc(pk,
+ algo,
+ MKLDNNMatrix::createMemoryDesc(inDims),
+ MKLDNNMatrix::createMemoryDesc(wgtDims),
+ MKLDNNMatrix::createMemoryDesc(biasDims),
+ MKLDNNMatrix::createMemoryDesc(outDims),
+ strides,
+ dilations,
+ padL,
+ padR,
+ padKind)
+ : conv_fwd::desc(pk,
+ algo,
+ MKLDNNMatrix::createMemoryDesc(inDims),
+ MKLDNNMatrix::createMemoryDesc(wgtDims),
+ MKLDNNMatrix::createMemoryDesc(outDims),
+ strides,
+ dilations,
+ padL,
+ padR,
+ padKind);
+ pd.reset(new conv_fwd::primitive_desc(fwdDesc, engine_));
+}
+
+void MKLDNNConvLayer::resetFwdBuffers(
+ std::shared_ptr<conv_fwd::primitive_desc>& pd,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias,
+ MKLDNNMatrixPtr& out) {
+ CHECK(pd);
+ resetInValue(pd, in);
+
+ resetWgtBiasValue(pd, wgt, bias);
+
+ resetOutValue(pd, out);
+}
+
+void MKLDNNConvLayer::resetFwdPipeline(
+    std::vector<primitive>& pipeline,
+    std::shared_ptr<conv_fwd::primitive_desc>& pd,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias,
+ MKLDNNMatrixPtr& out) {
+ pipeline.clear();
+
+ if (cvtInVal_) {
+ pipeline.push_back(*cvtInVal_);
+ }
+
+ if (bias) {
+ fwd_.reset(new conv_fwd(*pd, *in, *wgt, *bias, *out));
+ } else {
+ fwd_.reset(new conv_fwd(*pd, *in, *wgt, *out));
+ }
+ pipeline.push_back(*fwd_);
+
+ if (cvtOutVal_) {
+ pipeline.push_back(*cvtOutVal_);
+ }
+}
+
+void MKLDNNConvLayer::resetInValue(
+    std::shared_ptr<conv_fwd::primitive_desc>& pd, MKLDNNMatrixPtr& in) {
+ const MatrixPtr& inMat = inputLayers_[0]->getOutput().value;
+ in = MKLDNNMatrix::create(inMat, pd->src_primitive_desc());
+
+  // create buffer and reorder if the input value does not match
+ cpuInVal_ = nullptr;
+ cvtInVal_ = nullptr;
+ if (inputIsOnlyMKLDNN()) {
+    MKLDNNMatrixPtr dnnIn = std::dynamic_pointer_cast<MKLDNNMatrix>(inMat);
+ CHECK(dnnIn) << "Input should be MKLDNNMatrix";
+ if (dnnIn->getPrimitiveDesc() != in->getPrimitiveDesc()) {
+ CHECK_EQ(dnnIn->getFormat(), format::nc);
+      CHECK(ih_ == 1 && iw_ == 1) << "should be 1x1 when input is in nc format";
+ // create a new one with nchw format and same data
+ memory::dims inDims = memory::dims{bs_, ic_, 1, 1};
+ dnnIn = MKLDNNMatrix::create(inMat, inDims, format::nchw, engine_);
+ CHECK(dnnIn->getPrimitiveDesc() == in->getPrimitiveDesc());
+ }
+ in = dnnIn;
+ } else {
+ const MatrixPtr& cpuIn = getInputValue(0, CPU_DEVICE);
+ memory::dims inDims = memory::dims{bs_, ic_, ih_, iw_};
+ cpuInVal_ = MKLDNNMatrix::create(cpuIn, inDims, format::nchw, engine_);
+ if (cpuInVal_->getPrimitiveDesc() != in->getPrimitiveDesc()) {
+ // create new mkldnn matrix
+ in = MKLDNNMatrix::create(nullptr, pd->src_primitive_desc());
+ cvtInVal_ = MKLDNNMatrix::createReorder(cpuInVal_, in);
+      CHECK(cvtInVal_) << "should not be empty";
+ } else {
+ in = cpuInVal_;
+ }
+ }
+}
+
+void MKLDNNConvLayer::resetWgtBiasValue(
+    std::shared_ptr<conv_fwd::primitive_desc>& pd,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias) {
+ wgt = MKLDNNMatrix::create(weight_->getW(), pd->weights_primitive_desc());
+ VLOG(MKLDNN_FMTS) << "Weight value format: " << wgt->getFormat();
+
+ bias = (biases_ && biases_->getW())
+ ? MKLDNNMatrix::create(biases_->getW(), pd->bias_primitive_desc())
+ : nullptr;
+}
+
+void MKLDNNConvLayer::resetOutValue(
+    std::shared_ptr<conv_fwd::primitive_desc>& pd, MKLDNNMatrixPtr& out) {
+ out = MKLDNNMatrix::create(output_.value, pd->dst_primitive_desc());
+
+  // create a reorder if the output value has a cpu device copy and
+  // the primitive descs do not match
+ cpuOutVal_ = nullptr;
+ cvtOutVal_ = nullptr;
+ if (!outputIsOnlyMKLDNN()) {
+ const MatrixPtr& cpuOut = getOutput(CPU_DEVICE).value;
+ memory::dims outDims = memory::dims{bs_, oc_, oh_, ow_};
+ cpuOutVal_ = MKLDNNMatrix::create(cpuOut, outDims, format::nchw, engine_);
+ if (cpuOutVal_->getPrimitiveDesc() != out->getPrimitiveDesc()) {
+ cvtOutVal_ = MKLDNNMatrix::createReorder(out, cpuOutVal_);
+      CHECK(cvtOutVal_) << "should not be empty";
+    } else {
+      // CPU output shares the same data as the MKLDNN output
+ cpuOut->setData(out->getData());
+ cpuOutVal_ = out;
+ }
+ }
+}
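+
+// The value path through this layer is thus: cpu nchw buffer --(reorder)-->
+// internal format --conv--> internal format --(reorder)--> cpu nchw buffer,
+// where either reorder is skipped when the primitive descs already match.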
+
+void MKLDNNConvLayer::resetBwdWgtPD(
+    std::shared_ptr<conv_bwdWgt::primitive_desc>& pd) {
+ memory::dims wgtDims, biasDims, strides, dilations, padL, padR;
+ loadConvSettings(wgtDims, biasDims, strides, dilations, padL, padR);
+
+ // create backward weight using input, output and weight value memory desc
+ CHECK(inVal_) << "Should have input value";
+ CHECK(outVal_) << "Should have output value";
+ CHECK(wgtVal_) << "Should have weight value";
+ algorithm algo = algorithm::convolution_direct;
+ padding_kind padKind = padding_kind::zero;
+ auto bwdWgtDesc = biasVal_ != nullptr
+ ? conv_bwdWgt::desc(algo,
+ inVal_->getMemoryDesc(),
+ wgtVal_->getMemoryDesc(),
+ biasVal_->getMemoryDesc(),
+ outVal_->getMemoryDesc(),
+ strides,
+ padL,
+ padR,
+ padKind)
+ : conv_bwdWgt::desc(algo,
+ inVal_->getMemoryDesc(),
+ wgtVal_->getMemoryDesc(),
+ outVal_->getMemoryDesc(),
+ strides,
+ padL,
+ padR,
+ padKind);
+ pd.reset(new conv_bwdWgt::primitive_desc(bwdWgtDesc, engine_, *fwdPD_));
+  CHECK(pd->src_primitive_desc() == inVal_->getPrimitiveDesc())
+      << "primitive desc of the input value should match";
+  CHECK(pd->diff_dst_primitive_desc() == outVal_->getPrimitiveDesc())
+      << "primitive desc of the out grad should match the out value's";
+  CHECK(pd->diff_weights_primitive_desc() == wgtVal_->getPrimitiveDesc())
+      << "primitive desc of the weight grad should match the weight value's";
+}
+
+void MKLDNNConvLayer::resetBwdDataPD(
+    std::shared_ptr<conv_bwdData::primitive_desc>& pd) {
+ pd = nullptr;
+ if (inputLayers_[0]->getOutput().grad == nullptr) {
+ return;
+ }
+
+ memory::dims wgtDims, biasDims, strides, dilations, padL, padR;
+ loadConvSettings(wgtDims, biasDims, strides, dilations, padL, padR);
+ CHECK(inVal_) << "Should have input value";
+ CHECK(outVal_) << "Should have output value";
+ // create backward data using input and output value memory desc
+ // but using weight memory desc with any format
+ auto bwdDataDesc = conv_bwdData::desc(algorithm::convolution_direct,
+ inVal_->getMemoryDesc(),
+ MKLDNNMatrix::createMemoryDesc(wgtDims),
+ outVal_->getMemoryDesc(),
+ strides,
+ padL,
+ padR,
+ padding_kind::zero);
+ pd.reset(new conv_bwdData::primitive_desc(bwdDataDesc, engine_, *fwdPD_));
+  CHECK(pd->diff_src_primitive_desc() == inVal_->getPrimitiveDesc())
+      << "primitive desc of the in grad should match the in value's";
+  CHECK(pd->diff_dst_primitive_desc() == outVal_->getPrimitiveDesc())
+      << "primitive desc of the out grad should match";
+}
+
+void MKLDNNConvLayer::resetBwdBuffers(
+    std::shared_ptr<conv_bwdWgt::primitive_desc>& wgtPD,
+    std::shared_ptr<conv_bwdData::primitive_desc>& dataPD,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias,
+ MKLDNNMatrixPtr& out) {
+ CHECK(wgtPD);
+ resetOutGrad(wgtPD, out);
+
+ resetWgtBiasGrad(wgtPD, wgt, bias);
+
+ resetInGrad(dataPD, in);
+
+ resetWgtValBwdData(dataPD, wgtValBwdData_);
+}
+
+void MKLDNNConvLayer::resetBwdPipeline(
+    std::vector<primitive>& pipeline,
+    std::shared_ptr<conv_bwdWgt::primitive_desc>& wgtPD,
+    std::shared_ptr<conv_bwdData::primitive_desc>& dataPD,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias,
+ MKLDNNMatrixPtr& out) {
+ pipeline.clear();
+
+ if (cvtOutGrad_) {
+ pipeline.push_back(*cvtOutGrad_);
+ }
+
+ // add bwdWgt handle
+ if (bias) {
+ bwdWgt_.reset(new conv_bwdWgt(*wgtPD, *inVal_, *out, *wgt, *bias));
+ } else {
+ bwdWgt_.reset(new conv_bwdWgt(*wgtPD, *inVal_, *out, *wgt));
+ }
+ pipeline.push_back(*bwdWgt_);
+
+ if (dataPD == nullptr) {
+ return;
+ }
+
+ if (cvtWgtVal_) {
+ pipeline.push_back(*cvtWgtVal_);
+ }
+
+ // add bwdData handle
+ CHECK(wgtValBwdData_) << "Should have weight memory";
+ bwdData_.reset(new conv_bwdData(*dataPD, *out, *wgtValBwdData_, *in));
+ pipeline.push_back(*bwdData_);
+
+ if (cvtInGrad_) {
+ pipeline.push_back(*cvtInGrad_);
+ }
+}
+
+void MKLDNNConvLayer::resetOutGrad(
+    std::shared_ptr<conv_bwdWgt::primitive_desc>& wgtPD,
+    MKLDNNMatrixPtr& out) {
+ const MatrixPtr& outMat = output_.grad;
+ out = MKLDNNMatrix::create(outMat, wgtPD->diff_dst_primitive_desc());
+ CHECK(outVal_ != nullptr &&
+ out->getPrimitiveDesc() == outVal_->getPrimitiveDesc())
+ << "primitive desc of out grad and value should be equal";
+
+ // TODO(TJ): merge outgrad
+  // create a reorder if the output grad does not match
+ cpuOutGrad_ = nullptr;
+ cvtOutGrad_ = nullptr;
+ if (!outputIsOnlyMKLDNN()) {
+ const MatrixPtr& cpuOut = getOutput(CPU_DEVICE).grad;
+ outMat->setData(cpuOut->getData());
+    // same primitive desc as cpuOutVal_
+ CHECK(cpuOutVal_);
+ cpuOutGrad_ = MKLDNNMatrix::create(cpuOut, cpuOutVal_->getPrimitiveDesc());
+ if (cpuOutGrad_->getPrimitiveDesc() == out->getPrimitiveDesc()) {
+ out = cpuOutGrad_;
+ } else {
+ out = MKLDNNMatrix::create(nullptr, wgtPD->diff_dst_primitive_desc());
+ cvtOutGrad_ = MKLDNNMatrix::createReorder(cpuOutGrad_, out);
+ CHECK(cvtOutGrad_);
+ }
+ }
+}
+
+void MKLDNNConvLayer::resetWgtBiasGrad(
+    std::shared_ptr<conv_bwdWgt::primitive_desc>& wgtPD,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias) {
+ wgt = MKLDNNMatrix::create(weight_->getWGrad(),
+ wgtPD->diff_weights_primitive_desc());
+ CHECK(nullptr != wgtVal_ &&
+ wgt->getPrimitiveDesc() == wgtVal_->getPrimitiveDesc())
+ << "primitive desc of weight grad and value should be equal";
+ VLOG(MKLDNN_FMTS) << "weight grad format: " << wgt->getFormat();
+
+ bias = nullptr;
+ if (biasVal_ == nullptr) {
+ return;
+ }
+ bias = MKLDNNMatrix::create(biases_->getWGrad(),
+ wgtPD->diff_bias_primitive_desc());
+ CHECK(bias->getPrimitiveDesc() == biasVal_->getPrimitiveDesc())
+ << "primitive desc of bias grad should equal the bias value";
+}
+
+void MKLDNNConvLayer::resetInGrad(
+    std::shared_ptr<conv_bwdData::primitive_desc>& dataPD,
+ MKLDNNMatrixPtr& in) {
+ if (dataPD == nullptr) {
+ return;
+ }
+
+ // TODO(TJ): use outputMaps_ ways to get the inGrad_ when merge outgrad done
+ in = MKLDNNMatrix::create(inputLayers_[0]->getOutput().grad,
+ dataPD->diff_src_primitive_desc());
+ CHECK(nullptr != inVal_ &&
+ in->getPrimitiveDesc() == inVal_->getPrimitiveDesc())
+ << "primitive desc of input grad and value should be equal";
+
+  // create a reorder if the input grad does not match
+ cpuInGrad_ = nullptr;
+ cvtInGrad_ = nullptr;
+ if (!inputIsOnlyMKLDNN()) {
+ const MatrixPtr& cpuIn = getInputGrad(0, CPU_DEVICE);
+    // same primitive desc as cpuInVal_
+ CHECK(cpuInVal_);
+ cpuInGrad_ = MKLDNNMatrix::create(cpuIn, cpuInVal_->getPrimitiveDesc());
+ if (cpuInGrad_->getPrimitiveDesc() != in->getPrimitiveDesc()) {
+ const MatrixPtr& dnnIn = getInputGrad(0, MKLDNN_DEVICE);
+ in = MKLDNNMatrix::create(dnnIn, in->getPrimitiveDesc());
+ cvtInGrad_ = MKLDNNMatrix::createReorder(in, cpuInGrad_);
+ CHECK(cvtInGrad_);
+ } else {
+ in = cpuInGrad_;
+ }
+ }
+}
+
+void MKLDNNConvLayer::resetWgtValBwdData(
+    std::shared_ptr<conv_bwdData::primitive_desc>& dataPD,
+ MKLDNNMatrixPtr& wgt) {
+ if (dataPD == nullptr) {
+ return;
+ }
+
+  // create a new weight value for backward data, with a reorder if necessary,
+  // since its primitive_desc may differ from wgtVal_'s
+ CHECK(wgtVal_) << "should have weight value";
+ if (dataPD->weights_primitive_desc() != wgtVal_->getPrimitiveDesc()) {
+ wgtValBwdData_ =
+ MKLDNNMatrix::create(nullptr, dataPD->weights_primitive_desc());
+ cvtWgtVal_ = MKLDNNMatrix::createReorder(wgtVal_, wgtValBwdData_);
+ CHECK(cvtWgtVal_);
+ } else {
+ wgtValBwdData_ = wgtVal_;
+ }
+ VLOG(MKLDNN_FMTS) << "weight value format for backward data"
+ << wgtValBwdData_->getFormat();
+}
+
+} // namespace paddle
diff --git a/paddle/gserver/layers/MKLDNNConvLayer.h b/paddle/gserver/layers/MKLDNNConvLayer.h
new file mode 100644
index 0000000000..f84f2f737c
--- /dev/null
+++ b/paddle/gserver/layers/MKLDNNConvLayer.h
@@ -0,0 +1,253 @@
+/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+
+#include "MKLDNNLayer.h"
+#include "mkldnn.hpp"
+
+namespace paddle {
+typedef mkldnn::convolution_forward conv_fwd;
+typedef mkldnn::convolution_backward_weights conv_bwdWgt;
+typedef mkldnn::convolution_backward_data conv_bwdData;
+
+/**
+ * @brief A subclass of MKLDNNLayer: the conv layer.
+ *
+ * The config file api is mkldnn_conv
+ */
+class MKLDNNConvLayer : public MKLDNNLayer {
+protected:
+ // padding height and width
+ int ph_, pw_;
+ // stride height and width
+ int sh_, sw_;
+ // dilation height and width
+ int dh_, dw_;
+  // filter (kernel) height and width
+ int fh_, fw_;
+ // group number
+ int gp_;
+
+  // in resetBwdData, the format of wgtValBwdData_ may differ from wgtVal_'s
+  MKLDNNMatrixPtr wgtValBwdData_;
+  // convert handle from wgtVal_ to wgtValBwdData_
+  std::shared_ptr<mkldnn::reorder> cvtWgtVal_;
+
+  // save the forward primitive_desc, which can be reused in backward
+  std::shared_ptr<conv_fwd::primitive_desc> fwdPD_;
+
+  // MKLDNNMatrixPtr buffers that should be created on the CPU device
+ MKLDNNMatrixPtr cpuInVal_;
+ MKLDNNMatrixPtr cpuInGrad_;
+ MKLDNNMatrixPtr cpuOutVal_;
+ MKLDNNMatrixPtr cpuOutGrad_;
+  // convert handles between the CPU device and the MKLDNN device
+  std::shared_ptr<mkldnn::reorder> cvtInVal_;
+  std::shared_ptr<mkldnn::reorder> cvtInGrad_;
+  std::shared_ptr<mkldnn::reorder> cvtOutVal_;
+  std::shared_ptr<mkldnn::reorder> cvtOutGrad_;
+
+  // whether the weight has been initialized
+ bool hasInitedWgt_;
+
+  // true by default, which affects the calculation of the output image size;
+  // see MathUtils.h for details
+ bool caffeMode_;
+
+ // weight and bias
+  std::unique_ptr<Weight> weight_;
+  std::unique_ptr<Weight> biases_;
+
+public:
+ explicit MKLDNNConvLayer(const LayerConfig& config)
+ : MKLDNNLayer(config), hasInitedWgt_(false), caffeMode_(true) {}
+
+ ~MKLDNNConvLayer() {}
+
+ bool init(const LayerMap& layerMap,
+ const ParameterMap& parameterMap) override;
+
+ void reshape(
+ int& bs, int& ic, int& ih, int& iw, int oc, int& oh, int& ow) override;
+
+  void resetFwd(std::vector<mkldnn::primitive>& pipeline,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias,
+ MKLDNNMatrixPtr& out) override;
+
+  void resetBwd(std::vector<mkldnn::primitive>& pipeline,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias,
+ MKLDNNMatrixPtr& out) override;
+
+ void updateInputData() override;
+
+ void updateWeights(const UpdateCallback& callback) override;
+
+ void convertWeightsFromPaddle() override;
+
+ void convertWeightsToPaddle() override;
+
+ void printSizeInfo() override {
+ MKLDNNLayer::printSizeInfo();
+ VLOG(MKLDNN_SIZES) << getName() << ": fh: " << fh_ << ", fw: " << fw_
+ << ": ph: " << ph_ << ", pw: " << pw_ << ", sh: " << sh_
+ << ", sw: " << sw_ << ", dh: " << dh_ << ", dw: " << dw_;
+ }
+
+ void printValueFormatFlow() override {
+ if (cpuInVal_) {
+ VLOG(MKLDNN_FMTS) << cpuInVal_->getFormat() << " >>>";
+ }
+ MKLDNNLayer::printValueFormatFlow();
+ if (cpuOutVal_) {
+ VLOG(MKLDNN_FMTS) << " >>> " << cpuOutVal_->getFormat();
+ }
+ }
+
+ void printGradFormatFlow() override {
+ if (cpuInGrad_) {
+ VLOG(MKLDNN_FMTS) << cpuInGrad_->getFormat() << " <<<";
+ }
+ MKLDNNLayer::printGradFormatFlow();
+ if (cpuOutGrad_) {
+ VLOG(MKLDNN_FMTS) << " <<< " << cpuOutGrad_->getFormat();
+ }
+ }
+
+protected:
+ /**
+ * load the dims settings of this conv
+ */
+ void loadConvSettings(mkldnn::memory::dims& wgt,
+ mkldnn::memory::dims& bias,
+ mkldnn::memory::dims& stride,
+ mkldnn::memory::dims& dilation,
+ mkldnn::memory::dims& padL,
+ mkldnn::memory::dims& padR);
+
+ /**
+ * reset the forward primitive descriptor.
+ */
+  void resetFwdPD(std::shared_ptr<conv_fwd::primitive_desc>& pd);
+ /**
+ * reset the MKLDNNMatrix buffers used in forward.
+ */
+  void resetFwdBuffers(std::shared_ptr<conv_fwd::primitive_desc>& pd,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias,
+ MKLDNNMatrixPtr& out);
+ /**
+ * reset the forward pipeline.
+ */
+  void resetFwdPipeline(std::vector<mkldnn::primitive>& pipeline,
+                        std::shared_ptr<conv_fwd::primitive_desc>& pd,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias,
+ MKLDNNMatrixPtr& out);
+
+ /**
+ * reset MKLDNNMatrix of input value
+ */
+  void resetInValue(std::shared_ptr<conv_fwd::primitive_desc>& pd,
+ MKLDNNMatrixPtr& in);
+ /**
+ * reset MKLDNNMatrix of weight and bias value
+ */
+  void resetWgtBiasValue(std::shared_ptr<conv_fwd::primitive_desc>& pd,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias);
+ /**
+ * reset MKLDNNMatrix of output value
+ */
+  void resetOutValue(std::shared_ptr<conv_fwd::primitive_desc>& pd,
+ MKLDNNMatrixPtr& out);
+
+ /**
+ * reset the backward weight primitive descriptor.
+ */
+  void resetBwdWgtPD(std::shared_ptr<conv_bwdWgt::primitive_desc>& pd);
+ /**
+ * reset the backward data primitive descriptor.
+ */
+  void resetBwdDataPD(std::shared_ptr<conv_bwdData::primitive_desc>& pd);
+ /**
+ * reset the MKLDNNMatrix buffers used in backward.
+ */
+  void resetBwdBuffers(std::shared_ptr<conv_bwdWgt::primitive_desc>& wgtPD,
+                       std::shared_ptr<conv_bwdData::primitive_desc>& dataPD,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias,
+ MKLDNNMatrixPtr& out);
+ /**
+ * reset the backward pipeline.
+ */
+  void resetBwdPipeline(std::vector<mkldnn::primitive>& pipeline,
+                        std::shared_ptr<conv_bwdWgt::primitive_desc>& wgtPD,
+                        std::shared_ptr<conv_bwdData::primitive_desc>& dataPD,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias,
+ MKLDNNMatrixPtr& out);
+
+ /**
+ * reset MKLDNNMatrix of output grad
+ */
+  void resetOutGrad(std::shared_ptr<conv_bwdWgt::primitive_desc>& wgtPD,
+ MKLDNNMatrixPtr& out);
+ /**
+ * reset MKLDNNMatrix of weight and bias grad
+ */
+  void resetWgtBiasGrad(std::shared_ptr<conv_bwdWgt::primitive_desc>& wgtPD,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias);
+ /**
+ * reset MKLDNNMatrix of input grad
+ */
+  void resetInGrad(std::shared_ptr<conv_bwdData::primitive_desc>& dataPD,
+ MKLDNNMatrixPtr& in);
+ /**
+   * reset MKLDNNMatrix of the weight value for backward data,
+   * since its primitive_desc may differ from wgtVal_'s
+ */
+  void resetWgtValBwdData(
+      std::shared_ptr<conv_bwdData::primitive_desc>& dataPD,
+      MKLDNNMatrixPtr& wgt);
+
+ /**
+ * get padding_r according to
+ * https://github.com/01org/mkl-dnn/blob/master/tests/gtests/
+ * test_convolution_forward_common.hpp
+   * @note: mkldnn dilation starts from 0 while paddle starts from 1
+ */
+ mkldnn::memory::dims getPaddingR() const {
+ mkldnn::memory::dims padR = {ph_, pw_};
+ for (int i = 0; i < 2; ++i) {
+ if ((ih_ - ((fh_ - 1) * dh_ + 1) + ph_ + padR[0]) / sh_ + 1 != oh_) {
+ ++padR[0];
+ }
+ if ((iw_ - ((fw_ - 1) * dw_ + 1) + pw_ + padR[1]) / sw_ + 1 != ow_) {
+ ++padR[1];
+ }
+ }
+ return padR;
+ }
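+
+  // Worked example (hypothetical sizes): with ih_ = 5, fh_ = 3, dh_ = 1,
+  // ph_ = 0, sh_ = 2 and oh_ = 2, we get (5 - 3 + 0 + 0) / 2 + 1 = 2 == oh_,
+  // so padR stays {0, pw_}; when the equality fails, padR is bumped until
+  // the conv covers the full output that paddle computed.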
+};
+
+} // namespace paddle
diff --git a/paddle/gserver/layers/MKLDNNFcLayer.cpp b/paddle/gserver/layers/MKLDNNFcLayer.cpp
index 53433cef35..afd092666b 100644
--- a/paddle/gserver/layers/MKLDNNFcLayer.cpp
+++ b/paddle/gserver/layers/MKLDNNFcLayer.cpp
@@ -14,13 +14,9 @@ limitations under the License. */
#include "MKLDNNFcLayer.h"
#include "paddle/utils/Logging.h"
-#include "paddle/utils/Stat.h"
using namespace mkldnn; // NOLINT
typedef memory::format format;
-typedef inner_product_forward fc_fwd;
-typedef inner_product_backward_weights fc_bwdWgt;
-typedef inner_product_backward_data fc_bwdData;
namespace paddle {
@@ -40,6 +36,8 @@ bool MKLDNNFcLayer::init(const LayerMap& layerMap,
oc_ = getSize();
oh_ = 1;
ow_ = 1;
+ ih_ = 1;
+ iw_ = 1;
// input size can not change in FC
iLayerSize_ = inputLayers_[0]->getSize();
@@ -77,109 +75,164 @@ void MKLDNNFcLayer::convertWeightsToPaddle() {
wgtVal_->reorderDataTo(wgtVal_, dstFmt, targetDim);
}
-void MKLDNNFcLayer::reshape() {
- const Argument& input = getInput(0, getPrev(0)->getDeviceId());
- int batchSize = input.getBatchSize();
- if (bs_ == batchSize) {
- return;
- }
- bs_ = batchSize;
- ih_ = input.getFrameHeight();
- iw_ = input.getFrameWidth();
- if (ih_ == 0) {
- ih_ = 1;
- }
- if (iw_ == 0) {
- iw_ = 1;
- }
+void MKLDNNFcLayer::reshape(
+ int& bs, int& ic, int& ih, int& iw, int oc, int& oh, int& ow) {
+ reshapeInput(bs, ih, iw);
+
CHECK_EQ(iLayerSize_, inputLayers_[0]->getSize());
- ic_ = iLayerSize_ / (ih_ * iw_);
- CHECK_EQ(size_t(ic_ * ih_ * iw_), iLayerSize_) << "not divisible";
- CHECK_EQ(size_t(oc_), getSize());
+ ic = iLayerSize_ / (ih * iw);
+ CHECK_EQ(size_t(ic * ih * iw), iLayerSize_) << "not divisible";
+ CHECK_EQ(size_t(oc), getSize());
+
+ reshapeOutput(oh, ow);
+ resizeOutput(bs, oc);
+
printSizeInfo();
+}
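+
+// e.g. (hypothetical): with iLayerSize_ = 288 and a 3x4 input image,
+// ic = 288 / (3 * 4) = 24; the divisibility CHECK above rejects input
+// sizes that do not factor into ic * ih * iw.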
+
+void MKLDNNFcLayer::resetFwd(std::vector<primitive>& pipeline,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias,
+ MKLDNNMatrixPtr& out) {
+ resetFwdBuffers(in, wgt, bias, out);
- // reset output
- output_.setFrameHeight(oh_);
- output_.setFrameWidth(ow_);
- resetOutput(bs_, oc_);
+ resetFwdPD(fwdPD_, in, wgt, bias, out);
- // reset mkldnn forward
- resetFwd();
- needResetBwd_ = true;
+ resetFwdPipeline(pipeline, fwdPD_, in, wgt, bias, out);
- convertWeightsFromPaddle();
+ printValueFormatFlow();
}
-void MKLDNNFcLayer::resetFwd() {
- bool hasBias = biases_ && biases_->getW();
- const MatrixPtr& wgt = weight_->getW();
- const MatrixPtr& bias = hasBias ? biases_->getW() : nullptr;
- const MatrixPtr& out = output_.value;
+void MKLDNNFcLayer::resetBwd(std::vector<primitive>& pipeline,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias,
+ MKLDNNMatrixPtr& out) {
+  std::shared_ptr<fc_bwdWgt::primitive_desc> bwdWgtPD;
+  std::shared_ptr<fc_bwdData::primitive_desc> bwdDataPD;
+
+ resetBwdBuffers(in, wgt, bias, out);
+
+ resetBwdWgtPD(bwdWgtPD, wgt, bias, out);
+
+ resetBwdDataPD(bwdDataPD, in, out);
+
+ resetBwdPipeline(pipeline, bwdWgtPD, bwdDataPD, in, wgt, bias, out);
+
+ printGradFormatFlow();
+}
+void MKLDNNFcLayer::updateInputData() {
+ inVal_->setData(getInputValue(0, CPU_DEVICE)->getData());
+}
+
+void MKLDNNFcLayer::updateWeights(const UpdateCallback& callback) {
+ weight_->getParameterPtr()->incUpdate(callback);
+ if (biases_ && biases_->getWGrad()) {
+ biases_->getParameterPtr()->incUpdate(callback);
+ }
+}
+
+void MKLDNNFcLayer::resetFwdBuffers(MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias,
+ MKLDNNMatrixPtr& out) {
+ resetInValue(in);
+
+ resetWgtBiasValue(wgt, bias);
+
+ resetOutValue(out);
+}
+
+void MKLDNNFcLayer::resetInValue(MKLDNNMatrixPtr& in) {
if (inputIsOnlyMKLDNN()) {
- const MatrixPtr& in = getInputValue(0);
-    inVal_ = std::dynamic_pointer_cast<MKLDNNMatrix>(in);
- CHECK(inVal_) << "Input should be MKLDNNMatrix";
+ const MatrixPtr& dnnIn = getInputValue(0);
+    in = std::dynamic_pointer_cast<MKLDNNMatrix>(dnnIn);
+ CHECK(in) << "Input should be MKLDNNMatrix";
} else {
CHECK_EQ(getPrev(0)->getDeviceId(), CPU_DEVICE) << "Only support CPU yet";
- const MatrixPtr& in = getInputValue(0, CPU_DEVICE);
- inVal_ = MKLDNNMatrix::create(
- in, memory::dims{bs_, ic_, ih_, iw_}, format::nchw, engine_);
+ const MatrixPtr& cpuIn = getInputValue(0, CPU_DEVICE);
+ in = MKLDNNMatrix::create(
+ cpuIn, {bs_, ic_, ih_, iw_}, format::nchw, engine_);
}
- inVal_->downSpatial();
- wgtVal_ = MKLDNNMatrix::create(
- wgt, memory::dims{oc_, ic_, ih_, iw_}, format::oihw, engine_);
- wgtVal_->downSpatial();
- biasVal_ =
- hasBias ? MKLDNNMatrix::create(bias, {oc_}, format::x, engine_) : nullptr;
- outVal_ = MKLDNNMatrix::create(out, {bs_, oc_}, format::nc, engine_);
-
- // change original output value to mkldnn output value
- output_.value = std::dynamic_pointer_cast(outVal_);
+ in->downSpatial();
+}
+
+void MKLDNNFcLayer::resetWgtBiasValue(MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias) {
+ wgt = MKLDNNMatrix::create(
+ weight_->getW(), {oc_, ic_, ih_, iw_}, format::oihw, engine_);
+ wgt->downSpatial();
+
+ bias = (biases_ && biases_->getW())
+ ? MKLDNNMatrix::create(biases_->getW(), {oc_}, format::x, engine_)
+ : nullptr;
+}
+
+void MKLDNNFcLayer::resetOutValue(MKLDNNMatrixPtr& out) {
+ out = MKLDNNMatrix::create(output_.value, {bs_, oc_}, format::nc, engine_);
if (!outputIsOnlyMKLDNN()) {
- copyOutputInfoToOtherDevice();
// fc cpu output value do not need create convert
// just share point
- getOutput(CPU_DEVICE).value->setData(output_.value->getData());
+ getOutput(CPU_DEVICE).value->setData(out->getData());
}
+}
- // create forward handle
+void MKLDNNFcLayer::resetFwdPD(std::shared_ptr<fc_fwd::primitive_desc>& pd,
+ MKLDNNMatrixPtr in,
+ MKLDNNMatrixPtr wgt,
+ MKLDNNMatrixPtr bias,
+ MKLDNNMatrixPtr out) {
+ CHECK(in);
+ CHECK(wgt);
+ CHECK(out);
prop_kind pk = prop_kind::forward;
- fc_fwd::desc fwdDesc = hasBias ? fc_fwd::desc(pk,
- inVal_->getMemoryDesc(),
- wgtVal_->getMemoryDesc(),
- biasVal_->getMemoryDesc(),
- outVal_->getMemoryDesc())
- : fc_fwd::desc(pk,
- inVal_->getMemoryDesc(),
- wgtVal_->getMemoryDesc(),
- outVal_->getMemoryDesc());
- fc_fwd::primitive_desc fwdPD = fc_fwd::primitive_desc(fwdDesc, engine_);
- if (hasBias) {
- fwd_.reset(new fc_fwd(fwdPD, *inVal_, *wgtVal_, *biasVal_, *outVal_));
+ fc_fwd::desc fwdDesc = bias != nullptr ? fc_fwd::desc(pk,
+ in->getMemoryDesc(),
+ wgt->getMemoryDesc(),
+ bias->getMemoryDesc(),
+ out->getMemoryDesc())
+ : fc_fwd::desc(pk,
+ in->getMemoryDesc(),
+ wgt->getMemoryDesc(),
+ out->getMemoryDesc());
+ pd.reset(new fc_fwd::primitive_desc(fwdDesc, engine_));
+}
+
+void MKLDNNFcLayer::resetFwdPipeline(
+    std::vector<primitive>& pipeline,
+    std::shared_ptr<fc_fwd::primitive_desc>& pd,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias,
+ MKLDNNMatrixPtr& out) {
+ pipeline.clear();
+
+ if (bias) {
+ fwd_.reset(new fc_fwd(*pd, *in, *wgt, *bias, *out));
} else {
- fwd_.reset(new fc_fwd(fwdPD, *inVal_, *wgtVal_, *outVal_));
+ fwd_.reset(new fc_fwd(*pd, *in, *wgt, *out));
}
- printValueFormatFlow();
- pipelineFwd_.clear();
- pipelineFwd_.push_back(*fwd_);
+ pipeline.push_back(*fwd_);
}
-void MKLDNNFcLayer::resetBwd() {
- if (!needResetBwd_) {
- return;
- }
- needResetBwd_ = false;
- bool hasBias = biases_ && biases_->getWGrad();
+void MKLDNNFcLayer::resetBwdBuffers(MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias,
+ MKLDNNMatrixPtr& out) {
+ resetOutGrad(out);
+
+ resetWgtBiasGrad(wgt, bias);
- /// backward weight
- CHECK(inVal_) << "Should have input value";
- const MatrixPtr& wgt = weight_->getWGrad();
- const MatrixPtr& bias = hasBias ? biases_->getWGrad() : nullptr;
+ resetInGrad(in);
+}
+void MKLDNNFcLayer::resetOutGrad(MKLDNNMatrixPtr& out) {
// TODO(TJ): merge outgrad
int device = outputIsOnlyMKLDNN() ? MKLDNN_DEVICE : CPU_DEVICE;
+ output_.grad->setData(getOutput(device).grad->getData());
// for MKLDNN device:
// can not directly cast outputgrad to mkldnnmatrix,
// since each layer can not write the inputgrad to mkldnn inputgrad.
@@ -187,107 +240,88 @@ void MKLDNNFcLayer::resetBwd() {
// for CPU device:
// fc do not need to convert from cpu device since output is always nc format
// only need create from cpu device
- const MatrixPtr& out = getOutput(device).grad;
- outGrad_ = MKLDNNMatrix::create(out, outVal_->getPrimitiveDesc());
- wgtGrad_ = MKLDNNMatrix::create(wgt, wgtVal_->getPrimitiveDesc());
- biasGrad_ = hasBias ? MKLDNNMatrix::create(bias, biasVal_->getPrimitiveDesc())
- : nullptr;
-
- // create memory primitive desc
- fc_fwd::desc fwdDesc = fc_fwd::desc(prop_kind::forward,
- inVal_->getMemoryDesc(),
- wgtGrad_->getMemoryDesc(),
- outGrad_->getMemoryDesc());
- fc_fwd::primitive_desc fwdPD = fc_fwd::primitive_desc(fwdDesc, engine_);
- fc_bwdWgt::desc bwdWgtDesc = hasBias
- ? fc_bwdWgt::desc(inVal_->getMemoryDesc(),
- wgtGrad_->getMemoryDesc(),
- biasGrad_->getMemoryDesc(),
- outGrad_->getMemoryDesc())
- : fc_bwdWgt::desc(inVal_->getMemoryDesc(),
- wgtGrad_->getMemoryDesc(),
- outGrad_->getMemoryDesc());
- fc_bwdWgt::primitive_desc bwdWgtPD =
- fc_bwdWgt::primitive_desc(bwdWgtDesc, engine_, fwdPD);
-
- if (hasBias) {
- bwdWgt_.reset(
- new fc_bwdWgt(bwdWgtPD, *inVal_, *outGrad_, *wgtGrad_, *biasGrad_));
- } else {
- bwdWgt_.reset(new fc_bwdWgt(bwdWgtPD, *inVal_, *outGrad_, *wgtGrad_));
- }
- pipelineBwd_.clear();
- pipelineBwd_.push_back(*bwdWgt_);
+ CHECK(outVal_);
+ out =
+ MKLDNNMatrix::create(getOutput(device).grad, outVal_->getPrimitiveDesc());
+}
- /// backward data
- const MatrixPtr& in = inputLayers_[0]->getOutput().grad;
- if (in == nullptr) {
+void MKLDNNFcLayer::resetWgtBiasGrad(MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias) {
+ CHECK(wgtVal_);
+ wgt = MKLDNNMatrix::create(weight_->getWGrad(), wgtVal_->getPrimitiveDesc());
+
+ bias = nullptr;
+ if (biasVal_ == nullptr) {
return;
}
- if (getInput(0, MKLDNN_DEVICE).getAllCount() > 1) {
- // TODO(TJ): use outputMaps_ ways to get the inGrad_ when merge outgrad done
- } else {
- inGrad_ = MKLDNNMatrix::create(in, inVal_->getPrimitiveDesc());
- }
-
- fc_bwdData::desc bwdDataDesc = fc_bwdData::desc(inVal_->getMemoryDesc(),
- wgtGrad_->getMemoryDesc(),
- outGrad_->getMemoryDesc());
- fc_bwdData::primitive_desc bwdDataPD =
- fc_bwdData::primitive_desc(bwdDataDesc, engine_, fwdPD);
-
- CHECK(wgtVal_) << "Should have weight memory";
- bwdData_.reset(new fc_bwdData(bwdDataPD, *outGrad_, *wgtVal_, *inGrad_));
- printGradFormatFlow();
- pipelineBwd_.push_back(*bwdData_);
+ bias =
+ MKLDNNMatrix::create(biases_->getWGrad(), biasVal_->getPrimitiveDesc());
}
-void MKLDNNFcLayer::updateInputData() {
- if (inputLayers_[0]->getType() != "data") {
+void MKLDNNFcLayer::resetInGrad(MKLDNNMatrixPtr& in) {
+ in = nullptr;
+ const MatrixPtr& inGrad = inputLayers_[0]->getOutput().grad;
+ if (inGrad == nullptr) {
return;
}
- real* iData = getInputValue(0, CPU_DEVICE)->getData();
- inVal_->setData(iData);
+ // TODO(TJ): use outputMaps_ ways to get the inGrad_ when merge outgrad done
+ CHECK(inVal_);
+ in = MKLDNNMatrix::create(inGrad, inVal_->getPrimitiveDesc());
}
-void MKLDNNFcLayer::forward(PassType passType) {
- Layer::forward(passType);
- reshape();
-
- {
- REGISTER_TIMER_INFO("mkldnn_FwdTimer", getName().c_str());
- updateInputData();
-
- // just submit forward pipeline
- stream_->submit(pipelineFwd_);
- }
-
- /* activation */ {
- REGISTER_TIMER_INFO("FwActTimer", getName().c_str());
- forwardActivation();
- }
+void MKLDNNFcLayer::resetBwdWgtPD(
+    std::shared_ptr<fc_bwdWgt::primitive_desc>& pd,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias,
+ MKLDNNMatrixPtr& out) {
+ CHECK(inVal_);
+ fc_bwdWgt::desc bwdWgtDesc = bias ? fc_bwdWgt::desc(inVal_->getMemoryDesc(),
+ wgt->getMemoryDesc(),
+ bias->getMemoryDesc(),
+ out->getMemoryDesc())
+ : fc_bwdWgt::desc(inVal_->getMemoryDesc(),
+ wgt->getMemoryDesc(),
+ out->getMemoryDesc());
+ pd.reset(new fc_bwdWgt::primitive_desc(bwdWgtDesc, engine_, *fwdPD_));
}
-void MKLDNNFcLayer::backward(const UpdateCallback& callback) {
- /* Do derivation */ {
- REGISTER_TIMER_INFO("BpActTimer", getName().c_str());
- backwardActivation();
+void MKLDNNFcLayer::resetBwdDataPD(
+    std::shared_ptr<fc_bwdData::primitive_desc>& pd,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& out) {
+ pd = nullptr;
+ if (in == nullptr) {
+ return;
}
+ CHECK(wgtVal_);
+ fc_bwdData::desc bwdDataDesc = fc_bwdData::desc(
+ in->getMemoryDesc(), wgtVal_->getMemoryDesc(), out->getMemoryDesc());
+ pd.reset(new fc_bwdData::primitive_desc(bwdDataDesc, engine_, *fwdPD_));
+}
- {
- REGISTER_TIMER_INFO("mkldnn_bwdTimer", getName().c_str());
- resetBwd();
-
- // just sumbmit backward pipeline
- stream_->submit(pipelineBwd_);
+void MKLDNNFcLayer::resetBwdPipeline(
+    std::vector<primitive>& pipeline,
+    std::shared_ptr<fc_bwdWgt::primitive_desc>& bwdWgtPD,
+    std::shared_ptr<fc_bwdData::primitive_desc>& bwdDataPD,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias,
+ MKLDNNMatrixPtr& out) {
+ pipeline.clear();
+ CHECK(inVal_);
+ if (bias) {
+ bwdWgt_.reset(new fc_bwdWgt(*bwdWgtPD, *inVal_, *out, *wgt, *bias));
+ } else {
+ bwdWgt_.reset(new fc_bwdWgt(*bwdWgtPD, *inVal_, *out, *wgt));
}
+ pipeline.push_back(*bwdWgt_);
- {
- REGISTER_TIMER_INFO("WeightUpdate", getName().c_str());
- weight_->getParameterPtr()->incUpdate(callback);
- if (biases_ && biases_->getWGrad()) {
- biases_->getParameterPtr()->incUpdate(callback);
- }
+ if (bwdDataPD == nullptr) {
+ return;
}
+ CHECK(wgtVal_) << "Should have weight memory";
+ bwdData_.reset(new fc_bwdData(*bwdDataPD, *out, *wgtVal_, *in));
+ pipeline.push_back(*bwdData_);
}
+
} // namespace paddle
diff --git a/paddle/gserver/layers/MKLDNNFcLayer.h b/paddle/gserver/layers/MKLDNNFcLayer.h
index 4ad67a16e0..c76878aafa 100644
--- a/paddle/gserver/layers/MKLDNNFcLayer.h
+++ b/paddle/gserver/layers/MKLDNNFcLayer.h
@@ -18,6 +18,9 @@ limitations under the License. */
#include "mkldnn.hpp"
namespace paddle {
+typedef mkldnn::inner_product_forward fc_fwd;
+typedef mkldnn::inner_product_backward_weights fc_bwdWgt;
+typedef mkldnn::inner_product_backward_data fc_bwdData;
/**
* @brief A subclass of MKLDNNLayer fc layer.
@@ -32,6 +35,9 @@ protected:
// if has already init the weight
bool hasInitedWgt_;
+ // save forward primitive_desc, which can be used backward
+  std::shared_ptr<fc_fwd::primitive_desc> fwdPD_;
+
// fc weight and bias
std::unique_ptr weight_;
std::unique_ptr biases_;
@@ -45,35 +51,81 @@ public:
bool init(const LayerMap& layerMap,
const ParameterMap& parameterMap) override;
- void convertWeightsFromPaddle() override;
-
- void convertWeightsToPaddle() override;
+ void reshape(
+ int& bs, int& ic, int& ih, int& iw, int oc, int& oh, int& ow) override;
- void forward(PassType passType) override;
+  void resetFwd(std::vector<mkldnn::primitive>& pipeline,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias,
+ MKLDNNMatrixPtr& out) override;
- void backward(const UpdateCallback& callback) override;
+  void resetBwd(std::vector<mkldnn::primitive>& pipeline,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias,
+ MKLDNNMatrixPtr& out) override;
void updateInputData() override;
-protected:
- /**
- * reshape the input image sizes
- * and reset output buffer size
- * and reset mkldnn forward
- */
- void reshape();
+ void updateWeights(const UpdateCallback& callback) override;
+
+ void convertWeightsFromPaddle() override;
+ void convertWeightsToPaddle() override;
+
+protected:
/**
- * reset the forward primitve and memory
- * only would be called when input size changes
+ * Forward functions: reset buffers(input, output, weight and bias),
+ * reset primitive descriptor,
+ * reset pipeline.
*/
- void resetFwd();
+ void resetFwdBuffers(MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias,
+ MKLDNNMatrixPtr& out);
+ void resetInValue(MKLDNNMatrixPtr& in);
+ void resetWgtBiasValue(MKLDNNMatrixPtr& wgt, MKLDNNMatrixPtr& bias);
+ void resetOutValue(MKLDNNMatrixPtr& out);
+  void resetFwdPD(std::shared_ptr<fc_fwd::primitive_desc>& pd,
+ MKLDNNMatrixPtr in,
+ MKLDNNMatrixPtr wgt,
+ MKLDNNMatrixPtr bias,
+ MKLDNNMatrixPtr out);
+  void resetFwdPipeline(std::vector<mkldnn::primitive>& pipeline,
+                        std::shared_ptr<fc_fwd::primitive_desc>& pd,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias,
+ MKLDNNMatrixPtr& out);
/**
- * reset the backward primitve and memory for mkldnn fc
- * only would be called when needed
+ * Backward functions: reset buffers(input, output, weight and bias),
+ * reset primitive descriptor for backward weight,
+ * reset primitive descriptor for backward data,
+ * reset pipeline.
*/
- void resetBwd();
+ void resetBwdBuffers(MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias,
+ MKLDNNMatrixPtr& out);
+ void resetOutGrad(MKLDNNMatrixPtr& out);
+ void resetWgtBiasGrad(MKLDNNMatrixPtr& wgt, MKLDNNMatrixPtr& bias);
+ void resetInGrad(MKLDNNMatrixPtr& in);
+  void resetBwdWgtPD(std::shared_ptr<fc_bwdWgt::primitive_desc>& pd,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias,
+ MKLDNNMatrixPtr& out);
+  void resetBwdDataPD(std::shared_ptr<fc_bwdData::primitive_desc>& pd,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& out);
+  void resetBwdPipeline(std::vector<mkldnn::primitive>& pipeline,
+                        std::shared_ptr<fc_bwdWgt::primitive_desc>& bwdWgtPD,
+                        std::shared_ptr<fc_bwdData::primitive_desc>& bwdDataPD,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias,
+ MKLDNNMatrixPtr& out);
};
} // namespace paddle
diff --git a/paddle/gserver/layers/MKLDNNLayer.h b/paddle/gserver/layers/MKLDNNLayer.h
index 543364edce..d8555a8331 100644
--- a/paddle/gserver/layers/MKLDNNLayer.h
+++ b/paddle/gserver/layers/MKLDNNLayer.h
@@ -19,6 +19,7 @@ limitations under the License. */
#include "MKLDNNBase.h"
#include "mkldnn.hpp"
#include "paddle/math/MKLDNNMatrix.h"
+#include "paddle/utils/Stat.h"
DECLARE_bool(use_mkldnn);
@@ -33,6 +34,8 @@ typedef std::shared_ptr MKLDNNLayerPtr;
*/
class MKLDNNLayer : public Layer {
protected:
+ // input value element count
+ size_t inputElemenCnt_;
// batch size
int bs_;
// input image channel, height and width
@@ -52,7 +55,7 @@ protected:
   std::vector<mkldnn::primitive> pipelineFwd_;
   std::vector<mkldnn::primitive> pipelineBwd_;
- // MKLDNNMatrixPtr
+ // MKLDNNMatrixPtr with internal format
MKLDNNMatrixPtr inVal_;
MKLDNNMatrixPtr inGrad_;
MKLDNNMatrixPtr outVal_;
@@ -65,6 +68,7 @@ protected:
public:
explicit MKLDNNLayer(const LayerConfig& config)
: Layer(config),
+ inputElemenCnt_(0),
bs_(0),
ic_(0),
ih_(0),
@@ -95,12 +99,106 @@ public:
if (!Layer::init(layerMap, parameterMap)) {
return false;
}
+ checkCPUOutputsNumber();
stream_.reset(new MKLDNNStream());
engine_ = CPUEngine::Instance().getEngine();
return true;
}
+ void forward(PassType passType) override {
+ passType_ = passType;
+
+ {
+ REGISTER_TIMER_INFO("mkldnn_FwdTimer", getName().c_str());
+ CHECK(!inputLayers_.empty());
+ copySeqInfoToOutputs();
+ size_t elemenCnt = inputLayers_[0]->getOutput().value->getElementCnt();
+ if (inputElemenCnt_ != elemenCnt) {
+        // reset when the total input size changes, not only the batch size
+ inputElemenCnt_ = elemenCnt;
+ reshape(bs_, ic_, ih_, iw_, oc_, oh_, ow_);
+ resetFwd(pipelineFwd_, inVal_, wgtVal_, biasVal_, outVal_);
+ if (outVal_) {
+ // change original output value to mkldnn output value
+          output_.value = std::dynamic_pointer_cast<Matrix>(outVal_);
+ }
+ convertWeightsFromPaddle();
+ needResetBwd_ = true;
+ }
+
+ if (inputLayers_[0]->getType() == "data") {
+ updateInputData();
+ }
+
+ stream_->submit(pipelineFwd_);
+ }
+
+ /* activation */ {
+ REGISTER_TIMER_INFO("FwActTimer", getName().c_str());
+ forwardActivation();
+ }
+ }
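+
+  // Design note: keying the reset on the total element count rather than on
+  // the batch size alone also catches height/width changes at a fixed batch
+  // size, e.g. 64x3x28x28 and 64x3x14x14 differ in elemenCnt although bs_
+  // is the same.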
+
+ void backward(const UpdateCallback& callback) override {
+ if (needResetBwd_) {
+ resetBwd(pipelineBwd_, inGrad_, wgtGrad_, biasGrad_, outGrad_);
+ needResetBwd_ = false;
+ }
+ {
+ REGISTER_TIMER_INFO("BpActTimer", getName().c_str());
+ backwardActivation();
+ }
+ {
+ REGISTER_TIMER_INFO("mkldnn_bwdTimer", getName().c_str());
+ stream_->submit(pipelineBwd_);
+ }
+
+ {
+ REGISTER_TIMER_INFO("WeightUpdate", getName().c_str());
+ updateWeights(callback);
+ }
+ }
+
+ /**
+   * reshape the input image sizes,
+   * and reset the output image and buffer size;
+   * the output channel cannot be changed
+ */
+ virtual void reshape(
+ int& bs, int& ic, int& ih, int& iw, int oc, int& oh, int& ow) = 0;
+
+ /**
+   * reset the mkldnn forward primitive and memory;
+   * only called when the input size changes
+ */
+  virtual void resetFwd(std::vector<mkldnn::primitive>& pipeline,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias,
+ MKLDNNMatrixPtr& out) = 0;
+
+ /**
+   * reset the mkldnn backward primitive and memory;
+   * only called when needed
+ */
+  virtual void resetBwd(std::vector<mkldnn::primitive>& pipeline,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias,
+ MKLDNNMatrixPtr& out) = 0;
+
+ /**
+   * Update the input value data when the input layer is of "data" type,
+   * since the input data address might change.
+ */
+ virtual void updateInputData() {}
+
+ /**
+ * Update weights and biases if necessary.
+ */
+ virtual void updateWeights(const UpdateCallback& callback) {}
+
/**
* convert weight from paddle format to mkldnn format
* weight_ will be override
@@ -114,10 +212,38 @@ public:
virtual void convertWeightsToPaddle() {}
/**
- * Update input value data when input layer is "data" type.
- * Since the input value data address might be changed.
+   * expose this interface as public for unit tests
*/
- virtual void updateInputData() {}
+ void addOutputArgument(int deviceId) { Layer::addOutputArgument(deviceId); }
+
+protected:
+ /**
+   * reshape the input image sizes and the input batch size
+ */
+ virtual void reshapeInput(int& batchsize, int& height, int& width) {
+ const Argument& input = inputLayers_[0]->getOutput();
+ batchsize = input.getBatchSize();
+ int h = input.getFrameHeight();
+ int w = input.getFrameWidth();
+ if (h != 0) {
+ height = h;
+ }
+ if (w != 0) {
+ width = w;
+ }
+ }
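+
+  // e.g. when the input Argument carries no frame size (h == w == 0), the
+  // previously configured height and width are kept, such as the 1x1 that
+  // the fc layer sets in init().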
+
+ /**
+ * reshape output image sizes
+ */
+ virtual void reshapeOutput(size_t height, size_t width) {
+ output_.setFrameHeight(height);
+ output_.setFrameWidth(width);
+ for (size_t i = 0; i < outputOtherDevice_.size(); i++) {
+ outputOtherDevice_[i].setFrameHeight(height);
+ outputOtherDevice_[i].setFrameWidth(width);
+ }
+ }
/**
* print info about sizes
@@ -133,8 +259,8 @@ public:
*/
virtual void printValueFormatFlow() {
if (inVal_ && outVal_) {
- VLOG(MKLDNN_FMTS) << "value format flow --- " << inVal_->getFormat()
- << " >>> " << outVal_->getFormat();
+ VLOG(MKLDNN_FMTS) << inVal_->getFormat() << " >>> "
+ << outVal_->getFormat();
}
}
@@ -143,36 +269,12 @@ public:
*/
virtual void printGradFormatFlow() {
if (inGrad_ && outGrad_) {
- VLOG(MKLDNN_FMTS) << "grad format flow --- " << inGrad_->getFormat()
- << " <<< " << outGrad_->getFormat();
+ VLOG(MKLDNN_FMTS) << inGrad_->getFormat() << " <<< "
+ << outGrad_->getFormat();
}
}
protected:
- /**
- * copy image size and sequence info to other device
- * @note: can not directly use Layer::copyOutputToOtherDevice since here only
- * copy base info and do not copy data value
- */
- void copyOutputInfoToOtherDevice() {
- int cnt = 0;
- for (size_t i = 0; i < outputOtherDevice_.size(); i++) {
- outputOtherDevice_[i].setFrameHeight(output_.getFrameHeight());
- outputOtherDevice_[i].setFrameWidth(output_.getFrameWidth());
- outputOtherDevice_[i].sequenceStartPositions =
- output_.sequenceStartPositions;
- outputOtherDevice_[i].subSequenceStartPositions =
- output_.subSequenceStartPositions;
- outputOtherDevice_[i].cpuSequenceDims = output_.cpuSequenceDims;
- if (outputOtherDevice_[i].deviceId == CPU_DEVICE) {
- ++cnt;
- }
- }
- if (cnt > 1) {
- LOG(WARNING) << "should not have more than one CPU devie";
- }
- }
-
/**
* If input only has MKLDNN device.
* Otherwise, only support the previous layer using CPU device.
@@ -205,6 +307,7 @@ protected:
*/
void setDevice(int id) { deviceId_ = id; }
+private:
/**
* Set deviceId of the params used in this layer.
*/
@@ -228,6 +331,42 @@ protected:
parameter->setDevice(id);
}
}
+
+ /**
+   * Check the number of CPU devices in outputOtherDevice_;
+   * there should be at most one.
+ */
+ void checkCPUOutputsNumber(int max = 1) {
+ int cnt = 0;
+ for (size_t i = 0; i < outputOtherDevice_.size(); i++) {
+ if (outputOtherDevice_[i].deviceId == CPU_DEVICE) {
+ ++cnt;
+ }
+ }
+    CHECK_LE(cnt, max) << "too many CPU devices";
+ }
+
+ /**
+   * copy SeqInfo from the input layer to this output and the other output
+   * devices.
+   * @note: do not use getInput(0) since it uses this layer's deviceId_;
+   * use "inputLayers_[0]->getOutput()" instead.
+ */
+ void copySeqInfoToOutputs() {
+ if (inputLayers_.empty() || !needSequenceInfo_) {
+ return;
+ }
+ const Argument& input = inputLayers_[0]->getOutput();
+ output_.sequenceStartPositions = input.sequenceStartPositions;
+ output_.subSequenceStartPositions = input.subSequenceStartPositions;
+ output_.cpuSequenceDims = input.cpuSequenceDims;
+ for (size_t i = 0; i < outputOtherDevice_.size(); i++) {
+ outputOtherDevice_[i].sequenceStartPositions =
+ output_.sequenceStartPositions;
+ outputOtherDevice_[i].subSequenceStartPositions =
+ output_.subSequenceStartPositions;
+ outputOtherDevice_[i].cpuSequenceDims = output_.cpuSequenceDims;
+ }
+ }
};
} // namespace paddle
diff --git a/paddle/gserver/layers/MKLDNNPoolLayer.cpp b/paddle/gserver/layers/MKLDNNPoolLayer.cpp
new file mode 100644
index 0000000000..b62dfb7c54
--- /dev/null
+++ b/paddle/gserver/layers/MKLDNNPoolLayer.cpp
@@ -0,0 +1,276 @@
+/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "MKLDNNPoolLayer.h"
+#include "paddle/math/MathUtils.h"
+#include "paddle/utils/Logging.h"
+
+using namespace mkldnn; // NOLINT
+typedef memory::format format;
+
+namespace paddle {
+
+REGISTER_LAYER(mkldnn_pool, MKLDNNPoolLayer);
+
+bool MKLDNNPoolLayer::init(const LayerMap& layerMap,
+ const ParameterMap& parameterMap) {
+ if (!MKLDNNLayer::init(layerMap, parameterMap)) {
+ return false;
+ }
+
+ /* the size of inputs for pool-layer is 1 */
+ CHECK_EQ(config_.inputs_size(), 1);
+ const PoolConfig& conf = config_.inputs(0).pool_conf();
+ ic_ = conf.channels();
+ ih_ = conf.img_size_y();
+ iw_ = conf.img_size();
+ oc_ = ic_;
+ oh_ = conf.output_y();
+ ow_ = conf.output_x();
+ fh_ = conf.size_y();
+ fw_ = conf.size_x();
+ ph_ = conf.padding_y();
+ pw_ = conf.padding();
+ sh_ = conf.stride_y();
+ sw_ = conf.stride();
+
+ const std::string& type = conf.pool_type();
+ if (type == "max-projection") {
+ poolAlgo_ = algorithm::pooling_max;
+ } else if (type == "avg-projection") {
+    // paddle only uses exclude_padding
+ poolAlgo_ = algorithm::pooling_avg_exclude_padding;
+ } else {
+ LOG(FATAL) << "unknow pooling type!";
+ }
+ return true;
+}
+
+void MKLDNNPoolLayer::reshape(
+ int& bs, int& ic, int& ih, int& iw, int oc, int& oh, int& ow) {
+ reshapeInput(bs, ih, iw);
+  // ic_ and oc cannot be changed
+  CHECK_EQ(inputElemenCnt_ / bs / ih / iw, (size_t)ic)
+      << "Input channel cannot be changed";
+
+  // calculate output sizes
+  // paddle uses caffeMode = false for pooling
+ oh = outputSize(ih, fh_, ph_, sh_, false);
+ ow = outputSize(iw, fw_, pw_, sw_, false);
+ reshapeOutput(oh, ow);
+
+ resizeOutput(bs, oc * oh * ow);
+
+ printSizeInfo();
+}
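+
+// Quick example (hypothetical sizes, assuming the non-caffeMode formula in
+// MathUtils.h is the ceil variant oh = (ih - fh + 2 * ph + sh - 1) / sh + 1):
+// ih = 7, fh_ = 3, ph_ = 0, sh_ = 2 gives oh = (7 - 3 + 0 + 1) / 2 + 1 = 3.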
+
+void MKLDNNPoolLayer::resetFwd(std::vector<primitive>& pipeline,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias,
+ MKLDNNMatrixPtr& out) {
+ resetFwdBuffers(in, out);
+
+ resetFwdPD(fwdPD_, in, out);
+
+ resetFwdPipeline(pipeline, fwdPD_, in, out);
+
+ printValueFormatFlow();
+}
+
+void MKLDNNPoolLayer::resetBwd(std::vector<primitive>& pipeline,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias,
+ MKLDNNMatrixPtr& out) {
+  std::shared_ptr<pool_bwd::primitive_desc> pd;
+
+ resetBwdBuffers(in, out);
+
+ resetBwdPD(pd, in, out);
+
+ resetBwdPipeline(pipeline, pd, in, out);
+
+ printGradFormatFlow();
+}
+
+void MKLDNNPoolLayer::updateInputData() {
+ inVal_->setData(getInputValue(0, CPU_DEVICE)->getData());
+}
+
+void MKLDNNPoolLayer::resetFwdBuffers(MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& out) {
+ resetInValue(in);
+
+ resetOutValue(out);
+}
+
+void MKLDNNPoolLayer::resetInValue(MKLDNNMatrixPtr& in) {
+ if (inputIsOnlyMKLDNN()) {
+ const MatrixPtr& dnnIn = getInputValue(0);
+    in = std::dynamic_pointer_cast<MKLDNNMatrix>(dnnIn);
+ CHECK(in) << "Input should be MKLDNNMatrix";
+ } else {
+ CHECK_EQ(getPrev(0)->getDeviceId(), CPU_DEVICE) << "Only support CPU yet";
+ const MatrixPtr& cpuIn = getInputValue(0, CPU_DEVICE);
+ in = MKLDNNMatrix::create(
+ cpuIn, {bs_, ic_, ih_, iw_}, format::nchw, engine_);
+ }
+}
+
+void MKLDNNPoolLayer::resetOutValue(MKLDNNMatrixPtr& out) {
+ CHECK(inVal_) << "Should reset input value first";
+ memory::dims outDims = memory::dims{bs_, oc_, oh_, ow_};
+ out = MKLDNNMatrix::create(
+ output_.value, outDims, inVal_->getFormat(), engine_);
+
+  // create a reorder if the output value has a cpu device copy and
+  // the primitive descs do not match
+ cpuOutVal_ = nullptr;
+ cvtOutVal_ = nullptr;
+ if (!outputIsOnlyMKLDNN()) {
+ const MatrixPtr& cpuOut = getOutput(CPU_DEVICE).value;
+ cpuOutVal_ = MKLDNNMatrix::create(cpuOut, outDims, format::nchw, engine_);
+ if (cpuOutVal_->getPrimitiveDesc() != out->getPrimitiveDesc()) {
+ cvtOutVal_ = MKLDNNMatrix::createReorder(out, cpuOutVal_);
+      CHECK(cvtOutVal_) << "should not be empty";
+    } else {
+      // CPU output shares the same data as the MKLDNN output
+ cpuOut->setData(out->getData());
+ cpuOutVal_ = out;
+ }
+ }
+}
+
+void MKLDNNPoolLayer::resetFwdPD(std::shared_ptr<pool_fwd::primitive_desc>& pd,
+ MKLDNNMatrixPtr in,
+ MKLDNNMatrixPtr out) {
+ memory::dims inDims = memory::dims{bs_, ic_, ih_, iw_};
+ memory::dims outDims = memory::dims{bs_, oc_, oh_, ow_};
+ memory::dims kernels = memory::dims{fh_, fw_};
+ memory::dims strides = memory::dims{sh_, sw_};
+ memory::dims padL = memory::dims{ph_, pw_};
+ memory::dims padR = getPaddingR();
+ padding_kind padKind = padding_kind::zero;
+ prop_kind pk = passType_ == PASS_TEST ? prop_kind::forward_scoring
+ : prop_kind::forward_training;
+ auto fwdDesc = pool_fwd::desc(pk,
+ poolAlgo_,
+ in->getMemoryDesc(),
+ out->getMemoryDesc(),
+ strides,
+ kernels,
+ padL,
+ padR,
+ padKind);
+ pd.reset(new pool_fwd::primitive_desc(fwdDesc, engine_));
+
+ // prepare workspace if necessary
+ workspace_ =
+ (passType_ != PASS_TEST && poolAlgo_ == algorithm::pooling_max)
+          ? std::make_shared<memory>(memory(pd->workspace_primitive_desc()))
+ : nullptr;
+}
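+
+// Max pooling must remember which input element won each window so that
+// backward can route the gradient to it; that is what the workspace stores.
+// Average pooling needs no such record, and inference (PASS_TEST) never runs
+// backward, so both cases leave workspace_ null.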
+
+void MKLDNNPoolLayer::resetFwdPipeline(
+    std::vector<primitive>& pipeline,
+    std::shared_ptr<pool_fwd::primitive_desc>& pd,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& out) {
+ pipeline.clear();
+  fwd_ = workspace_
+             ? std::make_shared<pool_fwd>(pool_fwd(*pd, *in, *out, *workspace_))
+             : std::make_shared<pool_fwd>(pool_fwd(*pd, *in, *out));
+ pipeline.push_back(*fwd_);
+
+ if (cvtOutVal_) {
+ pipeline.push_back(*cvtOutVal_);
+ }
+}
+
+void MKLDNNPoolLayer::resetBwdBuffers(MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& out) {
+ resetOutGrad(out);
+
+ resetInGrad(in);
+}
+void MKLDNNPoolLayer::resetOutGrad(MKLDNNMatrixPtr& out) {
+ CHECK(outVal_) << "Should have output value";
+ out = MKLDNNMatrix::create(output_.grad, outVal_->getPrimitiveDesc());
+
+  // create a reorder if the output grad has a cpu device copy and
+  // the primitive descs do not match
+ cpuOutGrad_ = nullptr;
+ cvtOutGrad_ = nullptr;
+ if (!outputIsOnlyMKLDNN()) {
+ const MatrixPtr& cpuOut = getOutput(CPU_DEVICE).grad;
+ cpuOutGrad_ = MKLDNNMatrix::create(
+ cpuOut, memory::dims{bs_, oc_, oh_, ow_}, format::nchw, engine_);
+ if (cpuOutGrad_->getPrimitiveDesc() != out->getPrimitiveDesc()) {
+ cvtOutGrad_ = MKLDNNMatrix::createReorder(cpuOutGrad_, out);
+      CHECK(cvtOutGrad_) << "should not be empty";
+    } else {
+      // share the same data as the CPU output
+ output_.grad->setData(cpuOut->getData());
+ out = cpuOutGrad_;
+ }
+ }
+}
+
+void MKLDNNPoolLayer::resetInGrad(MKLDNNMatrixPtr& in) {
+ in = nullptr;
+ const MatrixPtr& inGrad = inputLayers_[0]->getOutput().grad;
+ if (inGrad == nullptr) {
+ return;
+ }
+ CHECK(inVal_);
+ in = MKLDNNMatrix::create(inGrad, inVal_->getPrimitiveDesc());
+}
+
+void MKLDNNPoolLayer::resetBwdPD(std::shared_ptr<pool_bwd::primitive_desc>& pd,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& out) {
+ memory::dims kernels = memory::dims{fh_, fw_};
+ memory::dims strides = memory::dims{sh_, sw_};
+ memory::dims padL = memory::dims{ph_, pw_};
+ memory::dims padR = getPaddingR();
+ CHECK(in);
+ CHECK(out);
+ auto bwdDesc = pool_bwd::desc(poolAlgo_,
+ in->getMemoryDesc(),
+ out->getMemoryDesc(),
+ strides,
+ kernels,
+ padL,
+ padR,
+ padding_kind::zero);
+ pd.reset(new pool_bwd::primitive_desc(bwdDesc, engine_, *fwdPD_));
+}
+
+void MKLDNNPoolLayer::resetBwdPipeline(
+    std::vector<primitive>& pipeline,
+    std::shared_ptr<pool_bwd::primitive_desc>& pd,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& out) {
+ pipeline.clear();
+ if (cvtOutGrad_) {
+ pipeline.push_back(*cvtOutGrad_);
+ }
+
+  bwdData_ =
+      workspace_
+          ? std::make_shared<pool_bwd>(pool_bwd(*pd, *out, *workspace_, *in))
+          : std::make_shared<pool_bwd>(pool_bwd(*pd, *out, *in));
+ pipeline.push_back(*bwdData_);
+}
+
+} // namespace paddle
diff --git a/paddle/gserver/layers/MKLDNNPoolLayer.h b/paddle/gserver/layers/MKLDNNPoolLayer.h
new file mode 100644
index 0000000000..891e15a7ef
--- /dev/null
+++ b/paddle/gserver/layers/MKLDNNPoolLayer.h
@@ -0,0 +1,138 @@
+/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+
+#include "MKLDNNLayer.h"
+#include "mkldnn.hpp"
+
+namespace paddle {
+typedef mkldnn::pooling_forward pool_fwd;
+typedef mkldnn::pooling_backward pool_bwd;
+
+/**
+ * @brief A subclass of MKLDNNLayer: the pool layer.
+ *
+ * The config file api is mkldnn_pool
+ */
+class MKLDNNPoolLayer : public MKLDNNLayer {
+protected:
+ // padding height and width
+ int ph_, pw_;
+ // stride height and width
+ int sh_, sw_;
+ // filter(kenerl) height and width
+ int fh_, fw_;
+
+ // pooling_avg or pooling_max
+ mkldnn::algorithm poolAlgo_;
+
+ // MKLDNNMatrixPtr which should be created from CPU Device
+ MKLDNNMatrixPtr cpuOutVal_;
+ MKLDNNMatrixPtr cpuOutGrad_;
+  // convert handles between the CPU device and the MKLDNN device
+  std::shared_ptr<mkldnn::reorder> cvtOutVal_;
+  std::shared_ptr<mkldnn::reorder> cvtOutGrad_;
+
+  // save the forward primitive_desc, which can be reused in backward
+  std::shared_ptr<pool_fwd::primitive_desc> fwdPD_;
+  // according to https://github.com/01org/mkl-dnn/blob/master/tests/gtests/
+  // test_pooling_forward.cpp, pooling needs a workspace for backward
+  std::shared_ptr<mkldnn::memory> workspace_;
+
+public:
+ explicit MKLDNNPoolLayer(const LayerConfig& config) : MKLDNNLayer(config) {}
+
+ ~MKLDNNPoolLayer() {}
+
+ bool init(const LayerMap& layerMap,
+ const ParameterMap& parameterMap) override;
+
+ void reshape(
+ int& bs, int& ic, int& ih, int& iw, int oc, int& oh, int& ow) override;
+
+  void resetFwd(std::vector<mkldnn::primitive>& pipeline,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias,
+ MKLDNNMatrixPtr& out) override;
+
+  void resetBwd(std::vector<mkldnn::primitive>& pipeline,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& wgt,
+ MKLDNNMatrixPtr& bias,
+ MKLDNNMatrixPtr& out) override;
+
+ void updateInputData() override;
+
+ void printSizeInfo() override {
+ MKLDNNLayer::printSizeInfo();
+ VLOG(MKLDNN_SIZES) << getName() << ": fh: " << fh_ << ", fw: " << fw_
+ << ": ph: " << ph_ << ", pw: " << pw_ << ", sh: " << sh_
+ << ", sw: " << sw_;
+ }
+
+protected:
+ /**
+ * Forward functions: reset buffers(input, output),
+ * reset primitive descriptor,
+ * reset pipeline.
+ */
+ void resetFwdBuffers(MKLDNNMatrixPtr& in, MKLDNNMatrixPtr& out);
+ void resetInValue(MKLDNNMatrixPtr& in);
+ void resetOutValue(MKLDNNMatrixPtr& out);
+  void resetFwdPD(std::shared_ptr<pool_fwd::primitive_desc>& pd,
+ MKLDNNMatrixPtr in,
+ MKLDNNMatrixPtr out);
+  void resetFwdPipeline(std::vector<mkldnn::primitive>& pipeline,
+                        std::shared_ptr<pool_fwd::primitive_desc>& pd,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& out);
+
+ /**
+ * Backward functions: reset buffers(input, output),
+ * reset primitive descriptor,
+ * reset pipeline.
+ */
+ void resetBwdBuffers(MKLDNNMatrixPtr& in, MKLDNNMatrixPtr& out);
+ void resetOutGrad(MKLDNNMatrixPtr& out);
+ void resetInGrad(MKLDNNMatrixPtr& in);
+  void resetBwdPD(std::shared_ptr<pool_bwd::primitive_desc>& pd,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& out);
+  void resetBwdPipeline(std::vector<mkldnn::primitive>& pipeline,
+                        std::shared_ptr<pool_bwd::primitive_desc>& pd,
+ MKLDNNMatrixPtr& in,
+ MKLDNNMatrixPtr& out);
+
+ /**
+ * get padding_r according to
+ * https://github.com/01org/mkl-dnn/blob/master/tests/gtests/
+ * test_pooling_forward.cpp
+ */
+ mkldnn::memory::dims getPaddingR() const {
+ mkldnn::memory::dims padR = {ph_, pw_};
+ for (int i = 0; i < 2; ++i) {
+ if ((ih_ + ph_ + padR[0] - fh_) / sh_ + 1 < oh_) {
+ ++padR[0];
+ }
+ if ((iw_ + pw_ + padR[1] - fw_) / sw_ + 1 < ow_) {
+ ++padR[1];
+ }
+ }
+ return padR;
+ }
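+
+  // Worked example (hypothetical sizes): ih_ = 7, fh_ = 3, ph_ = 0, sh_ = 2
+  // and oh_ = 3 gives (7 + 0 + 0 - 3) / 2 + 1 = 3, which is not < oh_, so
+  // padR stays {0, pw_}; with a ceil-mode oh_ of 4 the loop would bump
+  // padR[0] until the final partial window is covered.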
+};
+
+} // namespace paddle
diff --git a/paddle/gserver/layers/SequenceSliceLayer.cpp b/paddle/gserver/layers/SequenceSliceLayer.cpp
index d3a83fad27..ce68ca4494 100644
--- a/paddle/gserver/layers/SequenceSliceLayer.cpp
+++ b/paddle/gserver/layers/SequenceSliceLayer.cpp
@@ -73,9 +73,10 @@ void SequenceSliceLayer::checkInputs() {
CHECK(inputSeq.hasSeq()) << "The first input of sequence slice layer "
<< "must be a sequence.";
const MatrixPtr indices1 = getInputValue(1);
- CHECK_EQ(static_cast(indices1->getHeight()),
- inputSeq.hasSubseq() ? inputSeq.getNumSubSequences()
- : inputSeq.getNumSequences())
+ CHECK_EQ(
+ indices1->getHeight(),
+ static_cast(inputSeq.hasSubseq() ? inputSeq.getNumSubSequences()
+ : inputSeq.getNumSequences()))
<< "Height of the second input should be equal to number of sequence "
<< "in the first input.";
if (inputLayers_.size() == 3) {
@@ -151,7 +152,7 @@ void SequenceSliceLayer::calSelectedRows(const MatrixPtr starts,
if (ends) endPos = inputSeqInfoVec_[i][j] + ends->getElement(rowIdx, k);
int seqLen = endPos - begPos + 1;
- CHECK_GT(seqLen, 0U);
+ CHECK_GT(seqLen, 0);
for (int m = begPos; m <= endPos; ++m) selectedRows_.push_back(m);
hasSubseq
? outSubSeqStartPos_.push_back(outSubSeqStartPos_.back() + seqLen)
diff --git a/paddle/gserver/layers/SwitchOrderLayer.cpp b/paddle/gserver/layers/SwitchOrderLayer.cpp
index d7eee6eaf0..e97809141a 100644
--- a/paddle/gserver/layers/SwitchOrderLayer.cpp
+++ b/paddle/gserver/layers/SwitchOrderLayer.cpp
@@ -83,8 +83,7 @@ void SwitchOrderLayer::forward(PassType passType) {
setOutDims();
resetOutput(outDims_[0], outDims_[1] * outDims_[2] * outDims_[3]);
if (heightAxis_.size() > 0) {
- getOutputValue()->reshape(reshapeHeight_, reshapeWidth_);
- getOutputGrad()->reshape(reshapeHeight_, reshapeWidth_);
+ resetOutput(reshapeHeight_, reshapeWidth_);
}
// switch NCHW to NHWC
diff --git a/paddle/gserver/tests/MKLDNNTester.cpp b/paddle/gserver/tests/MKLDNNTester.cpp
index de1635be2a..f59618be9d 100644
--- a/paddle/gserver/tests/MKLDNNTester.cpp
+++ b/paddle/gserver/tests/MKLDNNTester.cpp
@@ -63,12 +63,18 @@ void MKLDNNTester::reset(const TestConfig& dnn,
initTestLayer(
configs_[i], &(layerMaps_[i]), &(parameters_[i]), &(testLayers_[i]));
}
- dnnLayer_ = testLayers_[DNN];
refLayer_ = testLayers_[REF];
+ dnnLayer_ = testLayers_[DNN];
EXPECT_EQ(dataLayers_[DNN].size(), dataLayers_[REF].size());
EXPECT_EQ(parameters_[DNN].size(), parameters_[REF].size());
-
setInputImgSize();
+
+  // to compare with the paddle reference results,
+  // we need to manually add a cpu device output for the test
+  MKLDNNLayerPtr dnnLayer = std::dynamic_pointer_cast<MKLDNNLayer>(dnnLayer_);
+ if (dnnLayer) {
+ dnnLayer->addOutputArgument(CPU_DEVICE);
+ }
}
void MKLDNNTester::setInputImgSize() {
@@ -109,20 +115,22 @@ void MKLDNNTester::randomBotDatas() {
void MKLDNNTester::randomTopDiffs() {
refLayer_->getOutputGrad()->randomizeUniform();
- dnnLayer_->getOutputGrad()->copyFrom(*(refLayer_->getOutputGrad()));
- VLOG(lvl_) << "Random dom Backward Input, TopDiff: ";
+ dnnLayer_->getOutput(CPU_DEVICE)
+ .grad->copyFrom(*(refLayer_->getOutputGrad()));
+ VLOG(lvl_) << "Random Backward Input, TopDiff: ";
printMatrix(refLayer_->getOutputGrad());
}
void MKLDNNTester::checkForward() {
- printTopDatas();
- double delta = compareMatrix(testLayers_[DNN]->getOutputValue(),
- testLayers_[REF]->getOutputValue());
VLOG(MKLDNN_ALL) << "Check Forward";
+ printTopDatas();
+ double delta = compareMatrix(dnnLayer_->getOutput(CPU_DEVICE).value,
+ refLayer_->getOutputValue());
EXPECT_LE(fabs(delta), eps_);
}
void MKLDNNTester::checkBackwardData() {
+ VLOG(MKLDNN_ALL) << "Check Backward Data";
// TODO(TJ): uncomment me when batch norm ready
// const bool isBN = dnnLayer_->getType() == "mkldnn_batch_norm";
for (size_t i = 0; i < dataLayers_[DNN].size(); ++i) {
@@ -144,14 +152,15 @@ void MKLDNNTester::checkBackwardData() {
}
void MKLDNNTester::checkBackwardWgts() {
+ VLOG(MKLDNN_ALL) << "Check Backward Weight";
CHECK_EQ(parameters_[DNN].size(), parameters_[REF].size());
vector<VectorPtr> dnnWgts; // used to temply save mkldnn weights
saveWgt(parameters_[DNN], dnnWgts);
- const MKLDNNLayerPtr dnnlayer =
- std::dynamic_pointer_cast<MKLDNNLayer>(dnnLayer_);
- CHECK(dnnlayer);
- dnnlayer->convertWeightsToPaddle();
+ MKLDNNLayerPtr dnnLayer = std::dynamic_pointer_cast<MKLDNNLayer>(dnnLayer_);
+ if (dnnLayer) {
+ dnnLayer->convertWeightsToPaddle();
+ }
for (size_t i = 0; i < parameters_[DNN].size(); ++i) {
const VectorPtr& dnn = parameters_[DNN][i]->getBuf(PARAMETER_VALUE);
const VectorPtr& ref = parameters_[REF][i]->getBuf(PARAMETER_VALUE);
@@ -189,38 +198,38 @@ void MKLDNNTester::restoreWgt(const vector<VectorPtr>& from,
}
// clear parameters grad
-void MKLDNNTester::clearWgtDiffs() {
+void MKLDNNTester::clearWgtDiffs(size_t id) {
+ CHECK_LE(id, parameters_.size());
for (size_t n = 0; n < parameters_.size(); ++n) {
- for (size_t i = 0; i < parameters_[n].size(); ++i) {
- const VectorPtr& grad = parameters_[n][i]->getBuf(PARAMETER_GRADIENT);
- if (grad) {
- grad->zeroMem();
+ if (id == n || id == parameters_.size()) {
+ for (size_t i = 0; i < parameters_[n].size(); ++i) {
+ const VectorPtr& grad = parameters_[n][i]->getBuf(PARAMETER_GRADIENT);
+ if (grad) {
+ grad->zeroMem();
+ }
}
}
}
}
-void MKLDNNTester::clearBotDiffs() {
- // dnn and ref
+void MKLDNNTester::clearBotDiffs(size_t id) {
+ CHECK_LE(id, dataLayers_.size());
for (size_t n = 0; n < dataLayers_.size(); ++n) {
- // all inputs layers
- for (size_t i = 0; i < dataLayers_[n].size(); ++i) {
- dataLayers_[n][i]->getOutputGrad()->zeroMem();
+ if (id == n || id == dataLayers_.size()) {
+ // clear the input layers of this specific layer
+ for (size_t i = 0; i < dataLayers_[n].size(); ++i) {
+ dataLayers_[n][i]->getOutputGrad()->zeroMem();
+ }
}
}
}
-void MKLDNNTester::clearBotDiffs(int n) {
- CHECK_LT(n, NUM);
- // all inputs layers
- for (size_t i = 0; i < dataLayers_[n].size(); ++i) {
- dataLayers_[n][i]->getOutputGrad()->zeroMem();
- }
-}
-
-void MKLDNNTester::clearTopDatas() {
+void MKLDNNTester::clearTopDatas(size_t id) {
+ CHECK_LE(id, testLayers_.size());
for (size_t i = 0; i < testLayers_.size(); ++i) {
- testLayers_[i]->getOutputValue()->zeroMem();
+ if (id == i || id == testLayers_.size()) {
+ testLayers_[i]->getOutputValue()->zeroMem();
+ }
}
}
@@ -300,16 +309,28 @@ void MKLDNNTester::runOnce() {
checkForward();
// test backward
+ // simple updater
+ UpdateCallback updateCallback = [](Parameter* para) {
+ auto& grad = para->getBuf(PARAMETER_GRADIENT);
+ auto& value = para->getBuf(PARAMETER_VALUE);
+ real lr = 1e-3;
+ value->add(*grad, lr);
+ };
randomTopDiffs();
- dnnLayer_->backward(nullptr);
- refLayer_->backward(nullptr);
+ dnnLayer_->backward(updateCallback);
+ refLayer_->backward(updateCallback);
checkBackwardData();
checkBackwardWgts();
// clear buffers
// ref code will addto the diff, dnn code will writeto it
- // and clearTopDatas() and clearWgtDiffs() should be coverd by test layers
+ // and clearTopDatas(REF) should be covered by ref layers
clearBotDiffs(REF);
+ clearWgtDiffs(REF);
+ // it is necessary to clear the bottom diffs when only the activation is dnn type
+ if (configs_[DNN].layerConfig.active_type().compare(0, 7, "mkldnn_") == 0) {
+ clearBotDiffs(DNN);
+ }
}
void MKLDNNTester::run(const TestConfig& dnn,
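The inline updater added to runOnce() applies a plain value->add(*grad, lr) step so that backward actually changes the parameters under test. A minimal sketch of that update rule on raw arrays (illustrative names, not Paddle API):

#include <vector>

// Elementwise value[i] += lr * grad[i], the same rule the lambda applies
// through the Parameter buffers with a fixed learning rate of 1e-3.
void sgdStep(std::vector<float>& value, const std::vector<float>& grad,
             float lr = 1e-3f) {
  for (size_t i = 0; i < value.size(); ++i) {
    value[i] += lr * grad[i];
  }
}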
@@ -321,8 +342,19 @@ void MKLDNNTester::run(const TestConfig& dnn,
float epsilon,
bool log,
int level) {
- VLOG(MKLDNN_TESTS) << "Test MKLDNN functionality: " << dnn.layerConfig.type()
- << " vs " << ref.layerConfig.type();
+ CHECK(dnn.layerConfig.type().compare(0, 7, "mkldnn_") == 0 ||
+ dnn.layerConfig.active_type().compare(0, 7, "mkldnn_") == 0)
+ << "should be MKLDNN layer or MKLDNN activation";
+ if (dnn.layerConfig.type() == ref.layerConfig.type()) {
+ VLOG(MKLDNN_TESTS) << "Test MKLDNN functionality: "
+ << dnn.layerConfig.active_type() << " vs "
+ << ref.layerConfig.active_type();
+ } else {
+ VLOG(MKLDNN_TESTS) << "Test MKLDNN functionality: "
+ << dnn.layerConfig.type() << " vs "
+ << ref.layerConfig.type();
+ }
+
ih_ = inputImgH;
iw_ = inputImgW;
iter_ = iter;
diff --git a/paddle/gserver/tests/MKLDNNTester.h b/paddle/gserver/tests/MKLDNNTester.h
index e55e4493ff..171d176ee7 100644
--- a/paddle/gserver/tests/MKLDNNTester.h
+++ b/paddle/gserver/tests/MKLDNNTester.h
@@ -18,6 +18,7 @@ limitations under the License. */
#include
#include "LayerGradUtil.h"
#include "paddle/gserver/layers/MKLDNNBase.h"
+#include "paddle/gserver/layers/MKLDNNLayer.h"
namespace paddle {
@@ -40,7 +41,7 @@ protected:
vector<LayerMap> layerMaps_;
vector<vector<ParameterPtr>> parameters_;
vector<LayerPtr> testLayers_;
- LayerPtr dnnLayer_, refLayer_;
+ LayerPtr refLayer_, dnnLayer_;
/// run some iterations, all the result should pass
size_t iter_;
@@ -88,10 +89,10 @@ private:
void checkBackwardData();
void checkBackwardWgts();
- void clearWgtDiffs();
- void clearBotDiffs();
- void clearBotDiffs(int n); // clear specific layer
- void clearTopDatas();
+ // clear a specific layer; clear all when id equals NUM
+ void clearWgtDiffs(size_t id = NUM);
+ void clearBotDiffs(size_t id = NUM);
+ void clearTopDatas(size_t id = NUM);
void printTopDatas();
void printMatrix(const MatrixPtr& m);
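The new clear*(size_t id = NUM) signatures share one sentinel convention: an id smaller than the container size clears that single entry, while id == size (the default NUM) clears every entry. A generic sketch of the pattern:

#include <cstddef>

// Apply fn to entry id only, or to all n entries when id == n,
// matching the convention of clearWgtDiffs/clearBotDiffs/clearTopDatas.
template <typename F>
void applyToIdOrAll(std::size_t id, std::size_t n, F fn) {
  for (std::size_t i = 0; i < n; ++i) {
    if (id == i || id == n) {
      fn(i);
    }
  }
}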
diff --git a/paddle/gserver/tests/test_MKLDNN.cpp b/paddle/gserver/tests/test_MKLDNN.cpp
index e1d2270df2..1bfbbde424 100644
--- a/paddle/gserver/tests/test_MKLDNN.cpp
+++ b/paddle/gserver/tests/test_MKLDNN.cpp
@@ -17,6 +17,8 @@ limitations under the License. */
#include <gtest/gtest.h>
#include "MKLDNNTester.h"
#include "ModelConfig.pb.h"
+#include "paddle/gserver/activations/MKLDNNActivation.h"
+#include "paddle/math/MathUtils.h"
using namespace paddle; // NOLINT
@@ -24,17 +26,26 @@ DECLARE_bool(thread_local_rand_use_global_seed);
DECLARE_bool(use_gpu);
DECLARE_bool(use_mkldnn);
-struct testFCDesc {
+#define RUN_MKLDNN_TEST(DNN_CONFIG, REF_CONFIG, DESC) \
+ MKLDNNTester tester; \
+ for (auto bs : {DESC.bs, 1}) { \
+ tester.run(DNN_CONFIG, REF_CONFIG, bs, DESC.ih, DESC.iw); \
+ }
+
+#define RUN_MKLDNN_TEST_LAYER(DNN_CONFIG, REF_TYPE, DESC) \
+ TestConfig ref = DNN_CONFIG; \
+ ref.layerConfig.set_type(REF_TYPE); \
+ RUN_MKLDNN_TEST(DNN_CONFIG, ref, DESC)
+
+struct testFcDesc {
int bs;
int ic;
- int oc;
int ih, iw; // oh == ow == 1
+ int oc;
};
-void testFcLayer(const testFCDesc& pm) {
- const std::string compareTypes[] = {"mkldnn_fc", "fc"};
- TestConfig cfg;
- cfg.layerConfig.set_type(compareTypes[0]);
+static void getMKLDNNFcConfig(TestConfig& cfg, const testFcDesc& pm) {
+ cfg.layerConfig.set_type("mkldnn_fc");
cfg.layerConfig.set_size(pm.oc);
cfg.inputDefs.push_back(
{INPUT_DATA,
@@ -42,25 +53,194 @@ void testFcLayer(const testFCDesc& pm) {
/* size of input layer= */ size_t(pm.ic * pm.ih * pm.iw),
/* size of weight= */ size_t(pm.oc * pm.ic * pm.ih * pm.iw)});
cfg.layerConfig.add_inputs();
+}
- MKLDNNTester tester;
+void testFcLayer(const testFcDesc& pm) {
+ TestConfig dnnConfig;
+ getMKLDNNFcConfig(dnnConfig, pm);
for (auto biasSize : {pm.oc, 0}) {
- cfg.biasSize = biasSize;
- TestConfig ref = cfg;
- ref.layerConfig.set_type(compareTypes[1]);
- for (auto bs : {pm.bs, 1}) {
- tester.run(cfg, ref, bs, pm.ih, pm.iw);
- }
+ dnnConfig.biasSize = biasSize;
+ RUN_MKLDNN_TEST_LAYER(dnnConfig, "fc", pm)
}
}
TEST(MKLDNNLayer, FcLayer) {
- testFcLayer({/*bs*/ 2, /*ic*/ 2, /*oc*/ 3, /*ih*/ 1, /*iw*/ 1});
- testFcLayer({/*bs*/ 3, /*ic*/ 7, /*oc*/ 19, /*ih*/ 1, /*iw*/ 1});
- testFcLayer({/*bs*/ 8, /*ic*/ 16, /*oc*/ 32, /*ih*/ 13, /*iw*/ 13});
- testFcLayer({/*bs*/ 4, /*ic*/ 12, /*oc*/ 18, /*ih*/ 13, /*iw*/ 11});
- testFcLayer({/*bs*/ 2, /*ic*/ 64, /*oc*/ 32, /*ih*/ 16, /*iw*/ 16});
- testFcLayer({/*bs*/ 15, /*ic*/ 3, /*oc*/ 6, /*ih*/ 16, /*iw*/ 16});
+ /* bs, ic, ih, iw, oc */
+ testFcLayer({2, 2, 1, 1, 3});
+ testFcLayer({3, 7, 1, 1, 19});
+ testFcLayer({8, 16, 13, 13, 32});
+ testFcLayer({4, 12, 13, 13, 18});
+ testFcLayer({2, 64, 16, 16, 32});
+ testFcLayer({15, 3, 16, 16, 6});
+}
+
+struct testConvDesc {
+ int bs, gp;
+ int ic, ih, iw;
+ int oc, oh, ow;
+ int fh, fw;
+ int ph, pw;
+ int sh, sw;
+ int dh, dw;
+};
+
+static void getMKLDNNConvConfig(TestConfig& cfg, const testConvDesc& pm) {
+ cfg.layerConfig.set_type("mkldnn_conv");
+ cfg.layerConfig.set_num_filters(pm.oc);
+ cfg.layerConfig.set_size(pm.oc * pm.oh * pm.ow);
+ cfg.layerConfig.set_shared_biases(true);
+ cfg.inputDefs.push_back(
+ {INPUT_DATA,
+ "layer_0",
+ /* size of input layer= */ size_t(pm.ic * pm.ih * pm.iw),
+ /* size of weight= */ size_t(pm.oc * pm.ic * pm.fh * pm.fw / pm.gp)});
+ LayerInputConfig* input = cfg.layerConfig.add_inputs();
+ ConvConfig* conv = input->mutable_conv_conf();
+ conv->set_groups(pm.gp);
+ conv->set_img_size(pm.iw);
+ conv->set_img_size_y(pm.ih);
+ conv->set_output_x(pm.ow);
+ conv->set_output_y(pm.oh);
+ conv->set_filter_size(pm.fw);
+ conv->set_filter_size_y(pm.fh);
+ conv->set_channels(pm.ic);
+ conv->set_padding(pm.pw);
+ conv->set_padding_y(pm.ph);
+ conv->set_stride(pm.sw);
+ conv->set_stride_y(pm.sh);
+ conv->set_dilation(pm.dw);
+ conv->set_dilation_y(pm.dh);
+ conv->set_caffe_mode(true);
+ conv->set_filter_channels(conv->channels() / conv->groups());
+ CHECK_EQ(conv->filter_channels() * pm.gp, conv->channels())
+ << "it is indivisible";
+
+ int fh = (pm.fh - 1) * pm.dh + 1;
+ int fw = (pm.fw - 1) * pm.dw + 1;
+ int ow = outputSize(pm.iw, fw, pm.pw, pm.sw, true);
+ int oh = outputSize(pm.ih, fh, pm.ph, pm.sh, true);
+ CHECK_EQ(ow, pm.ow) << "output size check failed";
+ CHECK_EQ(oh, pm.oh) << "output size check failed";
+}
+
+void testConvLayer(const testConvDesc& pm) {
+ TestConfig dnnConfig;
+ getMKLDNNConvConfig(dnnConfig, pm);
+ for (auto biasSize : {pm.oc, 0}) {
+ dnnConfig.biasSize = biasSize;
+ RUN_MKLDNN_TEST_LAYER(dnnConfig, "exconv", pm)
+ }
+}
+
+TEST(MKLDNNLayer, ConvLayer) {
+ /* bs, gp, ic, ih, iw, oc, oh, ow, fh, fw, ph, pw, sh, sw, dh, dw */
+ testConvLayer({2, 1, 3, 32, 32, 16, 32, 32, 3, 3, 1, 1, 1, 1, 1, 1});
+ testConvLayer({2, 1, 8, 16, 16, 8, 16, 16, 3, 3, 1, 1, 1, 1, 1, 1});
+ testConvLayer({3, 1, 16, 32, 32, 3, 32, 32, 3, 3, 1, 1, 1, 1, 1, 1});
+ testConvLayer({8, 1, 16, 18, 18, 32, 18, 18, 3, 3, 1, 1, 1, 1, 1, 1});
+ testConvLayer({16, 1, 1, 42, 31, 32, 23, 11, 4, 5, 3, 2, 2, 3, 1, 1});
+ testConvLayer({2, 1, 8, 16, 16, 8, 8, 8, 3, 3, 1, 1, 2, 2, 1, 1});
+ testConvLayer({3, 1, 8, 13, 13, 8, 7, 7, 3, 3, 1, 1, 2, 2, 1, 1});
+ // with groups
+ testConvLayer({2, 2, 4, 5, 5, 8, 5, 5, 3, 3, 1, 1, 1, 1, 1, 1});
+ testConvLayer({2, 3, 3, 5, 5, 3, 5, 5, 3, 3, 1, 1, 1, 1, 1, 1});
+ testConvLayer({4, 4, 16, 3, 3, 16, 3, 3, 3, 3, 1, 1, 1, 1, 1, 1});
+}
+
+struct testPoolDesc {
+ int bs, ic; // input and output channels are the same
+ int ih, iw;
+ int oh, ow;
+ int fh, fw;
+ int ph, pw;
+ int sh, sw;
+};
+
+static void getMKLDNNPoolConfig(TestConfig& cfg, const testPoolDesc& pm) {
+ cfg.layerConfig.set_type("mkldnn_pool");
+ cfg.layerConfig.set_size(pm.ic * pm.oh * pm.ow);
+ cfg.inputDefs.push_back(
+ {INPUT_DATA,
+ "layer_0",
+ /* size of input layer= */ size_t(pm.ic * pm.ih * pm.iw),
+ 0});
+ LayerInputConfig* input = cfg.layerConfig.add_inputs();
+ PoolConfig* pool = input->mutable_pool_conf();
+ pool->set_pool_type("avg-projection");
+ pool->set_channels(pm.ic);
+ pool->set_img_size(pm.iw);
+ pool->set_img_size_y(pm.ih);
+ pool->set_output_x(pm.ow);
+ pool->set_output_y(pm.oh);
+ pool->set_size_x(pm.fw);
+ pool->set_size_y(pm.fh);
+ pool->set_padding(pm.pw);
+ pool->set_padding_y(pm.ph);
+ pool->set_stride(pm.sw);
+ pool->set_stride_y(pm.sh);
+
+ int oh = outputSize(pm.ih, pm.fh, pm.ph, pm.sh, false);
+ int ow = outputSize(pm.iw, pm.fw, pm.pw, pm.sw, false);
+ CHECK_EQ(ow, pm.ow) << "output size check failed";
+ CHECK_EQ(oh, pm.oh) << "output size check failed";
+}
+
+void testPoolLayer(const testPoolDesc& pm) {
+ TestConfig dnnConfig;
+ getMKLDNNPoolConfig(dnnConfig, pm);
+ LayerInputConfig* input = dnnConfig.layerConfig.mutable_inputs(0);
+ PoolConfig* pool = input->mutable_pool_conf();
+ for (auto type : {"max-projection", "avg-projection"}) {
+ pool->set_pool_type(type);
+ RUN_MKLDNN_TEST_LAYER(dnnConfig, "pool", pm)
+ }
+}
+
+TEST(MKLDNNLayer, PoolLayer) {
+ /* bs, ch, ih, iw, oh, ow, fh, fw, ph, pw, sh, sw */
+ testPoolLayer({2, 1, 4, 4, 2, 2, 3, 3, 0, 0, 2, 2});
+ testPoolLayer({10, 8, 16, 16, 8, 8, 2, 2, 0, 0, 2, 2});
+ testPoolLayer({4, 2, 5, 5, 3, 3, 3, 3, 1, 1, 2, 2});
+ testPoolLayer({8, 16, 56, 56, 28, 28, 3, 3, 0, 0, 2, 2});
+ testPoolLayer({8, 16, 14, 14, 7, 7, 3, 3, 0, 0, 2, 2});
+ testPoolLayer({4, 16, 7, 7, 1, 1, 7, 7, 0, 0, 1, 1});
+ testPoolLayer({4, 2, 5, 5, 3, 3, 5, 5, 1, 1, 1, 1});
+ testPoolLayer({2, 8, 56, 56, 29, 29, 3, 3, 1, 1, 2, 2});
+}
+
+struct testActDesc {
+ int bs, ic, ih, iw;
+};
+
+static void getAddtoConfig(TestConfig& cfg, const testActDesc& pm) {
+ cfg.biasSize = 0;
+ cfg.layerConfig.set_type("addto");
+ size_t layerSize = pm.ic * pm.ih * pm.iw;
+ cfg.layerConfig.set_size(layerSize);
+ cfg.inputDefs.push_back({INPUT_DATA, "layer_0", layerSize, 0});
+ cfg.layerConfig.add_inputs();
+}
+
+void testActivation(std::string& actType, const testActDesc& pm) {
+ // TODO(TJ): mkldnn_softmax is not implemented yet, and paddle does not have an elu activation
+ if (actType == "mkldnn_softmax" || actType == "mkldnn_elu") {
+ return;
+ }
+ const std::string compareTypes[] = {actType, actType.erase(0, 7)};
+ TestConfig cfg;
+ getAddtoConfig(cfg, pm);
+ TestConfig ref = cfg;
+ cfg.layerConfig.set_active_type(compareTypes[0]);
+ ref.layerConfig.set_active_type(compareTypes[1]);
+ RUN_MKLDNN_TEST(cfg, ref, pm)
+}
+
+TEST(MKLDNNActivation, Activations) {
+ auto types = MKLDNNActivation::getAllRegisteredTypes();
+ for (auto type : types) {
+ /* bs, c, h, w */
+ testActivation(type, {16, 64, 32, 32});
+ }
}
// TODO(TJ): add branch test
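Both config helpers above validate shapes with outputSize() from paddle/math/MathUtils.h: the conv helper passes caffeMode = true (floor division, after dilating the filter), the pool helper caffeMode = false (ceil division). A hedged sketch of that formula, assuming the usual semantics:

// Floor the window count in caffe mode, ceil it otherwise.
int outputSizeSketch(int imgSize, int filterSize, int padding, int stride,
                     bool caffeMode) {
  int span = imgSize - filterSize + 2 * padding;
  return caffeMode ? span / stride + 1 : (span + stride - 1) / stride + 1;
}

// e.g. the first pool test {2, 1, 4, 4, 2, 2, 3, 3, 0, 0, 2, 2}:
// outputSizeSketch(4, 3, 0, 2, false) == (1 + 1) / 2 + 1 == 2 == oh == ow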
diff --git a/paddle/math/BaseMatrix.cu b/paddle/math/BaseMatrix.cu
index 5435808fb7..53dd538360 100644
--- a/paddle/math/BaseMatrix.cu
+++ b/paddle/math/BaseMatrix.cu
@@ -17,6 +17,7 @@ limitations under the License. */
#include
#include "BaseMatrix.h"
#include "MathFunctions.h"
+#include "NEONFunctions.h"
#include "SIMDFunctions.h"
#include "hl_matrix_apply.cuh"
#include "hl_matrix_base.cuh"
@@ -666,6 +667,13 @@ void BaseMatrixT::relu(BaseMatrixT& b) {
applyBinary(binary::Relu(), b);
}
+#if defined(__ARM_NEON__) || defined(__ARM_NEON)
+template <>
+void BaseMatrixT<float>::relu(BaseMatrixT& b) {
+ neon::relu(data_, b.data_, height_ * width_);
+}
+#endif
+
DEFINE_MATRIX_BINARY_OP(ReluDerivative, a *= (b > 0.0f ? 1.0f : 0.0f));
template <class T>
void BaseMatrixT<T>::reluDerivative(BaseMatrixT& b) {
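The specialization above routes float relu through NEON on ARM builds; NEONFunctions.h is not part of this hunk, so the following is only a sketch of what such a kernel typically looks like (4-wide float lanes with a scalar tail), not the actual implementation:

#include <arm_neon.h>

// Vectorized max(x, 0): four floats per iteration, remainder done scalar.
void reluNeonSketch(const float* in, float* out, int n) {
  int i = 0;
  const float32x4_t zero = vdupq_n_f32(0.0f);
  for (; i + 4 <= n; i += 4) {
    vst1q_f32(out + i, vmaxq_f32(vld1q_f32(in + i), zero));
  }
  for (; i < n; ++i) {
    out[i] = in[i] > 0.0f ? in[i] : 0.0f;
  }
}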
diff --git a/paddle/math/MKLDNNMatrix.cpp b/paddle/math/MKLDNNMatrix.cpp
index c4063e5069..0778bb63b7 100644
--- a/paddle/math/MKLDNNMatrix.cpp
+++ b/paddle/math/MKLDNNMatrix.cpp
@@ -49,6 +49,27 @@ MKLDNNMatrixPtr MKLDNNMatrix::create(MatrixPtr m,
return create(m, memory::primitive_desc(memory::desc(dims, dtype, fmt), eg));
}
+std::shared_ptr<mkldnn::reorder> MKLDNNMatrix::createReorder(const MKLDNNMatrixPtr& src,
+ const MKLDNNMatrixPtr& dst,
+ bool checkData) {
+ if (src == dst || src->getPrimitiveDesc() == dst->getPrimitiveDesc()) {
+ return nullptr;
+ }
+
+ if (checkData && (src->getData() == dst->getData())) {
+ LOG(FATAL) << "can not create reorder with inplace data";
+ return nullptr;
+ }
+
+ memory::dims srcDims = src->getDims();
+ memory::dims dstDims = dst->getDims();
+ CHECK_EQ(srcDims.size(), dstDims.size());
+ for (size_t i = 0; i < srcDims.size(); ++i) {
+ CHECK_EQ(srcDims[i], dstDims[i]);
+ }
+ return std::make_shared<mkldnn::reorder>(*src, *dst);
+}
+
void MKLDNNMatrix::reorderDataFrom(const MKLDNNMatrixPtr& m,
memory::format srcFmt,
memory::dims targetDim) {
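A typical call site for createReorder() looks like the sketch below (reorderIfNeeded is illustrative; the eager-stream submit is the mkldnn v0.x API this code targets). A nullptr result means the primitive descriptors already match and nothing needs to be queued:

#include <vector>
#include "mkldnn.hpp"
#include "paddle/math/MKLDNNMatrix.h"

// Queue a reorder only when src and dst differ in layout, then execute it.
void reorderIfNeeded(const paddle::MKLDNNMatrixPtr& src,
                     const paddle::MKLDNNMatrixPtr& dst) {
  std::vector<mkldnn::primitive> pipeline;
  auto cvt = paddle::MKLDNNMatrix::createReorder(src, dst);
  if (cvt) {
    pipeline.push_back(*cvt);
  }
  if (!pipeline.empty()) {
    mkldnn::stream(mkldnn::stream::kind::eager).submit(pipeline).wait();
  }
}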
diff --git a/paddle/math/MKLDNNMatrix.h b/paddle/math/MKLDNNMatrix.h
index eef3b429e6..c843115eb9 100644
--- a/paddle/math/MKLDNNMatrix.h
+++ b/paddle/math/MKLDNNMatrix.h
@@ -52,6 +52,32 @@ public:
mkldnn::engine& eg,
mkldnn::memory::data_type dtype = mkldnn::memory::data_type::f32);
+ /**
+ * Create Memory descriptor.
+ * default with any format and f32 dtype
+ */
+ static mkldnn::memory::desc createMemoryDesc(
+ const mkldnn::memory::dims& dims,
+ const mkldnn::memory::format& fmt = mkldnn::memory::format::any,
+ const mkldnn::memory::data_type& dtype = mkldnn::memory::data_type::f32) {
+ return mkldnn::memory::desc(dims, dtype, fmt);
+ }
+
+ /**
+ * Create reorder primitive.
+ * Create a mkldnn::reorder handle for converting src MKLDNNMatrix to dst.
+ * checkData: whether to check the data handles of src and dst.
+ * if true, the two data handles must not be equal;
+ * otherwise no check is done, and the created reorder
+ * may operate on an in-place buffer.
+ * Do not pass false unless you can guarantee the in-place
+ * logic works with your reorder.
+ */
+ static std::shared_ptr<mkldnn::reorder> createReorder(
+ const MKLDNNMatrixPtr& src,
+ const MKLDNNMatrixPtr& dst,
+ bool checkData = true);
+
public:
/**
* Reorder this MKLDNNMatrix from other format.
diff --git a/paddle/math/MathFunctions.h b/paddle/math/MathFunctions.h
index e8ea6e37ac..8193aa4adf 100644
--- a/paddle/math/MathFunctions.h
+++ b/paddle/math/MathFunctions.h
@@ -26,7 +26,7 @@ limitations under the License. */
#include
#endif
-#ifdef PADDLE_USE_ATLAS
+#if defined(PADDLE_USE_ATLAS) || defined(PADDLE_USE_VECLIB)
extern "C" {
#include <cblas.h>
#include <clapack.h>
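Widening the guard above lets the same extern "C" CBLAS declarations serve Apple's vecLib/Accelerate (used by the iOS build) as well as ATLAS. A minimal call that compiles against either backend (sketch):

#include <cblas.h>

// y = A * x for a 2x2 row-major matrix; identical for ATLAS and vecLib.
void matVec2x2(const float* A, const float* x, float* y) {
  cblas_sgemv(CblasRowMajor, CblasNoTrans, 2, 2, 1.0f, A, 2, x, 1, 0.0f, y, 1);
}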
diff --git a/paddle/math/Matrix.cpp b/paddle/math/Matrix.cpp
index 4a2132c8d1..0023b4d0f5 100644
--- a/paddle/math/Matrix.cpp
+++ b/paddle/math/Matrix.cpp
@@ -1033,17 +1033,15 @@ void GpuMatrix::maxPoolForward(Matrix& inputMat,
real* inputData = inputMat.getData();
size_t frameNum = inputMat.getHeight();
- size_t width = imgSizeW;
- size_t height = imgSizeH;
- CHECK(height * width * channels == inputMat.getWidth());
+ CHECK(imgSizeH * imgSizeW * channels == inputMat.getWidth());
CHECK(height_ == inputMat.getHeight());
CHECK(width_ == outputH * outputW * channels);
hl_maxpool_forward(frameNum,
inputData,
channels,
- height,
- width,
+ imgSizeH,
+ imgSizeW,
outputH,
outputW,
sizeX,
@@ -1080,11 +1078,8 @@ void GpuMatrix::maxPoolBackward(Matrix& inputMat,
real* outDiff = outGrad.getData();
size_t frameNum = inputMat.getHeight();
size_t channels = outV.getWidth() / outputH / outputW;
- size_t width = imgSizeW;
- size_t height = imgSizeH;
- CHECK(height * width * channels == inputMat.getWidth());
+ CHECK(imgSizeH * imgSizeW * channels == inputMat.getWidth());
CHECK(height_ == inputMat.getHeight());
- CHECK(width_ == width * height * channels);
CHECK(outGrad.getHeight() == outV.getHeight() &&
outGrad.getWidth() == outV.getWidth());
@@ -1093,8 +1088,8 @@ void GpuMatrix::maxPoolBackward(Matrix& inputMat,
outData,
outDiff,
channels,
- height,
- width,
+ imgSizeH,
+ imgSizeW,
outputH,
outputW,
sizeX,
@@ -1125,17 +1120,15 @@ void GpuMatrix::avgPoolForward(Matrix& inputMat,
real* inputData = inputMat.getData();
size_t frameNum = inputMat.getHeight();
- size_t height = imgSizeH;
- size_t width = imgSizeW;
- CHECK(height * width * channels == inputMat.getWidth());
+ CHECK(imgSizeH * imgSizeW * channels == inputMat.getWidth());
CHECK(height_ == inputMat.getHeight());
CHECK(width_ == outputH * outputW * channels);
hl_avgpool_forward(frameNum,
inputData,
channels,
- height,
- width,
+ imgSizeH,
+ imgSizeW,
outputH,
outputW,
sizeX,
@@ -1166,17 +1159,15 @@ void GpuMatrix::avgPoolBackward(Matrix& outGrad,
real* outDiff = outGrad.getData();
size_t frameNum = outGrad.getHeight();
size_t channels = outGrad.getWidth() / outputH / outputW;
- size_t height = imgSizeH;
- size_t width = imgSizeW;
- CHECK(height * width * channels == width_);
+ CHECK(imgSizeH * imgSizeW * channels == width_);
CHECK(height_ == outGrad.getHeight());
CHECK(outGrad.getWidth() == outputH * outputW * channels);
hl_avgpool_backward(frameNum,
outDiff,
channels,
- height,
- width,
+ imgSizeH,
+ imgSizeW,
outputH,
outputW,
sizeX,
@@ -1214,19 +1205,16 @@ void GpuMatrix::maxPool3DForward(Matrix& inputMat,
real* inputData = inputMat.getData();
real* maxPoolIdxData = maxPoolIdx.getData();
size_t num = inputMat.getHeight();
- size_t width = imgSizeW;
- size_t height = imgSizeH;
- size_t depth = imgSizeD;
- CHECK(depth * height * width * channels == inputMat.getWidth());
+ CHECK(imgSizeD * imgSizeH * imgSizeW * channels == inputMat.getWidth());
CHECK(height_ == inputMat.getHeight());
CHECK(width_ == outputD * outputH * outputW * channels);
hl_maxpool3D_forward(num,
inputData,
channels,
- depth,
- height,
- width,
+ imgSizeD,
+ imgSizeH,
+ imgSizeW,
outputD,
outputH,
outputW,
@@ -1269,20 +1257,16 @@ void GpuMatrix::maxPool3DBackward(Matrix& outGrad,
real* maxPoolIdxData = maxPoolIdx.getData();
size_t frameNum = getHeight();
size_t channels = outGrad.getWidth() / outputD / outputH / outputW;
- size_t width = imgSizeW;
- size_t height = imgSizeH;
- size_t depth = imgSizeD;
- CHECK(depth * height * width * channels == getWidth());
- CHECK(width_ == depth * width * height * channels);
+ CHECK(imgSizeD * imgSizeH * imgSizeW * channels == getWidth());
CHECK(outGrad.getHeight() == maxPoolIdx.getHeight() &&
outGrad.getWidth() == maxPoolIdx.getWidth());
hl_maxpool3D_backward(frameNum,
outDiff,
channels,
- depth,
- height,
- width,
+ imgSizeD,
+ imgSizeH,
+ imgSizeW,
outputD,
outputH,
outputW,
@@ -1323,19 +1307,16 @@ void GpuMatrix::avgPool3DForward(Matrix& inputMat,
real* inputData = inputMat.getData();
size_t frameNum = inputMat.getHeight();
- size_t height = imgSizeH;
- size_t width = imgSizeW;
- size_t depth = imgSizeD;
- CHECK(depth * height * width * channels == inputMat.getWidth());
+ CHECK(imgSizeD * imgSizeH * imgSizeW * channels == inputMat.getWidth());
CHECK(height_ == inputMat.getHeight());
CHECK(width_ == outputD * outputH * outputW * channels);
hl_avgpool3D_forward(frameNum,
inputData,
channels,
- depth,
- height,
- width,
+ imgSizeD,
+ imgSizeH,
+ imgSizeW,
outputD,
outputH,
outputW,
@@ -1375,19 +1356,16 @@ void GpuMatrix::avgPool3DBackward(Matrix& outGrad,
real* outDiff = outGrad.getData();
size_t frameNum = outGrad.getHeight();
size_t channels = outGrad.getWidth() / outputD / outputH / outputW;
- size_t height = imgSizeH;
- size_t width = imgSizeW;
- size_t depth = imgSizeD;
- CHECK(depth * height * width * channels == width_);
+ CHECK(imgSizeD * imgSizeH * imgSizeW * channels == width_);
CHECK(height_ == outGrad.getHeight());
CHECK(outGrad.getWidth() == outputD * outputH * outputW * channels);
hl_avgpool3D_backward(frameNum,
outDiff,
channels,
- depth,
- height,
- width,
+ imgSizeD,
+ imgSizeH,
+ imgSizeW,
outputD,
outputH,
outputW,
@@ -1999,11 +1977,11 @@ void CpuMatrix::maxPoolForward(Matrix& inputMat,
real* inputData = inputMat.getData();
real* outData = data_;
size_t num = inputMat.getHeight();
- size_t inWidth = imgSizeW;
- size_t inHeight = imgSizeH;
- CHECK(inHeight * inWidth == inputMat.getWidth() / channels);
+ size_t inLength = imgSizeH * imgSizeW;
+ size_t outLength = outputH * outputW;
+ CHECK(inLength == inputMat.getWidth() / channels);
CHECK_EQ(num, this->getHeight());
- CHECK_EQ(channels * outputH * outputW, this->getWidth());
+ CHECK_EQ(channels * outLength, this->getWidth());
size_t outStride = getStride();
/* initialize the data_ */
@@ -2020,24 +1998,24 @@ void CpuMatrix::maxPoolForward(Matrix& inputMat,
}
for (size_t c = 0; c < channels; ++c) { // channel by channel
for (size_t ph = 0; ph < outputH; ++ph) {
+ int hstart = ph * strideH - paddingH;
+ int hend = std::min(hstart + sizeY, imgSizeH);
+ hstart = std::max(hstart, 0);
for (size_t pw = 0; pw < outputW; ++pw) {
- int hstart = ph * strideH - paddingH;
int wstart = pw * strideW - paddingW;
- int hend = std::min(hstart + sizeY, inHeight);
- int wend = std::min(wstart + sizeX, inWidth);
- hstart = std::max(hstart, 0);
+ int wend = std::min(wstart + sizeX, imgSizeW);
wstart = std::max(wstart, 0);
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
- outData[ph * outputW + pw] = std::max(outData[ph * outputW + pw],
- inputData[h * inWidth + w]);
+ outData[ph * outputW + pw] = std::max(
+ outData[ph * outputW + pw], inputData[h * imgSizeW + w]);
}
}
}
}
// compute offset
- inputData += inHeight * inWidth;
- outData += outputH * outputW;
+ inputData += inLength;
+ outData += outLength;
}
}
}
@@ -2058,8 +2036,10 @@ void CpuMatrix::maxPoolBackward(Matrix& image,
size_t paddingH,
size_t paddingW) {
size_t num = image.getHeight();
- size_t channels = size_t(width_ / imgSizeH / imgSizeW);
- CHECK(image.getWidth() == imgSizeH * imgSizeW * channels);
+ size_t inLength = imgSizeH * imgSizeW;
+ size_t outLength = outputH * outputW;
+ size_t channels = size_t(width_ / inLength);
+ CHECK(image.getWidth() == inLength * channels);
CHECK(image.getHeight() == height_ && image.getWidth() == width_);
CHECK(outV.getHeight() == outGrad.getHeight() &&
outV.getWidth() == outGrad.getWidth());
@@ -2080,12 +2060,12 @@ void CpuMatrix::maxPoolBackward(Matrix& image,
}
for (size_t c = 0; c < channels; ++c) {
for (size_t ph = 0; ph < outputH; ++ph) {
+ int hstart = ph * strideH - paddingH;
+ int hend = std::min(hstart + sizeY, imgSizeH);
+ hstart = std::max(hstart, 0);
for (size_t pw = 0; pw < outputW; ++pw) {
- int hstart = ph * strideH - paddingH;
int wstart = pw * strideW - paddingW;
- int hend = std::min(hstart + sizeY, imgSizeH);
int wend = std::min(wstart + sizeX, imgSizeW);
- hstart = std::max(hstart, 0);
wstart = std::max(wstart, 0);
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
@@ -2098,10 +2078,10 @@ void CpuMatrix::maxPoolBackward(Matrix& image,
}
}
// offset
- inData += imgSizeH * imgSizeW;
- tgtGrad += imgSizeH * imgSizeW;
- otData += outputH * outputW;
- otGrad += outputH * outputW;
+ inData += inLength;
+ tgtGrad += inLength;
+ otData += outLength;
+ otGrad += outLength;
}
}
}
@@ -2120,10 +2100,10 @@ void CpuMatrix::avgPoolForward(Matrix& input,
size_t paddingW) {
// The main loop
size_t num = input.getHeight();
- size_t inHeight = imgSizeH;
- size_t inWidth = imgSizeW;
- CHECK(inHeight * inWidth * channels == input.getWidth());
- CHECK(outputH * outputW * channels * num == height_ * width_);
+ size_t inLength = imgSizeH * imgSizeW;
+ size_t outLength = outputH * outputW;
+ CHECK(inLength * channels == input.getWidth());
+ CHECK(outLength * channels * num == height_ * width_);
real* tgtData = data_;
real* inData = input.getData();
@@ -2133,30 +2113,27 @@ void CpuMatrix::avgPoolForward(Matrix& input,
}
for (size_t c = 0; c < channels; ++c) {
for (size_t ph = 0; ph < outputH; ++ph) {
+ int hstart = ph * strideH - paddingH;
+ int hend = std::min(hstart + sizeY, imgSizeH);
+ hstart = std::max(hstart, 0);
for (size_t pw = 0; pw < outputW; ++pw) {
- int hstart = ph * strideH - paddingH;
int wstart = pw * strideW - paddingW;
- int hend = std::min(hstart + sizeY, inHeight + paddingH);
- int wend = std::min(wstart + sizeX, inWidth + paddingW);
- int poolSize = (hend - hstart) * (wend - wstart);
- hstart = std::max(hstart, 0);
+ int wend = std::min(wstart + sizeX, imgSizeW);
wstart = std::max(wstart, 0);
- hend = std::min(hend, static_cast<int>(inHeight));
- wend = std::min(wend, static_cast<int>(inWidth));
-
- CHECK(poolSize);
tgtData[ph * outputW + pw] = 0; // clear
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
- tgtData[ph * outputW + pw] += inData[h * inWidth + w];
+ tgtData[ph * outputW + pw] += inData[h * imgSizeW + w];
}
}
+ int poolSize = (hend - hstart) * (wend - wstart);
+ CHECK(poolSize);
tgtData[ph * outputW + pw] /= poolSize;
}
}
// compute offset
- inData += inHeight * inWidth;
- tgtData += outputH * outputW;
+ inData += inLength;
+ tgtData += outLength;
}
}
}
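For average pooling the reordering above is a semantic change, not just cleanup: poolSize is now computed after the window is clamped to the image, so padded cells no longer inflate the divisor. A one-dimensional sketch of the two conventions:

#include <algorithm>

// Divisor for one averaged output cell. excludePad = true matches the new
// code (clamp to the image, then size the window); false matches the
// removed code, which let padding cells count toward the average.
int poolDivisor(int start, int size, int imgSize, int pad, bool excludePad) {
  int end = start + size;
  if (excludePad) {
    end = std::min(end, imgSize);
    start = std::max(start, 0);
  } else {
    end = std::min(end, imgSize + pad);
  }
  return end - start;
}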
@@ -2176,7 +2153,9 @@ void CpuMatrix::avgPoolBackward(Matrix& input,
size_t paddingW) {
size_t num = input.getHeight();
size_t channels = input.getWidth() / outputH / outputW;
- CHECK(imgSizeH * imgSizeW * channels == getWidth());
+ size_t inLength = imgSizeH * imgSizeW;
+ size_t outLength = outputH * outputW;
+ CHECK(inLength * channels == getWidth());
real* inData = input.getData();
real* outData = getData();
@@ -2186,16 +2165,14 @@ void CpuMatrix::avgPoolBackward(Matrix& input,
}
for (size_t c = 0; c < channels; ++c) {
for (size_t ph = 0; ph < outputH; ++ph) {
+ int hstart = ph * strideH - paddingH;
+ int hend = std::min(hstart + sizeY, imgSizeH);
+ hstart = std::max(hstart, 0);
for (size_t pw = 0; pw < outputW; ++pw) {
- int hstart = ph * strideH - paddingH;
int wstart = pw * strideW - paddingW;
- int hend = std::min(hstart + sizeY, imgSizeH + paddingH);
- int wend = std::min(wstart + sizeX, imgSizeW + paddingW);
- int poolSize = (hend - hstart) * (wend - wstart);
- hstart = std::max(hstart, 0);
+ int wend = std::min(wstart + sizeX, imgSizeW);
wstart = std::max(wstart, 0);
- hend = std::min(hend, static_cast<int>(imgSizeH));
- wend = std::min(wend, static_cast<int>(imgSizeW));
+ int poolSize = (hend - hstart) * (wend - wstart);
CHECK(poolSize);
for (int h = hstart; h < hend; ++h) {
@@ -2206,8 +2183,8 @@ void CpuMatrix::avgPoolBackward(Matrix& input,
}
}
// offset
- outData += imgSizeH * imgSizeW;
- inData += outputH * outputW;
+ outData += inLength;
+ inData += outLength;
}
}
}
@@ -2234,12 +2211,11 @@ void CpuMatrix::maxPool3DForward(Matrix& inputMat,
real* outData = getData();
real* maxPoolIdxData = maxPoolIdx.getData();
size_t num = inputMat.getHeight();
- size_t inWidth = imgSizeW;
- size_t inHeight = imgSizeH;
- size_t inDepth = imgSizeD;
- CHECK(inHeight * inWidth * inDepth == inputMat.getWidth() / channels);
+ size_t inLength = imgSizeH * imgSizeW * imgSizeD;
+ size_t outLength = outputH * outputW * outputD;
+ CHECK(inLength == inputMat.getWidth() / channels);
CHECK_EQ(num, this->getHeight());
- CHECK_EQ(channels * outputH * outputW * outputD, this->getWidth());
+ CHECK_EQ(channels * outLength, this->getWidth());
size_t outStride = getStride();
/* initialize the data_ */
@@ -2258,16 +2234,16 @@ void CpuMatrix::maxPool3DForward(Matrix& inputMat,
}
for (size_t c = 0; c < channels; ++c) { // channel by channel
for (size_t pd = 0; pd < outputD; ++pd) {
+ int dstart = pd * strideD - paddingD;
+ int dend = std::min(dstart + sizeZ, imgSizeD);
+ dstart = std::max(dstart, 0);
for (size_t ph = 0; ph < outputH; ++ph) {
+ int hstart = ph * strideH - paddingH;
+ int hend = std::min(hstart + sizeY, imgSizeH);
+ hstart = std::max(hstart, 0);
for (size_t pw = 0; pw < outputW; ++pw) {
- int dstart = pd * strideD - paddingD;
- int hstart = ph * strideH - paddingH;
int wstart = pw * strideW - paddingW;
- int dend = std::min(dstart + sizeZ, inDepth);
- int hend = std::min(hstart + sizeY, inHeight);
- int wend = std::min(wstart + sizeX, inWidth);
- dstart = std::max(dstart, 0);
- hstart = std::max(hstart, 0);
+ int wend = std::min(wstart + sizeX, imgSizeW);
wstart = std::max(wstart, 0);
int maxIdx = -1;
real maxOutData = outData[(pd * outputH + ph) * outputW + pw];
@@ -2275,9 +2251,9 @@ void CpuMatrix::maxPool3DForward(Matrix& inputMat,
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
if (maxOutData <
- inputData[(d * inHeight + h) * inWidth + w]) {
- maxOutData = inputData[(d * inHeight + h) * inWidth + w];
- maxIdx = (d * inHeight + h) * inWidth + w;
+ inputData[(d * imgSizeH + h) * imgSizeW + w]) {
+ maxOutData = inputData[(d * imgSizeH + h) * imgSizeW + w];
+ maxIdx = (d * imgSizeH + h) * imgSizeW + w;
}
}
}
@@ -2288,9 +2264,9 @@ void CpuMatrix::maxPool3DForward(Matrix& inputMat,
}
}
// compute offset
- inputData += inDepth * inHeight * inWidth;
- outData += outputD * outputH * outputW;
- maxPoolIdxData += outputD * outputH * outputW;
+ inputData += inLength;
+ outData += outLength;
+ maxPoolIdxData += outLength;
}
}
}
@@ -2315,7 +2291,9 @@ void CpuMatrix::maxPool3DBackward(Matrix& outGrad,
real scaleTargets,
real scaleOutput) {
size_t num = getHeight();
- size_t channels = size_t(width_ / imgSizeD / imgSizeH / imgSizeW);
+ size_t inLength = imgSizeH * imgSizeW * imgSizeD;
+ size_t outLength = outputH * outputW * outputD;
+ size_t channels = size_t(width_ / inLength);
CHECK(maxPoolIdx.getHeight() == outGrad.getHeight() &&
maxPoolIdx.getWidth() == outGrad.getWidth());
@@ -2341,9 +2319,9 @@ void CpuMatrix::maxPool3DBackward(Matrix& outGrad,
}
}
// offset
- tgtGrad += imgSizeD * imgSizeH * imgSizeW;
- otGrad += outputD * outputH * outputW;
- maxPoolIdxData += outputD * outputH * outputW;
+ tgtGrad += inLength;
+ otGrad += outLength;
+ maxPoolIdxData += outLength;
}
}
}
@@ -2367,11 +2345,10 @@ void CpuMatrix::avgPool3DForward(Matrix& input,
size_t paddingW) {
// The main loop
size_t num = input.getHeight();
- size_t inDepth = imgSizeD;
- size_t inHeight = imgSizeH;
- size_t inWidth = imgSizeW;
- CHECK(inDepth * inHeight * inWidth * channels == input.getWidth());
- CHECK(outputD * outputH * outputW * channels * num == height_ * width_);
+ size_t inLength = imgSizeH * imgSizeW * imgSizeD;
+ size_t outLength = outputH * outputW * outputD;
+ CHECK(inLength * channels == input.getWidth());
+ CHECK(outLength * channels * num == height_ * width_);
real* tgtData = getData();
real* inData = input.getData();
@@ -2381,39 +2358,36 @@ void CpuMatrix::avgPool3DForward(Matrix& input,
}
for (size_t c = 0; c < channels; ++c) {
for (size_t pd = 0; pd < outputD; ++pd) {
+ int dstart = pd * strideD - paddingD;
+ int dend = std::min(dstart + sizeZ, imgSizeD);
+ dstart = std::max(dstart, 0);
for (size_t ph = 0; ph < outputH; ++ph) {
+ int hstart = ph * strideH - paddingH;
+ int hend = std::min(hstart + sizeY, imgSizeH);
+ hstart = std::max(hstart, 0);
for (size_t pw = 0; pw < outputW; ++pw) {
- int dstart = pd * strideD - paddingD;
- int hstart = ph * strideH - paddingH;
int wstart = pw * strideW - paddingW;
- int dend = std::min(dstart + sizeZ, inDepth + paddingD);
- int hend = std::min(hstart + sizeY, inHeight + paddingH);
- int wend = std::min(wstart + sizeX, inWidth + paddingW);
- int poolSize = (dend - dstart) * (hend - hstart) * (wend - wstart);
- dstart = std::max(dstart, 0);
- hstart = std::max(hstart, 0);
+ int wend = std::min(wstart + sizeX, imgSizeW);
wstart = std::max(wstart, 0);
- dend = std::min(dend, static_cast<int>(inDepth));
- hend = std::min(hend, static_cast<int>(inHeight));
- wend = std::min(wend, static_cast<int>(inWidth));
- CHECK(poolSize);
tgtData[(pd * outputH + ph) * outputW + pw] = 0; // clear
for (int d = dstart; d < dend; ++d) {
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
tgtData[(pd * outputH + ph) * outputW + pw] +=
- inData[(d * inHeight + h) * inWidth + w];
+ inData[(d * imgSizeH + h) * imgSizeW + w];
}
}
}
+ int poolSize = (dend - dstart) * (hend - hstart) * (wend - wstart);
+ CHECK(poolSize);
tgtData[(pd * outputH + ph) * outputW + pw] /= poolSize;
}
}
}
// compute offset
- inData += inDepth * inHeight * inWidth;
- tgtData += outputD * outputH * outputW;
+ inData += inLength;
+ tgtData += outLength;
}
}
}
@@ -2437,8 +2411,10 @@ void CpuMatrix::avgPool3DBackward(Matrix& input,
real scaleTargets,
real scaleOutput) {
size_t num = input.getHeight();
- size_t channels = input.getWidth() / outputD / outputH / outputW;
- CHECK(imgSizeD * imgSizeH * imgSizeW * channels == getWidth());
+ size_t inLength = imgSizeH * imgSizeW * imgSizeD;
+ size_t outLength = outputH * outputW * outputD;
+ size_t channels = input.getWidth() / outLength;
+ CHECK(inLength * channels == getWidth());
real* inData = input.getData();
real* outData = getData();
@@ -2448,21 +2424,18 @@ void CpuMatrix::avgPool3DBackward(Matrix& input,
}
for (size_t c = 0; c < channels; ++c) {
for (size_t pd = 0; pd < outputD; ++pd) {
+ int dstart = pd * strideD - paddingD;
+ int dend = std::min(dstart + sizeZ, imgSizeD);
+ dstart = std::max(dstart, 0);
for (size_t ph = 0; ph < outputH; ++ph) {
+ int hstart = ph * strideH - paddingH;
+ int hend = std::min(hstart + sizeY, imgSizeH);
+ hstart = std::max(hstart, 0);
for (size_t pw = 0; pw < outputW; ++pw) {
- int dstart = pd * strideD - paddingD;
- int hstart = ph * strideH - paddingH;
int wstart = pw * strideW - paddingW;
- int dend = std::min(dstart + sizeZ, imgSizeD + paddingD);
- int hend = std::min(hstart + sizeY, imgSizeH + paddingH);
- int wend = std::min(wstart + sizeX, imgSizeW + paddingW);
- int poolSize = (dend - dstart) * (hend - hstart) * (wend - wstart);
- dstart = std::max(dstart, 0);
- hstart = std::max(hstart, 0);
+ int wend = std::min(wstart + sizeX, imgSizeW);
wstart = std::max(wstart, 0);
- dend = std::min(dend, static_cast