fix conflicts

avx_docs
wangyanfei01 8 years ago
commit 3ca5750ba8

.gitignore

@@ -9,3 +9,6 @@ build/
 .pydevproject
 Makefile
 .test_env/
+*~
+bazel-*

.gitmodules

@@ -0,0 +1,3 @@
+[submodule "warp-ctc"]
+	path = warp-ctc
+	url = https://github.com/baidu-research/warp-ctc.git

.pre-commit-config.yaml

@@ -2,10 +2,12 @@
     sha: c25201a00e6b0514370501050cf2a8538ac12270
     hooks:
     -   id: remove-crlf
+        files: (?!.*warp-ctc)^.*$
 -   repo: https://github.com/reyoung/mirrors-yapf.git
     sha: v0.13.2
     hooks:
     -   id: yapf
+        files: (.*\.(py|bzl)|BUILD|.*\.BUILD|WORKSPACE)$  # Bazel BUILD files follow Python syntax.
 -   repo: https://github.com/pre-commit/pre-commit-hooks
     sha: 7539d8bd1a00a3c1bfd34cdb606d3a6372e83469
     hooks:
@@ -13,6 +15,7 @@
     -   id: check-merge-conflict
     -   id: check-symlinks
     -   id: detect-private-key
+        files: (?!.*warp-ctc)^.*$
     -   id: end-of-file-fixer
 -   repo: https://github.com/PaddlePaddle/clang-format-pre-commit-hook.git
     sha: 28c0ea8a67a3e2dbbf4822ef44e85b63a0080a29
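
The files: pattern added to remove-crlf and detect-private-key is a negative lookahead, so these hooks skip everything under the vendored warp-ctc submodule. A minimal sketch of how the pattern behaves in plain Python re, with made-up paths (not part of this commit):

    import re

    # Match any path that does NOT contain "warp-ctc" anywhere.
    pattern = re.compile(r'(?!.*warp-ctc)^.*$')

    print(bool(pattern.search('paddle/math/Matrix.cpp')))  # True  -> hook runs
    print(bool(pattern.search('warp-ctc/src/ctc.h')))      # False -> hook skips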

.travis.yml

@@ -8,10 +8,13 @@ os:
 env:
   - JOB=DOCS
   - JOB=BUILD_AND_TEST
+  - JOB=PRE_COMMIT
 matrix:
   exclude:
     - os: osx
-      env: JOB=DOCS # Only generate documentation in linux
+      env: JOB=DOCS # Only generate documentation in linux.
+    - os: osx
+      env: JOB=PRE_COMMIT # Only check pre-commit hook in linux
 
 addons:
   apt:
@@ -26,10 +29,6 @@ addons:
       - python-pip
       - python2.7-dev
       - m4
-      - libprotobuf-dev
-      - doxygen
-      - protobuf-compiler
-      - python-protobuf
      - python-numpy
       - python-wheel
       - libgoogle-glog-dev
@@ -39,18 +38,25 @@ addons:
       - lcov
       - graphviz
       - swig
+      - clang-format-3.8
+      - automake
+      - libtool
 before_install:
   - |
     if [ ${JOB} == "BUILD_AND_TEST" ]; then
-      if ! git diff --name-only $TRAVIS_COMMIT_RANGE | grep -qvE '(\.md$)|(\.rst$)|(\.jpg$)|(\.png$)'
-      then
-        echo "Only markdown docs were updated, stopping build process."
-        exit
+      local change_list=`git diff --name-only $TRAVIS_COMMIT_RANGE`
+      if [ $? -eq 0 ]; then  # if git diff returns non-zero, just rerun the unit tests
+        if ! echo ${change_list} | grep -qvE '(\.md$)|(\.rst$)|(\.jpg$)|(\.png$)'
+        then
+          echo "Only markdown docs were updated, stopping build process."
+          exit
+        fi
       fi
     fi
   - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then sudo paddle/scripts/travis/before_install.linux.sh; fi
   - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then paddle/scripts/travis/before_install.osx.sh; fi
-  - pip install wheel protobuf sphinx breathe recommonmark virtualenv numpy sphinx_rtd_theme
+  - if [[ "$JOB" == "PRE_COMMIT" ]]; then sudo ln -s /usr/bin/clang-format-3.8 /usr/bin/clang-format; fi
+  - pip install wheel protobuf sphinx recommonmark virtualenv numpy sphinx_rtd_theme pre-commit requests==2.9.2 LinkChecker
 script:
   - paddle/scripts/travis/main.sh
 notifications:
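
The rewritten before_install block still skips the build when every changed file is a doc or image; the new $? check simply falls through to a normal build when git diff itself fails. A rough Python equivalent of the grep -qvE filter, as a sketch with a hypothetical commit range (not code from the repo):

    import re
    import subprocess

    # Doc-only check: stop early when no changed file falls outside
    # the markdown/rst/image extensions.
    changed = subprocess.check_output(
        ['git', 'diff', '--name-only', 'HEAD~1..HEAD']).decode().split()
    doc_pattern = re.compile(r'\.(md|rst|jpg|png)$')
    if changed and all(doc_pattern.search(f) for f in changed):
        print('Only markdown docs were updated, stopping build process.')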

CMakeLists.txt

@@ -11,7 +11,7 @@ find_package(Protobuf REQUIRED)
 # Check protobuf library version.
 execute_process(COMMAND ${PROTOBUF_PROTOC_EXECUTABLE} --version
                 OUTPUT_VARIABLE PROTOBUF_VERSION)
 string(REPLACE "libprotoc " "" PROTOBUF_VERSION ${PROTOBUF_VERSION})
 set(PROTOBUF_3 OFF)
@@ -25,8 +25,8 @@ find_package(ZLIB REQUIRED)
 find_package(NumPy REQUIRED)
 find_package(Threads REQUIRED)
 find_package(AVX QUIET)
-find_package(Glog)
-find_package(Gflags QUIET)
+find_package(Glog REQUIRED)
+find_package(Gflags REQUIRED)
 find_package(GTest)
 find_package(Sphinx)
 find_package(Doxygen)
@@ -40,8 +40,6 @@ option(WITH_AVX "Compile PaddlePaddle with avx intrinsics" ${AVX_FOUND})
 option(WITH_PYTHON "Compile PaddlePaddle with python interpreter" ON)
 option(WITH_STYLE_CHECK "Style Check for PaddlePaddle" ${PYTHONINTERP_FOUND})
 option(WITH_RDMA "Compile PaddlePaddle with rdma support" OFF)
-option(WITH_GLOG "Compile PaddlePaddle use glog, otherwise use a log implement internally" ${LIBGLOG_FOUND})
-option(WITH_GFLAGS "Compile PaddlePaddle use gflags, otherwise use a flag implement internally" ${GFLAGS_FOUND})
 option(WITH_TIMER "Compile PaddlePaddle use timer" OFF)
 option(WITH_PROFILER "Compile PaddlePaddle use gpu profiler" OFF)
 option(WITH_TESTING "Compile and run unittest for PaddlePaddle" ${GTEST_FOUND})
@@ -51,13 +49,7 @@ option(ON_TRAVIS "Running test on travis-ci or not." OFF)
 option(ON_COVERALLS "Generating code coverage data on coveralls or not." OFF)
 option(COVERALLS_UPLOAD "Uploading the generated coveralls json." ON)
 
-if(NOT CMAKE_BUILD_TYPE)
-    set(CMAKE_BUILD_TYPE "RelWithDebInfo" CACHE STRING
-        "Choose the type of build, options are: Debug Release RelWithDebInfo MinSizeRel"
-        FORCE)
-endif()
-
-include(enableCXX11)
 include(cpplint)
 include(ccache)
 if(WITH_RDMA)
@@ -75,26 +67,21 @@ include(coveralls)
 find_package(Git REQUIRED)
 # version.cmake will get the current PADDLE_VERSION
 include(version)
-add_definitions(-DPADDLE_VERSION=\"${PADDLE_VERSION}\")
+add_definitions(-DPADDLE_VERSION=${PADDLE_VERSION})
 
 if(NOT WITH_GPU)
     add_definitions(-DPADDLE_ONLY_CPU)
     add_definitions(-DHPPL_STUB_FUNC)
     list(APPEND CMAKE_CXX_SOURCE_FILE_EXTENSIONS cu)
 else()
-    if(${CUDA_VERSION_MAJOR} GREATER 6)
-        if(COMPILER_SUPPORT_CXX11)
-            LIST(APPEND CUDA_NVCC_FLAGS -std=c++11)
-        endif()
-    endif()
-
-    # TODO(yuyang18): Change it to remove std=c++11 in cuda compile.
-    set(CUDA_PROPAGATE_HOST_FLAGS OFF)
+    if(${CUDA_VERSION_MAJOR} VERSION_LESS 7)
+        message(FATAL_ERROR "Paddle needs CUDA >= 7.0 to compile")
+    endif()
 
     if(NOT CUDNN_FOUND)
         message(FATAL_ERROR "Paddle needs cudnn to compile")
     endif()
-    set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS} "-g -O3 --use_fast_math")
 
     if(WITH_AVX)
         set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS} "-Xcompiler ${AVX_FLAG}")
@@ -102,15 +89,15 @@ else()
         set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS} "-Xcompiler ${SSE3_FLAG}")
     endif(WITH_AVX)
 
-    if(WITH_DSO)
-        add_definitions(-DPADDLE_USE_DSO)
-    endif(WITH_DSO)
-
     # Include cuda and cudnn
     include_directories(${CUDNN_INCLUDE_DIR})
     include_directories(${CUDA_TOOLKIT_INCLUDE})
 endif(NOT WITH_GPU)
 
+if(WITH_DSO)
+    add_definitions(-DPADDLE_USE_DSO)
+endif(WITH_DSO)
+
 if(WITH_DOUBLE)
     add_definitions(-DPADDLE_TYPE_DOUBLE)
     set(ACCURACY double)
@@ -147,16 +134,12 @@ else(WITH_RDMA)
     add_definitions(-DPADDLE_DISABLE_RDMA)
 endif(WITH_RDMA)
 
-if(WITH_GLOG)
-    add_definitions(-DPADDLE_USE_GLOG)
-    include_directories(${LIBGLOG_INCLUDE_DIR})
-endif()
-
-if(WITH_GFLAGS)
-    add_definitions(-DPADDLE_USE_GFLAGS)
-    add_definitions(-DGFLAGS_NS=${GFLAGS_NAMESPACE})
-    include_directories(${GFLAGS_INCLUDE_DIRS})
-endif()
+# glog
+include_directories(${LIBGLOG_INCLUDE_DIR})
+
+# gflags
+add_definitions(-DGFLAGS_NS=${GFLAGS_NAMESPACE})
+include_directories(${GFLAGS_INCLUDE_DIRS})
 
 if(WITH_TESTING)
     enable_testing()
@@ -180,5 +163,4 @@ add_subdirectory(paddle)
 add_subdirectory(python)
 if(WITH_DOC)
     add_subdirectory(doc)
-    add_subdirectory(doc_cn)
 endif()

@@ -0,0 +1 @@
+./doc/howto/dev/contribute_to_paddle_en.md

LICENSE

@@ -1,4 +1,4 @@
-Copyright (c) 2016 Baidu, Inc. All Rights Reserved
+Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
                                  Apache License
                            Version 2.0, January 2004
@@ -188,7 +188,7 @@ Copyright (c) 2016 Baidu, Inc. All Rights Reserved
       same "printed page" as the copyright notice for easier
       identification within third-party archives.
 
-   Copyright (c) 2016 Baidu, Inc. All Rights Reserve.
+   Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
 
    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.

WORKSPACE

@@ -0,0 +1,31 @@
+# External dependency to Google protobuf.
+http_archive(
+    name="protobuf",
+    url="http://github.com/google/protobuf/archive/v3.1.0.tar.gz",
+    sha256="0a0ae63cbffc274efb573bdde9a253e3f32e458c41261df51c5dbc5ad541e8f7",
+    strip_prefix="protobuf-3.1.0")
+
+# External dependency to gtest 1.7.0. This method comes from
+# https://www.bazel.io/versions/master/docs/tutorial/cpp.html.
+new_http_archive(
+    name="gtest",
+    url="https://github.com/google/googletest/archive/release-1.7.0.zip",
+    sha256="b58cb7547a28b2c718d1e38aee18a3659c9e3ff52440297e965f5edffe34b6d0",
+    build_file="third_party/gtest.BUILD",
+    strip_prefix="googletest-release-1.7.0")
+
+# External dependency to gflags. This method comes from
+# https://github.com/gflags/example/blob/master/WORKSPACE.
+new_git_repository(
+    name="gflags",
+    tag="v2.2.0",
+    remote="https://github.com/gflags/gflags.git",
+    build_file="third_party/gflags.BUILD")
+
+# External dependency to glog. This method comes from
+# https://github.com/reyoung/bazel_playground/blob/master/WORKSPACE
+new_git_repository(
+    name="glog",
+    remote="https://github.com/google/glog.git",
+    commit="b6a5e0524c28178985f0d228e9eaa43808dbec3c",
+    build_file="third_party/glog.BUILD")

@@ -25,4 +25,3 @@ test 4 2 256 512
 test 4 2 512 128
 test 4 2 512 256
 test 4 2 512 512
-

cmake/FindSphinx.cmake

@@ -72,6 +72,7 @@ function( Sphinx_add_target target_name builder conf cache source destination )
     ${source}
     ${destination}
     COMMENT "Generating sphinx documentation: ${builder}"
+    COMMAND ln -sf ${destination}/index_*.html ${destination}/index.html
   )
 
 set_property(
@@ -143,4 +144,4 @@ function( Sphinx_add_targets target_base_name conf source base_destination )
     add_dependencies( ${target_base_name}_linkcheck ${_dependencies} )
   endif()
 endfunction()

cmake/check_packages.cmake

@@ -14,13 +14,9 @@
 if(WITH_STYLE_CHECK)
     find_package(PythonInterp REQUIRED)
 endif()
 
-if(WITH_GLOG)
-    find_package(Glog REQUIRED)
-endif()
+find_package(Glog REQUIRED)
 
-if(WITH_GFLAGS)
-    find_package(Gflags REQUIRED)
-endif()
+find_package(Gflags REQUIRED)
 
 if(WITH_TESTING)
     find_package(GTest REQUIRED)
@@ -28,9 +24,7 @@ endif()
 
 if(WITH_DOC)
     find_package(Sphinx REQUIRED)
-    find_package(Doxygen REQUIRED)
     find_python_module(recommonmark REQUIRED)
-    find_python_module(breathe REQUIRED)
 endif()
 
 if(WITH_SWIG_PY)

cmake/enableCXX11.cmake (deleted)

@@ -1,13 +0,0 @@
-# Enable C++ 11 for GCC.
-# NOTE: It's only tested for gcc.
-include(CheckCXXCompilerFlag)
-CHECK_CXX_COMPILER_FLAG("-std=c++11" COMPILER_SUPPORT_CXX11)
-CHECK_CXX_COMPILER_FLAG("-std=c++0x" COMPILER_SUPPORT_CXX0X)
-
-if(COMPILER_SUPPORT_CXX11)
-    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
-elseif(COMPILER_SUPPORT_CXX0X)
-    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++0x")
-else()
-    message(FATAL_ERROR "Your compiler must support c++11")
-endif()

cmake/flags.cmake

@@ -2,6 +2,37 @@
 include(CheckCXXCompilerFlag)
 include(CheckCCompilerFlag)
 include(CheckCXXSymbolExists)
+
+if(NOT CMAKE_BUILD_TYPE)
+    set(CMAKE_BUILD_TYPE "RelWithDebInfo" CACHE STRING
+        "Choose the type of build, options are: Debug Release RelWithDebInfo MinSizeRel"
+        FORCE)
+endif()
+
+function(CheckCompilerCXX11Flag)
+    if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
+        if(${CMAKE_CXX_COMPILER_VERSION} VERSION_LESS 4.8)
+            message(FATAL_ERROR "Unsupported GCC version. GCC >= 4.8 required.")
+        endif()
+    elseif(CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang" OR CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
+        # cmake >= 3.0 reports the compiler id "AppleClang" on Mac OS X, otherwise "Clang".
+        # Apple Clang is a different compiler from upstream Clang and has different version numbers.
+        # https://gist.github.com/yamaya/2924292
+        if(APPLE)  # cmake < 3.0 reports the compiler id "Clang" on Mac OS X
+            if(${CMAKE_CXX_COMPILER_VERSION} VERSION_LESS 5.1)
+                message(FATAL_ERROR "Unsupported AppleClang version. AppleClang >= 5.1 required.")
+            endif()
+        else()
+            if(${CMAKE_CXX_COMPILER_VERSION} VERSION_LESS 3.3)
+                message(FATAL_ERROR "Unsupported Clang version. Clang >= 3.3 required.")
+            endif()
+        endif()
+    endif()
+endfunction()
+
+CheckCompilerCXX11Flag()
+LIST(APPEND CMAKE_CXX_FLAGS -std=c++11)
 
 # safe_set_flag
 #
 # Set a compile flag only if the compiler supports it
@@ -41,9 +72,7 @@ macro(safe_set_nvflag flag_name)
     CHECK_C_COMPILER_FLAG(${flag_name} C_COMPILER_SUPPORT_FLAG_${safe_name})
     set(safe_name C_COMPILER_SUPPORT_FLAG_${safe_name})
     if(${safe_name})
-        set(CUDA_NVCC_FLAGS
-            --compiler-options;${flag_name}
-            ${CUDA_NVCC_FLAGS})
+        LIST(APPEND CUDA_NVCC_FLAGS -Xcompiler ${flag_name})
     endif()
 endmacro()
@@ -109,8 +138,22 @@ foreach(flag ${GPU_COMMON_FLAGS})
 endforeach()
 
+set(CUDA_PROPAGATE_HOST_FLAGS OFF)
+
 # Release/Debug flags set by cmake. Such as -O3 -g -DNDEBUG etc.
 # So, don't set these flags here.
+LIST(APPEND CUDA_NVCC_FLAGS -std=c++11)
+LIST(APPEND CUDA_NVCC_FLAGS --use_fast_math)
+
+if(CMAKE_BUILD_TYPE STREQUAL "Debug")
+    LIST(APPEND CUDA_NVCC_FLAGS ${CMAKE_CXX_FLAGS_DEBUG})
+elseif(CMAKE_BUILD_TYPE STREQUAL "Release")
+    LIST(APPEND CUDA_NVCC_FLAGS ${CMAKE_CXX_FLAGS_RELEASE})
+elseif(CMAKE_BUILD_TYPE STREQUAL "RelWithDebInfo")
+    LIST(APPEND CUDA_NVCC_FLAGS ${CMAKE_CXX_FLAGS_RELWITHDEBINFO})
+elseif(CMAKE_BUILD_TYPE STREQUAL "MinSizeRel")
+    LIST(APPEND CUDA_NVCC_FLAGS ${CMAKE_CXX_FLAGS_MINSIZEREL})
+endif()
+
 function(specify_cuda_arch cuda_version cuda_arch)
     if(${cuda_version} VERSION_GREATER "8.0")

cmake/util.cmake

@@ -65,7 +65,7 @@ endmacro()
 # link_paddle_exe
 # add paddle library for a paddle executable, such as trainer, pserver.
 #
-# It will handle WITH_PYTHON/WITH_GLOG etc.
+# It will handle WITH_PYTHON etc.
 function(link_paddle_exe TARGET_NAME)
     if(WITH_RDMA)
         generate_rdma_links()
@@ -96,6 +96,7 @@ function(link_paddle_exe TARGET_NAME)
     target_circle_link_libraries(${TARGET_NAME}
         ARCHIVE_START
         paddle_gserver
+        paddle_function
         ${METRIC_LIBS}
         ARCHIVE_END
         paddle_pserver
@@ -106,8 +107,11 @@ function(link_paddle_exe TARGET_NAME)
         paddle_parameter
         paddle_proto
         paddle_cuda
+        paddle_test_main
         ${METRIC_LIBS}
         ${PROTOBUF_LIBRARY}
+        ${LIBGLOG_LIBRARY}
+        ${GFLAGS_LIBRARIES}
         ${CMAKE_THREAD_LIBS_INIT}
         ${CBLAS_LIBS}
         ${ZLIB_LIBRARIES}
@@ -119,27 +123,17 @@ function(link_paddle_exe TARGET_NAME)
             ${RDMA_LD_FLAGS}
             ${RDMA_LIBS})
     endif()
 
     if(WITH_PYTHON)
         target_link_libraries(${TARGET_NAME}
             ${PYTHON_LIBRARIES})
     endif()
 
-    if(WITH_GLOG)
-        target_link_libraries(${TARGET_NAME}
-            ${LIBGLOG_LIBRARY})
-    endif()
-
-    if(WITH_GFLAGS)
-        target_link_libraries(${TARGET_NAME}
-            ${GFLAGS_LIBRARIES})
-    endif()
-
     if(WITH_GPU)
         if(NOT WITH_DSO OR WITH_METRIC)
             target_link_libraries(${TARGET_NAME}
                 ${CUDNN_LIBRARY}
                 ${CUDA_curand_LIBRARY})
             CUDA_ADD_CUBLAS_TO_TARGET(${TARGET_NAME})
         endif()
@@ -148,6 +142,11 @@ function(link_paddle_exe TARGET_NAME)
             target_link_libraries(${TARGET_NAME} rt)
         endif()
     endif()
+
+    if(NOT WITH_DSO)
+        target_link_libraries(${TARGET_NAME}
+            ${WARPCTC_LIBRARY})
+    endif()
 endfunction()
 
 # link_paddle_test
@@ -201,5 +200,5 @@ function(create_resources res_file output)
     # Convert hex data for C compatibility
     string(REGEX REPLACE "([0-9a-f][0-9a-f])" "0x\\1," filedata ${filedata})
     # Append data to output file
-    file(APPEND ${output} "const unsigned char ${filename}[] = {${filedata}};\nconst unsigned ${filename}_size = sizeof(${filename});\n")
+    file(APPEND ${output} "const unsigned char ${filename}[] = {${filedata}0};\nconst unsigned ${filename}_size = sizeof(${filename});\n")
 endfunction()
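
The one-character fix in create_resources appends a literal 0 after the generated hex bytes, so the embedded blob ends in a NUL byte and can also be consumed as a C string. A small Python sketch of the transformation the function performs (hypothetical input file name):

    # Read a file and emit it as a C byte array, NUL-terminated like the
    # fixed create_resources output.
    data = open('message.txt', 'rb').read()
    hexdata = ''.join('0x%02x,' % byte for byte in data)
    print('const unsigned char message_txt[] = {%s0};' % hexdata)
    print('const unsigned message_txt_size = sizeof(message_txt);')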

@@ -10,4 +10,4 @@ Then you can run the command below. The flag -d specifies the training data (cif
 $python gan_trainer.py -d cifar --use_gpu 1
 
 The generated images will be stored in ./cifar_samples/
 The corresponding models will be stored in ./cifar_params/

@@ -1,4 +1,5 @@
-# Copyright (c) 2016 Baidu, Inc. All Rights Reserved
+#!/bin/bash
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -15,4 +16,3 @@ set -e
 wget https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz
 tar zxf cifar-10-python.tar.gz
 rm cifar-10-python.tar.gz
-

@@ -15,5 +15,3 @@ do
         gunzip ${fname}.gz
     fi
 done
-
-

@@ -1,4 +1,4 @@
-# Copyright (c) 2016 Baidu, Inc. All Rights Reserved
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -14,10 +14,9 @@
 from paddle.trainer_config_helpers import *
 
 mode = get_config_arg("mode", str, "generator")
-assert mode in set(["generator",
-                    "discriminator",
-                    "generator_training",
-                    "discriminator_training"])
+assert mode in set([
+    "generator", "discriminator", "generator_training", "discriminator_training"
+])
 
 is_generator_training = mode == "generator_training"
 is_discriminator_training = mode == "discriminator_training"
@@ -38,8 +37,8 @@ sample_dim = 2
 settings(
     batch_size=128,
     learning_rate=1e-4,
-    learning_method=AdamOptimizer(beta1=0.5)
-)
+    learning_method=AdamOptimizer(beta1=0.5))
 
+
 def discriminator(sample):
     """
@@ -50,70 +49,87 @@ def discriminator(sample):
     of the sample is from real data.
     """
     param_attr = ParamAttr(is_static=is_generator_training)
-    bias_attr = ParamAttr(is_static=is_generator_training,
-                          initial_mean=1.0,
-                          initial_std=0)
-
-    hidden = fc_layer(input=sample, name="dis_hidden", size=hidden_dim,
-                      bias_attr=bias_attr,
-                      param_attr=param_attr,
-                      act=ReluActivation())
-
-    hidden2 = fc_layer(input=hidden, name="dis_hidden2", size=hidden_dim,
-                       bias_attr=bias_attr,
-                       param_attr=param_attr,
-                       act=LinearActivation())
-
-    hidden_bn = batch_norm_layer(hidden2,
-                     act=ReluActivation(),
-                     name="dis_hidden_bn",
-                     bias_attr=bias_attr,
-                     param_attr=ParamAttr(is_static=is_generator_training,
-                                          initial_mean=1.0,
-                                          initial_std=0.02),
-                     use_global_stats=False)
-
-    return fc_layer(input=hidden_bn, name="dis_prob", size=2,
-                    bias_attr=bias_attr,
-                    param_attr=param_attr,
-                    act=SoftmaxActivation())
+    bias_attr = ParamAttr(
+        is_static=is_generator_training, initial_mean=1.0, initial_std=0)
+
+    hidden = fc_layer(
+        input=sample,
+        name="dis_hidden",
+        size=hidden_dim,
+        bias_attr=bias_attr,
+        param_attr=param_attr,
+        act=ReluActivation())
+
+    hidden2 = fc_layer(
+        input=hidden,
+        name="dis_hidden2",
+        size=hidden_dim,
+        bias_attr=bias_attr,
+        param_attr=param_attr,
+        act=LinearActivation())
+
+    hidden_bn = batch_norm_layer(
+        hidden2,
+        act=ReluActivation(),
+        name="dis_hidden_bn",
+        bias_attr=bias_attr,
+        param_attr=ParamAttr(
+            is_static=is_generator_training, initial_mean=1.0,
+            initial_std=0.02),
+        use_global_stats=False)
+
+    return fc_layer(
+        input=hidden_bn,
+        name="dis_prob",
+        size=2,
+        bias_attr=bias_attr,
+        param_attr=param_attr,
+        act=SoftmaxActivation())
 
+
 def generator(noise):
     """
     generator generates a sample given noise
     """
     param_attr = ParamAttr(is_static=is_discriminator_training)
-    bias_attr = ParamAttr(is_static=is_discriminator_training,
-                          initial_mean=1.0,
-                          initial_std=0)
-
-    hidden = fc_layer(input=noise,
-                      name="gen_layer_hidden",
-                      size=hidden_dim,
-                      bias_attr=bias_attr,
-                      param_attr=param_attr,
-                      act=ReluActivation())
-
-    hidden2 = fc_layer(input=hidden, name="gen_hidden2", size=hidden_dim,
-                       bias_attr=bias_attr,
-                       param_attr=param_attr,
-                       act=LinearActivation())
-
-    hidden_bn = batch_norm_layer(hidden2,
-                     act=ReluActivation(),
-                     name="gen_layer_hidden_bn",
-                     bias_attr=bias_attr,
-                     param_attr=ParamAttr(is_static=is_discriminator_training,
-                                          initial_mean=1.0,
-                                          initial_std=0.02),
-                     use_global_stats=False)
-
-    return fc_layer(input=hidden_bn,
-                    name="gen_layer1",
-                    size=sample_dim,
-                    bias_attr=bias_attr,
-                    param_attr=param_attr,
-                    act=LinearActivation())
+    bias_attr = ParamAttr(
+        is_static=is_discriminator_training, initial_mean=1.0, initial_std=0)
+
+    hidden = fc_layer(
+        input=noise,
+        name="gen_layer_hidden",
+        size=hidden_dim,
+        bias_attr=bias_attr,
+        param_attr=param_attr,
+        act=ReluActivation())
+
+    hidden2 = fc_layer(
+        input=hidden,
+        name="gen_hidden2",
+        size=hidden_dim,
+        bias_attr=bias_attr,
+        param_attr=param_attr,
+        act=LinearActivation())
+
+    hidden_bn = batch_norm_layer(
+        hidden2,
+        act=ReluActivation(),
+        name="gen_layer_hidden_bn",
+        bias_attr=bias_attr,
+        param_attr=ParamAttr(
+            is_static=is_discriminator_training,
+            initial_mean=1.0,
+            initial_std=0.02),
+        use_global_stats=False)
+
+    return fc_layer(
+        input=hidden_bn,
+        name="gen_layer1",
+        size=sample_dim,
+        bias_attr=bias_attr,
+        param_attr=param_attr,
+        act=LinearActivation())
 
+
 if is_generator_training:
     noise = data_layer(name="noise", size=noise_dim)
@@ -126,7 +142,8 @@ if is_generator_training or is_discriminator_training:
     label = data_layer(name="label", size=1)
     prob = discriminator(sample)
     cost = cross_entropy(input=prob, label=label)
-    classification_error_evaluator(input=prob, label=label, name=mode+'_error')
+    classification_error_evaluator(
+        input=prob, label=label, name=mode + '_error')
     outputs(cost)
 
 if is_generator:

File diff suppressed because it is too large.

File diff suppressed because it is too large.

@@ -1,4 +1,5 @@
-# Copyright (c) 2016 Baidu, Inc. All Rights Reserved
+#!/bin/bash
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

@@ -1,4 +1,4 @@
-# Copyright (c) 2016 Baidu, Inc. All Rights Reserved
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

@@ -1,4 +1,4 @@
-# Copyright (c) 2016 Baidu, Inc. All Rights Reserved
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -21,7 +21,7 @@ from paddle.trainer.PyDataProvider2 import *
 #
 #  {'img_size': 32,
-#   'settings': <paddle.trainer.PyDataProviderWrapper.Cls instance at 0x7fea27cb6050>,
+#   'settings': a global object,
 #   'color': True,
 #   'mean_img_size': 32,
 #   'meta': './data/cifar-out/batches/batches.meta',
@@ -50,10 +50,10 @@ def hook(settings, img_size, mean_img_size, num_classes, color, meta, use_jpeg,
     settings.logger.info('Image size: %s', settings.img_size)
     settings.logger.info('Meta path: %s', settings.meta_path)
 
-    settings.input_types = [
-        dense_vector(settings.img_raw_size),  # image feature
-        integer_value(settings.num_classes)
-    ]  # labels
+    settings.input_types = {
+        'image': dense_vector(settings.img_raw_size),
+        'label': integer_value(settings.num_classes)
+    }
 
     settings.logger.info('DataProvider Initialization finished')
@@ -83,4 +83,7 @@ def processData(settings, file_list):
                     img, settings.img_mean, settings.img_size,
                     settings.is_train, settings.color)
                 label = data['labels'][i]
-                yield img_feat.astype('float32'), int(label)
+                yield {
+                    'image': img_feat.astype('float32'),
+                    'label': int(label)
+                }
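
This provider now declares named inputs and yields dicts, so each sample is matched to its data layer by key rather than by position. A minimal sketch of the same pattern with the PyDataProvider2 decorator (read_samples is a hypothetical helper, and the sizes are illustrative):

    from paddle.trainer.PyDataProvider2 import provider, dense_vector, integer_value

    @provider(input_types={'image': dense_vector(32 * 32 * 3),
                           'label': integer_value(10)})
    def process(settings, file_name):
        # Each yielded dict is keyed by the matching data_layer name.
        for img_feat, label in read_samples(file_name):  # hypothetical helper
            yield {'image': img_feat, 'label': int(label)}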

@@ -1,4 +1,4 @@
-# Copyright (c) 2016 Baidu, Inc. All Rights Reserved
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright (c) 2016 Baidu, Inc. All Rights Reserved
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

Some files were not shown because too many files have changed in this diff.
