Merge remote-tracking branch 'origin/develop' into feature/recordio

shanyi15-patch-2
dongzhihong 7 years ago
commit 6540cda109

@@ -4,13 +4,17 @@ cache:
- $HOME/.ccache
- $HOME/.cache/pip
- $TRAVIS_BUILD_DIR/build/third_party
- $TRAVIS_BUILD_DIR/build_android/third_party
sudo: required
dist: trusty
services:
- docker
os:
- linux
env:
- JOB=build_doc
- JOB=check_style
- JOB=build_android
addons:
apt:
packages:
@@ -41,8 +45,10 @@ before_install:
function timeout() { perl -e 'alarm shift; exec @ARGV' "$@"; }
script:
- |
timeout 2580 paddle/scripts/travis/${JOB}.sh # 43min timeout
RESULT=$?; if [ $RESULT -eq 0 ] || [ $RESULT -eq 142 ]; then true ;else exit 1; fi;
# 43min timeout
if [[ "$JOB" == "build_android" ]]; then timeout 2580 docker run -it --rm -v "$TRAVIS_BUILD_DIR:/paddle" paddlepaddle/paddle:latest-dev-android;
else timeout 2580 paddle/scripts/travis/${JOB}.sh; fi;
RESULT=$?; if [ $RESULT -eq 0 ] || [ $RESULT -eq 142 ]; then true; else exit 1; fi;
- |
if [[ "$JOB" != "build_doc" ]]; then exit 0; fi;
if [[ "$TRAVIS_PULL_REQUEST" != "false" ]]; then exit 0; fi;

@@ -1,5 +1,6 @@
| Github account | name |
|---|---|
| abhinavarora | Abhinav Arora |
| backyes | Yan-Fei Wang |
| beckett1124 | Bin Qi |
| JiayiFeng | Jia-Yi Feng |

@@ -19,7 +19,7 @@ set(PADDLE_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR})
include(system)
project(paddle CXX C Go)
project(paddle CXX C)
message(STATUS "CXX compiler: ${CMAKE_CXX_COMPILER}, version: "
"${CMAKE_CXX_COMPILER_ID} ${CMAKE_CXX_COMPILER_VERSION}")
message(STATUS "C compiler: ${CMAKE_C_COMPILER}, version: "
@@ -166,16 +166,16 @@ include_directories("${CMAKE_CURRENT_BINARY_DIR}/proto")
include_directories("${CMAKE_CURRENT_BINARY_DIR}/go/pserver/client/c")
set(EXTERNAL_LIBS
${GFLAGS_LIBRARIES}
${GLOG_LIBRARIES}
gflags
glog
${CBLAS_LIBRARIES}
${PROTOBUF_LIBRARY}
${ZLIB_LIBRARIES}
protobuf
zlib
${PYTHON_LIBRARIES}
)
if(WITH_GPU)
include(cuda)
include(cuda)
endif(WITH_GPU)
if(WITH_MKLML)
@@ -202,17 +202,18 @@ endif()
# "add_subdirectory(paddle)" and "add_subdirectory(python)" should be
# placed after this block, because they depend on it.
if(WITH_GOLANG)
enable_language(Go)
add_subdirectory(go)
endif(WITH_GOLANG)
set(PADDLE_PYTHON_BUILD_DIR "${CMAKE_CURRENT_BINARY_DIR}/python/build")
SET(CMAKE_CXX_FLAGS_RELWITHDEBINFO "-O3 -g -DNDEBUG")
SET(CMAKE_C_FLAGS_RELWITHDEBINFO "-O3 -g -DNDEBUG")
set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "-O3 -g -DNDEBUG")
set(CMAKE_C_FLAGS_RELWITHDEBINFO "-O3 -g -DNDEBUG")
add_subdirectory(paddle)
if(WITH_PYTHON)
add_subdirectory(python)
add_subdirectory(python)
endif()
if(WITH_DOC)

@@ -21,16 +21,6 @@ RUN apt-get update && \
wget curl tar unzip gcc g++ locales clang-format-3.8 swig cmake && \
apt-get clean -y
# Install Go and glide
RUN wget -qO- go.tgz https://storage.googleapis.com/golang/go1.8.1.linux-amd64.tar.gz | \
tar -xz -C /usr/local && \
mkdir /root/gopath && \
mkdir /root/gopath/bin && \
mkdir /root/gopath/src
ENV GOROOT=/usr/local/go GOPATH=/root/gopath
# should not be in the same line with GOROOT definition, otherwise docker build could not find GOROOT.
ENV PATH=${PATH}:${GOROOT}/bin:${GOPATH}/bin
# git credential to skip password typing
RUN git config --global credential.helper store

@@ -59,7 +59,6 @@ endif(NOT WITH_GOLANG)
if(NOT WITH_GPU)
add_definitions(-DHPPL_STUB_FUNC)
add_definitions("-DCUPTI_LIB_PATH=\"\"")
list(APPEND CMAKE_CXX_SOURCE_FILE_EXTENSIONS cu)
else()
@@ -77,9 +76,7 @@ else()
if(CUPTI_FOUND)
include_directories(${CUPTI_INCLUDE_DIR})
add_definitions(-DPADDLE_WITH_CUPTI)
add_definitions("-DCUPTI_LIB_PATH=\"${CUPTI_LIBRARY_PATH}\"")
else()
add_definitions("-DCUPTI_LIB_PATH=\"\"")
message(STATUS "Cannot find CUPTI, GPU Profiling is incorrect.")
endif()
set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS} "-Xcompiler ${SIMD_FLAG}")

@@ -28,7 +28,7 @@ ENDIF(WIN32)
INCLUDE_DIRECTORIES(${ZLIB_INCLUDE_DIR})
ExternalProject_Add(
zlib
extern_zlib
${EXTERNAL_PROJECT_LOG_ARGS}
GIT_REPOSITORY "https://github.com/madler/zlib.git"
GIT_TAG "v1.2.8"
@@ -49,9 +49,11 @@ ExternalProject_Add(
-DCMAKE_BUILD_TYPE:STRING=${THIRD_PARTY_BUILD_TYPE}
)
ADD_LIBRARY(zlib STATIC IMPORTED GLOBAL)
SET_PROPERTY(TARGET zlib PROPERTY IMPORTED_LOCATION ${ZLIB_LIBRARIES})
ADD_DEPENDENCIES(zlib extern_zlib)
LIST(APPEND external_project_dependencies zlib)
ADD_LIBRARY(zlib_target STATIC IMPORTED GLOBAL)
SET_PROPERTY(TARGET zlib_target PROPERTY IMPORTED_LOCATION ${ZLIB_LIBRARIES})
IF(WITH_C_API)
INSTALL(DIRECTORY ${ZLIB_INCLUDE_DIR} DESTINATION third_party/zlib)

@@ -104,7 +104,9 @@ function(merge_static_libs TARGET_NAME)
foreach(lib ${libs})
list(APPEND libs_deps ${${lib}_LIB_DEPENDS})
endforeach()
list(REMOVE_DUPLICATES libs_deps)
if(libs_deps)
list(REMOVE_DUPLICATES libs_deps)
endif()
# To produce a library we need at least one source file.
# It is created by add_custom_command below and will help
@@ -191,10 +193,13 @@ function(cc_library TARGET_NAME)
list(REMOVE_ITEM cc_library_DEPS warpctc)
add_dependencies(${TARGET_NAME} warpctc)
endif()
# Support linking flags: --whole-archive (Linux) / -force_load (MacOS)
target_circle_link_libraries(${TARGET_NAME} ${cc_library_DEPS})
if("${cc_library_DEPS}" MATCHES "ARCHIVE_START")
# Support linking flags: --whole-archive (Linux) / -force_load (MacOS).
# WARNING: Please don't use ARCHIVE_START&ARCHIVE_END if TARGET_NAME will be linked by other libraries.
target_circle_link_libraries(${TARGET_NAME} ${cc_library_DEPS})
list(REMOVE_ITEM cc_library_DEPS ARCHIVE_START ARCHIVE_END)
else()
target_link_libraries(${TARGET_NAME} ${cc_library_DEPS})
endif()
add_dependencies(${TARGET_NAME} ${cc_library_DEPS})
endif()

@@ -72,7 +72,7 @@ copy(inference_lib DEPENDS paddle_fluid_shared
)
set(module "platform")
copy(platform_lib
copy(platform_lib DEPS profiler_py_proto
SRCS ${src_dir}/${module}/*.h ${src_dir}/${module}/dynload/*.h ${src_dir}/${module}/details/*.h
DSTS ${dst_dir}/${module} ${dst_dir}/${module}/dynload ${dst_dir}/${module}/details
)

@@ -9,7 +9,7 @@
To build PaddlePaddle, we need:
1. A computer that can run Linux, Windows, or MacOS.
1. Docker
2. Docker
No other software is required. Not even Python or GCC, because we install all the build tools into a Docker image.
@@ -189,7 +189,7 @@ PaddlePaddle's build options include generating CPU/GPU binaries, choosing which B
"WITH_TESTING", "Whether to enable unit tests", "OFF"
"WITH_DOC", "Whether to build the Chinese and English documentation", "OFF"
"WITH_SWIG_PY", "Whether to build the Python SWIG interface, which can be used for inference and customized training", "Auto"
"WITH_GOLANG", "Whether to build the fault-tolerant parameter server written in Go", "ON"
"WITH_GOLANG", "Whether to build the fault-tolerant parameter server written in Go", "OFF"
"WITH_MKL", "Whether to use the MKL math library; if not, OpenBLAS is used", "ON"
BLAS

@@ -9,7 +9,7 @@ Requirements
To build PaddlePaddle, you need
1. A computer -- Linux, Windows, MacOS.
1. Docker.
2. Docker.
Nothing else. Not even Python and GCC, because you can install all build tools into a Docker image.
We run all the tools by running this image.
@@ -191,7 +191,7 @@ You can add :code:`-D` argument to pass such options, like:
"WITH_TESTING", "Build unit tests", "OFF"
"WITH_DOC", "Build documentations", "OFF"
"WITH_SWIG_PY", "Build Python SWIG interface for V2 API", "Auto"
"WITH_GOLANG", "Build fault-tolerant parameter server written in go", "ON"
"WITH_GOLANG", "Build fault-tolerant parameter server written in go", "OFF"
"WITH_MKL", "Use MKL as BLAS library, else use OpenBLAS", "ON"

@@ -1,4 +1,4 @@
Install Using pip
Install using pip
================================
You can use current widely used Python package management
@@ -8,7 +8,7 @@ most of current Linux systems or MacOS.
.. _pip_install:
Install Using pip
Install using pip
------------------------------
Run the following command to install PaddlePaddle on the current

@@ -39,15 +39,16 @@ In the backward pass
This implementation allows writing a mixed-device program like this:
```python
# get embedding feature on CPU
feature = some_cpu_only_op(data)
W1 = fluid.tensor(size=[100,20], parameter=true)
W2 = fluid.tensor(size=[20,15], parameter=true)
gpu_places = get_place(use_gpu=True)
data = layers.data()
gpu_places = layers.get_place(use_gpu=True)
# parallel processing on multiple GPUs
pd = ParallelDo(gpu_places)
with pd.do():
read_input(feature)
prediction = my_net(feature)
with pd.do(input=data):
prediction = softmax(fc(fc(data, W1), W2))
write_output(prediction)
prediction = pd()
loss = cross_entropy(prediction, label)
@@ -66,20 +67,20 @@ start_program
main_program
{
block0 {
vars: data, places, w1, w2
vars: data, places, w1, w2, w1_grad, w2_grad,
ops: data, get_place, parallel_do(block1),
parallel_do_grad(block2),
sgd(w2, w2_grad),
sgd(w1, w1_grad)
}
block1 {
block1 { # the forward pass
parent_block: 0
vars: data, h1, h2, loss
ops: fc, fc, softmax
}
block2 {
block2 { # the backward pass
parent_block: 1
vars: data_grad, h1_grad, h2_grad, loss_grad, w1_grad, w2_grad
vars: data_grad, h1_grad, h2_grad, loss_grad, local_w1_grad, local_w2_grad
ops: softmax_grad,
fc_grad
fc_grad
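To make the data-parallel pattern above concrete, here is a minimal plain-NumPy sketch (not the Fluid API) of what `parallel_do` conceptually does: split the input across places, run the same forward block on every shard, and sum the per-place weight gradients back into `w1_grad` and `w2_grad`. The helper names and the placeholder "gradients" below are illustrative assumptions, not PaddlePaddle code.
```python
# Conceptual sketch only: mimics ParallelDo's split / per-place forward /
# gradient-merge flow with NumPy. Not real backprop and not the Fluid API.
import numpy as np

def fc(x, w):                     # toy fully-connected layer, no bias
    return x.dot(w)

def softmax(z):
    e = np.exp(z - z.max(axis=1, keepdims=True))
    return e / e.sum(axis=1, keepdims=True)

def parallel_do(data, w1, w2, num_places):
    shards = np.array_split(data, num_places)   # one shard per place
    outputs, w1_grads, w2_grads = [], [], []
    for shard in shards:                         # each iteration ~ one device
        h1 = fc(shard, w1)
        prediction = softmax(fc(h1, w2))
        outputs.append(prediction)
        # Placeholder per-place "gradients" with the right shapes; the point
        # is only that parallel_do_grad sums them across places.
        w1_grads.append(shard.T.dot(h1))
        w2_grads.append(h1.T.dot(prediction))
    w1_grad = sum(w1_grads)                      # merge step of the backward pass
    w2_grad = sum(w2_grads)
    return np.concatenate(outputs), w1_grad, w2_grad

data = np.random.rand(8, 100)
w1, w2 = np.random.rand(100, 20), np.random.rand(20, 15)
prediction, w1_grad, w2_grad = parallel_do(data, w1, w2, num_places=2)
```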

@@ -222,6 +222,7 @@ upstream
## Some conventions for submitting code
To help reviewers focus on the code itself, please follow these conventions every time you submit code:
1. Make sure the unit tests in Travis-CI pass. If they do not, the submitted code has a problem and reviewers generally will not review it.
2. Before submitting a Pull Request:
- Pay attention to the number of commits.
@@ -231,6 +232,7 @@ upstream
3. If the Pull Request fixes an issue, add `fix #issue_number` in the **first** comment box of the Pull Request, so that the corresponding issue is closed automatically when the Pull Request is merged. Keywords include close, closes, closed, fix, fixes, fixed, resolve, resolves, and resolved; please choose the appropriate one. For details, see [Closing issues via commit messages](https://help.github.com/articles/closing-issues-via-commit-messages).
In addition, when replying to reviewers' comments, please follow these conventions:
1. Reply to every reviewer comment (this is basic courtesy in the open-source community; when someone helps, say thank you):
- If you agree with a comment and have revised the code accordingly, a simple `Done` is enough;
- If you disagree with a comment, please explain your reasons.

@@ -4,6 +4,5 @@ Development
.. toctree::
:maxdepth: 1
new_layer_en.rst
contribute_to_paddle_en.md
write_docs_en.rst

@@ -16,7 +16,7 @@
$ export DEVICES=$(\ls /dev/nvidia* | xargs -I{} echo '--device {}:{}')
$ docker run ${CUDA_SO} ${DEVICES} -it paddlepaddle/paddle:latest-gpu
For more on installing and using Docker, please refer to the `PaddlePaddle Docker documentation <http://www.paddlepaddle.org/doc_cn/build_and_install/install/docker_install.html>`_
For more on installing and using Docker, please refer to the `PaddlePaddle Docker documentation <http://www.paddlepaddle.org/docs/0.11.0/documentation/zh/getstarted/build_and_install/docker_install_cn.html>`_
2. When building from source with CMake, the detected PythonLibs and PythonInterp versions do not match

@@ -0,0 +1,5 @@
############################
Install, Build and Unit test
############################
TBD

@@ -0,0 +1,5 @@
###############################
Cluster Training and Prediction
###############################
TBD

@@ -1,6 +1,8 @@
FAQ
====
This document answers some frequently asked questions about PaddlePaddle. If your question is not covered here, please search for an answer in the `PaddlePaddle community <https://github.com/PaddlePaddle/Paddle/issues>`_ or file an `issue <https://github.com/PaddlePaddle/Paddle/issues/new>`_ directly, and we will respond promptly.
.. toctree::
:maxdepth: 1

@@ -0,0 +1,12 @@
FAQ
====
.. toctree::
:maxdepth: 1
build_and_install/index_en.rst
model/index_en.rst
parameter/index_en.rst
local/index_en.rst
cluster/index_en.rst

@@ -148,10 +148,10 @@ The Paddle binary captures floating-point exceptions at runtime; whenever a floating-point exception
.. code-block:: python
optimizer = paddle.optimizer.RMSProp(
learning_rate=1e-3,
gradient_clipping_threshold=10.0,
regularization=paddle.optimizer.L2Regularization(rate=8e-4))
optimizer = paddle.optimizer.RMSProp(
learning_rate=1e-3,
gradient_clipping_threshold=10.0,
regularization=paddle.optimizer.L2Regularization(rate=8e-4))
For details, refer to the `nmt_without_attention <https://github.com/PaddlePaddle/models/blob/develop/nmt_without_attention/train.py#L35>`_ example.
@@ -159,13 +159,13 @@ optimizer = paddle.optimizer.RMSProp(
.. code-block:: python
decoder_inputs = paddle.layer.fc(
act=paddle.activation.Linear(),
size=decoder_size * 3,
bias_attr=False,
input=[context, current_word],
layer_attr=paddle.attr.ExtraLayerAttribute(
error_clipping_threshold=100.0))
decoder_inputs = paddle.layer.fc(
act=paddle.activation.Linear(),
size=decoder_size * 3,
bias_attr=False,
input=[context, current_word],
layer_attr=paddle.attr.ExtraLayerAttribute(
error_clipping_threshold=100.0))
For the complete code, refer to the `machine translation <https://github.com/PaddlePaddle/book/blob/develop/08.machine_translation/train.py#L66>`_ example.

@@ -0,0 +1,5 @@
#############################
Local Training and Prediction
#############################
TBD

@@ -0,0 +1,5 @@
###################
Model Configuration
###################
TBD

@@ -196,6 +196,6 @@ The model parameter file saved by PaddlePaddle consists of a 16-byte header and the network parameters
obj="process",
args={"src_dict_path": src_dict_path})
For the complete source code, refer to the `seqToseq <https://github.com/PaddlePaddle/Paddle/tree/develop/demo/seqToseq>`_ example.
For the complete source code, refer to the `sequence_recurrent <https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/gserver/tests/sequence_recurrent.py>`_ example.
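Since the FAQ entry above describes the saved model parameter file as a 16-byte header followed by the network parameter values, a minimal sketch for inspecting such a file could look like this; the float32 element type and the height/width reshape are assumptions for illustration, not a documented format guarantee.
.. code-block:: python

    # Minimal sketch: skip the 16-byte header described above, then read
    # the raw parameter values. dtype and shape are assumptions.
    import numpy as np

    def load_parameter(path, height, width):
        with open(path, "rb") as f:
            f.read(16)                    # skip the 16-byte header
            values = np.fromfile(f, dtype=np.float32)
        return values.reshape(height, width)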

@@ -0,0 +1,5 @@
#################
Parameter Setting
#################
TBD

@@ -0,0 +1,3 @@
Basic Concept
=============
TBD
