Merge branch 'develop' into dir

avx_docs
Luo Tao, 8 years ago
commit 1de901ab8e

@@ -25,8 +25,8 @@ find_package(ZLIB REQUIRED)
 find_package(NumPy REQUIRED)
 find_package(Threads REQUIRED)
 find_package(AVX QUIET)
-find_package(Glog)
-find_package(Gflags QUIET)
+find_package(Glog REQUIRED)
+find_package(Gflags REQUIRED)
 find_package(GTest)
 find_package(Sphinx)
 find_package(Doxygen)
@@ -40,8 +40,6 @@ option(WITH_AVX "Compile PaddlePaddle with avx intrinsics" ${AVX_FOUND})
 option(WITH_PYTHON "Compile PaddlePaddle with python interpreter" ON)
 option(WITH_STYLE_CHECK "Style Check for PaddlePaddle" ${PYTHONINTERP_FOUND})
 option(WITH_RDMA "Compile PaddlePaddle with rdma support" OFF)
-option(WITH_GLOG "Compile PaddlePaddle use glog, otherwise use a log implement internally" ${LIBGLOG_FOUND})
-option(WITH_GFLAGS "Compile PaddlePaddle use gflags, otherwise use a flag implement internally" ${GFLAGS_FOUND})
 option(WITH_TIMER "Compile PaddlePaddle use timer" OFF)
 option(WITH_PROFILER "Compile PaddlePaddle use gpu profiler" OFF)
 option(WITH_TESTING "Compile and run unittest for PaddlePaddle" ${GTEST_FOUND})
@@ -136,16 +134,12 @@ else(WITH_RDMA)
     add_definitions(-DPADDLE_DISABLE_RDMA)
 endif(WITH_RDMA)
 
-if(WITH_GLOG)
-    add_definitions(-DPADDLE_USE_GLOG)
-    include_directories(${LIBGLOG_INCLUDE_DIR})
-endif()
+# glog
+include_directories(${LIBGLOG_INCLUDE_DIR})
 
-if(WITH_GFLAGS)
-    add_definitions(-DPADDLE_USE_GFLAGS)
-    add_definitions(-DGFLAGS_NS=${GFLAGS_NAMESPACE})
-    include_directories(${GFLAGS_INCLUDE_DIRS})
-endif()
+#gflags
+add_definitions(-DGFLAGS_NS=${GFLAGS_NAMESPACE})
+include_directories(${GFLAGS_INCLUDE_DIRS})
 
 if(WITH_TESTING)
     enable_testing()

@@ -0,0 +1 @@
+./doc/howto/contribute_to_paddle_en.md

@@ -3,7 +3,7 @@ http_archive(
     name="protobuf",
     url="http://github.com/google/protobuf/archive/v3.1.0.tar.gz",
     sha256="0a0ae63cbffc274efb573bdde9a253e3f32e458c41261df51c5dbc5ad541e8f7",
-    strip_prefix="protobuf-3.1.0", )
+    strip_prefix="protobuf-3.1.0")
 
 # External dependency to gtest 1.7.0. This method comes from
 # https://www.bazel.io/versions/master/docs/tutorial/cpp.html.
@@ -12,4 +12,20 @@ new_http_archive(
     url="https://github.com/google/googletest/archive/release-1.7.0.zip",
     sha256="b58cb7547a28b2c718d1e38aee18a3659c9e3ff52440297e965f5edffe34b6d0",
     build_file="third_party/gtest.BUILD",
-    strip_prefix="googletest-release-1.7.0", )
+    strip_prefix="googletest-release-1.7.0")
+
+# External dependency to gflags. This method comes from
+# https://github.com/gflags/example/blob/master/WORKSPACE.
+new_git_repository(
+    name="gflags",
+    tag="v2.2.0",
+    remote="https://github.com/gflags/gflags.git",
+    build_file="third_party/gflags.BUILD")
+
+# External dependency to glog. This method comes from
+# https://github.com/reyoung/bazel_playground/blob/master/WORKSPACE
+new_git_repository(
+    name="glog",
+    remote="https://github.com/google/glog.git",
+    commit="b6a5e0524c28178985f0d228e9eaa43808dbec3c",
+    build_file="third_party/glog.BUILD")

@@ -14,13 +14,9 @@ if(WITH_STYLE_CHECK)
     find_package(PythonInterp REQUIRED)
 endif()
 
-if(WITH_GLOG)
-    find_package(Glog REQUIRED)
-endif()
+find_package(Glog REQUIRED)
 
-if(WITH_GFLAGS)
-    find_package(Gflags REQUIRED)
-endif()
+find_package(Gflags REQUIRED)
 
 if(WITH_TESTING)
     find_package(GTest REQUIRED)

@@ -65,7 +65,7 @@ endmacro()
 # link_paddle_exe
 # add paddle library for a paddle executable, such as trainer, pserver.
 #
-# It will handle WITH_PYTHON/WITH_GLOG etc.
+# It will handle WITH_PYTHON etc.
 function(link_paddle_exe TARGET_NAME)
     if(WITH_RDMA)
         generate_rdma_links()
@@ -108,6 +108,8 @@ function(link_paddle_exe TARGET_NAME)
         paddle_cuda
         ${METRIC_LIBS}
         ${PROTOBUF_LIBRARY}
+        ${LIBGLOG_LIBRARY}
+        ${GFLAGS_LIBRARIES}
         ${CMAKE_THREAD_LIBS_INIT}
         ${CBLAS_LIBS}
         ${ZLIB_LIBRARIES}
@@ -125,16 +127,6 @@ function(link_paddle_exe TARGET_NAME)
             ${PYTHON_LIBRARIES})
     endif()
 
-    if(WITH_GLOG)
-        target_link_libraries(${TARGET_NAME}
-            ${LIBGLOG_LIBRARY})
-    endif()
-
-    if(WITH_GFLAGS)
-        target_link_libraries(${TARGET_NAME}
-            ${GFLAGS_LIBRARIES})
-    endif()
-
     if(WITH_GPU)
         if(NOT WITH_DSO OR WITH_METRIC)
             target_link_libraries(${TARGET_NAME}

@@ -43,13 +43,13 @@ def extract_dict_features(pair_file, feature_file):
         mark[verb_index] = 1
         ctx_0 = sentence_list[verb_index]
 
-        if verb_index < len(labels_list) - 2:
+        if verb_index < len(labels_list) - 1:
             mark[verb_index + 1] = 1
             ctx_p1 = sentence_list[verb_index + 1]
         else:
             ctx_p1 = 'eos'
 
-        if verb_index < len(labels_list) - 3:
+        if verb_index < len(labels_list) - 2:
             mark[verb_index + 2] = 1
             ctx_p2 = sentence_list[verb_index + 2]
         else:
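The two bound changes above are off-by-one fixes: `sentence_list[verb_index + 1]` is a valid read exactly when `verb_index < len(labels_list) - 1` (the script bounds `sentence_list` indexing with `len(labels_list)`, so the two lists are assumed to be the same length), and `+ 2` likewise needs `- 2`; the old bounds fell back to `'eos'` one token too early. A minimal standalone sketch of the corrected checks, with made-up toy data rather than the script's real input:

```python
# Toy illustration of the corrected bounds (hypothetical data; the real script
# reads sentence/label pairs from pair_file and also fills the mark vector).
sentence_list = ['a', 'cat', 'sat']
labels_list = ['O', 'B-V', 'O']   # assumed same length as sentence_list
verb_index = 1                    # verb is the second-to-last token

# sentence_list[verb_index + 1] exists iff verb_index < len(labels_list) - 1;
# the old bound (- 2) would have produced 'eos' here although 'sat' exists.
if verb_index < len(labels_list) - 1:
    ctx_p1 = sentence_list[verb_index + 1]
else:
    ctx_p1 = 'eos'

# sentence_list[verb_index + 2] exists iff verb_index < len(labels_list) - 2.
if verb_index < len(labels_list) - 2:
    ctx_p2 = sentence_list[verb_index + 2]
else:
    ctx_p2 = 'eos'

print(ctx_p1, ctx_p2)  # -> sat eos
```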

@@ -49,10 +49,8 @@ PaddlePaddle supports some build options. To enable it, first you need to instal
 <tbody>
 <tr><td class="left">WITH_GPU</td><td class="left">Compile with GPU mode.</td></tr>
 <tr><td class="left">WITH_DOUBLE</td><td class="left">Compile with double precision floating-point, default: single precision.</td></tr>
-<tr><td class="left">WITH_GLOG</td><td class="left">Compile with glog. If not found, default: an internal log implementation.</td></tr>
-<tr><td class="left">WITH_GFLAGS</td><td class="left">Compile with gflags. If not found, default: an internal flag implementation.</td></tr>
 <tr><td class="left">WITH_TESTING</td><td class="left">Compile with gtest for PaddlePaddle's unit testing.</td></tr>
 <tr><td class="left">WITH_DOC</td><td class="left"> Compile to generate PaddlePaddle's docs, default: disabled (OFF).</td></tr>
 <tr><td class="left">WITH_SWIG_PY</td><td class="left">Compile with python predict API, default: disabled (OFF).</td></tr>
 <tr><td class="left">WITH_STYLE_CHECK</td><td class="left">Compile with code style check, default: enabled (ON).</td></tr>
 </tbody>

@@ -6,8 +6,6 @@ WITH_AVX,是否编译含有AVX指令集的PaddlePaddle二进制文件,是
 WITH_PYTHON,是否内嵌PYTHON解释器。方便今后的嵌入式移植工作。,是
 WITH_STYLE_CHECK,是否编译时进行代码风格检查,是
 WITH_RDMA,是否开启RDMA,否
-WITH_GLOG,是否开启GLOG。如果不开启则会使用一个简化版的日志同时方便今后的嵌入式移植工作。,取决于是否寻找到GLOG
-WITH_GFLAGS,是否使用GFLAGS。如果不开启则会使用一个简化版的命令行参数解析器同时方便今后的嵌入式移植工作。,取决于是否寻找到GFLAGS
 WITH_TIMER,是否开启计时功能。如果开启会导致运行略慢打印的日志变多但是方便调试和测Benchmark,否
 WITH_TESTING,是否开启单元测试,取决于是否寻找到GTEST
 WITH_DOC,是否编译中英文文档,否


@@ -46,8 +46,6 @@ PaddlePaddle提供了ubuntu 14.04 deb安装包。
 with_double: OFF
 with_python: ON
 with_rdma: OFF
-with_glog: ON
-with_gflags: ON
 with_metric_learning:
 with_timer: OFF
 with_predict_sdk:

@@ -47,6 +47,22 @@ Then you can start to develop by making a local developement branch
 git checkout -b MY_COOL_STUFF_BRANCH
 ```
 
+## Using `pre-commit` hook
+
+Paddle developers use the [pre-commit](http://pre-commit.com/) tool to manage
+git pre-commit hooks. It helps format the source code (C++, Python) and run
+some basic checks before each commit (a single EOL per file, no huge files
+added to git). The `pre-commit` checks are now part of the unit tests in
+Travis-CI, and a PR that does not pass the hooks cannot be merged into Paddle.
+
+To use [pre-commit](http://pre-commit.com/), install it with
+`pip install pre-commit`. Paddle currently uses `clang-format` to format
+C/C++ sources; please make sure clang-format 3.8+ is installed.
+
+Then just run `pre-commit install` in your Paddle clone directory. Whenever
+you commit your code, the pre-commit hook checks the local changes for
+anything that is not suitable to commit.
+
 ## Commit
 
 Commit your changes by following command lines:
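In practice, the setup described in the new section boils down to two commands; a minimal sequence, assuming `pip` is available and clang-format 3.8+ is already on the PATH (the clone path below is only a placeholder):

```bash
# Install the pre-commit tool and register its git hooks in a local Paddle clone.
pip install pre-commit
clang-format --version   # should report 3.8 or newer
cd Paddle                # adjust to wherever your clone lives
pre-commit install
# From now on, every `git commit` runs the configured checks first.
```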

@@ -17,22 +17,18 @@ add_library(paddle_api STATIC
     ${API_SOURCES})
 
 add_dependencies(paddle_api gen_proto_cpp)
 
-if(WITH_GFLAGS)
-    list(LENGTH "${GFLAGS_LIBRARIES}" GFLAGS_LIBRARIES_LENGTH)
-
-    if(${GFLAGS_LIBRARIES_LENGTH} EQUAL 0 AND TARGET "${GFLAGS_LIBRARIES}")
-    # Because gflags compiled by cmake, so it is imported by cmake target,
-    # not a real library path. Get the real library path here.
-        message(STATUS "GFLAGS Libraries is ${GFLAGS_LIBRARIES}")
-        get_target_property(GFLAGS_LOCATION ${GFLAGS_LIBRARIES} LOCATION)
-        message(STATUS "GFLAGS Target location is ${GFLAGS_LOCATION}")
-    else()
-        set(GFLAGS_LOCATION ${GFLAGS_LIBRARIES})
-    endif()
-endif()
+list(LENGTH "${GFLAGS_LIBRARIES}" GFLAGS_LIBRARIES_LENGTH)
+if(${GFLAGS_LIBRARIES_LENGTH} EQUAL 0 AND TARGET "${GFLAGS_LIBRARIES}")
+# Because gflags compiled by cmake, so it is imported by cmake target,
+# not a real library path. Get the real library path here.
+    message(STATUS "GFLAGS Libraries is ${GFLAGS_LIBRARIES}")
+    get_target_property(GFLAGS_LOCATION ${GFLAGS_LIBRARIES} LOCATION)
+    message(STATUS "GFLAGS Target location is ${GFLAGS_LOCATION}")
+else()
+    set(GFLAGS_LOCATION ${GFLAGS_LIBRARIES})
+endif()
 
 configure_file(
     paddle_api_config.py.in
     ${PROJ_ROOT}/paddle/api/paddle_api_config.py
@@ -57,7 +53,7 @@ add_custom_command(OUTPUT ${PROJ_ROOT}/paddle/dist/.timestamp
         paddle_trainer
         paddle_api
         paddle_cuda
         ${PY_PADDLE_PYTHON_FILES}
 )
 
 install(DIRECTORY ${PROJ_ROOT}/paddle/dist/

@@ -27,9 +27,9 @@ limitations under the License. */
 
 using paddle::real;
 
-P_DECLARE_string(config);
-P_DECLARE_string(init_model_path);
-P_DECLARE_int32(start_pass);
+DECLARE_string(config);
+DECLARE_string(init_model_path);
+DECLARE_int32(start_pass);
 
 struct TrainerPrivate : public paddle::Trainer {
   bool _trainOneBatch(size_t batchSize);
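This hunk and the ones below rename Paddle's `P_DECLARE_*`/`P_DEFINE_*` command-line-flag macros to the standard gflags-style `DECLARE_*`/`DEFINE_*` names (the project's own declarations still come through `paddle/utils/CommandLineParser.h`). For reference, a minimal standalone sketch of the plain gflags API that these names mirror; this is an illustration, not Paddle's actual wiring:

```cpp
// flags_demo.cpp -- plain gflags usage (assumes the gflags library is installed).
// DEFINE_* creates a flag plus its FLAGS_<name> variable in one translation unit;
// DECLARE_* makes that variable visible from other translation units.
#include <gflags/gflags.h>
#include <iostream>

DEFINE_int32(start_pass, 0, "pass number to start training from");
DEFINE_string(config, "", "path to the config file");

int main(int argc, char* argv[]) {
  // Recent gflags releases use the `gflags` namespace; older installs expose
  // the same function as google::ParseCommandLineFlags.
  gflags::ParseCommandLineFlags(&argc, &argv, /*remove_flags=*/true);
  std::cout << "config=" << FLAGS_config
            << " start_pass=" << FLAGS_start_pass << std::endl;
  return 0;
}
```

Built with something like `g++ flags_demo.cpp -lgflags`, it accepts flags such as `--config=some_config.py --start_pass=2` on the command line.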

@@ -8,9 +8,7 @@ CMAKE_DL_LIBS="@CMAKE_DL_LIBS@"
 
 WITH_PYTHON="@WITH_PYTHON@"
 PYTHON_LIBRARIES="@PYTHON_LIBRARIES@"
-WITH_GLOG="@WITH_GLOG@"
 LIBGLOG_LIBRARY="@LIBGLOG_LIBRARY@"
-WITH_GFLAGS="@WITH_GFLAGS@"
 GFLAGS_LIBRARIES="@GFLAGS_LIBRARIES@"
 GFLAGS_LOCATION="@GFLAGS_LOCATION@"
 CBLAS_LIBRARIES="@CBLAS_LIBS@"

@@ -47,10 +47,8 @@ try:
             self.with_python = PaddleLDFlag.cmake_bool(WITH_PYTHON)
             self.python_libs = PYTHON_LIBRARIES
 
-            self.with_glog = PaddleLDFlag.cmake_bool(WITH_GLOG)
             self.glog_libs = LIBGLOG_LIBRARY
 
-            self.with_gflags = PaddleLDFlag.cmake_bool(WITH_GFLAGS)
             self.with_coverage = PaddleLDFlag.cmake_bool(WITH_COVERALLS)
             self.gflags_libs = GFLAGS_LIBRARIES
             self.gflags_location = GFLAGS_LOCATION
@@ -88,6 +86,8 @@ try:
             "-lpaddle_cuda",
             "-lpaddle_api",
             self.normalize_flag(self.protolib),
+            self.normalize_flag(self.glog_libs),
+            self.normalize_flag(self.gflags_libs),
             self.normalize_flag(self.zlib),
             self.normalize_flag(self.thread),
             self.normalize_flag(self.dl_libs),
@@ -96,10 +96,6 @@ try:
             if self.with_python:
                 libs.append(self.normalize_flag(self.python_libs))
-            if self.with_glog:
-                libs.append(self.normalize_flag(self.glog_libs))
-            if self.with_gflags:
-                libs.append(self.normalize_flag(self.gflags_libs))
             if self.with_gpu:
                 libs.append(self.normalize_flag(self.curt))
             if self.with_coverage:

@@ -21,10 +21,10 @@ limitations under the License. */
 
 #include "paddle/utils/CommandLineParser.h"
 #include "paddle/utils/Logging.h"
 
-P_DEFINE_int32(cudnn_conv_workspace_limit_in_mb,
+DEFINE_int32(cudnn_conv_workspace_limit_in_mb,
              4096,
              "Specify cuDNN max workspace limit, in units MB, "
              "4096MB=4GB by default.");
 
 namespace dynload {

@@ -22,6 +22,7 @@ limitations under the License. */
 #include <sys/time.h>
 #include <unistd.h>
 #include <mutex>
+#include "hl_cuda.h"
 #include "hl_cuda.ph"
 #include "hl_dso_loader.h"
 #include "hl_thread.ph"

@@ -16,21 +16,21 @@ limitations under the License. */
 #include "paddle/utils/CommandLineParser.h"
 #include "paddle/utils/Logging.h"
 
-P_DEFINE_string(cudnn_dir,
+DEFINE_string(cudnn_dir,
               "",
               "Specify path for loading libcudnn.so. For instance, "
               "/usr/local/cudnn/lib. If empty [default], dlopen "
               "will search cudnn from LD_LIBRARY_PATH");
 
-P_DEFINE_string(cuda_dir,
+DEFINE_string(cuda_dir,
               "",
               "Specify path for loading cuda library, such as libcublas, "
               "libcurand. For instance, /usr/local/cuda/lib64. (Note: "
               "libcudart can not be specified by cuda_dir, since some "
               "build-in function in cudart already ran before main entry). "
               "If default, dlopen will search cuda from LD_LIBRARY_PATH");
 
-P_DEFINE_string(warpctc_dir, "", "Specify path for loading libwarpctc.so.");
+DEFINE_string(warpctc_dir, "", "Specify path for loading libwarpctc.so.");
 
 static inline std::string join(const std::string& part1,
                                const std::string& part2) {

@@ -22,9 +22,9 @@ limitations under the License. */
 
 #include "DataProviderGroup.h"
 #include "paddle/utils/Logging.h"
 
-P_DEFINE_double(memory_threshold_on_load_data,
+DEFINE_double(memory_threshold_on_load_data,
               1.0,
               "stop loading data when memory is not sufficient");
 
 namespace paddle {

@@ -17,7 +17,7 @@ limitations under the License. */
 
 #include "paddle/gserver/gradientmachines/NeuralNetwork.h"
 
-P_DECLARE_int32(trainer_id);
+DECLARE_int32(trainer_id);
 
 namespace paddle {

@@ -21,11 +21,11 @@ limitations under the License. */
 #include "NeuralNetwork.h"
 #include "ParallelNeuralNetwork.h"
 
-P_DEFINE_bool(allow_only_one_model_on_one_gpu,
+DEFINE_bool(allow_only_one_model_on_one_gpu,
             true,
             "If true, do not allow multiple models on one GPU device");
 #ifdef PADDLE_METRIC_LEARNING
-P_DECLARE_bool(external);
+DECLARE_bool(external);
 #endif
 
 namespace paddle {

@@ -24,7 +24,7 @@ limitations under the License. */
 #include "paddle/utils/Stat.h"
 #include "paddle/utils/Util.h"
 
-P_DEFINE_string(diy_beam_search_prob_so, "", "the diy beam search cost so");
+DEFINE_string(diy_beam_search_prob_so, "", "the diy beam search cost so");
 
 static const char* DIY_CALC_PROB_SYMBOL_NAME = "calc_prob";
 static const char* DIY_START_CALC_PROB_SYMBOL_NAME = "start_calc_prob";

@@ -54,7 +54,7 @@ void DataLayer::copyDataToOutput(Argument& output) {
     output.setFrameWidth(config_.width());
   } else {
     output.setFrameHeight(data_.getFrameHeight());
-    output.setFrameHeight(data_.getFrameHeight());
+    output.setFrameWidth(data_.getFrameWidth());
   }
   output.cpuSequenceDims = data_.cpuSequenceDims;
   output.sequenceStartPositions = data_.sequenceStartPositions;

@@ -33,7 +33,7 @@ limitations under the License. */
 
 #include "TransLayer.h"
 #include "ValidationLayer.h"
 
-P_DEFINE_bool(log_error_clipping, false, "enable log error clipping or not");
+DEFINE_bool(log_error_clipping, false, "enable log error clipping or not");
 
 namespace paddle {

@@ -17,7 +17,7 @@ limitations under the License. */
 
 #include "paddle/math/Matrix.h"
 #include "paddle/utils/Stat.h"
 
-P_DECLARE_bool(prev_batch_state);
+DECLARE_bool(prev_batch_state);
 
 namespace paddle {

@@ -17,7 +17,7 @@ limitations under the License. */
 
 #include "paddle/utils/CommandLineParser.h"
 #include "paddle/utils/Stat.h"
 
-P_DEFINE_bool(rnn_use_batch, false, "Using the batch method for calculation.");
+DEFINE_bool(rnn_use_batch, false, "Using the batch method for calculation.");
 
 namespace paddle {

Some files were not shown because too many files have changed in this diff.
