diff --git a/CMakeLists.txt b/CMakeLists.txt index b309ff37e5..5df83499d5 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -16,8 +16,6 @@ cmake_minimum_required(VERSION 3.0) set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/cmake") set(PADDLE_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}) set(PADDLE_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR}) -SET(CMAKE_CXX_FLAGS_RELWITHDEBINFO "-O3 -g -DNDEBUG") -SET(CMAKE_C_FLAGS_RELWITHDEBINFO "-O3 -g -DNDEBUG") include(system) @@ -201,6 +199,10 @@ if(WITH_GOLANG) endif(WITH_GOLANG) set(PADDLE_PYTHON_BUILD_DIR "${CMAKE_CURRENT_BINARY_DIR}/python/build") + +SET(CMAKE_CXX_FLAGS_RELWITHDEBINFO "-O3 -g -DNDEBUG") +SET(CMAKE_C_FLAGS_RELWITHDEBINFO "-O3 -g -DNDEBUG") + add_subdirectory(paddle) if(WITH_PYTHON) add_subdirectory(python) diff --git a/README.md b/README.md index db0fbd88b2..577528e7aa 100644 --- a/README.md +++ b/README.md @@ -2,8 +2,8 @@ [![Build Status](https://travis-ci.org/PaddlePaddle/Paddle.svg?branch=develop)](https://travis-ci.org/PaddlePaddle/Paddle) -[![Documentation Status](https://img.shields.io/badge/docs-latest-brightgreen.svg?style=flat)](http://doc.paddlepaddle.org/develop/doc/) -[![Documentation Status](https://img.shields.io/badge/中文文档-最新-brightgreen.svg)](http://doc.paddlepaddle.org/develop/doc_cn/) +[![Documentation Status](https://img.shields.io/badge/docs-latest-brightgreen.svg?style=flat)](http://www.paddlepaddle.org/docs/develop/documentation/en/getstarted/index_en.html) +[![Documentation Status](https://img.shields.io/badge/中文文档-最新-brightgreen.svg)](http://www.paddlepaddle.org/docs/develop/documentation/zh/getstarted/index_cn.html) [![Coverage Status](https://coveralls.io/repos/github/PaddlePaddle/Paddle/badge.svg?branch=develop)](https://coveralls.io/github/PaddlePaddle/Paddle?branch=develop) [![Release](https://img.shields.io/github/release/PaddlePaddle/Paddle.svg)](https://github.com/PaddlePaddle/Paddle/releases) [![License](https://img.shields.io/badge/license-Apache%202-blue.svg)](LICENSE) @@ -36,7 +36,7 @@ Please refer to our [release announcement](https://github.com/PaddlePaddle/Paddl examples: - Optimized math operations through SSE/AVX intrinsics, BLAS libraries - (e.g. MKL, ATLAS, cuBLAS) or customized CPU/GPU kernels. + (e.g. MKL, OpenBLAS, cuBLAS) or customized CPU/GPU kernels. - Highly optimized recurrent networks which can handle **variable-length** sequence without padding. - Optimized local and distributed training for models with high dimensional @@ -61,32 +61,32 @@ Please refer to our [release announcement](https://github.com/PaddlePaddle/Paddl ## Installation It is recommended to check out the -[Docker installation guide](http://doc.paddlepaddle.org/develop/doc/getstarted/build_and_install/docker_install_en.html) +[Docker installation guide](http://www.paddlepaddle.org/docs/develop/documentation/en/getstarted/build_and_install/docker_install_en.html) before looking into the -[build from source guide](http://doc.paddlepaddle.org/develop/doc/getstarted/build_and_install/build_from_source_en.html). +[build from source guide](http://www.paddlepaddle.org/docs/develop/documentation/en/getstarted/build_and_install/build_from_source_en.html). ## Documentation -We provide [English](http://doc.paddlepaddle.org/develop/doc/) and -[Chinese](http://doc.paddlepaddle.org/doc_cn/) documentation. 
+We provide [English](http://www.paddlepaddle.org/docs/develop/documentation/en/getstarted/index_en.html) and +[Chinese](http://www.paddlepaddle.org/docs/develop/documentation/zh/getstarted/index_cn.html) documentation. -- [Deep Learning 101](http://book.paddlepaddle.org/index.html) +- [Deep Learning 101](http://www.paddlepaddle.org/docs/develop/book/01.fit_a_line/index.html) You might want to start from this online interactive book that can run in a Jupyter Notebook. -- [Distributed Training](http://doc.paddlepaddle.org/develop/doc/howto/usage/cluster/cluster_train_en.html) +- [Distributed Training](http://www.paddlepaddle.org/docs/develop/documentation/en/howto/usage/cluster/cluster_train_en.html) You can run distributed training jobs on MPI clusters. -- [Distributed Training on Kubernetes](http://doc.paddlepaddle.org/develop/doc/howto/usage/k8s/k8s_en.html) +- [Distributed Training on Kubernetes](http://www.paddlepaddle.org/docs/develop/documentation/en/howto/usage/cluster/k8s_en.html) You can also run distributed training jobs on Kubernetes clusters. -- [Python API](http://doc.paddlepaddle.org/develop/doc/api/index_en.html) +- [Python API](http://www.paddlepaddle.org/docs/develop/documentation/en/api/index_en.html) Our new API enables much shorter programs. -- [How to Contribute](http://doc.paddlepaddle.org/develop/doc/howto/dev/contribute_to_paddle_en.html) +- [How to Contribute](http://www.paddlepaddle.org/docs/develop/documentation/en/howto/dev/contribute_to_paddle_en.html) We appreciate your contributions! diff --git a/benchmark/IntelOptimizedPaddle.md b/benchmark/IntelOptimizedPaddle.md index 8ee7fd28c5..6cc9598947 100644 --- a/benchmark/IntelOptimizedPaddle.md +++ b/benchmark/IntelOptimizedPaddle.md @@ -22,6 +22,7 @@ On each machine, we will test and compare the performance of training on single #### Training Test on batch size 64, 128, 256 on Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GHz +Note that the speeds below include forward, backward and parameter update time, so we cannot directly compare them with the benchmark of the Caffe `time` [command](https://github.com/PaddlePaddle/Paddle/blob/develop/benchmark/caffe/image/run.sh#L9), which only covers forward and backward. The parameter update time becomes significant when the weights are large, especially for AlexNet.
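+As a hypothetical worked example (all numbers below are invented for illustration): if one mini-batch of 64 images takes 160 ms including the parameter update, the reported speed is 64 / 0.16 = 400 images/second, while a measurement covering only forward and backward at, say, 120 ms per batch would report about 64 / 0.12 ≈ 533 images/second for the same run.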
Input image size - 3 * 224 * 224, Time: images/second @@ -55,6 +56,16 @@ Input image size - 3 * 224 * 224, Time: images/second +- Alexnet + +| BatchSize | 64 | 128 | 256 | +|--------------|--------| ------ | -------| +| OpenBLAS | 2.13 | 2.45 | 2.68 | +| MKLML | 66.37 | 105.60 | 144.04 | +| MKL-DNN | 399.00 | 498.94 | 626.53 | + +chart TBD + #### Inference Test on batch size 1, 2, 4, 8, 16 on Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GHz - VGG-19 diff --git a/benchmark/paddle/image/alexnet.py b/benchmark/paddle/image/alexnet.py index 3358d43a4b..77d130ae34 100644 --- a/benchmark/paddle/image/alexnet.py +++ b/benchmark/paddle/image/alexnet.py @@ -6,8 +6,18 @@ height = 227 width = 227 num_class = 1000 batch_size = get_config_arg('batch_size', int, 128) +gp = get_config_arg('layer_num', int, 1) +is_infer = get_config_arg("is_infer", bool, False) +num_samples = get_config_arg('num_samples', int, 2560) -args = {'height': height, 'width': width, 'color': True, 'num_class': num_class} +args = { + 'height': height, + 'width': width, + 'color': True, + 'num_class': num_class, + 'is_infer': is_infer, + 'num_samples': num_samples +} define_py_data_sources2( "train.list", None, module="provider", obj="process", args=args) @@ -31,7 +41,7 @@ net = img_pool_layer(input=net, pool_size=3, stride=2) # conv2 net = img_conv_layer( - input=net, filter_size=5, num_filters=256, stride=1, padding=2, groups=1) + input=net, filter_size=5, num_filters=256, stride=1, padding=2, groups=gp) net = img_cmrnorm_layer(input=net, size=5, scale=0.0001, power=0.75) net = img_pool_layer(input=net, pool_size=3, stride=2) @@ -40,11 +50,11 @@ net = img_conv_layer( input=net, filter_size=3, num_filters=384, stride=1, padding=1) # conv4 net = img_conv_layer( - input=net, filter_size=3, num_filters=384, stride=1, padding=1, groups=1) + input=net, filter_size=3, num_filters=384, stride=1, padding=1, groups=gp) # conv5 net = img_conv_layer( - input=net, filter_size=3, num_filters=256, stride=1, padding=1, groups=1) + input=net, filter_size=3, num_filters=256, stride=1, padding=1, groups=gp) net = img_pool_layer(input=net, pool_size=3, stride=2) net = fc_layer( @@ -59,6 +69,9 @@ net = fc_layer( layer_attr=ExtraAttr(drop_rate=0.5)) net = fc_layer(input=net, size=1000, act=SoftmaxActivation()) -lab = data_layer('label', num_class) -loss = cross_entropy(input=net, label=lab) -outputs(loss) +if is_infer: + outputs(net) +else: + lab = data_layer('label', num_class) + loss = cross_entropy(input=net, label=lab) + outputs(loss) diff --git a/benchmark/paddle/image/googlenet.py b/benchmark/paddle/image/googlenet.py index 7059c13bd2..2a850ccb7f 100644 --- a/benchmark/paddle/image/googlenet.py +++ b/benchmark/paddle/image/googlenet.py @@ -7,13 +7,15 @@ num_class = 1000 batch_size = get_config_arg('batch_size', int, 128) use_gpu = get_config_arg('use_gpu', bool, True) is_infer = get_config_arg("is_infer", bool, False) +num_samples = get_config_arg('num_samples', int, 2560) args = { 'height': height, 'width': width, 'color': True, 'num_class': num_class, - 'is_infer': is_infer + 'is_infer': is_infer, + 'num_samples': num_samples } define_py_data_sources2( "train.list" if not is_infer else None, diff --git a/benchmark/paddle/image/provider.py b/benchmark/paddle/image/provider.py index 927b175994..1018ec9ce1 100644 --- a/benchmark/paddle/image/provider.py +++ b/benchmark/paddle/image/provider.py @@ -14,6 +14,7 @@ def initHook(settings, height, width, color, num_class, **kwargs): else: settings.data_size = settings.height * settings.width settings.is_infer 
= kwargs.get('is_infer', False) + settings.num_samples = kwargs.get('num_samples', 2560) if settings.is_infer: settings.slots = [dense_vector(settings.data_size)] else: @@ -23,7 +24,7 @@ def initHook(settings, height, width, color, num_class, **kwargs): @provider( init_hook=initHook, min_pool_size=-1, cache=CacheType.CACHE_PASS_IN_MEM) def process(settings, file_list): - for i in xrange(2560 if settings.is_infer else 1024): + for i in xrange(settings.num_samples): img = np.random.rand(1, settings.data_size).reshape(-1, 1).flatten() if settings.is_infer: yield img.astype('float32') diff --git a/benchmark/paddle/image/resnet.py b/benchmark/paddle/image/resnet.py index 4a14363ff1..2846e4763f 100644 --- a/benchmark/paddle/image/resnet.py +++ b/benchmark/paddle/image/resnet.py @@ -7,13 +7,15 @@ num_class = 1000 batch_size = get_config_arg('batch_size', int, 64) layer_num = get_config_arg("layer_num", int, 50) is_infer = get_config_arg("is_infer", bool, False) +num_samples = get_config_arg('num_samples', int, 2560) args = { 'height': height, 'width': width, 'color': True, 'num_class': num_class, - 'is_infer': is_infer + 'is_infer': is_infer, + 'num_samples': num_samples } define_py_data_sources2( "train.list" if not is_infer else None, diff --git a/benchmark/paddle/image/run_mkldnn_infer.sh b/benchmark/paddle/image/run_mkl_infer.sh similarity index 95% rename from benchmark/paddle/image/run_mkldnn_infer.sh rename to benchmark/paddle/image/run_mkl_infer.sh index d795bcab1b..62c9bf6efd 100755 --- a/benchmark/paddle/image/run_mkldnn_infer.sh +++ b/benchmark/paddle/image/run_mkl_infer.sh @@ -37,7 +37,7 @@ function infer() { --trainer_count=1 \ --num_passes=1 \ --save_dir="models/${topology}-${layer_num}" \ - --config_args="batch_size=128,layer_num=${layer_num}" \ + --config_args="batch_size=128,layer_num=${layer_num},num_samples=256" \ > /dev/null 2>&1 echo "Done" fi @@ -79,8 +79,9 @@ fi # inference benchmark for use_mkldnn in True False; do for batchsize in 1 2 4 8 16; do - infer googlenet v1 $batchsize $use_mkldnn - infer resnet 50 $batchsize $use_mkldnn infer vgg 19 $batchsize $use_mkldnn + infer resnet 50 $batchsize $use_mkldnn + infer googlenet v1 $batchsize $use_mkldnn + infer alexnet 2 $batchsize $use_mkldnn done done diff --git a/benchmark/paddle/image/run_mkldnn_train.sh b/benchmark/paddle/image/run_mkl_train.sh similarity index 83% rename from benchmark/paddle/image/run_mkldnn_train.sh rename to benchmark/paddle/image/run_mkl_train.sh index 320206239a..03d2d378fb 100755 --- a/benchmark/paddle/image/run_mkldnn_train.sh +++ b/benchmark/paddle/image/run_mkl_train.sh @@ -28,6 +28,10 @@ function train() { --test_period=100 \ --config_args=$args \ 2>&1 | tee ${log} + + avg_time=`tail ${log} -n 1 | awk -F ' ' '{print $8}' | sed 's/avg=//'` + fps=`awk 'BEGIN{printf "%.2f",('$bs' / '$avg_time' * 1000)}'` + echo "FPS: $fps images/sec" 2>&1 | tee -a ${log} } if [ ! 
-f "train.list" ]; then @@ -43,5 +47,6 @@ for use_mkldnn in True False; do train vgg 19 $batchsize $use_mkldnn train resnet 50 $batchsize $use_mkldnn train googlenet v1 $batchsize $use_mkldnn + train alexnet 2 $batchsize $use_mkldnn done done diff --git a/benchmark/paddle/image/run_openblas_infer.sh b/benchmark/paddle/image/run_openblas_infer.sh new file mode 100755 index 0000000000..da034f3b9d --- /dev/null +++ b/benchmark/paddle/image/run_openblas_infer.sh @@ -0,0 +1,64 @@ +set -e + +function clock_to_seconds() { + hours=`echo $1 | awk -F ':' '{print $1}'` + mins=`echo $1 | awk -F ':' '{print $2}'` + secs=`echo $1 | awk -F ':' '{print $3}'` + echo `awk 'BEGIN{printf "%.2f",('$secs' + '$mins' * 60 + '$hours' * 3600)}'` +} + +function infer() { + unset OMP_NUM_THREADS MKL_NUM_THREADS OMP_DYNAMIC KMP_AFFINITY + topology=$1 + layer_num=$2 + bs=$3 + thread=`nproc` + if [ $thread -gt $bs ]; then + thread=$bs + fi + log="logs/infer-${topology}-${layer_num}-${thread}openblas-${bs}.log" + + models_in="models/${topology}-${layer_num}/pass-00000/" + if [ ! -d $models_in ]; then + echo "./run_mkl_infer.sh to save the model first" + exit 0 + fi + log_period=$((32 / bs)) + paddle train --job=test \ + --config="${topology}.py" \ + --use_mkldnn=False \ + --use_gpu=False \ + --trainer_count=$thread \ + --log_period=$log_period \ + --config_args="batch_size=${bs},layer_num=${layer_num},is_infer=True,num_samples=256" \ + --init_model_path=$models_in \ + 2>&1 | tee ${log} + + # calculate the last 5 logs period time of 160(=32*5) samples, + # the time before are burning time. + start=`tail ${log} -n 7 | head -n 1 | awk -F ' ' '{print $2}' | xargs` + end=`tail ${log} -n 2 | head -n 1 | awk -F ' ' '{print $2}' | xargs` + start_sec=`clock_to_seconds $start` + end_sec=`clock_to_seconds $end` + fps=`awk 'BEGIN{printf "%.2f",(160 / ('$end_sec' - '$start_sec'))}'` + echo "Last 160 samples start: ${start}(${start_sec} sec), end: ${end}(${end_sec} sec;" >> ${log} + echo "FPS: $fps images/sec" 2>&1 | tee -a ${log} +} + +if [ ! -f "train.list" ]; then + echo " " > train.list +fi +if [ ! -f "test.list" ]; then + echo " " > test.list +fi +if [ ! -d "logs" ]; then + mkdir logs +fi + +# inference benchmark +for batchsize in 1 2 4 8 16; do + infer vgg 19 $batchsize + infer resnet 50 $batchsize + infer googlenet v1 $batchsize + infer alexnet 2 $batchsize +done diff --git a/benchmark/paddle/image/run_openblas_train.sh b/benchmark/paddle/image/run_openblas_train.sh new file mode 100755 index 0000000000..e9df83fee2 --- /dev/null +++ b/benchmark/paddle/image/run_openblas_train.sh @@ -0,0 +1,41 @@ +set -e + +function train() { + unset OMP_NUM_THREADS MKL_NUM_THREADS OMP_DYNAMIC KMP_AFFINITY + topology=$1 + layer_num=$2 + bs=$3 + thread=`nproc` + # each trainer_count use only 1 core to avoid conflict + log="logs/train-${topology}-${layer_num}-${thread}openblas-${bs}.log" + args="batch_size=${bs},layer_num=${layer_num}" + config="${topology}.py" + paddle train --job=time \ + --config=$config \ + --use_mkldnn=False \ + --use_gpu=False \ + --trainer_count=$thread \ + --log_period=3 \ + --test_period=30 \ + --config_args=$args \ + 2>&1 | tee ${log} + + avg_time=`tail ${log} -n 1 | awk -F ' ' '{print $8}' | sed 's/avg=//'` + fps=`awk 'BEGIN{printf "%.2f",('$bs' / '$avg_time' * 1000)}'` + echo "FPS: $fps images/sec" 2>&1 | tee -a ${log} +} + +if [ ! -f "train.list" ]; then + echo " " > train.list +fi +if [ ! 
-d "logs" ]; then + mkdir logs +fi + +# training benchmark +for batchsize in 64 128 256; do + train vgg 19 $batchsize + train resnet 50 $batchsize + train googlenet v1 $batchsize + train alexnet 2 $batchsize +done diff --git a/benchmark/paddle/image/vgg.py b/benchmark/paddle/image/vgg.py index 8d0a1e97a4..ca0a6798fb 100644 --- a/benchmark/paddle/image/vgg.py +++ b/benchmark/paddle/image/vgg.py @@ -7,13 +7,15 @@ num_class = 1000 batch_size = get_config_arg('batch_size', int, 64) layer_num = get_config_arg('layer_num', int, 19) is_infer = get_config_arg("is_infer", bool, False) +num_samples = get_config_arg('num_samples', int, 2560) args = { 'height': height, 'width': width, 'color': True, 'num_class': num_class, - 'is_infer': is_infer + 'is_infer': is_infer, + 'num_samples': num_samples } define_py_data_sources2( "train.list" if not is_infer else None, diff --git a/cmake/cblas.cmake b/cmake/cblas.cmake index 13294c0548..6320b17520 100644 --- a/cmake/cblas.cmake +++ b/cmake/cblas.cmake @@ -3,7 +3,7 @@ # It will search MKLML, atlas, OpenBlas, reference-cblas in order. # # If any cblas implementation found, the following variable will be set. -# CBLAS_PROVIDER # one of MKLML, ATLAS, OPENBLAS, REFERENCE +# CBLAS_PROVIDER # one of MKLML, OPENBLAS, REFERENCE # CBLAS_INC_DIR # the include directory for cblas. # CBLAS_LIBS # a list of libraries should be linked by paddle. # # Each library should be full path to object file. @@ -25,42 +25,6 @@ if(WITH_MKLML AND MKLML_INC_DIR AND MKLML_LIB) return() endif() -## Then find atlas. -set(ATLAS_ROOT $ENV{ATLAS_ROOT} CACHE PATH "Folder contains Atlas") -set(ATLAS_INCLUDE_SEARCH_PATHS - ${ATLAS_ROOT}/include - /usr/include - /usr/include/atlas) -set(ATLAS_LIB_SEARCH_PATHS - ${ATLAS_ROOT}/lib - /usr/lib - /usr/lib/blas/atlas - /usr/lib/atlas - /usr/lib/atlas-base # special for ubuntu 14.04. - ) -find_path(ATLAS_INC_DIR NAMES cblas.h - PATHS ${ATLAS_INCLUDE_SEARCH_PATHS}) -find_path(ATLAS_CLAPACK_INC_DIR NAMES clapack.h - PATHS ${ATLAS_INCLUDE_SEARCH_PATHS}) -find_library(ATLAS_CBLAS_LIB NAMES cblas libcblas.so.3 - PATHS ${ATLAS_LIB_SEARCH_PATHS}) -find_library(ATLAS_CLAPACK_LIB NAMES lapack_atlas liblapack_atlas.so.3 - PATHS ${ATLAS_LIB_SEARCH_PATHS}) - -if(ATLAS_CLAPACK_INC_DIR AND ATLAS_INC_DIR AND ATLAS_CBLAS_LIB AND ATLAS_CLAPACK_LIB) - set(CBLAS_FOUND ON) - set(CBLAS_PROVIDER ATLAS) - set(CBLAS_INC_DIR ${ATLAS_INC_DIR} ${ATLAS_CLAPACK_INC_DIR}) - set(CBLAS_LIBRARIES ${ATLAS_CLAPACK_LIB} ${ATLAS_CBLAS_LIB}) - - add_definitions(-DPADDLE_USE_ATLAS) - add_definitions(-DLAPACK_FOUND) - - message(STATUS "Found ATLAS (include: ${ATLAS_INC_DIR}, library: ${CBLAS_LIBRARIES})") - message(STATUS "Found lapack in ATLAS (include: ${ATLAS_CLAPACK_INC_DIR})") - return() -endif() - ## Then find openblas. 
set(OPENBLAS_ROOT $ENV{OPENBLAS_ROOT} CACHE PATH "Folder contains Openblas") set(OPENBLAS_INCLUDE_SEARCH_PATHS diff --git a/cmake/external/protobuf.cmake b/cmake/external/protobuf.cmake index fab2af362b..ff5855052d 100644 --- a/cmake/external/protobuf.cmake +++ b/cmake/external/protobuf.cmake @@ -253,9 +253,9 @@ IF(NOT PROTOBUF_FOUND) IF(WITH_C_API) INSTALL(DIRECTORY ${PROTOBUF_INCLUDE_DIR} DESTINATION third_party/protobuf) IF(ANDROID) - INSTALL(FILES ${PROTOBUF_LIBRARY} DESTINATION third_party/protobuf/lib/${ANDROID_ABI}) + INSTALL(FILES ${PROTOBUF_LITE_LIBRARY} DESTINATION third_party/protobuf/lib/${ANDROID_ABI}) ELSE() - INSTALL(FILES ${PROTOBUF_LIBRARY} DESTINATION third_party/protobuf/lib) + INSTALL(FILES ${PROTOBUF_LITE_LIBRARY} DESTINATION third_party/protobuf/lib) ENDIF() ENDIF() diff --git a/doc/api/index_cn.rst b/doc/api/index_cn.rst index 9be0b370ee..84f9097a6c 100644 --- a/doc/api/index_cn.rst +++ b/doc/api/index_cn.rst @@ -7,3 +7,4 @@ API 模型配置 数据访问 训练与应用 + v2/fluid.rst diff --git a/doc/api/v2/config/layer.rst b/doc/api/v2/config/layer.rst index c3f9c18d06..d81481ca81 100644 --- a/doc/api/v2/config/layer.rst +++ b/doc/api/v2/config/layer.rst @@ -467,7 +467,7 @@ lambda_cost :noindex: square_error_cost --------- +----------------- .. autoclass:: paddle.v2.layer.square_error_cost :noindex: @@ -533,7 +533,7 @@ Miscs ===== dropout --------------- +-------- .. autoclass:: paddle.v2.layer.dropout :noindex: diff --git a/doc/api/v2/fluid/layers.rst b/doc/api/v2/fluid/layers.rst index 89e5fec13b..939731c0f3 100644 --- a/doc/api/v2/fluid/layers.rst +++ b/doc/api/v2/fluid/layers.rst @@ -19,17 +19,17 @@ dynamic_lstm :noindex: data ---------- +---- .. autofunction:: paddle.v2.fluid.layers.data :noindex: mean ---------- +---- .. autofunction:: paddle.v2.fluid.layers.mean :noindex: mul ---------- +--- .. autofunction:: paddle.v2.fluid.layers.mul :noindex: @@ -45,13 +45,13 @@ elementwise_div dropout ---------- +------- .. autofunction:: paddle.v2.fluid.layers.dropout :noindex: reshape ---------- +-------- .. autofunction:: paddle.v2.fluid.layers.reshape :noindex: @@ -81,67 +81,67 @@ transpose sigmoid_cross_entropy_with_logits ---------- +--------------------------------- .. autofunction:: paddle.v2.fluid.layers.esigmoid_cross_entropy_with_logits :noindex: cast ---------- +---- .. autofunction:: paddle.v2.fluid.layers.cast :noindex: concat ---------- +------- .. autofunction:: paddle.v2.fluid.layers.concat :noindex: sums ---------- +---- .. autofunction:: paddle.v2.fluid.layers.sums :noindex: linear_chain_crf ---------- +---------------- .. autofunction:: paddle.v2.fluid.layers.linear_chain_crf :noindex: assign ---------- +------- .. autofunction:: paddle.v2.fluid.layers.embedding :noindex: split_lod_tensor ---------- +---------------- .. autofunction:: paddle.v2.fluid.layers.split_lod_tensor :noindex: merge_lod_tensor ---------- +---------------- .. autofunction:: paddle.v2.fluid.layers.merge_lod_tensor :noindex: cos_sim ---------- +-------- .. autofunction:: paddle.v2.fluid.layers.cos_sim :noindex: cross_entropy ---------- +------------- .. autofunction:: paddle.v2.fluid.layers.cross_entropy :noindex: square_error_cost ---------- +----------------- .. autofunction:: paddle.v2.fluid.layers.square_error_cost :noindex: @@ -153,74 +153,80 @@ accuracy sequence_conv ---------- +------------- .. autofunction:: paddle.v2.fluid.layers.sequence_conv :noindex: conv2d ---------- +------ .. autofunction:: paddle.v2.fluid.layers.conv2d :noindex: sequence_pool ---------- +------------- .. 
autofunction:: paddle.v2.fluid.layers.sequence_pool :noindex: +sequence_first_step +------------------- +.. autofunction:: paddle.v2.fluid.layers.sequence_first_step + :noindex: + + +sequence_last_step +------------------ +.. autofunction:: paddle.v2.fluid.layers.sequence_last_step + :noindex: + + pool2d ---------- +------ .. autofunction:: paddle.v2.fluid.layers.pool2d :noindex: batch_norm ---------- +---------- .. autofunction:: paddle.v2.fluid.layers.batch_norm :noindex: beam_search_decode ---------- +------------------ .. autofunction:: paddle.v2.fluid.layers.beam_search_decode :noindex: -lstm ---------- -.. autofunction:: paddle.v2.fluid.layers.lstm - :noindex: - - lod_rank_table ---------- +-------------- .. autofunction:: paddle.v2.fluid.layers.lod_rank_table :noindex: max_sequence_len ---------- +---------------- .. autofunction:: paddle.v2.fluid.layers.max_sequence_len :noindex: topk ---------- +----- .. autofunction:: paddle.v2.fluid.layers.topk :noindex: lod_tensor_to_array ---------- +------------------- .. autofunction:: paddle.v2.fluid.layers.lod_tensor_to_array :noindex: array_to_lod_tensor ---------- +------------------- .. autofunction:: paddle.v2.fluid.layers.array_to_lod_tensor :noindex: @@ -228,26 +234,26 @@ array_to_lod_tensor fill_constant ---------- +------------- .. autofunction:: paddle.v2.fluid.layers.fill_constant :noindex: fill_constant_batch_size_like ---------- +----------------------------- .. autofunction:: paddle.v2.fluid.layers.fill_constant_batch_size_like :noindex: ones ---------- +---- .. autofunction:: paddle.v2.fluid.layers.ones :noindex: zeros ---------- +----- .. autofunction:: paddle.v2.fluid.layers.zeros :noindex: @@ -259,14 +265,14 @@ increment array_write ---------- +----------- .. autofunction:: paddle.v2.fluid.layers.array_write :noindex: create_array ---------- +------------ .. autofunction:: paddle.v2.fluid.layers.create_array :noindex: @@ -278,25 +284,67 @@ less_than array_read ---------- +---------- .. autofunction:: paddle.v2.fluid.layers.array_read :noindex: shrink_memory ---------- +-------------- .. autofunction:: paddle.v2.fluid.layers.shrink_memory :noindex: array_length ---------- +------------- .. autofunction:: paddle.v2.fluid.layers.array_length :noindex: conv2d_transpose ---------- +---------------- .. autofunction:: paddle.v2.fluid.layers.conv2d_transpose :noindex: + +sequence_expand +--------------- +.. autofunction:: paddle.v2.fluid.layers.sequence_expand + :noindex: + + +lstm_unit +--------- +.. autofunction:: paddle.v2.fluid.layers.lstm_unit + :noindex: + + +sequence_softmax +---------------- +.. autofunction:: paddle.v2.fluid.layers.sequence_softmax + :noindex: + + +reduce_sum +---------- +.. autofunction:: paddle.v2.fluid.layers.reduce_sum + :noindex: + + +reduce_mean +----------- +.. autofunction:: paddle.v2.fluid.layers.reduce_mean + :noindex: + + +reduce_max +---------- +.. autofunction:: paddle.v2.fluid.layers.reduce_max + :noindex: + + +reduce_min +---------- +.. autofunction:: paddle.v2.fluid.layers.reduce_min + :noindex: + diff --git a/doc/api/v2/fluid/nets.rst b/doc/api/v2/fluid/nets.rst index 2c3d075422..b792efb71f 100644 --- a/doc/api/v2/fluid/nets.rst +++ b/doc/api/v2/fluid/nets.rst @@ -3,19 +3,19 @@ Nets =========== simple_img_conv_pool ------------ +-------------------- .. autofunction:: paddle.v2.fluid.nets.simple_img_conv_pool :noindex: img_conv_group ------------ +--------------- .. autofunction:: paddle.v2.fluid.nets.img_conv_group :noindex: sequence_conv_pool ------------ +------------------ .. 
autofunction:: paddle.v2.fluid.nets.sequence_conv_pool :noindex: diff --git a/doc/api/v2/fluid/optimizer.rst b/doc/api/v2/fluid/optimizer.rst index 233762fcdf..19b4940f08 100644 --- a/doc/api/v2/fluid/optimizer.rst +++ b/doc/api/v2/fluid/optimizer.rst @@ -18,7 +18,7 @@ SGDOptimizer MomentumOptimizer ------------ +----------------- .. automodule:: paddle.v2.fluid.optimizer :members: MomentumOptimizer :noindex: @@ -26,14 +26,14 @@ MomentumOptimizer AdagradOptimizer ------------ +---------------- .. automodule:: paddle.v2.fluid.optimizer :members: AdagradOptimizer :noindex: AdamOptimizer ------------ +------------- .. automodule:: paddle.v2.fluid.optimizer :members: AdamOptimizer :noindex: @@ -47,7 +47,7 @@ AdamaxOptimizer DecayedAdagradOptimizer ------------ +----------------------- .. automodule:: paddle.v2.fluid.optimizer :members: DecayedAdagradOptimizer :noindex: diff --git a/doc/api/v2/fluid/regularizer.rst b/doc/api/v2/fluid/regularizer.rst index 3af2b07d2a..868e225ed3 100644 --- a/doc/api/v2/fluid/regularizer.rst +++ b/doc/api/v2/fluid/regularizer.rst @@ -3,14 +3,14 @@ Regularizer =========== WeightDecayRegularizer ------------ +---------------------- .. automodule:: paddle.v2.fluid.regularizer :members: WeightDecayRegularizer :noindex: L2DecayRegularizer ------------ +------------------ .. automodule:: paddle.v2.fluid.regularizer :members: L2DecayRegularizer :noindex: @@ -18,7 +18,7 @@ L2DecayRegularizer L1DecayRegularizer ------------ +------------------- .. automodule:: paddle.v2.fluid.regularizer :members: L1DecayRegularizer diff --git a/doc/design/block.md b/doc/design/block.md index 4066122c0e..fab7f2dc48 100644 --- a/doc/design/block.md +++ b/doc/design/block.md @@ -291,10 +291,10 @@ public: } void Run(const framework::Scope& scope, - const platform::DeviceContext& dev_ctx) const override { + const platform::Place& place) const override { PADDLE_ENFORCE(symbols_ready_, "operators and variables should be created first."); for (auto& op : runtime_table_.ops()) { - op->Run(scope, dev_ctx); + op->Run(scope, place); } } diff --git a/doc/design/executor.md b/doc/design/executor.md index b5fb6c5c3c..2d4b371cc5 100644 --- a/doc/design/executor.md +++ b/doc/design/executor.md @@ -1,23 +1,29 @@ # Executor Design Doc ## Motivation +In [fluid](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/fluid.md), we encourage the user to use deep learning programming paradigms to describe the training process. When the user-written Python program is executed, it will first create a protobuf message +[`ProgramDesc`](https://github.com/PaddlePaddle/Paddle/blob/a91efdde6910ce92a78e3aa7157412c4c88d9ee8/paddle/framework/framework.proto#L145) that describes the process and is conceptually like an [abstract syntax tree](https://en.wikipedia.org/wiki/Abstract_syntax_tree). -We use executor to do the runtime evaluation of a `ProgramDesc`. +The executor runs the `ProgramDesc` like an interpreter. `ProgramDesc` contains the intrinsics (operators in this case) and the variables that will be used; the executor explicitly executes this stored, precompiled code. ## Overview -An executor takes a `ProgramDesc`, a `block_id` and a `Scope`. The `ProgramDesc` is a list of blocks and each block contains the protobuf definition of all the parameters and operators. The `block_id` specifies the entrance block. And the `Scope` is the container of all the variable instance, which is persistent throughout different runs. +An executor takes a `ProgramDesc`, a `block_id` and a `Scope`.
The `ProgramDesc` is a list of blocks and each block contains the protobuf definition of all the parameters and operators in the block. The `block_id` specifies the entrance block. And the `Scope` is the container of all the variable instances, which is persistent throughout different runs. -### What does executor do? +## Executor -It evaluates all the operators in the `block_id`th block of a `ProgramDesc`. +The `Executor` explicitly executes all the intrinsics (operators here) in the `block_id`th block of a `ProgramDesc`. Essentially, it instantiates Variables and Operators, then runs all the operators in sequence one-by-one. +It is very similar to how a stack frame is pushed when entering a block; correspondingly, it cleans up all the temporary variables when a mini-batch is finished. It does not, however, have the stack-frame pop process. -### What does executor NOT do? +### The interface +```c++ + Executor(places); +``` +An executor does not own any computing resources; a user can only construct an executor with the specified places. -It does not do runtime optimization, meaning intelligently parse the dependency of each op a choose which one to be run and in which order they should be run. +### Running an Executor -It does not do graph partitioning, meaning dividing the `ProgramDesc` into several small pieces and executing them on different devices. - -## Implementation - -`Executor` evaluates a `ProgramDesc`. Essentially, it instantiates Variables and Operators, then run all the operators in sequence. [[code]](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/executor.cc) +``` + void Run(ProgramDesc, Scope, block_id, create_local_scope); +``` +An `Executor` only provides a unified way to execute a `ProgramDesc`. The `ProgramDesc` is the target that will be executed, the `Scope` specifies the variable container, the `block_id` indicates the entrance block, and `create_local_scope` is a boolean that states whether the temporary variables will be destroyed after the execution finishes. diff --git a/doc/design/fluid-compiler.graffle b/doc/design/fluid-compiler.graffle new file mode 100644 index 0000000000..c933df2cb8 Binary files /dev/null and b/doc/design/fluid-compiler.graffle differ diff --git a/doc/design/fluid-compiler.png b/doc/design/fluid-compiler.png new file mode 100644 index 0000000000..1b0ffed203 Binary files /dev/null and b/doc/design/fluid-compiler.png differ diff --git a/doc/design/fluid.md b/doc/design/fluid.md new file mode 100644 index 0000000000..585dc8ef39 --- /dev/null +++ b/doc/design/fluid.md @@ -0,0 +1,122 @@ +# Design Doc: PaddlePaddle Fluid + +## Why Fluid + +When Baidu developed PaddlePaddle in 2013, the only well-known open source deep learning system at the time was Caffe. However, when PaddlePaddle was open-sourced in 2016, many other choices were available. There was a challenge -- what is the need for open sourcing yet another deep learning framework? + +Fluid is the answer. Fluid is similar to PyTorch and TensorFlow Eager Execution, which describe the "process" of training or inference rather than a model. In fact, in PyTorch, TensorFlow Eager Execution and Fluid, there is no concept of a model at all. The details are covered in the sections below. Fluid is currently more extreme in the above mentioned idea than PyTorch and Eager Execution, and we are trying to push Fluid towards the direction of a compiler and a new programming language for deep learning.
+ +## The Evolution of Deep Learning Systems + +Deep learning infrastructure is one of the fastest evolving technologies. Within four years, there have already been three generations of technologies invented. + +| Existed since | model as sequence of layers | model as graph of operators | No model | +|--|--|--|--| +| 2013 | Caffe, Theano, Torch, PaddlePaddle | | | +| 2015 | | TensorFlow, MxNet, Caffe2, ONNX, n-graph | | +| 2016 | | | PyTorch, TensorFlow Eager Execution, PaddlePaddle Fluid | + +From the above table, we see that the deep learning technology is evolving towards getting rid of the concept of a model. To understand the reasons behind this direction, a comparison of the *programming paradigms*, or the ways to program deep learning applications using these systems, would be helpful. The following section goes over these. + +## Deep Learning Programming Paradigms + +With the systems listed as the first or second generation, e.g., Caffe or TensorFlow, an AI application training program looks like the following: + +```python +x = layer.data("image") +l = layer.data("label") +f = layer.fc(x, W) +s = layer.softmax(f) +c = layer.mse(l, s) + +for i in xrange(1000): # train for 1000 iterations + m = read_minibatch() + forward({input=x, data=m}, minimize=c) + backward(...) + +print W # print the trained model parameters. +``` + +The above program includes two parts: + +1. The first part describes the model, and +2. The second part describes the training process (or inference process) for the model. + +This paradigm has a well-known problem that limits the productivity of programmers. If the programmer makes a mistake in configuring the model, the error messages won't show up until the second part is executed and `forward` and `backward` propagations are performed. This makes it difficult for the programmer to debug and locate a mistake that is located blocks away from the actual error prompt. + +This problem of being hard to debug and re-iterate fast on a program is the primary reason that programmers, in general, prefer PyTorch over the older systems. Using PyTorch, we would write the above program as follows: + +```python +W = tensor(...) + +for i in xrange(1000): # train for 1000 iterations + m = read_minibatch() + x = m["image"] + l = m["label"] + f = layer.fc(x, W) + s = layer.softmax(f) + c = layer.mse(l, s) + backward() + +print W # print the trained model parameters. +``` + +We can see that the main difference is moving the model configuration part (the first step) into the training loop. This change would allow mistakes in the model configuration to be reported where they actually appear in the programming block. This change also represents the model, or rather its forward pass, better by keeping the configuration process in the training loop. + +## Describe Arbitrary Models for the Future + +Describing the process instead of the model also brings Fluid the flexibility to define different non-standard models that haven't been invented yet. + +As we write out the program for the process, we can write an RNN as a loop, instead of an RNN as a layer or as an operator.
A PyTorch example would look like the following: + +```python +for i in xrange(1000): + m = read_minibatch() + x = m["sentence"] + for t in xrange(x.len()): + h[t] = the_step(x[t]) +``` + +With Fluid, the training loop and the RNN in the above program are not really Python loops, but just a "loop structure" provided by Fluid and implemented in C++ as the following: + +```python +train_loop = layers.While(cond) +with train_loop.block(): + m = read_minibatch() + x = m["sentence"] + rnn = layers.While(...) + with rnn.block(): + h[t] = the_step(input[t]) +``` + +An actual Fluid example is described [here](https://github.com/PaddlePaddle/Paddle/blob/a91efdde6910ce92a78e3aa7157412c4c88d9ee8/python/paddle/v2/fluid/tests/test_while_op.py#L36-L44). + +From the example, the Fluid programs look very similar to their PyTorch equivalent programs, except that Fluid's loop structure, wrapped with Python's `with` statement, could run much faster than just a Python loop. + +We have more examples of the [`if-then-else`](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/if_else_op.md) structure of Fluid. + +## Turing Completeness + +In computability theory, a system of data-manipulation rules, such as a programming language, is said to be Turing complete if it can be used to simulate any Turing machine. For a programming language, if it provides if-then-else and loop, it is Turing complete. From the above examples, Fluid seems to be Turing complete; however, it is worth noting that there is a slight difference between the `if-then-else` of Fluid and that of a programming language. The difference is that the former runs both of its branches and splits the input mini-batch into two -- one for the True condition and another for the False condition. It has not been researched in depth whether this is equivalent to the `if-then-else` in programming languages that makes them Turing-complete. Based on a conversation with [Yuang Yu](https://research.google.com/pubs/104812.html), it seems to be the case, but this needs to be looked into in depth. + +## The Execution of a Fluid Program + +There are two ways to execute a Fluid program. When a program is executed, it creates a protobuf message [`ProgramDesc`](https://github.com/PaddlePaddle/Paddle/blob/a91efdde6910ce92a78e3aa7157412c4c88d9ee8/paddle/framework/framework.proto#L145) that describes the process and is conceptually like an [abstract syntax tree](https://en.wikipedia.org/wiki/Abstract_syntax_tree). + +There is a C++ class [`Executor`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/executor.h), which runs a `ProgramDesc`, similar to how an interpreter runs a Python program. + +Fluid is moving towards the direction of a compiler, which is explained in more detail later in this article. + +## Backward Compatibility of Fluid + +Despite all the advantages from the removal of the concept of a *model*, hardware manufacturers might still prefer the existence of the concept of a model, so it would be easier for them to support multiple frameworks all at once and could run a trained model during inference. For example, Nervana, a startup company acquired by Intel, has been working on an XPU that reads models in the format known as [n-graph](https://github.com/NervanaSystems/ngraph). Similarly, [Movidius](https://www.movidius.com/) is producing a mobile deep learning chip that reads and runs graphs of operators. The well-known [ONNX](https://github.com/onnx/onnx) is also a file format of graphs of operators.
+ +For Fluid, we can write a converter that extracts the parts in the `ProgramDesc` protobuf message, converts them into a graph of operators, and exports the graph into the ONNX or n-graph format. + +## Towards a Deep Learning Language and the Compiler + +We can change the `if-then-else` and loop structure a little bit in the above Fluid example programs, to make it into a new programming language, different from Python. + +Even if we do not invent a new language, as long as we get the `ProgramDesc` message filled in, we can write a transpiler that translates each invocation of an operator into a C++ call to a kernel function of that operator. For example, a transpiler that weaves the CUDA kernels outputs an NVIDIA-friendly C++ program, which can be built using `nvcc`. Another transpiler could generate MKL-friendly code that should be built using `icc` from Intel. More interestingly, we can translate a Fluid program into its distributed version of two `ProgramDesc` messages, one for running on the trainer process, and the other one for the parameter server. For more details of the last example, the [concurrent programming design](concurrent_programming.md) document would be a good pointer. The following figure explains the proposed two-stage process: + +![](fluid-compiler.png) diff --git a/doc/design/images/multigpu_allreduce.graffle b/doc/design/images/multigpu_allreduce.graffle new file mode 100644 index 0000000000..cb5bc420ce Binary files /dev/null and b/doc/design/images/multigpu_allreduce.graffle differ diff --git a/doc/design/images/multigpu_allreduce.png b/doc/design/images/multigpu_allreduce.png new file mode 100644 index 0000000000..87a1b3e8f6 Binary files /dev/null and b/doc/design/images/multigpu_allreduce.png differ diff --git a/doc/design/images/multigpu_before_convert.graffle b/doc/design/images/multigpu_before_convert.graffle new file mode 100644 index 0000000000..6c35ab1b21 Binary files /dev/null and b/doc/design/images/multigpu_before_convert.graffle differ diff --git a/doc/design/images/multigpu_before_convert.png b/doc/design/images/multigpu_before_convert.png new file mode 100644 index 0000000000..9c8f771116 Binary files /dev/null and b/doc/design/images/multigpu_before_convert.png differ diff --git a/doc/design/kernel_hint_design.md b/doc/design/kernel_hint_design.md new file mode 100644 index 0000000000..a54b7da045 --- /dev/null +++ b/doc/design/kernel_hint_design.md @@ -0,0 +1,57 @@ +## Problem +In PaddlePaddle's [Design](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/switch_kernel.md), one Operator may have multiple kernels. Users may have some personal preference to choose a certain type of kernel for an operator, such as `force_cpu` to choose a CPU kernel or `use_cudnn` to choose a CUDNN kernel, so we need to provide a way for users to do this. + +In the current design, we use KernelType to describe one kernel. + +```cpp +struct KernelType { + Place place_; + DataType data_type_; + LayoutType layout_; +}; +``` + `place_`, `data_type_` and `layout_` can be obtained from the input tensors of the operator; `GetActualKernelType(inputs)` uses the inputs to infer the proper kernel key that fits the incoming data, but users cannot configure it directly. + +The [design](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/switch_kernel.md) also provides a virtual method `GetExpectedKernelType` that users can overload to choose the KernelType they want to use.
+ +So we should send the information the user defines in the proto to `GetExpectedKernelType` to choose a kernel. + +The problem is, how should we define and send the information for `GetExpectedKernelType` to use? + +## Solution + +### Potential choice +1. Do nothing: let the user add the information they want to the operator's attributes and read it inside `GetExpectedKernelType`. This can work properly, but there is a little problem that users may define many kinds of hints for the same purpose, such as `force_cpu`, `use_cpu`, `cpu_kernel` to choose a CPU kernel, and `use_cudnn`, `force_cudnn`, `cudnn_kernel` to choose a CUDNN kernel. + +2. Pre-define all the needed options and use a single attribute key such as `kernel_hint` for the user. This is not so flexible if the user wants to define more kinds of hints. + +### Final choice +To provide enough flexibility while avoiding confusing definitions, we can define some global constants for these attribute names, such as `force_cpu`, `use_cudnn`, `use_mkldnn` for a user to choose. + +In C++: + +```cpp +const std::string kForceCPU = "force_cpu"; +const std::string kUseCUDNN = "use_cudnn"; +const std::string kUseMKLDNN = "use_mkldnn"; + +KernelType GetExpectedKernelType() { + if (Attr<bool>(kForceCPU)) { + return KernelType(CPUPlace, ...) + } else { + ... + } +} +``` + +In Python code: + +```python +FORCE_CPU = core.kForceCPU() + +def xx_layer(..., force_cpu=False): + layer_helper = LayerHelper(...) + layer_helper.append_op( + type="xx", + attr={FORCE_CPU: force_cpu}) +``` diff --git a/doc/design/mkldnn/image/engine.png b/doc/design/mkl/image/engine.png similarity index 100% rename from doc/design/mkldnn/image/engine.png rename to doc/design/mkl/image/engine.png diff --git a/doc/design/mkldnn/image/gradients.png b/doc/design/mkl/image/gradients.png similarity index 100% rename from doc/design/mkldnn/image/gradients.png rename to doc/design/mkl/image/gradients.png diff --git a/doc/design/mkldnn/image/layers.png b/doc/design/mkl/image/layers.png similarity index 100% rename from doc/design/mkldnn/image/layers.png rename to doc/design/mkl/image/layers.png diff --git a/doc/design/mkldnn/image/matrix.png b/doc/design/mkl/image/matrix.png similarity index 100% rename from doc/design/mkldnn/image/matrix.png rename to doc/design/mkl/image/matrix.png diff --git a/doc/design/mkldnn/image/overview.png b/doc/design/mkl/image/overview.png similarity index 100% rename from doc/design/mkldnn/image/overview.png rename to doc/design/mkl/image/overview.png diff --git a/doc/design/mkl/mkl_packed.md b/doc/design/mkl/mkl_packed.md new file mode 100644 index 0000000000..0123315ad4 --- /dev/null +++ b/doc/design/mkl/mkl_packed.md @@ -0,0 +1,108 @@ +# Intel® MKL Packed on PaddlePaddle: Design Doc + + +## Contents + +- [Overview](#overview) +- [Key Points](#key-points) + - [Background](#background) + - [Solution](#solution) +- [Actions](#actions) + - [CMake](#cmake) + - [Layers](#layers) + - [Unit Tests](#unit-tests) + - [Python API](#python-api) + - [Benchmarking](#benchmarking) + + +## Overview +We plan to integrate the GEMM Packed APIs\[[1](#references)\] introduced in Intel® MKL into PaddlePaddle, to take full advantage of Intel platforms and effectively improve PaddlePaddle's performance on Intel architectures. +The current optimization mainly targets the Recurrent Neural Network (RNN) related layers (including `RecurrentLayer`, `GatedRecurrentLayer` and `LstmLayer`), as well as the PaddlePaddle V1 API. + +## Key Points + +### Background +PaddlePaddle currently uses the [cblas_?gemm](https://software.intel.com/en-us/mkl-developer-reference-c-cblas-gemm) function from the Intel® MKL library, which itself converts the original data into an internal format better suited to Intel platforms before the computation. + +1.
Conversion overhead \ This data-format conversion (packing) is relatively time-consuming when the computation of the problem itself is small. For example, in the vanilla RNN part of DeepSpeech2 \[[2](#references)\], the matrix size is `batch_size * 2048`. +2. Redundant conversion \ In some existing situations (e.g. RNN), multiple calls to cblas_?gemm use the same original data, so the repeated packing of that data on every call becomes redundant. + +To minimize the packing cost of repeated cblas_?gemm calls, Intel® MKL introduces the following four APIs: + * [cblas_?gemm_alloc](https://software.intel.com/en-us/mkl-developer-reference-c-cblas-gemm-alloc) + * [cblas_?gemm_pack](https://software.intel.com/en-us/mkl-developer-reference-c-cblas-gemm-pack) + * [cblas_?gemm_compute](https://software.intel.com/en-us/mkl-developer-reference-c-cblas-gemm-compute) + * [cblas_?gemm_free](https://software.intel.com/en-us/mkl-developer-reference-c-cblas-gemm-free) + +By using these APIs, we can pack the original data once, then pass the data already converted to the packed format to the gemm_compute calls that reuse the same data, thus avoiding redundant packing. + +### Solution +In the RNN case, all time steps within one forward/backward pass share the same weight. When doing inference only, successive forward passes also use the same weight, so there is no need to repeatedly pack the weight at every time step of every forward pass. + +By using the newly introduced GEMM Packed APIs, we pack the weight once at layer initialization, reuse the converted weight in the forward and backward passes, and convert the new weight after each weight update for use in the next iteration. + +* Before the optimization, for a model with sequence length `T`, the number of conversions over `N` iterations is: - `inference`: `N * T` - `training`: `2 * N * T` +* After the optimization, for a model with the same settings, the number of conversions is reduced to: - `inference`: `1` - `training`: `2 * N` + +## Actions + +The related files to be added and the directory structure are as follows: + +```txt +PaddlePaddle/Paddle +├── ... +└── paddle/ + ├── ... + └── gserver/ + ├── ... + ├── layers/ + │ ├── ... + │ ├── MKLPackedRecurrentLayer.* + | ├── MKLPackedGatedRecurrentLayer.* + | ├── MKLPackedLstmLayer.* + | └── MKLPackedGemm.h + └── tests/ + ├── ... + └── test_MKLPacked.cpp +``` + +### CMake +In the corresponding `CMakeLists.txt`, whether the MKL Packed related functionality is enabled is decided by whether `WITH_MKL` is turned on. + +### Layers +All `MKLPacked*Layer`s inherit from PaddlePaddle's base class `Layer`, and a header file `MKLPackedGemm.h` is added, which wraps the related GEMM Packed APIs. + +### Unit Tests +We will add `test_MKLPacked.cpp` for testing the layers optimized with MKL Packed. +For every newly added RNN layer, we will compare the following two aspects: +1. The results of the optimized layer itself in sequence mode (`rnn_use_batch=false`) versus batch mode (`rnn_use_batch=true`). +2. The results of the optimized layer versus the corresponding original PaddlePaddle layer, in batch mode. + +### Python API +We plan to add a `use_mkl_packed` flag in `paddle/utils.Flags` for choosing whether to use this functionality; when compiled with `WITH_MKL=ON`, it defaults to `true`. + +Meanwhile, the `use_mkl_packed` option is added at the corresponding layers in `python/paddle/trainer/config_parser.py`, so that users can choose on the Python side whether to enable this functionality. + +A possible implementation: + +```python +use_mkl_packed = bool(int(g_command_config_args.get("use_mkl_packed", 0))) +if use_mkl_packed: + self.layer_type = mkl_packed_* +``` + +All related `layer_type`s will start with *mkl_packed_*; this is guaranteed when the `MKLPacked*Layer`s register their layers, so as to distinguish them. + + +### Benchmarking +Corresponding scripts will be added to test and compare the network performance before and after using the MKL Packed recurrent layers. + +## References +1. [Introducing the new Packed APIs for GEMM](https://software.intel.com/en-us/articles/introducing-the-new-packed-apis-for-gemm) +2. [DeepSpeech2 on PaddlePaddle](https://github.com/PaddlePaddle/DeepSpeech#deepspeech2-on-paddlepaddle) + diff --git a/doc/design/mkldnn/README.MD b/doc/design/mkl/mkldnn.md similarity index 99% rename from doc/design/mkldnn/README.MD rename to doc/design/mkl/mkldnn.md index 61d453de24..e2fe1e6b26 100644 --- a/doc/design/mkldnn/README.MD +++ b/doc/design/mkl/mkldnn.md @@ -208,4 +208,3 @@ if use_mkldnn However, in PaddlePaddle, neither the layers before the refactoring nor the ops after the refactoring want to know any information about the next layer/op. 4.
MKL-DNN's high-performance formats differ from PaddlePaddle's original `NCHW` (the cuDNN part of PaddlePaddle also uses `NCHW`, so this problem does not exist there). Therefore a conversion method needs to be introduced, and the format should only be converted when necessary, so as to better exploit MKL-DNN's performance. - diff --git a/doc/design/mkl/mkldnn_fluid.md b/doc/design/mkl/mkldnn_fluid.md new file mode 100644 index 0000000000..bef126f3f0 --- /dev/null +++ b/doc/design/mkl/mkldnn_fluid.md @@ -0,0 +1,149 @@ +# Design Doc: Add MKLDNN Kernel in Fluid Operator + +## Principles + +First of all, we should follow some basic principles like: +1. [How to write a new operator](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/howto/dev/new_op_en.md). We are trying to add a new kind of kernel into operators, so basically we should follow this doc. +2. [Supporting new Device/Library](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/support_new_device.md). Since MKLDNN is a new library to fluid, we should add `MKLDNNDeviceContext` and maybe `mkldnn_helper.h`, just like [cudnn_helper.h](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/platform/cudnn_helper.h). +3. [Switch Kernel](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/switch_kernel.md). Another important point is that we should ensure the data synchronization between different kernel types, which is discussed in this [topic](https://github.com/PaddlePaddle/Paddle/issues/6549). So basically we should override the `GetExpectedKernelType` and `trans` functions to support switching kernels. +4. [The Keys of Operator Kernel Type](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/operator_kernel_type.md). Kernel Type is a pivotal concept that records the `Place`, `Library`, `DataType` and `Layout`. + +## Solution + +In general, there are four steps to run an MKL-DNN primitive: +- Create a primitive descriptor that describes this operator +- Create the primitive itself from the primitive descriptor and the engine +- Create all the memory buffers that the primitive needs +- Launch a stream to execute the created primitive +More details can be found [here](http://01org.github.io/mkl-dnn). + +It's better to avoid reinitialization of primitives and memory handles in the first three stages in every iteration. \ +So we plan to create a map to record all the `primitive` and `memory` objects, which should not take too much memory as discussed [here](https://github.com/PaddlePaddle/Paddle/issues/6822). + +It's assumed that the following three conditions are satisfied: +1. There is a unique key for each operator instance. It may be the actual name of the `Output Tensor`. +2. The `Input Tensor` inside the `Compute` function is the one after conversion. +3. We can get the phase (e.g. `is_test`) inside the `Compute` function; otherwise we need to expose this attribute to the user. + +### Compute +The algorithm of `Compute` is described as follows; let's take conv as an example.
+ +```c++ + + PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()), "It must use CPUPlace."); + PADDLE_ENFORCE(platform::is_mkldnn_library(ctx.GetLibrary()), "It must use MKLDNN Library."); + + auto& dev_ctx = ctx.template device_context<platform::MKLDNNDeviceContext>(); + + // find the primitive by its unique key from the mkldnn context; + // the op_key should be a unique name of this op instance + auto& p = dev_ctx.findPrimitive(op_key + "_fwd"); + + // assuming the input tensor inside this compute function is the one after conversion; + // this point should be guaranteed by another mechanism + auto& i = dev_ctx.findMemory(op_key + "_input"); + + if (p == nullptr || i == nullptr || inputSizeChanged(p, i)) { + auto fwd_primitive_desc = createPrimitiveDesc(ctx); + auto* input = ctx.Input<Tensor>("Input"); + auto* filter = ctx.Input<Tensor>("Filter"); + auto* output = ctx.Output<Tensor>("Output"); + shared_ptr<mkldnn::memory> in(new mkldnn::memory(fwd_primitive_desc->src_primitive_desc(), input->data<T>())); + shared_ptr<mkldnn::memory> wgt(new mkldnn::memory(fwd_primitive_desc->weights_primitive_desc(), filter->data<T>())); + shared_ptr<mkldnn::memory> out(new mkldnn::memory(fwd_primitive_desc->dst_primitive_desc(), output->mutable_data<T>(ctx.GetPlace()))); + shared_ptr<mkldnn::conv_fwd> fwd_primitive(new mkldnn::conv_fwd(*fwd_primitive_desc, *in, *wgt, *out)); + + dev_ctx.addMemory(op_key+"_input", in); + dev_ctx.addMemory(op_key+"_output", out); + dev_ctx.addMemory(op_key+"_filter", wgt); + dev_ctx.addPrimitive(op_key+"_fwd", fwd_primitive); + dev_ctx.addPrimitiveDesc(op_key+"_fwd_PD", fwd_primitive_desc); + } + + p = dev_ctx.findPrimitive(op_key + "_fwd"); + + PADDLE_ENFORCE(p, "Should have forward Primitive"); + PADDLE_ENFORCE(dev_ctx.findMemory(op_key+"_input"), "Should have input memory"); + PADDLE_ENFORCE(dev_ctx.findMemory(op_key+"_output"), "Should have output memory"); + PADDLE_ENFORCE(dev_ctx.findMemory(op_key+"_filter"), "Should have filter memory"); + PADDLE_ENFORCE(dev_ctx.findPrimitiveDesc(op_key+"_fwd_PD"), "Should have forward PrimitiveDesc"); + dev_ctx.submit(p); + dev_ctx.execute(); // the conversion primitive should already be contained + +``` + +`createPrimitiveDesc` returns the primitive descriptor of this operator and would look like this: +```c++ + auto* input = ctx.Input<Tensor>("Input"); + auto* filter = ctx.Input<Tensor>("Filter"); + auto* output = ctx.Output<Tensor>("Output"); + std::vector<int> strides = ctx.Attr<std::vector<int>>("strides"); + std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings"); + std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations"); + int groups = ctx.Attr<int>("groups"); + algorithm algo = static_cast<algorithm>(ctx.Attr<int>("convolution_algorithm_option")); + prop_kind pk = ctx.Attr<bool>("is_test") ? prop_kind::forward_inference : prop_kind::forward_training; + + auto fwd_desc = mkldnn::conv_fwd::desc(/* all the settings above */); + shared_ptr<mkldnn::conv_fwd::primitive_desc> fwd_primitive_desc(new mkldnn::conv_fwd::primitive_desc(fwd_desc, ctx.getEngine())); + + return fwd_primitive_desc; + } +``` + +### MKLDNNDeviceContext +`MKLDNNDeviceContext`, which is very straightforward, should contain some base information like the `stream`, the `engine` and the map needed. + + +### mkldnn_helper +Some functions would be put in `paddle/platform/mkldnn_helper.h`: +- create MKLDNN memories +- create MKLDNN primitives +- error check functions +- etc. + + +### Kernel Switch +We should `reorder` data with a different Layout coming from, or going to, other devices. The `GetExpectedKernelType` and `trans` functions can help us to implement it. + +`GetExpectedKernelType` should get the context, so the operator can return the best `KernelType`.
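+As a rough sketch (illustrative only -- `KernelType` follows the four-key design in `operator_kernel_type.md`, `use_mkldnn` is the hint attribute proposed in `kernel_hint_design.md`, and the accessor names are assumptions rather than a finalized API), the override might look like:
+
+```c++
+// Sketch: choose the MKLDNN kernel when the user sets the "use_mkldnn" hint.
+// KernelType and the accessors below are illustrative names, not final APIs.
+KernelType GetExpectedKernelType(const ExecutionContext& ctx) const {
+  if (ctx.Attr<bool>("use_mkldnn")) {
+    // CPU place + MKLDNN library + an MKLDNN-friendly memory format.
+    return KernelType{platform::CPUPlace(), Library::MKLDNN,
+                      ctx.InputDataType(), Layout::kNChw8c};
+  }
+  // Fall back to the default (Plain/Eigen) kernel with the plain layout.
+  return KernelType{ctx.GetPlace(), Library::Plain,
+                    ctx.InputDataType(), Layout::kNCHW};
+}
+```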
+`trans` would look like this: + +```c++ +void trans(inputs, ctx) override { + if (NoNeedTrans()) { + return; + } + // find the reorder primitive by op_key from the context + auto& dev_ctx = ctx.template device_context<platform::MKLDNNDeviceContext>(); + auto& p = dev_ctx.findPrimitive(op_key + "_reorder_input"); + auto& i = dev_ctx.findMemory(op_key + "_src_input"); + + if (p == nullptr || i == nullptr || changeSized(i, input)) { + auto prim = createPrimitiveDesc(ctx); + auto src = createMemory(memoryDesc(input->dims(), actual_layout), input->data); + auto newbuffer = paddle::memory::Alloc(ctx.GetPlace(), input->size_in_bytes()); + auto dst = createMemory(prim->expected_desc(), newbuffer->data); + auto reorder_primitive(new mkldnn::reorder(src, dst)); + + dev_ctx.addMemory(op_key+"_src_input", src); + dev_ctx.addMemory(op_key+"_input", dst); + dev_ctx.addPrimitive(op_key+"_reorder_input", reorder_primitive); + } + + p = dev_ctx.findPrimitive(op_key + "_reorder_input"); + PADDLE_ENFORCE(p, "Should have Reorder Primitive"); + dev_ctx.submit(p); + if (! this->isMKLDNNKernel()) { + // execute immediately only if this is not an mkldnn kernel function; + // otherwise, it can be executed with the operator primitive in Compute + dev_ctx.stream(); + } + // after submit, the input tensor in ExecutionContext should be changed to the converted one; + // there should be another mechanism to ensure this +} +``` + +### Unit Test +All the functions should be tested correspondingly. +TBD diff --git a/doc/design/operator_kernel_type.md b/doc/design/operator_kernel_type.md new file mode 100644 index 0000000000..aa82e96bf7 --- /dev/null +++ b/doc/design/operator_kernel_type.md @@ -0,0 +1,91 @@ +# Design Doc: The Keys of Operator Kernel Type +## Problem +An operator can have different kernel implementations, and each operator will have a map to store the related kernels. Fluid uses `OpKernelType` as a key to identify a unique Kernel. Before an operator runs, a certain kernel must be chosen via a key of `OpKernelType`. Currently, `OpKernelType` is defined as follows: + +```cpp +struct OpKernelType { + platform::Place place_; + proto::DataType data_type_; +}; +``` +For more details, please refer to [codes](https://github.com/PaddlePaddle/Paddle/blob/2d5ec16bc8a09fb8e0f62c89b116b0cd1d333907/paddle/framework/operator.h#L348-L374) in github. + +It contains two keys, `Place` and `DataType`. And these two keys will be hashed to a unique key to represent a certain type of kernel. However, these two keys are not enough. We need a more complete representation of `OpKernelType`. + +We often implement a kernel of an operator with some computing library on a certain device (place). Note that the correspondence between computing libraries and devices is not one-to-one: a device can have many computing libraries, and a computing library can also support several devices. + +For example, the Eigen library can support Nvidia GPU/AMD GPU/CPU, and the MKLDNN library can support Intel CPU/Intel FPGA. Both `Place` and `Library` should be keys of `OpKernelType`. + +It's obvious that different DataTypes, like fp64/fp32/int8, will have different kernels. But the data layout of a Tensor will also lead to different implementations. Please refer to the batch norm operator [kernels](https://github.com/PaddlePaddle/Paddle/blob/a948fac4d0ad7e0412d373b8aabeb711c2899563/paddle/operators/batch_norm_op.cc#L180-L209). Data Layout should also be taken into consideration. + +## Solution + +There are four keys to determine a kernel type of an operator: `Place`/`Library`/`DataType`/`Layout`.
+
+```cpp
+struct OpKernelType {
+  platform::Place place_;
+  platform::Library library_;
+  proto::DataType data_type_;
+  framework::Layout layout_;
+};
+```
+
+The details are as follows:
+
+### Place
+
+`Place` is defined as follows:
+
+```cpp
+typedef boost::variant<CUDAPlace, CPUPlace> Place;
+```
+
+`Place` represents the device memory where data is located.
+
+
+### Library
+
+One operator kernel is usually implemented based on one library. `Library` is defined as an enum variable:
+
+```cpp
+enum Library { Plain, MKLDNN, CUDNN };
+```
+
+We use the `Plain` enumerator to represent the default library. Since most operators in Fluid are implemented based on the `Eigen` library, we take the `Eigen` library as the `Plain` enumerator.
+A library usually has a corresponding `DeviceContext` which contains some handles needed by computation. Fluid now has two default DeviceContexts for CPU and CUDA: `CPUDeviceContext` and `CUDADeviceContext`. `CPUDeviceContext` contains an Eigen library handle, and `CUDADeviceContext` contains an Eigen library handle and a cuBLAS handle.
+
+If we want to support a new library, a new enumerator needs to be added to `Library`, and a corresponding new `LibraryDeviceContext` will be created.
+
+
+### DataType
+
+
+`DataType` is defined in [framework.proto](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/framework.proto). Currently, int32/int64/fp32/fp64 are supported.
+
+### Layout
+
+Actually, a Tensor is a view of a block of memory. Besides a pointer to the memory, we also need some other descriptions of this block of memory, such as shape (ddim), stride, and layout.
+
+Different layouts lead to different implementations of the operator kernel. There are mainly four principles we have to follow to support layouts in our Fluid framework:
+
+- We take the layout as a data member of Tensor. Layout is actually an enum variable. If Fluid is built with MKLDNN, then the memory formats in MKLDNN will be added into this enum variable too.
+
+- Users have to set the layout for input data, and some operators, like fill_constant/random, also have to set the layout for the data they generate. Of course, we can have some default layout, like NCHW.
+
+- The inference of Layout happens at run-time, not compile-time.
+
+- Every operator has to implement different kernels for different layouts. Take MKLDNN as an example: if we want to implement an MKLDNN convolution operator, we have to implement all the kernels for the different layouts listed [here](http://01org.github.io/mkl-dnn/structmkldnn_1_1memory.html), and we will have a special macro to register kernels for MKLDNN operators.
+
+`Layout` is also defined as an enum variable:
+
+```cpp
+enum Layout {
+  kNCHW,
+  kNHWC,
+#ifdef PADDLE_WITH_MKLDNN
+  knChw8c
+  ...
+#endif
+};
+```
diff --git a/doc/design/paddle_nccl.md b/doc/design/paddle_nccl.md
new file mode 100644
index 0000000000..c7dac70998
--- /dev/null
+++ b/doc/design/paddle_nccl.md
@@ -0,0 +1,65 @@
+# Design Doc: NCCL support in Paddle Fluid
+
+## Abstract
+
+This design doc covers the NCCL feature in Paddle. We propose an approach to support the NCCL library both on a single machine and on multiple machines. We wrap the NCCL primitives `Broadcast`, `Allreduce`, and `Reduce` as operators to utilize multi-GPU power in one script.
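+
+As a rough illustration of the wrapping idea, an AllReduce kernel could delegate directly to the corresponding NCCL routine. The sketch below is only illustrative; `Communicator` and the helper methods are assumptions, not the final interface:
+
+```c++
+template <typename T>
+class NCCLAllReduceKernel : public framework::OpKernel<T> {
+ public:
+  void Compute(const framework::ExecutionContext& ctx) const override {
+    auto* in = ctx.Input<Tensor>("X");
+    auto* out = ctx.Output<Tensor>("Out");
+    auto* comm = ctx.Input<Communicator>("Communicator");  // created by NCCLInit
+    // Every rank contributes its local buffer; NCCL sums across all GPUs.
+    ncclAllReduce(in->data<T>(), out->mutable_data<T>(ctx.GetPlace()),
+                  in->numel(), ncclFloat, ncclSum, comm->comm(),
+                  ctx.cuda_device_context().stream());
+  }
+};
+```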
+
+
+## Motivation
+
+[NCCL](https://developer.nvidia.com/nccl) is an NVIDIA library that supports multi-GPU communication and is optimized for NVIDIA GPUs. It provides routines such as all-gather, all-reduce, broadcast, reduce, and reduce-scatter that can achieve high bandwidth over PCIe and the NVLink high-speed interconnect. With the NCCL library, we can easily accelerate training in parallel.
+
+- Pros
+1. easy to plug in the [NCCL2](https://developer.nvidia.com/nccl) library.
+1. high performance on NVIDIA GPUs.
+1. MPI-like primitives, which have a low learning cost for users.
+
+- Cons
+1. designed only for NVIDIA GPUs, not a general multi-device solution.
+1. although NCCL1 is open-sourced under the BSD license, NCCL2 is not open-sourced anymore.
+
+At the beginning of training, the framework needs to distribute the same parameters to every GPU, and merge the gradients whenever the user requests.
+
+As a result, during training, we need peer-to-peer copies between different GPUs, aggregation of gradients/parameters from the GPUs, and broadcasting of parameters to the GPUs. Every GPU only needs to run the operator with the correct place information.
+
+Besides, we need interfaces to synchronize the model update across the different GPU cards.
+
+## Implementation
+
+As mentioned above, we wrap the NCCL routines as several kinds of operators. Note that NCCL needs to create a Communicator among the GPUs at the beginning, so an NCCLInit operator is created.
+
+### Transpiler
+
+To be compatible with the [parameter server design doc](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/ops/dist_train.md), the transpiler compiles the user-defined operation graph into sub-graphs to be executed on different devices.
+
+1. The user-defined model will be a single-device program.
+
+2. Broadcast/Reduce operators between GPUs will be inserted into the program; for the multi-node case, `Send` and `Recv` operators may be inserted as well.
+
+   *Broadcast, AllReduce in a single machine. And Broadcast, AllReduce, [Send, Recv](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/ops/dist_train.md#graph-converter) in multiple machines*
+
+
+
+After compiling, the graph looks as follows:
+
+
+
+Operators are added to the sub-graphs. Every GPU is assigned a role such as `rank0`, `rank1`, etc.
+
+- **Broadcast**. The Broadcast operator distributes an initialized parameter to all the GPUs from the GPU that owns it, e.g. from the `rank0` GPU.
+- **AllReduce**. The AllReduce operator synchronizes parameters/gradients between GPUs. AllReduce is implemented with the ring-based communication method, avoiding the bottleneck of a single GPU.
+
+Note that the AllReduce operator forces the GPUs to synchronize at that point. Whether the whole training process runs in asynchronous or synchronous mode depends on the AllReduce points in the graph.
+
+As shown in the picture, each GPU computes the gradient of `W`, and the following `AllReduce` operator accumulates `dW` over the full batch of data; each GPU then runs the optimization process individually and applies the gradient to its `W`.
+
+- **AllReduce**
+  Note that our AllReduce operator is a ring-based AllReduce implementation. If we used the NCCL2 AllReduce primitive, every GPU would optimize the full batch of data, wasting (n-1) GPUs' compute resources. In addition, the NCCL2 built-in AllReduce only utilizes the communication resource during synchronization, so updating the gradient becomes a subsequent phase. In fact, we can amortize the gradient-update time cost into the communication phase. The process is:
+1. Every parameter has its root card.
That card will be responsible for aggregating the gradients from the GPUs.
+2. The whole model's parameters are hashed to different root cards, ensuring load balance between the GPUs.
+3. Logically neighboring cards start sending parameters to the next one. After one round, the parameter's main card will have aggregated the full gradients.
+4. Then the root card will optimize the parameter.
+5. This parameter card will send its optimized result to its neighbor, and the neighbor will then send the parameter on to its next one.
+6. The synchronization round finishes.
+
+The total time cost will be 2 * (n-1) * per-parameter-send-time, so we reach the goal of amortizing the update time into the communication phase.
diff --git a/doc/design/refactor/multi_cpu.md b/doc/design/refactor/multi_cpu.md
new file mode 100644
index 0000000000..a8d8ee0422
--- /dev/null
+++ b/doc/design/refactor/multi_cpu.md
@@ -0,0 +1,43 @@
+# Design Doc: Execute the Program with Multi CPU
+
+## Abstract
+
+This design doc proposes an approach to run a user-defined Op graph
+on multiple CPUs: we will use an auto transpiler to convert the user-defined
+Op graph to a multi-CPU Op graph, and use a `ParallelDo` Op to run the graph.
+
+## Transpiler
+
+
+
+After conversion:
+
+
+
+## Implementation
+
+- `Multi-CPU Transpiler` will convert the graph to a multi-CPU graph
+  which will be executed with multiple threads.
+- `BlockingCounter` will `Init/Decrement` an atomic counter, and block in `Wait`
+  until the atomic counter becomes `0`:
+  ```cpp
+  BlockingCounter bc(thread_count);
+  for (int i = 0; i < thread_count; ++i) {
+    thread_pool->Start([&bc] { bc.DecrementCount(); });
+  }
+  bc.Wait();
+  ```
+- `ParallelDo` Operator
+  - Initialize a thread pool, which is a singleton.
+  - Take a block id as the input, and create and run the specified Block on an independent scope
+    with multiple threads.
+  - Initialize a `BlockingCounter` instance and wait until all threads are done.
+- `Split` Operator will split the input Tensor into a TensorArray.
+- `Merge` merges all the gradients calculated in different threads
+  with a `mean/sum/max/min...` method, and then runs the Optimizer Op to optimize `W`.
+
+## TODO
+
+- Improve the optimizer stage with multiple threads, since we could
+  assign the parameters to different threads and execute the
+  optimizer with multiple threads.
diff --git a/doc/design/refactor/src/multi-threads.graffle b/doc/design/refactor/src/multi-threads.graffle
new file mode 100644
index 0000000000..e71173715f
Binary files /dev/null and b/doc/design/refactor/src/multi-threads.graffle differ
diff --git a/doc/design/refactor/src/multi-threads/multi-threads@3x.png b/doc/design/refactor/src/multi-threads/multi-threads@3x.png
new file mode 100644
index 0000000000..e40a869987
Binary files /dev/null and b/doc/design/refactor/src/multi-threads/multi-threads@3x.png differ
diff --git a/doc/design/refactor/src/multi-threads/single-thread@3x.png b/doc/design/refactor/src/multi-threads/single-thread@3x.png
new file mode 100644
index 0000000000..4083aebfdd
Binary files /dev/null and b/doc/design/refactor/src/multi-threads/single-thread@3x.png differ
diff --git a/doc/design/support_new_device.md b/doc/design/support_new_device.md
new file mode 100644
index 0000000000..f54b2b3694
--- /dev/null
+++ b/doc/design/support_new_device.md
@@ -0,0 +1,250 @@
+# Design Doc: Supporting new Device/Library
+
+## Background
+
+Deep learning has a high demand for computing resources. New high-performance devices and computing libraries are appearing very frequently.
Deep learning frameworks have to integrate these high-performance devices and computing libraries flexibly and efficiently.
+
+On one hand, hardware and computing libraries usually do not have a one-to-one correspondence. For example, Intel CPUs support Eigen and MKL computing libraries while Nvidia GPUs support Eigen and cuDNN computing libraries. We have to implement operator-specific kernels for each computing library.
+
+On the other hand, users usually do not want to care about the low-level hardware and computing libraries when writing a neural network configuration. In Fluid, `Layer` is exposed in `Python`, and `Operator` is exposed in `C++`. Both `Layer` and `Operator` are hardware independent.
+
+So, how to support a new Device/Library in Fluid becomes a challenge.
+
+
+## Basic: Integrate A New Device/Library
+
+For a general overview of Fluid, please refer to the [overview doc](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/howto/read_source.md).
+
+There are mainly three parts that we have to consider while integrating a new device/library:
+
+- Place and DeviceContext: indicate the device id and manage hardware resources
+
+- Memory and Tensor: malloc/free data on a certain device
+
+- Math Functor and OpKernel: implement the computing units on certain devices/libraries
+
+### Place and DeviceContext
+
+Note that a device and a computing library do not correspond one to one. A device can have many computing libraries, and a computing library can also support several devices.
+
+#### Place
+Fluid uses class [Place](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/platform/place.h#L55) to represent the device memory where data is located. If we add another device, we have to add a corresponding `DevicePlace`.
+
+```
+        | CPUPlace
+Place --| CUDAPlace
+        | FPGAPlace
+```
+
+And `Place` is defined as follows:
+
+```
+typedef boost::variant<CUDAPlace, CPUPlace, FPGAPlace> Place;
+```
+
+#### DeviceContext
+
+Fluid uses class [DeviceContext](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/platform/device_context.h#L30) to manage the resources in different libraries, such as the CUDA stream in `CUDADeviceContext`. There are also inheritance relationships between different kinds of `DeviceContext`.
+
+
+```
+                /->  CPUDeviceContext   --> MKLDeviceContext
+DeviceContext ----> CUDADeviceContext  --> CUDNNDeviceContext
+                \-> FPGADeviceContext
+```
+
+An example of Nvidia GPU is as follows:
+
+- DeviceContext
+
+
+```
+class DeviceContext {
+  virtual Place GetPlace() const = 0;
+};
+```
+
+
+- CUDADeviceContext
+
+
+```
+class CUDADeviceContext : public DeviceContext {
+  Place GetPlace() const override { return place_; }
+private:
+  CUDAPlace place_;
+  cudaStream_t stream_;
+  cublasHandle_t cublas_handle_;
+  std::unique_ptr<Eigen::GpuDevice> eigen_device_;  // binds with stream_
+};
+```
+
+- CUDNNDeviceContext
+
+```
+class CUDNNDeviceContext : public CUDADeviceContext {
+  private:
+    cudnnHandle_t cudnn_handle_;
+};
+```
+
+
+### Memory and Tensor
+
+
+#### memory module
+
+Fluid provides the following [memory interfaces](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/memory/memory.h#L36):
+
+```
+template <typename Place>
+void* Alloc(Place place, size_t size);
+
+template <typename Place>
+void Free(Place place, void* ptr);
+
+template <typename Place>
+size_t Used(Place place);
+```
+
+To implement these interfaces, we have to implement a MemoryAllocator for each device.
+
+
+#### Tensor
+
+[Tensor](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/tensor.h#L36) holds data with some shape in a specific Place.
+
+```cpp
+class Tensor {
+ public:
+  /*! Return a pointer to mutable memory block. */
+  template <typename T>
+  inline T* data();
+
+  /**
+   * @brief   Return a pointer to mutable memory block.
+   * @note    If not exist, then allocation.
+   */
+  template <typename T>
+  inline T* mutable_data(platform::Place place);
+
+  /**
+   * @brief     Return a pointer to mutable memory block.
+   *
+   * @param[in] dims    The dimensions of the memory block.
+   * @param[in] place   The place of the memory block.
+   *
+   * @note      If not exist, then allocation.
+   */
+  template <typename T>
+  inline T* mutable_data(DDim dims, platform::Place place);
+
+  /*! Resize the dimensions of the memory block. */
+  inline Tensor& Resize(const DDim& dims);
+
+  /*! Return the dimensions of the memory block. */
+  inline const DDim& dims() const;
+
+ private:
+  /*! holds the memory block if allocated. */
+  std::shared_ptr<Placeholder> holder_;
+
+  /*! points to dimensions of memory block. */
+  DDim dim_;
+};
+```
+
+`Placeholder` is used to delay memory allocation; that is, we can first define a tensor, use `Resize` to configure its shape, and then call `mutable_data` to allocate the actual memory.
+
+```cpp
+paddle::framework::Tensor t;
+paddle::platform::CPUPlace place;
+// set size first
+t.Resize({2, 3});
+// allocate memory on CPU later
+t.mutable_data<float>(place);
+```
+
+
+
+### Math Functor and OpKernel
+
+Fluid implements computing units based on different DeviceContexts. Some computing units are shared between operators. This common part will be put in the operators/math directory as basic Functors.
+
+Let's take [MaxOutFunctor](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/math/maxouting.h#L27) as an example:
+
+The interface is defined in the header file:
+
+```
+template <typename DeviceContext, typename T>
+class MaxOutFunctor {
+ public:
+  void operator()(const DeviceContext& context, const framework::Tensor& input,
+                  framework::Tensor* output, int groups);
+};
+```
+
+The CPU implementation is in the .cc file:
+
+```
+template <typename T>
+class MaxOutFunctor<platform::CPUDeviceContext, T> {
+ public:
+  void operator()(const platform::CPUDeviceContext& context,
+                  const framework::Tensor& input, framework::Tensor* output,
+                  int groups) {
+  ...
+  }
+};
+```
+
+The CUDA implementation is in the .cu file:
+
+```
+template <typename T>
+class MaxOutFunctor<platform::CUDADeviceContext, T> {
+ public:
+  void operator()(const platform::CUDADeviceContext& context,
+                  const framework::Tensor& input, framework::Tensor* output,
+                  int groups) {
+  ...
+  }
+};
+```
+
+
+We get the computing handle from a concrete DeviceContext and perform computation on tensors.
+
+The implementation of `OpKernel` is similar to the math functors; the extra thing we need to do is to register the OpKernel in a global map.
+
+Fluid provides different registration interfaces in op_registry.h.
+
+
+Let's take the [Crop](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/crop_op.cc#L134) operator as an example:
+
+In the .cc file:
+
+```
+REGISTER_OP_CPU_KERNEL(crop, ops::CropKernel<float>);
+REGISTER_OP_CPU_KERNEL(
+    crop_grad, ops::CropGradKernel<paddle::platform::CPUDeviceContext, float>);
+```
+
+In the .cu file:
+
+```
+REGISTER_OP_CUDA_KERNEL(crop, ops::CropKernel<float>);
+REGISTER_OP_CUDA_KERNEL(
+    crop_grad, ops::CropGradKernel<paddle::platform::CUDADeviceContext, float>);
+```
+
+
+## Advanced topics: How to switch between different Device/Library
+
+Generally, we will implement an OpKernel for every Device/Library of an operator. We can easily train a convolutional neural network on a GPU. However, some OpKernels are not suitable for a specific device. For example, the crf operator can only run on the CPU, whereas most other operators can run on the GPU. To achieve high performance in such circumstances, we have to switch between different Devices/Libraries.
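+
+One simple way to express such a constraint (a simplified sketch, not the actual implementation) is for a CPU-only operator to report a CPU kernel type no matter where the executor runs, and let the framework transform the inputs accordingly:
+
+```
+// A CPU-only operator (e.g. crf) pins its expected kernel to CPUPlace;
+// the framework can then copy GPU-resident inputs to the CPU before running it.
+framework::OpKernelType GetExpectedKernelType(
+    const framework::ExecutionContext& ctx) const override {
+  return framework::OpKernelType(
+      framework::ToDataType(ctx.Input<Tensor>("Emission")->type()),
+      platform::CPUPlace());
+}
+```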
+
+
+For more details, please refer to the following docs:
+
+- operator kernel type [doc](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/operator_kernel_type.md)
+- switch kernel [doc](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/switch_kernel.md)
diff --git a/doc/design/switch_kernel.md b/doc/design/switch_kernel.md
new file mode 100644
index 0000000000..1846e5d9f9
--- /dev/null
+++ b/doc/design/switch_kernel.md
@@ -0,0 +1,66 @@
+## Background
+Every operator has many kernels because there are multiple data types, places, and data layouts that Fluid supports. We use `KernelType` to describe the kernel types that operators can hold.
+
+The `KernelType` is as follows.
+
+```
+struct KernelType {
+  Place place_;
+  DataType data_type_;
+  LayoutType layout_;
+};
+```
+
+The `place_` is a descriptor of the device and the computational library, e.g., `MKLDNNPlace`, `CUDAPlace`.
+
+The `data_type_` is the data type that this kernel performs on, e.g., `FP32`, `INT64`. Note that one kernel may have inputs with different data types. However, there will be one major `data_type`. For example, `cross_entropy` takes `int64` as its label, and `double`/`float` as its input logit and output cost. The major `data_type` of `cross_entropy` is `float`/`double`.
+
+The `layout` is useful for some computational libraries. One example is that MKLDNN uses many kinds of layouts, such as `nChw8c`. Each kind of layout will invoke a different kernel.
+
+## Problem
+
+Ideally, we would register a kernel for every operator and every kernel type. However, this is impracticable in the following situations.
+
+1. Some operators, like CRF, are complicated and inefficient to implement on the GPU. The CRF operator will only have a CPU kernel.
+2. Some operators take up too much memory. It is better to force them onto the CPU. However, the rest of the operators in the neural network will be performed on the GPU, i.e., the model-parallel problem.
+3. Some layouts and places are particular. One example is that MKLDNN uses `nChw8c`, and no other library uses `nChw8c`.
+
+The problems in these situations are similar. We can formalize the problem as follows.
+
+We register kernels with types $KT = \{kt_1, kt_2, kt_3, ...\}$ for one operator. The inputs of this operator arrive with kernel type $kt_{?}$, where $kt_{?} \notin KT$. How do we cast the input of this operator from $kt_{?}$ to any of the kernel types in $KT$?
+
+## Solution
+
+It is clear that transforming the inputs of an operator to adapt to another kernel type is not related to the particular operator, so we should register these transformation methods as global methods.
+
+We can infer a kernel type from the inputs of an operator. We call this kernel type the `actual kernel type`, which means it is the kernel type with which the operator's inputs actually arrive.
+
+We can also get a kernel type from 1) the configuration of the operator description (users may want to force `MKL` for the `conv` operator) and 2) the place of the current executor (the executor may be running on the GPU). This kernel type is the one we expect the operator to be performed with. We call this kernel type the `expected kernel type`.
+
+We transform the input data from `actual` to `expected` if the expected kernel type is not the same as the actual kernel type.
+
+The algorithm is described as follows:
+
+```cpp
+using DataTransformationFN = std::function<vec<Tensor>(const vec<Tensor>&)>;
+using KernelTypePair = std::pair<KernelType, KernelType>;
+
+map<KernelTypePair, DataTransformationFN> g_data_transformation_;
+
+void OpWithKernel::Run() {
+  vec<Tensor> inputs = ...
+ auto actual_kernel_type = GetActualKernelType(inputs); + + // The expected kernel type is related to actual kernel type. + // For the most operators, the expected kernel type is as same as + // actual kernel type. + // + // So we pass `actual_kernel_type` as a parameter of + // GetExpectedKernelType + auto expect_kernel_type = GetExpectedKernelType(actual_kernel_type); + + auto trans = g_data_transformation_[{actual_kernel_type, expect_kernel_type}]; + + kernel.run(trans(inputs)); +} +``` diff --git a/doc/faq/build_and_install/index_cn.rst b/doc/faq/build_and_install/index_cn.rst index f1677e216f..ed8a0c7e87 100644 --- a/doc/faq/build_and_install/index_cn.rst +++ b/doc/faq/build_and_install/index_cn.rst @@ -14,7 +14,7 @@ $ export CUDA_SO="$(\ls usr/lib64/libcuda* | xargs -I{} echo '-v {}:{}') $(\ls /usr/lib64/libnvidia* | xargs -I{} echo '-v {}:{}')" $ export DEVICES=$(\ls /dev/nvidia* | xargs -I{} echo '--device {}:{}') - $ docker run ${CUDA_SO} ${DEVICES} -it paddledev/paddlepaddle:latest-gpu + $ docker run ${CUDA_SO} ${DEVICES} -it paddlepaddle/paddle:latest-gpu 更多关于Docker的安装与使用, 请参考 `PaddlePaddle Docker 文档 `_ 。 @@ -109,3 +109,31 @@ PaddlePaddle使用avx SIMD指令提高cpu执行效率,因此错误的使用二 解决办法是: * 卸载PaddlePaddle包 :code:`pip uninstall paddle`, 清理掉老旧的PaddlePaddle安装包,使得单元测试有一个干净的环境。如果PaddlePaddle包已经在python的site-packages里面,单元测试会引用site-packages里面的python包,而不是源码目录里 :code:`/python` 目录下的python包。同时,即便设置 :code:`PYTHONPATH` 到 :code:`/python` 也没用,因为python的搜索路径是优先已经安装的python包。 + +8. 下载MKLML库失败 +------------------ + +.. code-block:: bash + + make[2]: *** [third_party/mklml/src/extern_mklml-stamp/extern_mklml-download] 错误 4 + make[1]: *** [CMakeFiles/extern_mklml.dir/all] 错误 2 + make[1]: *** 正在等待未完成的任务.... + +原因:网速或SSL链接原因,导致MKLML库下载不成功。 + +解决办法是:手动下载并安装,具体步骤如下。 + +.. code-block:: bash + + // 1. 进入对应的目录 + cd build/third_party/mklml/src/extern_mklml + + // 2. 查看包的大小, 正常情况下是75M,如果小于75M,即下载失败: + du -sh mklml_lnx_2018.0.1.20171007.tgz + + // 3. 手动下载且解压缩,并手动生成download成功标签: + wget --no-check-certificate https://github.com/01org/mkl-dnn/releases/download/v0.11/mklml_lnx_2018.0.1.20171007.tgz -c -O mklml_lnx_2018.0.1.20171007.tgz + tar zxf mklml_lnx_2018.0.1.20171007.tgz + touch ../extern_mklml-stamp/extern_mklml-download + + // 4. 接着编译即可 diff --git a/doc/getstarted/build_and_install/build_from_source_cn.rst b/doc/getstarted/build_and_install/build_from_source_cn.rst index c875c807b8..41ac07ca56 100644 --- a/doc/getstarted/build_and_install/build_from_source_cn.rst +++ b/doc/getstarted/build_and_install/build_from_source_cn.rst @@ -70,13 +70,13 @@ PaddlePaddle编译需要使用到下面的依赖(包含但不限于),其 :header: "依赖", "版本", "说明" :widths: 10, 15, 30 - "CMake", ">=3.5", "" + "CMake", ">=3.2", "" "GCC", "4.8.2", "推荐使用CentOS的devtools2" - "Python", "2.7.x", "依赖libpython2.7.so" - "pip", ">=9.0", "" - "numpy", "", "" + "Python", "2.7.x", "依赖libpython2.7.so" + "pip", ">=9.0", "" + "numpy", "", "" "SWIG", ">=2.0", "" - "Go", ">=1.8", "可选" + "Go", ">=1.8", "可选" .. _build_options: diff --git a/doc/getstarted/build_and_install/build_from_source_en.rst b/doc/getstarted/build_and_install/build_from_source_en.rst index f194f84ce7..92211aee8c 100644 --- a/doc/getstarted/build_and_install/build_from_source_en.rst +++ b/doc/getstarted/build_and_install/build_from_source_en.rst @@ -76,13 +76,13 @@ will be downloaded automatically. 
:header: "Dependency", "Version", "Description" :widths: 10, 15, 30 - "CMake", ">=3.5", "" + "CMake", ">=3.2", "" "GCC", "4.8.2", "Recommend devtools2 for CentOS" - "Python", "2.7.x", "Need libpython2.7.so" - "pip", ">=9.0", "" - "numpy", "", "" + "Python", "2.7.x", "Need libpython2.7.so" + "pip", ">=9.0", "" + "numpy", "", "" "SWIG", ">=2.0", "" - "Go", ">=1.8", "Optional" + "Go", ">=1.8", "Optional" .. _build_options: diff --git a/doc/getstarted/build_and_install/docker_install_cn.rst b/doc/getstarted/build_and_install/docker_install_cn.rst index f78b1fb0e1..fa1b6a3727 100644 --- a/doc/getstarted/build_and_install/docker_install_cn.rst +++ b/doc/getstarted/build_and_install/docker_install_cn.rst @@ -114,7 +114,7 @@ PaddlePaddle Book是为用户和开发者制作的一个交互式的Jupyter Note .. code-block:: bash - nvidia-docker run -it -v $PWD:/work paddledev/paddle:latest-gpu /bin/bash + nvidia-docker run -it -v $PWD:/work paddlepaddle/paddle:latest-gpu /bin/bash **注: 如果没有安装nvidia-docker,可以尝试以下的方法,将CUDA库和Linux设备挂载到Docker容器内:** @@ -122,13 +122,13 @@ PaddlePaddle Book是为用户和开发者制作的一个交互式的Jupyter Note export CUDA_SO="$(\ls /usr/lib64/libcuda* | xargs -I{} echo '-v {}:{}') $(\ls /usr/lib64/libnvidia* | xargs -I{} echo '-v {}:{}')" export DEVICES=$(\ls /dev/nvidia* | xargs -I{} echo '--device {}:{}') - docker run ${CUDA_SO} ${DEVICES} -it paddledev/paddle:latest-gpu + docker run ${CUDA_SO} ${DEVICES} -it paddlepaddle/paddle:latest-gpu **关于AVX:** AVX是一种CPU指令集,可以加速PaddlePaddle的计算。最新的PaddlePaddle Docker镜像默认 是开启AVX编译的,所以,如果您的电脑不支持AVX,需要单独 -`编译 <./build_from_source_cn.rst>`_ PaddlePaddle为no-avx版本。 +`编译 <./build_from_source_cn.html>`_ PaddlePaddle为no-avx版本。 以下指令能检查Linux电脑是否支持AVX: diff --git a/doc/getstarted/build_and_install/docker_install_en.rst b/doc/getstarted/build_and_install/docker_install_en.rst index d7acc7aeb7..06012bf65e 100644 --- a/doc/getstarted/build_and_install/docker_install_en.rst +++ b/doc/getstarted/build_and_install/docker_install_en.rst @@ -122,7 +122,7 @@ GPU driver installed before move on. .. code-block:: bash - nvidia-docker run -it -v $PWD:/work paddledev/paddle:latest-gpu /bin/bash + nvidia-docker run -it -v $PWD:/work paddlepaddle/paddle:latest-gpu /bin/bash **NOTE: If you don't have nvidia-docker installed, try the following method to mount CUDA libs and devices into the container.** @@ -130,14 +130,14 @@ GPU driver installed before move on. export CUDA_SO="$(\ls /usr/lib64/libcuda* | xargs -I{} echo '-v {}:{}') $(\ls /usr/lib64/libnvidia* | xargs -I{} echo '-v {}:{}')" export DEVICES=$(\ls /dev/nvidia* | xargs -I{} echo '--device {}:{}') - docker run ${CUDA_SO} ${DEVICES} -it paddledev/paddle:latest-gpu + docker run ${CUDA_SO} ${DEVICES} -it paddlepaddle/paddle:latest-gpu **About AVX:** AVX is a kind of CPU instruction can accelerate PaddlePaddle's calculations. The latest PaddlePaddle Docker image turns AVX on by default, so, if your computer doesn't support AVX, you'll probably need to -`build <./build_from_source_en.rst>`_ with :code:`WITH_AVX=OFF`. +`build <./build_from_source_en.html>`_ with :code:`WITH_AVX=OFF`. The following command will tell you whether your computer supports AVX. 
diff --git a/doc/getstarted/build_and_install/pip_install_cn.rst b/doc/getstarted/build_and_install/pip_install_cn.rst index b270e2c2f0..a4587f82a9 100644 --- a/doc/getstarted/build_and_install/pip_install_cn.rst +++ b/doc/getstarted/build_and_install/pip_install_cn.rst @@ -37,11 +37,11 @@ PaddlePaddle可以使用常用的Python包管理工具 :header: "版本说明", "cp27-cp27mu", "cp27-cp27m", "C-API" :widths: 1, 3, 3, 3 - "cpu_avx_mkl", "`paddlepaddle-0.10.0-cp27-cp27mu-linux_x86_64.whl `_", "`paddlepaddle-0.10.0-cp27-cp27m-linux_x86_64.whl `_", "`paddle.tgz `_" - "cpu_avx_openblas", "`paddlepaddle-0.10.0-cp27-cp27mu-linux_x86_64.whl `_", "`paddlepaddle-0.10.0-cp27-cp27m-linux_x86_64.whl `_", "暂无" - "cuda7.5_cudnn5_avx_mkl", "`paddlepaddle-0.10.0-cp27-cp27mu-linux_x86_64.whl `_", "`paddlepaddle-0.10.0-cp27-cp27m-linux_x86_64.whl `_", "`paddle.tgz `_" - "cuda8.0_cudnn5_avx_mkl", "`paddlepaddle-0.10.0-cp27-cp27mu-linux_x86_64.whl `_", "`paddlepaddle-0.10.0-cp27-cp27m-linux_x86_64.whl `_", "`paddle.tgz `_" - "cuda8.0_cudnn7_avx_mkl", "`paddlepaddle-0.10.0-cp27-cp27mu-linux_x86_64.whl `_", "`paddlepaddle-0.10.0-cp27-cp27m-linux_x86_64.whl `_", "`paddle.tgz `_" + "cpu_avx_mkl", "`paddlepaddle-0.11.0-cp27-cp27mu-linux_x86_64.whl `_", "`paddlepaddle-0.11.0-cp27-cp27m-linux_x86_64.whl `_", "`paddle.tgz `_" + "cpu_avx_openblas", "`paddlepaddle-0.11.0-cp27-cp27mu-linux_x86_64.whl `_", "`paddlepaddle-0.11.0-cp27-cp27m-linux_x86_64.whl `_", "暂无" + "cuda7.5_cudnn5_avx_mkl", "`paddlepaddle_gpu-0.11.0-cp27-cp27mu-linux_x86_64.whl `_", "`paddlepaddle_gpu-0.11.0-cp27-cp27m-linux_x86_64.whl `_", "`paddle.tgz `_" + "cuda8.0_cudnn5_avx_mkl", "`paddlepaddle_gpu-0.11.0-cp27-cp27mu-linux_x86_64.whl `_", "`paddlepaddle_gpu-0.11.0-cp27-cp27m-linux_x86_64.whl `_", "`paddle.tgz `_" + "cuda8.0_cudnn7_avx_mkl", "`paddlepaddle_gpu-0.11.0-cp27-cp27mu-linux_x86_64.whl `_", "`paddlepaddle_gpu-0.11.0-cp27-cp27m-linux_x86_64.whl `_", "`paddle.tgz `_" .. 
_pip_dependency: diff --git a/doc/getstarted/build_and_install/pip_install_en.rst b/doc/getstarted/build_and_install/pip_install_en.rst index 70f601a11c..55e31560a0 100644 --- a/doc/getstarted/build_and_install/pip_install_en.rst +++ b/doc/getstarted/build_and_install/pip_install_en.rst @@ -40,11 +40,11 @@ If the links below shows up the login form, just click "Log in as guest" to star :header: "version", "cp27-cp27mu", "cp27-cp27m", "C-API" :widths: 1, 3, 3, 3 - "cpu_avx_mkl", "`paddlepaddle-0.10.0-cp27-cp27mu-linux_x86_64.whl `_", "`paddlepaddle-0.10.0-cp27-cp27m-linux_x86_64.whl `_", "`paddle.tgz `_" - "cpu_avx_openblas", "`paddlepaddle-0.10.0-cp27-cp27mu-linux_x86_64.whl `_", "`paddlepaddle-0.10.0-cp27-cp27m-linux_x86_64.whl `_", "Not Available" - "cuda7.5_cudnn5_avx_mkl", "`paddlepaddle-0.10.0-cp27-cp27mu-linux_x86_64.whl `_", "`paddlepaddle-0.10.0-cp27-cp27m-linux_x86_64.whl `_", "`paddle.tgz `_" - "cuda8.0_cudnn5_avx_mkl", "`paddlepaddle-0.10.0-cp27-cp27mu-linux_x86_64.whl `_", "`paddlepaddle-0.10.0-cp27-cp27m-linux_x86_64.whl `_", "`paddle.tgz `_" - "cuda8.0_cudnn7_avx_mkl", "`paddlepaddle-0.10.0-cp27-cp27mu-linux_x86_64.whl `_", "`paddlepaddle-0.10.0-cp27-cp27m-linux_x86_64.whl `_", "`paddle.tgz `_" + "cpu_avx_mkl", "`paddlepaddle-0.11.0-cp27-cp27mu-linux_x86_64.whl `_", "`paddlepaddle-0.11.0-cp27-cp27m-linux_x86_64.whl `_", "`paddle.tgz `_" + "cpu_avx_openblas", "`paddlepaddle-0.11.0-cp27-cp27mu-linux_x86_64.whl `_", "`paddlepaddle-0.11.0-cp27-cp27m-linux_x86_64.whl `_", "Not Available" + "cuda7.5_cudnn5_avx_mkl", "`paddlepaddle_gpu-0.11.0-cp27-cp27mu-linux_x86_64.whl `_", "`paddlepaddle_gpu-0.11.0-cp27-cp27m-linux_x86_64.whl `_", "`paddle.tgz `_" + "cuda8.0_cudnn5_avx_mkl", "`paddlepaddle_gpu-0.11.0-cp27-cp27mu-linux_x86_64.whl `_", "`paddlepaddle_gpu-0.11.0-cp27-cp27m-linux_x86_64.whl `_", "`paddle.tgz `_" + "cuda8.0_cudnn7_avx_mkl", "`paddlepaddle_gpu-0.11.0-cp27-cp27mu-linux_x86_64.whl `_", "`paddlepaddle_gpu-0.11.0-cp27-cp27m-linux_x86_64.whl `_", "`paddle.tgz `_" .. _pip_dependency: diff --git a/doc/getstarted/concepts/src/infer.py b/doc/getstarted/concepts/src/infer.py new file mode 100644 index 0000000000..4cc58dfee0 --- /dev/null +++ b/doc/getstarted/concepts/src/infer.py @@ -0,0 +1,18 @@ +import paddle.v2 as paddle +import numpy as np + +paddle.init(use_gpu=False) +x = paddle.layer.data(name='x', type=paddle.data_type.dense_vector(2)) +y_predict = paddle.layer.fc(input=x, size=1, act=paddle.activation.Linear()) + +# loading the model which generated by training +with open('params_pass_90.tar', 'r') as f: + parameters = paddle.parameters.Parameters.from_tar(f) + +# Input multiple sets of data,Output the infer result in a array. 
+i = [[[1, 2]], [[3, 4]], [[5, 6]]] +print paddle.infer(output_layer=y_predict, parameters=parameters, input=i) +# Will print: +# [[ -3.24491572] +# [ -6.94668722] +# [-10.64845848]] diff --git a/doc/getstarted/concepts/src/train.py b/doc/getstarted/concepts/src/train.py index 8aceb23406..4bccbfca3c 100644 --- a/doc/getstarted/concepts/src/train.py +++ b/doc/getstarted/concepts/src/train.py @@ -26,6 +26,11 @@ def event_handler(event): if event.batch_id % 1 == 0: print "Pass %d, Batch %d, Cost %f" % (event.pass_id, event.batch_id, event.cost) + # product model every 10 pass + if isinstance(event, paddle.event.EndPass): + if event.pass_id % 10 == 0: + with open('params_pass_%d.tar' % event.pass_id, 'w') as f: + trainer.save_parameter_to_tar(f) # define training dataset reader diff --git a/doc/getstarted/concepts/use_concepts_cn.rst b/doc/getstarted/concepts/use_concepts_cn.rst index c243083794..e695ff283e 100644 --- a/doc/getstarted/concepts/use_concepts_cn.rst +++ b/doc/getstarted/concepts/use_concepts_cn.rst @@ -147,4 +147,9 @@ PaddlePaddle支持不同类型的输入数据,主要包括四种类型,和 .. literalinclude:: src/train.py :linenos: +使用以上训练好的模型进行预测,取其中一个模型params_pass_90.tar,输入需要预测的向量组,然后打印输出: + +.. literalinclude:: src/infer.py + :linenos: + 有关线性回归的实际应用,可以参考PaddlePaddle book的 `第一章节 `_。 diff --git a/doc/howto/dev/contribute_to_paddle_cn.md b/doc/howto/dev/contribute_to_paddle_cn.md index 6993901452..3e0bf7b397 100644 --- a/doc/howto/dev/contribute_to_paddle_cn.md +++ b/doc/howto/dev/contribute_to_paddle_cn.md @@ -76,18 +76,18 @@ no changes added to commit (use "git add" and/or "git commit -a") ## 构建和测试 -编译 PaddlePaddle 的源码以及生成文档需要多种开发工具。为了方便大家,我们的标准开发流程是把这些工具都装进一个Docker image,称为*开发镜像*,通常名字是 `paddle:dev`。然后所有用 `cmake && make` 的地方(比如IDE配置里)都用 `docker run paddle:dev`来代替。 +编译 PaddlePaddle 的源码以及生成文档需要多种开发工具。为了方便大家,我们的标准开发流程是把这些工具都装进一个Docker image,称为*开发镜像*,通常名字是 `paddle:latest-dev` 或者 `paddle:[version tag]-dev` 如 `paddle:0.11.0-dev`。然后所有用 `cmake && make` 的地方(比如IDE配置里)都用 `docker run paddle:latest-dev`来代替。 如要build这个开发镜像,在源码目录树的根目录中运行: ```bash -➜ docker build -t paddle:dev . +➜ docker build -t paddle:latest-dev . 
``` 随后可以用这个开发镜像开始build PaddlePaddle的源码。比如如果要build一个不依赖GPU,但是支持AVX指令集,并且包括unit tests的PaddlePaddle,可以: ```bash -➜ docker run -v $(pwd):/paddle -e "WITH_GPU=OFF" -e "WITH_AVX=ON" -e "WITH_TEST=ON" paddle:dev +➜ docker run -v $(pwd):/paddle -e "WITH_GPU=OFF" -e "WITH_AVX=ON" -e "WITH_TESTING=ON" paddle:latest-dev ``` 这个过程除了编译PaddlePaddle为 `./build/libpaddle.so`,并且输出一个 `./build/paddle.deb`文件之外,还会输出一个 `build/Dockerfile`。我们只需要运行下面命令把编译好的PaddlePaddle打包成一个*生产镜像*(`paddle:prod`): @@ -99,7 +99,7 @@ no changes added to commit (use "git add" and/or "git commit -a") 如果要运行所有的单元测试,可以用如下命令: ```bash -➜ docker run -it -v $(pwd):/paddle paddle:dev bash -c "cd /paddle/build && ctest" +➜ docker run -it -v $(pwd):/paddle paddle:latest-dev bash -c "cd /paddle/build && ctest" ``` 关于构建和测试的更多信息,请参见[这篇文档](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/getstarted/build_and_install/docker_install_cn.rst)。 diff --git a/doc/howto/dev/new_op_cn.md b/doc/howto/dev/new_op_cn.md index 44dbeecbbd..3109d72001 100644 --- a/doc/howto/dev/new_op_cn.md +++ b/doc/howto/dev/new_op_cn.md @@ -1,17 +1,18 @@ # 如何写新的Operator - [概念简介](#概念简介) - - [实现C++类](#实现C++类) - - [定义ProtoMaker类](#定义ProtoMaker类) - - [定义Operator类](#定义Operator类) - - [定义OpKernel类](#定义OpKernel类) - - [注册Operator](#注册Operator) + - [实现C++类](#实现c类) + - [定义ProtoMaker类](#定义protomaker类) + - [定义Operator类](#定义operator类) + - [定义OpKernel类](#定义opkernel类) + - [注册Operator](#注册operator) - [编译](#编译) - - [绑定Python](#绑定Python) + - [绑定Python](#绑定python) - [实现单元测试](#实现单元测试) - - [前向Operator单测](#前向Operator单测) - - [反向Operator单测](#反向Operator单测) + - [前向Operator单测](#前向operator单测) + - [反向Operator单测](#反向operator单测) - [编译和执行](#编译和执行) + - [注意事项](#注意事项) ## 概念简介 @@ -43,7 +44,7 @@ Kernel实现 | CPU、CUDA共享Kernel实现在`.h`文件中,否则,CPU ## 实现C++类 -### 1. 定义ProtoMaker类 +### 定义ProtoMaker类 矩阵乘法的公式:$Out = X * Y$, 可见该计算由两个输入,一个输出组成。 @@ -52,7 +53,7 @@ Kernel实现 | CPU、CUDA共享Kernel实现在`.h`文件中,否则,CPU ```cpp class MulOpMaker : public framework::OpProtoAndCheckerMaker { public: - MulOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) + MulOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "(Tensor), 2D tensor of size (M x K)"); AddInput("Y", "(Tensor), 2D tensor of size (K x N)"); @@ -81,7 +82,7 @@ The equation is: Out = X * Y template class ScaleOpMaker : public framework::OpProtoAndCheckerMaker { public: - ScaleOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) + ScaleOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "The input tensor of scale operator.").NotInGradient(); AddOutput("Out", "The output tensor of scale operator.").NotInGradient(); @@ -100,7 +101,7 @@ The equation is: Out = scale*X - `AddAttr("scale", "...").SetDefault(1.0);` : 增加`scale`系数,作为参数属性,并且设置默认值为1.0。 -### 2. 定义Operator类 +### 定义Operator类 下面的点实现了MulOp的定义: @@ -149,7 +150,7 @@ MulOp(const std::string &type, const framework::VariableNameMap &inputs, 通常`OpProtoMaker`和`Op`类的定义写在`.cc`文件中,和下面将要介绍的注册函数一起放在`.cc`中 -### 3. 
定义OpKernel类 +### 定义OpKernel类 `MulKernel`继承自`framework::OpKernel`,带有下面两个模板参数: @@ -177,6 +178,7 @@ MulOp(const std::string &type, const framework::VariableNameMap &inputs, math::matmul(*X, false, *Y, false, 1, Z, 0, device_context); } }; + ``` 需要注意:**不同设备(CPU、CUDA)共享一个Op定义,是否则共享同一个`OpKernel`,取决于`Compute`调用的函数是否支持不同设备。** @@ -188,7 +190,7 @@ MulOp(const std::string &type, const framework::VariableNameMap &inputs, 到此,前向Op实现完成。接下来,需要在`.cc`文件中注册该op和kernel。 反向Op类的定义,反向OpKernel的定义与前向Op类似,这里不再赘述。**但需注意反向Op没有`ProtoMaker`**。 -### 4. 注册Operator +### 注册Operator - 在`.cc`文件中注册前向、反向Op类,注册CPU Kernel。 @@ -220,7 +222,7 @@ MulOp(const std::string &type, const framework::VariableNameMap &inputs, ops::MulGradKernel); ``` -### 5. 编译 +### 编译 运行下面命令可以进行编译: @@ -236,6 +238,7 @@ make mul_op 单测包括对比前向Op不同设备(CPU、CUDA)的实现、对比反向OP不同设备(CPU、CUDA)的实现、反向Op的梯度测试。下面介绍介绍[`MulOp`的单元测试](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/framework/tests/test_mul_op.py)。 +### 前向Operator单测 Op单元测试继承自`OpTest`。各项更加具体的单元测试在`TestMulOp`里完成。测试Operator,需要: @@ -273,8 +276,7 @@ Op单元测试继承自`OpTest`。各项更加具体的单元测试在`TestMulOp def test_check_grad_ingore_y(self): self.check_grad( ['X'], 'Out', max_relative_error=0.5, no_grad_set=set('Y')) - - ``` + ``` 上面的代码首先导入依赖的包,下面是对`setUp`函数中操作的重要变量的详细解释: @@ -282,6 +284,8 @@ Op单元测试继承自`OpTest`。各项更加具体的单元测试在`TestMulOp - `self.inputs` : 定义输入,类型为`numpy.array`,并初始化。 - `self.outputs` : 定义输出,并在Python脚本中完成与operator同样的计算逻辑,返回Python端的计算结果。 +### 反向operator单测 + 而反向测试中: - `test_check_grad_normal`中调用`check_grad`使用数值法检测梯度正确性和稳定性。 - 第一个参数`["X", "Y"]` : 指定对输入变量`X`、`Y`做梯度检测。 @@ -290,7 +294,7 @@ Op单元测试继承自`OpTest`。各项更加具体的单元测试在`TestMulOp - `test_check_grad_ingore_x`和`test_check_grad_ingore_y`分支用来测试只需要计算一个输入梯度的情况。 -### 编译和执行单元测试 +### 编译和执行 `python/paddle/v2/framework/tests` 目录下新增的 `test_*.py` 单元测试会被自动加入工程进行编译。 diff --git a/doc/howto/dev/new_op_en.md b/doc/howto/dev/new_op_en.md index 510233306c..7175d8370d 100644 --- a/doc/howto/dev/new_op_en.md +++ b/doc/howto/dev/new_op_en.md @@ -1,8 +1,8 @@ # How to write a new operator - [Background](#background) - - [Implementing C++ Types](#implementing-c++-types) - - [Defining ProtoMaker](#defining-protoMaker) + - [Implementing C++ Types](#implementing-c-types) + - [Defining ProtoMaker](#defining-protomaker) - [Defining Operator](#defining-operator) - [Registering Operator](#registering-operator) - [Compilation](#compilation) @@ -41,7 +41,7 @@ Let's take matrix multiplication operator, [MulOp](https://github.com/PaddlePadd ## Implementing C++ Types -### 1. Defining Class ProtoMaker +### Defining ProtoMaker Matrix Multiplication can be written as $Out = X * Y$, meaning that the operation consists of two inputs and pne output. 
@@ -50,7 +50,7 @@ First, define `ProtoMaker` to describe the Operator's input, output, and additio ```cpp class MulOpMaker : public framework::OpProtoAndCheckerMaker { public: - MulOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) + MulOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "(Tensor), 2D tensor of size (M x K)"); AddInput("Y", "(Tensor), 2D tensor of size (K x N)"); @@ -79,7 +79,7 @@ An additional example [`ScaleOp`](https://github.com/PaddlePaddle/Paddle/blob/de template class ScaleOpMaker : public framework::OpProtoAndCheckerMaker { public: - ScaleOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) + ScaleOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "The input tensor of scale operator.").NotInGradient(); AddOutput("Out", "The output tensor of scale operator.").NotInGradient(); @@ -98,7 +98,7 @@ There are two changes in this example: - `AddAttr("scale", "...").SetDefault(1.0);` adds `scale`constant as an attribute, and sets the default value to 1.0. -### 2. Defining Operator +### Defining Operator The following code defines the interface for MulOp: @@ -147,7 +147,7 @@ MulOp(const std::string &type, const framework::VariableNameMap &inputs, Usually `OpProtoMaker` and `Op`'s type definitions are written in `.cc` files, which also include the registration methods introduced later. -### 3. Defining OpKernel +### Defining OpKernel `MulKernel` inherits `framework::OpKernel`, which includes the following templates: @@ -188,7 +188,7 @@ This concludes the forward implementation of an operator. Next its operation and The definition of its corresponding backward operator, if applicable, is similar to that of an forward operator. **Note that a backward operator does not include a `ProtoMaker`**. -### 4. Registering Operator +### Registering Operator - In `.cc` files, register forward and backward operator classes and the CPU kernel. @@ -220,7 +220,7 @@ The definition of its corresponding backward operator, if applicable, is similar ops::MulGradKernel); ``` -### 5. Compilation +### Compilation Run the following commands to compile. @@ -284,8 +284,7 @@ A forward operator unit test inherits `unittest.TestCase` and defines metaclass def test_check_grad_ingore_y(self): self.check_grad( ['X'], 'Out', max_relative_error=0.5, no_grad_set=set('Y')) - - ``` + ``` Get its output, and compare it with the forward operator's own output. The code above first loads required packages. In addition, we have @@ -294,6 +293,8 @@ The code above first loads required packages. In addition, we have - `self.inputs` defines input, with type `numpy.array` and initializes it. - `self.outputs` defines output and completes the same operator computation in the Python script, and returns its result from the Python script. +### Testing Backward Operators + Some key points in checking gradient above include: - `test_normal` calls `check_grad` to validate scaling tests' correctness and stability through numeric methods. 
diff --git a/doc/howto/index_cn.rst b/doc/howto/index_cn.rst
index 991b9e2596..ccd9097702 100644
--- a/doc/howto/index_cn.rst
+++ b/doc/howto/index_cn.rst
@@ -9,9 +9,6 @@
    usage/cmd_parameter/index_cn.rst
    usage/cluster/cluster_train_cn.md
-   usage/k8s/k8s_basis_cn.md
-   usage/k8s/k8s_cn.md
-   usage/k8s/k8s_distributed_cn.md
 
 开发标准
 --------
diff --git a/doc/howto/index_en.rst b/doc/howto/index_en.rst
index 61bf25ccd1..6d1bf7dfc0 100644
--- a/doc/howto/index_en.rst
+++ b/doc/howto/index_en.rst
@@ -9,8 +9,6 @@ Usage
    usage/cmd_parameter/index_en.rst
    usage/cluster/cluster_train_en.md
-   usage/k8s/k8s_en.md
-   usage/k8s/k8s_aws_en.md
 
 Development
 ------------
diff --git a/doc/howto/read_source.md b/doc/howto/read_source.md
new file mode 100644
index 0000000000..e4211abb3b
--- /dev/null
+++ b/doc/howto/read_source.md
@@ -0,0 +1,67 @@
+# PaddlePaddle Fluid Source Code Overview
+
+Examples: https://github.com/PaddlePaddle/Paddle/tree/develop/python/paddle/v2/fluid/tests/book
+
+Core: https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/framework
+
+Operator: https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/operators
+
+Memory: https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/memory
+
+Platform: https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/platform
+
+# Compile Time
+
+The following **defines** the NN. The definition goes into this [protocol buffer](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/framework.proto).
+
+```python
+x = fluid.layers.data(name='x', shape=[13], dtype='float32')
+y = fluid.layers.data(name='y', shape=[1], dtype='float32')
+
+y_predict = fluid.layers.fc(input=x, size=1, act=None)
+cost = fluid.layers.square_error_cost(input=y_predict, label=y)
+avg_cost = fluid.layers.mean(x=cost)
+
+sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001)
+sgd_optimizer.minimize(avg_cost)
+```
+
+- Variables: `x`, `y`, `y_predict`, `cost` and `avg_cost`. [Python](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/fluid/framework.py#L93)
+- Layers: `fluid.layers.data`, `fluid.layers.fc` and `fluid.layers.mean` are layers. [Python](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/fluid/layers.py)
+  - Every Layer has one or more operators and variables/parameters
+  - All the operators are defined at [`paddle/operators/`](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/operators). Other files worth reading:
+    - Base class: [`paddle/framework/operator.h`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/operator.h)
+    - Operator Registration: [`paddle/framework/op_registry.h`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/op_registry.h)
+    - Operator Lookup: [`paddle/framework/op_info.h`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/op_info.h)
+- Optimizer: `fluid.optimizer.SGD`. It does the following:
+  - Add backward operators. [[Python](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/fluid/backward.py), [C++](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/backward.cc)]
+  - Add optimizer operators. [[Python](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/fluid/optimizer.py), [C++](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/optimizer)]
+
+# Run Time
+
+The following **evaluates** the NN and instantiates all the variables and operators.
+ +```python +place = fluid.CPUPlace() +feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) +exe = fluid.Executor(place) + +# Allocate memory. Initialize Parameter. +exe.run(fluid.default_startup_program()) + +# Allocate memory. Do computation. +exe.run(fluid.default_main_program(), + feed=feeder.feed(data), + fetch_list=[avg_cost]) +``` + +- Place: `place`. one of CPU, GPU or FPGA. [C++](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/platform/place.h) + - The device handle are at [paddle/platform/device_context.h](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/platform/device_context.h) +- Executor: `fluid.Executor(place)`. [[Python](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/fluid/executor.py), [C++](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/executor.cc)] + - Feeds the data: `feed=feeder.feed(data)` + - Evaluates all the operators + - Fetches the result: `fetch_list=[avg_cost]` +- Other worth looking files: + - Scope: [paddle/framework/scope.h](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/scope.h). Where all the variables live + - Variable: [paddle/framework/variable.h](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/variable.h). Where all the data (most likely tensors) live + - Tensor: [paddle/framework/tensor.h](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/tensor.h). Where we allocate memory through [`paddle/memory/`](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/memory) diff --git a/doc/howto/usage/cluster/cluster_train_cn.md b/doc/howto/usage/cluster/cluster_train_cn.md index 2e98b3de3f..659bae9c0c 100644 --- a/doc/howto/usage/cluster/cluster_train_cn.md +++ b/doc/howto/usage/cluster/cluster_train_cn.md @@ -1,25 +1,8 @@ -# PaddlePaddle分布式训练 - -* [概述](#概述) -* [环境准备](#环境准备) -* [启动参数说明](#启动参数说明) - * [启动参数服务器](#启动参数服务器) - * [启动计算节点](#启动计算节点) - * [准备数据集](#准备数据集) - * [准备训练程序](#准备训练程序) -* [使用分布式计算平台或工具](#使用分布式计算平台或工具) - * [使用Fabric启动集群作业](#使用fabric启动集群作业) - * [准备一个Linux集群](#准备一个linux集群) - * [启动集群作业](#启动集群作业) - * [终止集群作业](#终止集群作业) - * [检查集群训练结果](#检查集群训练结果) - * [检查模型输出](#检查模型输出) - * [在OpenMPI集群中提交训练作业](#在openmpi集群中提交训练作业) - * [准备OpenMPI集群](#准备OpenMPI集群) - * [启动集群作业](#启动集群作业-1) - * [在Kubernetes集群中提交训练作业](#在kubernetes集群中提交训练作业) +# 分布式训练 + ## 概述 + 本文将介绍如何使用PaddlePaddle在不同的集群框架下完成分布式训练。分布式训练架构如下图所示: @@ -32,10 +15,11 @@ 在使用同步SGD训练神经网络时,PaddlePaddle使用同步屏障(barrier),使梯度的提交和参数的更新按照顺序方式执行。在异步SGD中,则并不会等待所有trainer提交梯度才更新参数,这样极大地提高了计算的并行性:参数服务器之间不相互依赖,并行地接收梯度和更新参数,参数服务器也不会等待计算节点全部都提交梯度之后才开始下一步,计算节点之间也不会相互依赖,并行地执行模型的训练。可以看出,虽然异步SGD方式会提高参数更新并行度, 但是并不能保证参数同步更新,在任意时间某一台参数服务器上保存的参数可能比另一台要更新,与同步SGD相比,梯度会有噪声。 + ## 环境准备 1. 准备您的计算集群。计算集群通常由一组(几台到几千台规模)的Linux服务器组成。服务器之间可以通过局域网(LAN)联通,每台服务器具有集群中唯一的IP地址(或者可被DNS解析的主机名)。集群中的每台计算机通常被成为一个“节点”。 -1. 我们需要在集群的所有节点上安装 PaddlePaddle。 如果要启用GPU,还需要在节点上安装对应的GPU驱动以及CUDA。PaddlePaddle的安装可以参考[build_and_install](https://github.com/PaddlePaddle/Paddle/tree/develop/doc/getstarted/build_and_install)的多种安装方式。我们推荐使用[Docker](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/getstarted/build_and_install/docker_install_cn.rst)安装方式来快速安装PaddlePaddle。 +1. 
我们需要在集群的所有节点上安装 PaddlePaddle。 如果要启用GPU,还需要在节点上安装对应的GPU驱动以及CUDA。PaddlePaddle的安装可以参考[build_and_install](http://www.paddlepaddle.org/docs/develop/documentation/zh/getstarted/build_and_install/index_cn.html)的多种安装方式。我们推荐使用[Docker](http://www.paddlepaddle.org/docs/develop/documentation/zh/getstarted/build_and_install/docker_install_cn.html)安装方式来快速安装PaddlePaddle。 安装完成之后,执行下面的命令可以查看已经安装的版本(docker安装方式可以进入docker容器执行:`docker run -it paddlepaddle/paddle:[tag] /bin/bash`): ```bash @@ -63,12 +47,12 @@ $ paddle pserver --port=7164 --ports_num=1 --ports_num_for_sparse=1 --num_gradie $ stdbuf -oL /usr/bin/nohup paddle pserver --port=7164 --ports_num=1 --ports_num_for_sparse=1 --num_gradient_servers=1 &> pserver.log ``` -| 参数 | 是否必选 | 默认值 | 说明 | -| ------------- | ------------- | ------------- | ------------- | -| port | 必选 | 7164 | pserver监听的起始端口,根据ports_num决定
总端口个数,从起始端口监听多个端口用于通信 | -| ports_num | 必选 | 1 | 监听的端口个数 | -| ports_num_for_sparse | 必选 | 1 | 用于稀疏类型参数通信的端口个数 | -| num_gradient_servers | 必选 | 1 | 当前训练任务pserver总数 | +参数说明 + +- port:**必选,默认7164**,pserver监听的起始端口,根据ports_num决定总端口个数,从起始端口监听多个端口用于通信 +- ports_num:**必选,默认1**,监听的端口个数 +- ports_num_for_sparse:**必选,默认1**,用于稀疏类型参数通信的端口个数 +- num_gradient_servers:**必选,默认1**,当前训练任务pserver总数 ### 启动计算节点 执行以下命令启动使用python编写的trainer程序(文件名为任意文件名,如train.py) @@ -105,16 +89,16 @@ paddle.init( pservers="127.0.0.1") ``` -| 参数 | 是否必选 | 默认 | 说明 | -| ------------- | ------------- | ------------- | ------------- | -| use_gpu | 可选 | False | 是否启用GPU训练 | -| trainer_count | 必选 | 1 | 当前训练任务trainer总个数 | -| port | 必选 | 7164 | 连接到pserver的端口 | -| ports_num | 必选 | 1 | 连接到pserver的端口个数 | -| ports_num_for_sparse | 必选 | 1 | 和pserver之间用于稀疏类型参数通信的端口个数 | -| num_gradient_servers | 必选 | 1 | 当前训练任务pserver总数 | -| trainer_id | 必选 | 0 | 每个trainer的唯一ID,从0开始的整数 | -| pservers | 必选 | 127.0.0.1 | 当前训练任务启动的pserver的IP列表,多个IP使用“,”隔开 | +参数说明 + +- use_gpu: **可选,默认False**,是否启用GPU训练 +- trainer_count:**必选,默认1**,当前训练任务trainer总个数 +- port:**必选,默认7164**,连接到pserver的端口 +- ports_num:**必选,默认1**,连接到pserver的端口个数 +- ports_num_for_sparse:**必选,默认1**,和pserver之间用于稀疏类型参数通信的端口个数 +- num_gradient_servers:**必选,默认1**,当前训练任务pserver总数 +- trainer_id:**必选,默认0**,每个trainer的唯一ID,从0开始的整数 +- pservers:**必选,默认127.0.0.1**,当前训练任务启动的pserver的IP列表,多个IP使用“,”隔开 ### 准备数据集 @@ -171,7 +155,7 @@ test.txt-00002 - `my_lib.py`:会被`train.py`调用的一些用户定义的库函数,比如PIL库等。 - `word_dict.pickle`:在`train.py`中会使用到的字典数据文件。 -- `train.py`:训练程序,代码参考[api_train_v2_cluster.py](https://github.com/PaddlePaddle/Paddle/tree/develop/doc/howto/usage/cluster/src/word2vec/prepare.py)。***注意:*** 对于本样例代码,在使用不同的分布式计算平台时,您可能需要修改`train.py`开头的部分(如下),以便获得训练数据的位置和获取环境变量配置: +- `train.py`:训练程序,代码参考[api_train_v2_cluster.py](https://github.com/PaddlePaddle/Paddle/tree/develop/doc/howto/usage/cluster/src/word2vec/api_train_v2_cluster.py)。***注意:*** 对于本样例代码,在使用不同的分布式计算平台时,您可能需要修改`train.py`开头的部分(如下),以便获得训练数据的位置和获取环境变量配置: ```python cluster_train_file = "./train_data_dir/train/train.txt" @@ -195,91 +179,10 @@ PaddlePaddle可以使用多种分布式计算平台构建分布式计算任务 在使用分布式计算平台进行训练时,任务被调度在集群中时,分布式计算平台通常会通过API或者环境变量提供任务运行需要的参数,比如节点的ID、IP和任务节点个数等。 -### 使用Fabric启动集群作业 - -#### 准备一个Linux集群 -可以在`paddle/scripts/cluster_train_v2/fabric/docker_cluster`目录下,执行`kubectl -f ssh_servers.yaml`启动一个测试集群,并使用`kubectl get po -o wide`获得这些节点的IP地址。 - -#### 启动集群作业 - -`paddle.py` 提供了自动化脚本来启动不同节点中的所有 PaddlePaddle 集群进程。默认情况下,所有命令行选项可以设置为 `paddle.py` 命令选项并且 `paddle.py` 将透明、自动地将这些选项应用到 PaddlePaddle 底层进程。 - -`paddle.py` 为方便作业启动提供了两个独特的命令选项。 - -- `job_dispatch_package` 设为本地 `workspace` 目录,它将被分发到 `conf.py` 中设置的所有节点。它有助于帮助频繁修改和访问工作区文件的用户减少负担,否则频繁的多节点工作空间部署可能会很麻烦。 -- `job_workspace` 设为已部署的工作空间目录,`paddle.py` 将跳过分发阶段直接启动所有节点的集群作业。它可以帮助减少分发延迟。 - -`cluster_train/run.sh` 提供了命令样例来运行 `doc/howto/usage/cluster/src/word2vec` 集群任务,只需用您定义的目录修改 `job_dispatch_package` 和 `job_workspace`,然后: -``` -sh run.sh -``` - -集群作业将会在几秒后启动。 - -#### 终止集群作业 -`paddle.py`能获取`Ctrl + C` SIGINT 信号来自动终止它启动的所有进程。只需中断 `paddle.py` 任务来终止集群作业。如果程序崩溃你也可以手动终止。 - -#### 检查集群训练结果 -详细信息请检查 $workspace/log 里的日志,每一个节点都有相同的日志结构。 - -`paddle_trainer.INFO` -提供几乎所有训练的内部输出日志,与本地训练相同。这里检验运行时间模型的收敛。 - -`paddle_pserver2.INFO` -提供 pserver 运行日志,有助于诊断分布式错误。 - -`server.log` -提供 parameter server 进程的 stderr 和 stdout。训练失败时可以检查错误日志。 - -`train.log` -提供训练过程的 stderr 和 stdout。训练失败时可以检查错误日志。 - -#### 检查模型输出 -运行完成后,模型文件将被写入节点 0 的 `output` 目录中。 -工作空间中的 `nodefile` 表示当前集群作业的节点 ID。 - -### 在OpenMPI集群中提交训练作业 - -#### 准备OpenMPI集群 - -执行下面的命令以启动3个节点的OpenMPI集群和一个"head"节点: - -```bash 
-paddle/scripts/cluster_train_v2/openmpi/docker_cluster
-kubectl create -f head.yaml
-kubectl create -f mpi-nodes.yaml
-```
-
-然后可以从head节点ssh无密码登录到OpenMPI的每个节点上。
-
-#### 启动集群作业
-
-您可以按照下面的步骤在OpenMPI集群中提交paddle训练任务:
-
-```bash
-# 获得head和node节点的IP地址
-kubectl get po -o wide
-# 将node节点的IP地址保存到machines文件中
-kubectl get po -o wide | grep nodes | awk '{print $6}' > machines
-# 拷贝必要的文件到head节点
-scp -i ssh/id_rsa.mpi.pub machines prepare.py train.py start_mpi_train.sh tutorial@[headIP]:~
-# ssh 登录到head节点
-ssh -i ssh/id_rsa.mpi.pub tutorial@[headIP]
-# --------------- 以下操作均在head节点中执行 ---------------
-# 准备训练数据
-python prepare.py
-# 拷贝训练程序和字典文件到每台MPI节点
-cat machines | xargs -i scp word_dict.pickle train.py start_mpi_train.sh machines {}:/home/tutorial
-# 创建日志目录
-mpirun -hostfile machines -n 3 mkdir /home/tutorial/logs
-# 拷贝训练数据到各自的节点
-scp train.txt-00000 test.txt-00000 [node1IP]:/home/tutorial
-scp train.txt-00001 test.txt-00001 [node2IP]:/home/tutorial
-scp train.txt-00002 test.txt-00002 [node3IP]:/home/tutorial
-# 启动训练任务
-mpirun -hostfile machines -n 3 /home/tutorial/start_mpi_train.sh
-```
-
-### 在Kubernetes集群中提交训练作业
+## 在不同集群中运行

-此部分的使用方法可以参考[here](../k8s/k8s_distributed_cn.md)。
+ - [fabric集群](fabric_cn.md)
+ - [openmpi集群](openmpi_cn.md)
+ - [kubernetes单机](k8s_cn.md)
+ - [kubernetes分布式](k8s_distributed_cn.md)
+ - [AWS上运行kubernetes集群训练](k8s_aws_cn.md)
diff --git a/doc/howto/usage/cluster/cluster_train_en.md b/doc/howto/usage/cluster/cluster_train_en.md
index baa97c0c02..915405ca5b 100644
--- a/doc/howto/usage/cluster/cluster_train_en.md
+++ b/doc/howto/usage/cluster/cluster_train_en.md
@@ -1,23 +1,4 @@
-# PaddlePaddle Distributed Training
-
-* [Introduction](#introduction)
-* [Preparations](#preparations)
-* [Command-line arguments](#command-line-arguments)
-  * [Starting parameter server](#starting-parameter-server)
-  * [Starting trainer](#starting-trainer)
-  * [Prepare Training Dataset](#prepare-training-dataset)
-  * [Prepare Training program](#prepare-training-program)
-* [Use cluster platforms or cluster management tools](#use-cluster-platforms-or-cluster-management-tools)
-  * [Cluster Training Using Fabric](#cluster-training-using-fabric)
-    * [Prepare a Linux cluster](#prepare-a-linux-cluster)
-    * [Launching Cluster Job](#launching-cluster-job)
-    * [Kill Cluster Job](#kill-cluster-job)
-    * [Check Cluster Training Result](#check-cluster-training-result)
-    * [Check Model Output](#check-model-output)
-  * [Cluster Training Using OpenMPI](#cluster-training-using-openmpi)
-    * [Prepare an OpenMPI cluster](#prepare-an-openmpi-cluster)
-    * [Launching Cluster Job](#launching-cluster-job-1)
-  * [Cluster Training Using Kubernetes](#cluster-training-using-kubernetes)
+# Distributed Training

 ## Introduction

@@ -35,7 +16,7 @@ When training with synchronize SGD, PaddlePaddle uses an internal "synchronize b

 ## Preparations
 1. Prepare your computer cluster. It's normally a bunch of Linux servers connected by LAN. Each server will be assigned a unique IP address. The computers in the cluster can be called "nodes".
-2. Install PaddlePaddle on every node. If you are going to take advantage of GPU cards, you'll also need to install proper driver and CUDA libraries. To install PaddlePaddle please read [this build and install](https://github.com/PaddlePaddle/Paddle/tree/develop/doc/getstarted/build_and_install) document. We strongly recommend using [Docker installation](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/getstarted/build_and_install/docker_install_en.rst).
+2. Install PaddlePaddle on every node. If you are going to take advantage of GPU cards, you'll also need to install the proper drivers and CUDA libraries. To install PaddlePaddle please read [this build and install](http://www.paddlepaddle.org/docs/develop/documentation/en/getstarted/build_and_install/index_en.html) document. We strongly recommend using [Docker installation](http://www.paddlepaddle.org/docs/develop/documentation/en/getstarted/build_and_install/docker_install_en.html).

 After installation, you can check the version by typing the below command (run a docker container if using docker: `docker run -it paddlepaddle/paddle:[tag] /bin/bash`):

@@ -67,12 +48,12 @@ If you wish to run parameter servers in background, and save a log file, you can
 ```
 $ stdbuf -oL /usr/bin/nohup paddle pserver --port=7164 --ports_num=1 --ports_num_for_sparse=1 --num_gradient_servers=1 &> pserver.log
 ```

-| param | required | default | description |
-| ------------- | ------------- | ------------- | ------------- |
-| port | required | 7164 | port which parameter server will listen on. If ports_num greater than 1, parameter server will listen on multiple ports for more network throughput |
-| ports_num | required | 1 | total number of ports will listen on |
-| ports_num_for_sparse | required | 1 | number of ports which serves sparse parameter update |
-| num_gradient_servers | required | 1 | total number of gradient servers |
+Parameter Description
+
+- port: **required, default 7164**, the port the parameter server listens on. If ports_num is greater than 1, the parameter server listens on multiple consecutive ports for higher network throughput.
+- ports_num: **required, default 1**, total number of ports to listen on.
+- ports_num_for_sparse: **required, default 1**, number of ports used for sparse parameter updates.
+- num_gradient_servers: **required, default 1**, total number of gradient servers in the job.

 ### Starting trainer
 Type the command below to start the trainer(name the file whatever you want, like "train.py")
@@ -111,16 +92,16 @@ paddle.init(
         pservers="127.0.0.1")
 ```

-| param | required | default | description |
-| ------------- | ------------- | ------------- | ------------- |
-| use_gpu | optional | False | set to "True" to enable GPU training |
-| trainer_count | required | 1 | total count of trainers in the training job |
-| port | required | 7164 | port to connect to parameter server |
-| ports_num | required | 1 | number of ports for communication |
-| ports_num_for_sparse | required | 1 | number of ports for sparse type caculation |
-| num_gradient_servers | required | 1 | total number of gradient server |
-| trainer_id | required | 0 | ID for every trainer, start from 0 |
-| pservers | required | 127.0.0.1 | list of IPs of parameter servers, separated by "," |
+Parameter Description
+
+- use_gpu: **optional, default False**, set to "True" to enable GPU training.
+- trainer_count: **required, default 1**, total count of trainers in the training job.
+- port: **required, default 7164**, port to connect to the parameter server.
+- ports_num: **required, default 1**, number of ports used for communication.
+- ports_num_for_sparse: **required, default 1**, number of ports used for sparse parameter calculation.
+- num_gradient_servers: **required, default 1**, total number of gradient servers.
+- trainer_id: **required, default 0**, unique ID for every trainer, starting from 0.
+- pservers: **required, default 127.0.0.1**, list of IPs of parameter servers, separated by ",".
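As a concrete illustration of how the pserver flags and the `paddle.init` arguments must agree, here is a minimal single-machine sketch. The `--trainer_id` flag on `train.py` is a hypothetical convention for this sketch only (a real `train.py` would pass the value to `paddle.init(trainer_id=...)` itself); everything else mirrors the commands shown above.

```bash
# Minimal sketch: one parameter server and two trainers on localhost.
# num_gradient_servers on the pserver must equal the number of trainers.
stdbuf -oL /usr/bin/nohup paddle pserver --port=7164 --ports_num=1 \
    --ports_num_for_sparse=1 --num_gradient_servers=2 &> pserver.log &

# Each trainer needs a unique trainer_id (0 and 1 here); port, ports_num,
# ports_num_for_sparse and num_gradient_servers must match the pserver flags,
# and pservers lists the parameter server IPs (127.0.0.1 in this sketch).
python train.py --trainer_id=0 &> trainer_0.log &
python train.py --trainer_id=1 &> trainer_1.log &
wait
```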
### Prepare Training Dataset

@@ -178,7 +159,7 @@ Your workspace may looks like:

 - `my_lib.py`: user defined libraries, like PIL libs. This is optional.
 - `word_dict.pickle`: dict file for training word embeding.
-- `train.py`: training program. Sample code: [api_train_v2_cluster.py](https://github.com/PaddlePaddle/Paddle/tree/develop/doc/howto/usage/cluster/src/word2vec/prepare.py). ***NOTE:*** You may need to modify the head part of `train.py` when using different cluster platform to retrive configuration environment variables:
+- `train.py`: training program. Sample code: [api_train_v2_cluster.py](https://github.com/PaddlePaddle/Paddle/tree/develop/doc/howto/usage/cluster/src/word2vec/api_train_v2_cluster.py). ***NOTE:*** You may need to modify the head part of `train.py` when using a different cluster platform to retrieve configuration environment variables:

 ```python
 cluster_train_file = "./train_data_dir/train/train.txt"
@@ -202,92 +183,9 @@ We'll introduce cluster job management on these platforms. The examples can be f

 These cluster platforms provide API or environment variables for training processes, when the job is dispatched to different nodes. Like node ID, IP or total number of nodes etc.

-### Cluster Training Using Fabric
-
-#### Prepare a Linux cluster
-
-Run `kubectl -f ssh_servers.yaml` under the directory: `paddle/scripts/cluster_train_v2/fabric/docker_cluster` will launch a demo cluster. Run `kubectl get po -o wide` to get IP addresses of these nodes.
-
-#### Launching Cluster Job
-`paddle.py` provides automatical scripts to start all PaddlePaddle cluster processes in different nodes. By default, all command line options can be set as `paddle.py` command options and `paddle.py` will transparently and automatically set these options to PaddlePaddle lower level processes.
-
-`paddle.py`provides two distinguished command option for easy job launching.
-
-- `job_dispatch_package` set it with local `workspace` directory, it will be dispatched to all nodes which is set in `conf.py`. It could be helpful for frequently manipulating workspace files. otherwise, frequent multi-nodes workspace deployment is very annoying.
-- `job_workspace` set it with already deployed workspace directory, `paddle.py` will skip dispatch stage to directly launch cluster job with all nodes. It could help to reduce heavy
-dispatch latency.
-
-`cluster_train/run.sh` provides command line sample to run `demo/recommendation` cluster job, just modify `job_dispatch_package` and `job_workspace` with your defined directory, then:
-```
-sh run.sh
-```
-
-The cluster Job will start in several seconds.
-
-#### Kill Cluster Job
-`paddle.py` can capture `Ctrl + C` SIGINT signal to automatically kill all processes launched by it. So just stop `paddle.py` to kill cluster job. You should manually kill the job if the program crashed.
-
-#### Check Cluster Training Result
-Check log in $workspace/log for details, each node owns same log structure.
-
-`paddle_trainer.INFO`
-It provides almost all internal output log for training, same as local training. Check runtime model convergence here.
-
-`paddle_pserver2.INFO`
-It provides parameter server running log, which could help to diagnose distributed error.
-
-`server.log`
-It provides stderr and stdout of parameter server process. Check error log if training crashes.
-
-`train.log`
-It provides stderr and stdout of trainer process. Check error log if training crashes.
-
-#### Check Model Output
-After one pass finished, model files will be written in `output` directory in node 0.
-`nodefile` in workspace indicates the node id of current cluster job.
-
-### Cluster Training Using OpenMPI
-
-#### Prepare an OpenMPI cluster
-
-Run the following command to start a 3-node MPI cluster and one "head" node.
-
-```bash
-cd paddle/scripts/cluster_train_v2/openmpi/docker_cluster
-kubectl create -f head.yaml
-kubectl create -f mpi-nodes.yaml
-```
-
-Then you can log in to every OpenMPI node using ssh without input any passwords.
-
-#### Launching Cluster Job
-
-Follow the steps to launch a PaddlePaddle training job in OpenMPI cluster:\
-
-```bash
-# find out node IP addresses
-kubectl get po -o wide
-# generate a "machines" file containing node IP addresses
-kubectl get po -o wide | grep nodes | awk '{print $6}' > machines
-# copy necessary files onto "head" node
-scp -i ssh/id_rsa.mpi.pub machines prepare.py train.py start_mpi_train.sh tutorial@[headIP]:~
-# login to head node using ssh
-ssh -i ssh/id_rsa.mpi.pub tutorial@[headIP]
-# --------------- in head node ---------------
-# prepare training data
-python prepare.py
-# copy training data and dict file to MPI nodes
-cat machines | xargs -i scp word_dict.pickle train.py start_mpi_train.sh machines {}:/home/tutorial
-# creat a directory for storing log files
-mpirun -hostfile machines -n 3 mkdir /home/tutorial/logs
-# copy training data to every node
-scp train.txt-00000 test.txt-00000 [node1IP]:/home/tutorial
-scp train.txt-00001 test.txt-00001 [node2IP]:/home/tutorial
-scp train.txt-00002 test.txt-00002 [node3IP]:/home/tutorial
-# start the job
-mpirun -hostfile machines -n 3 /home/tutorial/start_mpi_train.sh
-```
-
-### Cluster Training Using Kubernetes
+## Use different clusters

-The details can be found [here](../k8s/k8s_cn.md)
+ - [fabric](fabric_en.md)
+ - [openmpi](openmpi_en.md)
+ - [kubernetes](k8s_en.md)
+ - [kubernetes on AWS](k8s_aws_en.md)
diff --git a/doc/howto/usage/cluster/fabric_cn.md b/doc/howto/usage/cluster/fabric_cn.md
new file mode 100644
index 0000000000..0385e401b3
--- /dev/null
+++ b/doc/howto/usage/cluster/fabric_cn.md
@@ -0,0 +1,42 @@
+# 使用fabric启动集群训练
+
+## 准备一个Linux集群
+可以在`paddle/scripts/cluster_train_v2/fabric/docker_cluster`目录下,执行`kubectl create -f ssh_servers.yaml`启动一个测试集群,并使用`kubectl get po -o wide`获得这些节点的IP地址。
+
+## 启动集群作业
+
+`paddle.py` 提供了自动化脚本来启动不同节点中的所有 PaddlePaddle 集群进程。默认情况下,所有命令行选项可以设置为 `paddle.py` 命令选项并且 `paddle.py` 将透明、自动地将这些选项应用到 PaddlePaddle 底层进程。
+
+`paddle.py` 为方便作业启动提供了两个独特的命令选项。
+
+- `job_dispatch_package` 设为本地 `workspace` 目录,它将被分发到 `conf.py` 中设置的所有节点。它有助于频繁修改和访问工作空间文件的用户减少负担,否则频繁的多节点工作空间部署可能会很麻烦。
+- `job_workspace` 设为已部署的工作空间目录,`paddle.py` 将跳过分发阶段直接启动所有节点的集群作业。它可以帮助减少分发延迟。
+
+`cluster_train/run.sh` 提供了命令样例来运行 `doc/howto/usage/cluster/src/word2vec` 集群任务,只需用您定义的目录修改 `job_dispatch_package` 和 `job_workspace`,然后:
+```
+sh run.sh
+```
+
+集群作业将会在几秒后启动。
+
+## 终止集群作业
+`paddle.py`能获取`Ctrl + C` SIGINT 信号来自动终止它启动的所有进程。只需中断 `paddle.py` 任务来终止集群作业。如果程序崩溃你也可以手动终止。
+
+## 检查集群训练结果
+详细信息请检查 $workspace/log 里的日志,每一个节点都有相同的日志结构。
+
+`paddle_trainer.INFO`
+提供几乎所有训练的内部输出日志,与本地训练相同。可以在这里检查训练过程中模型的收敛情况。
+
+`paddle_pserver2.INFO`
+提供 pserver 运行日志,有助于诊断分布式错误。
+
+`server.log`
+提供 parameter server 进程的 stderr 和 stdout。训练失败时可以检查错误日志。
+
+`train.log`
+提供训练过程的 stderr 和 stdout。训练失败时可以检查错误日志。
+
+## 检查模型输出
+运行完成后,模型文件将被写入节点 0 的 `output` 目录中。
+工作空间中的 `nodefile` 表示当前集群作业的节点 ID。
diff --git a/doc/howto/usage/cluster/fabric_en.md b/doc/howto/usage/cluster/fabric_en.md
new file mode 100644
index 0000000000..bf270d89ab
--- /dev/null
+++ b/doc/howto/usage/cluster/fabric_en.md
@@ -0,0 +1,43 @@
+# Cluster Training Using Fabric
+
+## Prepare a Linux cluster
+
+Running `kubectl create -f ssh_servers.yaml` under the directory `paddle/scripts/cluster_train_v2/fabric/docker_cluster` will launch a demo cluster. Run `kubectl get po -o wide` to get the IP addresses of these nodes.
+
+## Launching Cluster Job
+`paddle.py` provides automated scripts to start all PaddlePaddle cluster processes on different nodes. By default, all command line options can be set as `paddle.py` command options, and `paddle.py` will transparently and automatically pass these options on to the underlying PaddlePaddle processes.
+
+`paddle.py` provides two distinct command options for easy job launching.
+
+- `job_dispatch_package`: set it to a local `workspace` directory; it will be dispatched to all nodes listed in `conf.py`. This is helpful if you frequently modify workspace files; otherwise, repeatedly deploying the workspace to multiple nodes can be tedious.
+- `job_workspace`: set it to an already deployed workspace directory; `paddle.py` will skip the dispatch stage and directly launch the cluster job on all nodes. This helps reduce dispatch latency.
+
+`cluster_train/run.sh` provides a sample command line to run the `demo/recommendation` cluster job; just set `job_dispatch_package` and `job_workspace` to your own directories, then:
+```
+sh run.sh
+```
+
+The cluster job will start in a few seconds.
+
+## Kill Cluster Job
+`paddle.py` captures the `Ctrl + C` SIGINT signal to automatically kill all processes it launched, so just interrupt `paddle.py` to kill the cluster job. You should kill the job manually if the program crashed.
+
+## Check Cluster Training Result
+Check the logs in `$workspace/log` for details; each node has the same log structure.
+
+`paddle_trainer.INFO`
+It provides almost all internal output logs for training, the same as local training. Check runtime model convergence here.
+
+`paddle_pserver2.INFO`
+It provides the parameter server running log, which helps to diagnose distributed errors.
+
+`server.log`
+It provides the stderr and stdout of the parameter server process. Check the error log if training crashes.
+
+`train.log`
+It provides the stderr and stdout of the trainer process. Check the error log if training crashes.
+
+## Check Model Output
+After one pass finishes, model files will be written to the `output` directory on node 0.
+`nodefile` in the workspace indicates the node id of the current cluster job.
diff --git a/doc/howto/usage/cluster/k8s_aws_cn.md b/doc/howto/usage/cluster/k8s_aws_cn.md
new file mode 120000
index 0000000000..c44cd9a731
--- /dev/null
+++ b/doc/howto/usage/cluster/k8s_aws_cn.md
@@ -0,0 +1 @@
+k8s_aws_en.md
\ No newline at end of file
diff --git a/doc/howto/usage/k8s/k8s_aws_en.md b/doc/howto/usage/cluster/k8s_aws_en.md
similarity index 98%
rename from doc/howto/usage/k8s/k8s_aws_en.md
rename to doc/howto/usage/cluster/k8s_aws_en.md
index ce72b08038..0dfa8237a3 100644
--- a/doc/howto/usage/k8s/k8s_aws_en.md
+++ b/doc/howto/usage/cluster/k8s_aws_en.md
@@ -493,7 +493,7 @@ spec:
     spec:
       containers:
       - name: paddle-data
-        image: paddledev/paddle-tutorial:k8s_data
+        image: paddlepaddle/paddle-tutorial:k8s_data
         imagePullPolicy: Always
         volumeMounts:
         - mountPath: "/efs"
@@ -522,7 +522,7 @@ NAME DESIRED SUCCESSFUL AGE
paddle-data 1 1 6m
```

-Data preparation is done by docker image `paddledev/paddle-tutorial:k8s_data`, see [here](src/k8s_data/README.md) for how to build this docker image and source code.
+Data preparation is done by the docker image `paddlepaddle/paddle-tutorial:k8s_data`; see [here](src/k8s_data/README.md) for how to build this docker image and its source code.

 #### Start Training

@@ -545,7 +545,7 @@ spec:
             claimName: efsvol
       containers:
       - name: trainer
-        image: paddledev/paddle-tutorial:k8s_train
+        image: paddlepaddle/paddle-tutorial:k8s_train
         command: ["bin/bash",  "-c", "/root/start.sh"]
         env:
         - name: JOB_NAME
@@ -617,7 +617,7 @@ kubectl --kubeconfig=kubeconfig log -f POD_NAME

 Run `kubectl --kubeconfig=kubeconfig describe job paddle-cluster-job` to check training job status. It will complete in around 20 minutes.

-The details for start `pserver` and `trainer` are hidden inside docker image `paddledev/paddle-tutorial:k8s_train`, see [here](src/k8s_train/README.md) for how to build the docker image and source code.
+The details for starting `pserver` and `trainer` are hidden inside the docker image `paddlepaddle/paddle-tutorial:k8s_train`; see [here](src/k8s_train/README.md) for how to build the docker image and its source code.

 #### Inspect Training Output

diff --git a/doc/howto/usage/k8s/k8s_cn.md b/doc/howto/usage/cluster/k8s_cn.md
similarity index 83%
rename from doc/howto/usage/k8s/k8s_cn.md
rename to doc/howto/usage/cluster/k8s_cn.md
index ab07cb9cd5..c1a11f7165 100644
--- a/doc/howto/usage/k8s/k8s_cn.md
+++ b/doc/howto/usage/cluster/k8s_cn.md
@@ -1,21 +1,22 @@
 # Kubernetes单机训练

-在这篇文档里,我们介绍如何在 Kubernetes 集群上启动一个单机使用CPU的Paddle训练作业。在下一篇中,我们将介绍如何启动分布式训练作业。
+在这篇文档里,我们介绍如何在 Kubernetes 集群上启动一个单机使用CPU的PaddlePaddle训练作业。在下一篇中,我们将介绍如何启动分布式训练作业。

 ## 制作Docker镜像

-在一个功能齐全的Kubernetes机群里,通常我们会安装Ceph等分布式文件系统来存储训练数据。这样的话,一个分布式Paddle训练任务中的每个进程都可以从Ceph读取数据。在这个例子里,我们只演示一个单机作业,所以可以简化对环境的要求,把训练数据直接放在
-Paddle的Docker image里。为此,我们需要制作一个包含训练数据的Paddle镜像。
+在一个功能齐全的Kubernetes机群里,通常我们会安装Ceph等分布式文件系统来存储训练数据。这样的话,一个分布式PaddlePaddle训练任务中
+的每个进程都可以从Ceph读取数据。在这个例子里,我们只演示一个单机作业,所以可以简化对环境的要求,把训练数据直接放在
+PaddlePaddle的Docker Image里。为此,我们需要制作一个包含训练数据的PaddlePaddle镜像。
+
+PaddlePaddle的 `paddlepaddle/paddle:cpu-demo-latest` 镜像里有PaddlePaddle的源码与demo,
+(请注意,默认的PaddlePaddle生产环境镜像 `paddlepaddle/paddle:latest` 是不包括源码的,PaddlePaddle的各版本镜像可以参考
+[Docker Installation Guide](http://paddlepaddle.org/docs/develop/documentation/zh/getstarted/build_and_install/docker_install_cn.html)),
+下面我们使用这个镜像来下载数据到Docker Container中,并把这个包含了训练数据的Container保存为一个新的镜像。

-Paddle 的 [Quick Start Tutorial](http://www.paddlepaddle.org/doc/demo/quick_start/index_en.html)
-里介绍了用Paddle源码中的脚本下载训练数据的过程。
-而 `paddledev/paddle:cpu-demo-latest` 镜像里有 Paddle 源码与demo,( 请注意,默认的
-Paddle镜像 `paddledev/paddle:cpu-latest` 是不包括源码的, Paddle的各版本镜像可以参考 [Docker installation guide](http://www.paddlepaddle.org/doc/build/docker_install.html) ),所以我们使用这个镜像来下载训练数据到Docker container中,然后把这个包含了训练数据的container保存为一个新的镜像。
-
 ### 运行容器

 ```
-$ docker run --name quick_start_data -it paddledev/paddle:cpu-demo-latest
+$ docker run --name quick_start_data -it paddlepaddle/paddle:cpu-demo-latest
 ```

 ### 下载数据
@@ -103,7 +104,7 @@ spec:
       restartPolicy: Never
 ```

-### 创建Paddle Job
+### 创建PaddlePaddle Job

 使用上文创建的yaml文件创建Kubernetes Job,命令为:

diff --git a/doc/howto/usage/k8s/k8s_distributed_cn.md b/doc/howto/usage/cluster/k8s_distributed_cn.md
similarity index 88%
rename from doc/howto/usage/k8s/k8s_distributed_cn.md
rename to doc/howto/usage/cluster/k8s_distributed_cn.md
index a9bebf0955..167089b807 100644
--- a/doc/howto/usage/k8s/k8s_distributed_cn.md
+++ b/doc/howto/usage/cluster/k8s_distributed_cn.md
@@ -1,8 +1,6 @@
 # Kubernetes分布式训练

-前一篇文章介绍了如何在Kubernetes集群上启动一个单机PaddlePaddle训练作业
(Job)。在这篇文章里,我们介绍如何在Kubernetes集群上进行分布式PaddlePaddle训练作业。关于PaddlePaddle的分布式训练,文章 [Cluster Training](https://github.com/baidu/Paddle/blob/develop/doc/cluster/opensource/cluster_train.md)介绍了一种通过SSH远程分发任务,进行分布式训练的方法,与此不同的是,本文将介绍在Kubernetes容器管理平台上快速构建PaddlePaddle容器集群,进行分布式训练的方案。
-
-有关Kubernetes相关概念以及如何搭建和配置Kubernetes集群,可以参考[k8s_basis](./k8s_basis_cn.md)。
+前一篇文章介绍了如何在Kubernetes集群上启动一个单机PaddlePaddle训练作业 (Job)。在这篇文章里,我们介绍如何在Kubernetes集群上进行分布式PaddlePaddle训练作业。关于PaddlePaddle的分布式训练,文章 [Cluster Training](http://www.paddlepaddle.org/docs/develop/documentation/zh/howto/usage/cluster/cluster_train_cn.html)介绍了一种通过SSH远程分发任务,进行分布式训练的方法,与此不同的是,本文将介绍在Kubernetes容器管理平台上快速构建PaddlePaddle容器集群,进行分布式训练的方案。

 ## 整体方案

@@ -28,7 +26,7 @@ PaddlePaddle镜像需要提供`paddle pserver`与`paddle train`进程的运行
 - 拷贝训练文件到容器内
 - 生成`paddle pserver`与`paddle train`进程的启动参数,并且启动训练

-因为官方镜像 `paddledev/paddle:cpu-latest` 内已经包含PaddlePaddle的执行程序但是还没上述功能,所以我们可以在这个基础上,添加启动脚本,制作新镜像来完成以上的工作。参考镜像的[*Dockerfile*](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/howto/usage/cluster/k8s/src/k8s_train/Dockerfile)。
+因为官方镜像 `paddlepaddle/paddle:latest` 内已经包含PaddlePaddle的执行程序但是还没有上述功能,所以我们可以在这个基础上,添加启动脚本,制作新镜像来完成以上的工作。参考镜像的[*Dockerfile*](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/howto/usage/cluster/src/k8s_train/Dockerfile)。

 ```bash
 $ cd doc/howto/usage/k8s/src/k8s_train
@@ -62,7 +60,7 @@ spec:
   hostNetwork: true
   containers:
   - name: paddle-data
-    image: paddledev/paddle-tutorial:k8s_data
+    image: paddlepaddle/paddle-tutorial:k8s_data
     imagePullPolicy: Always
     volumeMounts:
     - mountPath: "/mnt"
@@ -149,20 +147,19 @@ spec:

 文件中,`metadata`下的`name`表示这个job的名字。`parallelism,completions`字段表示这个job会同时开启3个PaddlePaddle节点,成功训练且退出的pod数目为3时,这个job才算成功结束。然后申明一个存储卷`jobpath`,代表宿主机目录`/home/work/mfs`,在对容器的描述`containers`字段中,将此目录挂载为容器的`/home/jobpath`目录,这样容器的`/home/jobpath`目录就成为了共享存储,放在这个目录里的文件其实是保存到了MFS上。

-`env`字段表示容器的环境变量,我们将`paddle`运行的一些参数通过这种方式传递到容器内。
+`env`字段表示容器的环境变量,我们将`paddle`运行的一些参数通过这种方式传递到容器内:
+

-环境变量 | 说明
---- | ---
-JOB_PATH | 共享存储挂在的路径
-JOB_NAME | Job的名字
-TRAIN_CONFIG_DIR | 本次训练文件所在目录,与JOB_PATH,JOB_NAME组合可以找到本次训练需要的文件路径
-CONF_PADDLE_NIC | `paddle pserver`进程需要的`--nics`参数,即网卡名
-CONF_PADDLE_PORT | `paddle paserver`的`--port`参数
-CONF_PADDLE_PORTS_NUM | 稠密更新的端口数量,即`--ports_num`参数
-CONF_PADDLE_PORTS_NUM_SPARSE | 稀疏更新的端口数量,即`--ports_num_for_sparse`参数
-CONF_PADDLE_GRADIENT_NUM | 训练节点数量,即`--num_gradient_servers参数`
+- JOB_PATH:共享存储挂载的路径
+- JOB_NAME:Job的名字
+- TRAIN_CONFIG_DIR:本次训练文件所在目录,与JOB_PATH,JOB_NAME组合可以找到本次训练需要的文件路径
+- CONF_PADDLE_NIC:`paddle pserver`进程需要的`--nics`参数,即网卡名
+- CONF_PADDLE_PORT:`paddle pserver`的`--port`参数
+- CONF_PADDLE_PORTS_NUM:稠密更新的端口数量,即`--ports_num`参数
+- CONF_PADDLE_PORTS_NUM_SPARSE:稀疏更新的端口数量,即`--ports_num_for_sparse`参数
+- CONF_PADDLE_GRADIENT_NUM:训练节点数量,即`--num_gradient_servers`参数

-这些参数的具体描述,读者可以查看[这里](http://www.paddlepaddle.org/doc/ui/cmd_argument/detail_introduction.html#parameter-server-and-distributed-communication)。
+这些参数的具体描述,读者可以查看[这里](http://www.paddlepaddle.org/docs/develop/documentation/zh/howto/usage/cmd_parameter/detail_introduction_cn.html)。

 编写完YAML文件后,可以使用Kubernetes的命令行工具创建job。

diff --git a/doc/howto/usage/k8s/k8s_en.md b/doc/howto/usage/cluster/k8s_en.md
similarity index 79%
rename from doc/howto/usage/k8s/k8s_en.md
rename to doc/howto/usage/cluster/k8s_en.md
index 0c3ab05b70..c374f00a49 100644
--- a/doc/howto/usage/k8s/k8s_en.md
+++ b/doc/howto/usage/cluster/k8s_en.md
@@ -1,18 +1,27 @@
-# Paddle On Kubernetes
+# PaddlePaddle On Kubernetes

->In this article, we will introduce how to run Paddle training job on single CPU machine using
 Kubernetes. In next article, we will introduce how to run Paddle training job on distributed cluster.
+In this article, we will introduce how to run a PaddlePaddle training job on a single-CPU machine using Kubernetes. In the next article, we will introduce how to run a PaddlePaddle training job on a distributed cluster.

 ## Build Docker Image

-In distributed Kubernetes cluster, we will use Ceph or other shared storage system for storing training related data so that all processes in Paddle training can retrieve data from Ceph. In this example, we will only demo training job on single machine. In order to simplify the requirement of the environment, we will directly put training data into Paddle's Docker Image, so we need to create a Paddle Docker image that already includes the training data.
+In a distributed Kubernetes cluster, we will use Ceph or another distributed
+storage system for storing training related data so that all processes in
+PaddlePaddle training can retrieve data from Ceph. In this example, we will
+only demo a training job on a single machine. In order to simplify the requirement
+of the environment, we will directly put the training data into the PaddlePaddle Docker Image,
+so we need to create a PaddlePaddle Docker image that includes the training data.
+
+The production Docker Image `paddlepaddle/paddle:cpu-demo-latest` has the PaddlePaddle
+source code and demo. (Caution: the default PaddlePaddle Docker Image `paddlepaddle/paddle:latest` doesn't include
+the source code; PaddlePaddle's different versions of Docker Images can be found here:
+[Docker Installation Guide](http://paddlepaddle.org/docs/develop/documentation/en/getstarted/build_and_install/docker_install_en.html)),
+so we run this Docker Image and download the training data, and then commit the whole
+Container to be a new Docker Image.

-Paddle's [Quick Start Tutorial](http://www.paddlepaddle.org/doc/demo/quick_start/index_en.html) introduces how to download and train data by using script from Paddle's source code.
-And `paddledev/paddle:cpu-demo-latest` image has the Paddle source code and demo. (Caution: Default Paddle image `paddledev/paddle:cpu-latest` doesn't include the source code, Paddle's different versions of image can be referred here: [Docker installation guide](http://www.paddlepaddle.org/doc/build/docker_install.html)), so we run this container and download the training data, and then commit the whole container to be a new Docker image.
-
 ### Run Docker Container

 ```
-$ docker run --name quick_start_data -it paddledev/paddle:cpu-demo-latest
+$ docker run --name quick_start_data -it paddlepaddle/paddle:cpu-demo-latest
 ```

 ### Download Training Data

@@ -67,7 +76,7 @@ $ docker commit quick_start_data mypaddle/paddle:quickstart

 ## Use Kubernetes For Training

->We will use Kubernetes job for training process, following steps shows how to do the training with Kubernetes.
+We will use a Kubernetes job for the training process; the following steps show how to do the training with Kubernetes.

 ### Create Yaml Files

@@ -99,7 +108,7 @@ spec:
       restartPolicy: Never
 ```

-### Start Paddle Job
+### Start PaddlePaddle Job

 Using the above yaml file to start the Kubernetes job.
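Condensed, the single-machine Kubernetes workflow that the renamed `k8s_en.md` describes comes down to a few commands; a sketch follows, where the manifest filename `job.yaml` and the pod name placeholder are assumptions for illustration.

```bash
# Sketch of the end-to-end flow: bake the training data into an image, then
# run it as a Kubernetes job. Inside the first container, download the data
# and exit; the container's filesystem is then committed as a new image.
docker run --name quick_start_data -it paddlepaddle/paddle:cpu-demo-latest
docker commit quick_start_data mypaddle/paddle:quickstart

# Launch the training job from the yaml manifest (filename assumed here),
# then watch the pod and read the trainer's output.
kubectl create -f job.yaml
kubectl get pods
kubectl logs <pod-name>
```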
diff --git a/doc/howto/usage/cluster/openmpi_cn.md b/doc/howto/usage/cluster/openmpi_cn.md
new file mode 100644
index 0000000000..831cafdc03
--- /dev/null
+++ b/doc/howto/usage/cluster/openmpi_cn.md
@@ -0,0 +1,41 @@
+# 在OpenMPI集群中提交训练作业
+
+## 准备OpenMPI集群
+
+执行下面的命令以启动3个节点的OpenMPI集群和一个"head"节点:
+
+```bash
+cd paddle/scripts/cluster_train_v2/openmpi/docker_cluster
+kubectl create -f head.yaml
+kubectl create -f mpi-nodes.yaml
+```
+
+然后可以从head节点ssh无密码登录到OpenMPI的每个节点上。
+
+## 启动集群作业
+
+您可以按照下面的步骤在OpenMPI集群中提交paddle训练任务:
+
+```bash
+# 获得head和node节点的IP地址
+kubectl get po -o wide
+# 将node节点的IP地址保存到machines文件中
+kubectl get po -o wide | grep nodes | awk '{print $6}' > machines
+# 拷贝必要的文件到head节点
+scp -i ssh/id_rsa.mpi.pub machines prepare.py train.py start_mpi_train.sh tutorial@[headIP]:~
+# ssh 登录到head节点
+ssh -i ssh/id_rsa.mpi.pub tutorial@[headIP]
+# --------------- 以下操作均在head节点中执行 ---------------
+# 准备训练数据
+python prepare.py
+# 拷贝训练程序和字典文件到每台MPI节点
+cat machines | xargs -i scp word_dict.pickle train.py start_mpi_train.sh machines {}:/home/tutorial
+# 创建日志目录
+mpirun -hostfile machines -n 3 mkdir /home/tutorial/logs
+# 拷贝训练数据到各自的节点
+scp train.txt-00000 test.txt-00000 [node1IP]:/home/tutorial
+scp train.txt-00001 test.txt-00001 [node2IP]:/home/tutorial
+scp train.txt-00002 test.txt-00002 [node3IP]:/home/tutorial
+# 启动训练任务
+mpirun -hostfile machines -n 3 /home/tutorial/start_mpi_train.sh
+```
diff --git a/doc/howto/usage/cluster/openmpi_en.md b/doc/howto/usage/cluster/openmpi_en.md
new file mode 100644
index 0000000000..09af46e25e
--- /dev/null
+++ b/doc/howto/usage/cluster/openmpi_en.md
@@ -0,0 +1,41 @@
+# Cluster Training Using OpenMPI
+
+## Prepare an OpenMPI cluster
+
+Run the following command to start a 3-node MPI cluster and one "head" node.
+
+```bash
+cd paddle/scripts/cluster_train_v2/openmpi/docker_cluster
+kubectl create -f head.yaml
+kubectl create -f mpi-nodes.yaml
+```
+
+Then you can log in to every OpenMPI node using ssh without entering any passwords.
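Before launching the job, it is worth confirming from the head node that passwordless login actually works; a small sketch follows, reusing the key path from the commands above and assuming the `machines` file generated in the next step.

```bash
# Sketch: verify non-interactive ssh to every MPI node. BatchMode makes ssh
# fail fast instead of prompting for a password, so a hang or error here
# means the passwordless setup is incomplete.
for ip in $(cat machines); do
  ssh -o BatchMode=yes -i ssh/id_rsa.mpi.pub tutorial@"$ip" hostname
done
```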
+
+## Launching Cluster Job
+
+Follow these steps to launch a PaddlePaddle training job in the OpenMPI cluster:
+
+```bash
+# find out node IP addresses
+kubectl get po -o wide
+# generate a "machines" file containing node IP addresses
+kubectl get po -o wide | grep nodes | awk '{print $6}' > machines
+# copy necessary files onto "head" node
+scp -i ssh/id_rsa.mpi.pub machines prepare.py train.py start_mpi_train.sh tutorial@[headIP]:~
+# login to head node using ssh
+ssh -i ssh/id_rsa.mpi.pub tutorial@[headIP]
+# --------------- in head node ---------------
+# prepare training data
+python prepare.py
+# copy training data and dict file to MPI nodes
+cat machines | xargs -i scp word_dict.pickle train.py start_mpi_train.sh machines {}:/home/tutorial
+# create a directory for storing log files
+mpirun -hostfile machines -n 3 mkdir /home/tutorial/logs
+# copy training data to every node
+scp train.txt-00000 test.txt-00000 [node1IP]:/home/tutorial
+scp train.txt-00001 test.txt-00001 [node2IP]:/home/tutorial
+scp train.txt-00002 test.txt-00002 [node3IP]:/home/tutorial
+# start the job
+mpirun -hostfile machines -n 3 /home/tutorial/start_mpi_train.sh
+```
diff --git a/doc/howto/usage/k8s/src/Dockerfile b/doc/howto/usage/cluster/src/Dockerfile
similarity index 54%
rename from doc/howto/usage/k8s/src/Dockerfile
rename to doc/howto/usage/cluster/src/Dockerfile
index 3a73606c61..e178bf4da0 100644
--- a/doc/howto/usage/k8s/src/Dockerfile
+++ b/doc/howto/usage/cluster/src/Dockerfile
@@ -1,4 +1,4 @@
-FROM paddledev/paddle:cpu-latest
+FROM paddlepaddle/paddle:latest

 MAINTAINER zjsxzong89@gmail.com

diff --git a/doc/howto/usage/k8s/src/add_security_group.png b/doc/howto/usage/cluster/src/add_security_group.png
similarity index 100%
rename from doc/howto/usage/k8s/src/add_security_group.png
rename to doc/howto/usage/cluster/src/add_security_group.png
diff --git a/doc/howto/usage/k8s/src/create_efs.png b/doc/howto/usage/cluster/src/create_efs.png
similarity index 100%
rename from doc/howto/usage/k8s/src/create_efs.png
rename to doc/howto/usage/cluster/src/create_efs.png
diff --git a/doc/howto/usage/k8s/src/efs_mount.png b/doc/howto/usage/cluster/src/efs_mount.png
similarity index 100%
rename from doc/howto/usage/k8s/src/efs_mount.png
rename to doc/howto/usage/cluster/src/efs_mount.png
diff --git a/doc/howto/usage/cluster/src/k8s-paddle-arch.png b/doc/howto/usage/cluster/src/k8s-paddle-arch.png
new file mode 100644
index 0000000000..b3800c4fe8
Binary files /dev/null and b/doc/howto/usage/cluster/src/k8s-paddle-arch.png differ
diff --git a/doc/howto/usage/k8s/src/k8s_data/Dockerfile b/doc/howto/usage/cluster/src/k8s_data/Dockerfile
similarity index 100%
rename from doc/howto/usage/k8s/src/k8s_data/Dockerfile
rename to doc/howto/usage/cluster/src/k8s_data/Dockerfile
diff --git a/doc/howto/usage/k8s/src/k8s_data/README.md b/doc/howto/usage/cluster/src/k8s_data/README.md
similarity index 100%
rename from doc/howto/usage/k8s/src/k8s_data/README.md
rename to doc/howto/usage/cluster/src/k8s_data/README.md
diff --git a/doc/howto/usage/k8s/src/k8s_data/get_data.sh b/doc/howto/usage/cluster/src/k8s_data/get_data.sh
similarity index 100%
rename from doc/howto/usage/k8s/src/k8s_data/get_data.sh
rename to doc/howto/usage/cluster/src/k8s_data/get_data.sh
diff --git a/doc/howto/usage/k8s/src/k8s_train/Dockerfile b/doc/howto/usage/cluster/src/k8s_train/Dockerfile
similarity index 77%
rename from doc/howto/usage/k8s/src/k8s_train/Dockerfile
rename to doc/howto/usage/cluster/src/k8s_train/Dockerfile
index c0fca1f9a9..77f021a89a
100644 --- a/doc/howto/usage/k8s/src/k8s_train/Dockerfile +++ b/doc/howto/usage/cluster/src/k8s_train/Dockerfile @@ -1,4 +1,4 @@ -FROM paddledev/paddle:cpu-latest +FROM paddlepaddle/paddle:latest COPY start.sh /root/ COPY start_paddle.py /root/ diff --git a/doc/howto/usage/k8s/src/k8s_train/README.md b/doc/howto/usage/cluster/src/k8s_train/README.md similarity index 100% rename from doc/howto/usage/k8s/src/k8s_train/README.md rename to doc/howto/usage/cluster/src/k8s_train/README.md diff --git a/doc/howto/usage/k8s/src/k8s_train/start.sh b/doc/howto/usage/cluster/src/k8s_train/start.sh similarity index 100% rename from doc/howto/usage/k8s/src/k8s_train/start.sh rename to doc/howto/usage/cluster/src/k8s_train/start.sh diff --git a/doc/howto/usage/k8s/src/k8s_train/start_paddle.py b/doc/howto/usage/cluster/src/k8s_train/start_paddle.py similarity index 100% rename from doc/howto/usage/k8s/src/k8s_train/start_paddle.py rename to doc/howto/usage/cluster/src/k8s_train/start_paddle.py diff --git a/doc/howto/usage/k8s/src/managed_policy.png b/doc/howto/usage/cluster/src/managed_policy.png similarity index 100% rename from doc/howto/usage/k8s/src/managed_policy.png rename to doc/howto/usage/cluster/src/managed_policy.png diff --git a/doc/howto/usage/k8s/src/pserver_and_trainer.png b/doc/howto/usage/cluster/src/pserver_and_trainer.png similarity index 100% rename from doc/howto/usage/k8s/src/pserver_and_trainer.png rename to doc/howto/usage/cluster/src/pserver_and_trainer.png diff --git a/doc/howto/usage/k8s/src/route53_create_recordset.png b/doc/howto/usage/cluster/src/route53_create_recordset.png similarity index 100% rename from doc/howto/usage/k8s/src/route53_create_recordset.png rename to doc/howto/usage/cluster/src/route53_create_recordset.png diff --git a/doc/howto/usage/k8s/src/route53_create_zone.png b/doc/howto/usage/cluster/src/route53_create_zone.png similarity index 100% rename from doc/howto/usage/k8s/src/route53_create_zone.png rename to doc/howto/usage/cluster/src/route53_create_zone.png diff --git a/doc/howto/usage/k8s/src/worker_security_group.png b/doc/howto/usage/cluster/src/worker_security_group.png similarity index 100% rename from doc/howto/usage/k8s/src/worker_security_group.png rename to doc/howto/usage/cluster/src/worker_security_group.png diff --git a/doc/howto/usage/k8s/k8s_basis_cn.md b/doc/howto/usage/k8s/k8s_basis_cn.md deleted file mode 100644 index 4c3dc81ed3..0000000000 --- a/doc/howto/usage/k8s/k8s_basis_cn.md +++ /dev/null @@ -1,75 +0,0 @@ -# Kubernetes 简介 - -[*Kubernetes*](http://kubernetes.io/)是Google开源的容器集群管理系统,其提供应用部署、维护、扩展机制等功能,利用Kubernetes能方便地管理跨机器运行容器化的应用。Kubernetes可以在物理机或虚拟机上运行,且支持部署到[AWS](http://kubernetes.io/docs/getting-started-guides/aws),[Azure](http://kubernetes.io/docs/getting-started-guides/azure/),[GCE](http://kubernetes.io/docs/getting-started-guides/gce)等多种公有云环境。介绍分布式训练之前,需要对[Kubernetes](http://kubernetes.io/)有一个基本的认识,下面先简要介绍一下本文用到的几个Kubernetes概念。 - -- [*Node*](http://kubernetes.io/docs/admin/node/) 表示一个Kubernetes集群中的一个工作节点,这个节点可以是物理机或者虚拟机,Kubernetes集群就是由node节点与master节点组成的。 - -- [*Pod*](http://kubernetes.io/docs/user-guide/pods/) 是一组(一个或多个)容器,pod是Kubernetes的最小调度单元,一个pod中的所有容器会被调度到同一个node上。Pod中的容器共享NET,PID,IPC,UTS等Linux namespace。由于容器之间共享NET namespace,所以它们使用同一个IP地址,可以通过*localhost*互相通信。不同pod之间可以通过IP地址访问。 - -- [*Job*](http://kubernetes.io/docs/user-guide/jobs/) 描述Kubernetes上运行的作业,一次作业称为一个job,通常每个job包括一个或者多个pods,job启动后会创建这些pod并开始执行一个程序,等待这个程序执行成功并返回0则成功退出,如果执行失败,也可以配置不同的重试机制。 - -- [*Volume*](http://kubernetes.io/docs/user-guide/volumes/) 
存储卷,是pod内的容器都可以访问的共享目录,也是容器与node之间共享文件的方式,因为容器内的文件都是暂时存在的,当容器因为各种原因被销毁时,其内部的文件也会随之消失。通过volume,就可以将这些文件持久化存储。Kubernetes支持多种volume,例如hostPath(宿主机目录),gcePersistentDisk,awsElasticBlockStore等。 - -- [*Namespaces*](https://kubernetes.io/docs/user-guide/namespaces/) 命名空间,在kubernetes中创建的所有资源对象(例如上文的pod,job)等都属于一个命名空间,在同一个命名空间中,资源对象的名字是唯一的,不同空间的资源名可以重复,命名空间主要为了对象进行逻辑上的分组便于管理。本文只使用了默认命名空间。 - -- [*PersistentVolume*](https://kubernetes.io/docs/user-guide/persistent-volumes/): 和[*PersistentVolumeClaim*](https://kubernetes.io/docs/user-guide/persistent-volumes/#persistentvolumeclaims)结合,将外部的存储服务在Kubernetes中描述成为统一的资源形式,便于存储资源管理和Pod引用。 - -## 部署Kubernetes集群 - -Kubernetes提供了多种集群部署的方案,本文档内不重复介绍。这里给出集中常见的部署方法: - -- [*minikube*](https://kubernetes.io/docs/getting-started-guides/minikube/): 快速在本地启动一个单机的kubernetes服务器,便于本地验证和测试。 -- [*kubeadm*](http://kubernetes.io/docs/getting-started-guides/kubeadm/): 在不同操作系统,不同主机(Bare-Metal, AWS, GCE)条件下,快速部署集群。 -- [*AWS EC2*](https://kubernetes.io/docs/getting-started-guides/aws/): 在aws上快速部署集群。 -- [*Bare-Metal*](https://kubernetes.io/docs/getting-started-guides/centos/centos_manual_config/): 在物理机上手动部署。 - -可以参考[这个表格](https://kubernetes.io/docs/getting-started-guides/#table-of-solutions)选择适合您的场景的合适方案。 - -## 选择存储方案 - -容器不会保留在运行时生成的数据,job或者应用程序在容器中运行时生成的数据会在容器销毁时消失。为了完成分布式机器学习训练任务,需要有一个外部的存储服务来保存训练所需数据和训练输出。 -常见的可选存储服务包括: - -- [*NFS*](https://github.com/kubernetes/kubernetes/tree/master/examples/volumes/nfs): 可以将磁盘上某个目录共享给网络中其他机器访问。部署和配置比较简单,可以用于小量数据的验证。不提供分布式存储,高可用,冗余等功能。NFS的部署方法可以参考[这里](http://www.tecmint.com/how-to-setup-nfs-server-in-linux/)。 -- [*GlusterFS*](http://gluster.readthedocs.io/en/latest/Quick-Start-Guide/Quickstart/): 网络分布式文件系统,可以在Kubernetes中按照[这个](https://github.com/kubernetes/kubernetes/tree/master/examples/volumes/glusterfs)例子使用。 -- [*Ceph*](http://docs.ceph.com/docs/master/): 分布式文件系统,支持rbd,POSIX API接口(ceph fs)和对象存储API,参考[这里](https://kubernetes.io/docs/user-guide/volumes/#rbd)。 -- [*MooseFS*](https://moosefs.com/documentation.html): 一个分布式的存储系统。需要先挂载到服务器Node上再通过kubernetes hostPath Volume挂载到容器中。 - -## 配置kubectl - -### 安装kubectl -``` -# OS X -curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/darwin/amd64/kubectl - -# Linux -curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl - -# Windows -curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/windows/amd64/kubectl.exe -``` - -### 配置kubectl访问你的kubernetes集群 - -编辑`~/.kube/config`这个配置文件,修改`Master-IP`的地址。如果使用SSL认证,则需要配置`certificate-authority`和`users`中的用户证书。如果是使用非SSL方式访问(比如通过8080端口),也可以去掉这些证书的配置。 -``` -apiVersion: v1 -clusters: -- cluster: - certificate-authority: /path/to/ca.crt - server: https://[Master-IP]:443 - name: minikube -contexts: -- context: - cluster: minikube - user: minikube - name: minikube -current-context: minikube -kind: Config -preferences: {} -users: -- name: minikube - user: - client-certificate: /path/to/apiserver.crt - client-key: /Users/wuyi/.minikube/apiserver.key -``` diff --git a/doc/howto/usage/k8s/src/k8s-paddle-arch.png b/doc/howto/usage/k8s/src/k8s-paddle-arch.png deleted file mode 100644 index 2183a232ad..0000000000 Binary files a/doc/howto/usage/k8s/src/k8s-paddle-arch.png and /dev/null differ diff --git a/doc/mobile/cross_compiling_for_ios_cn.md 
b/doc/mobile/cross_compiling_for_ios_cn.md
index 9da48e7f21..d5196d9a4c 100644
--- a/doc/mobile/cross_compiling_for_ios_cn.md
+++ b/doc/mobile/cross_compiling_for_ios_cn.md
@@ -18,11 +18,11 @@ PaddlePaddle为交叉编译提供了工具链配置文档[cmake/cross_compiling/

 - `CMAKE_SYSTEM_NAME`,CMake编译的目标平台,必须设置为`iOS`。在设置`CMAKE_SYSTEM_NAME=iOS`后,PaddlePaddle的CMake系统会自动编译所有的第三方依赖库,并且强制设置一些PaddlePaddle参数的值(`WITH_C_API=ON`、`WITH_GPU=OFF`、`WITH_AVX=OFF`、`WITH_PYTHON=OFF`、`WITH_RDMA=OFF`)。
 - `WITH_C_API`,是否编译C-API预测库,必须设置为ON。在iOS平台上只支持使用C-API来预测。
-- `WITH_SWIG_PY`,必须设置为ON。在iOS平台上不支持通过swig调用来训练或者预测。
+- `WITH_SWIG_PY`,必须设置为`OFF`。在iOS平台上不支持通过swig调用来训练或者预测。

 iOS平台可选配置参数:

-- `IOS_PLATFORM`,可设置为`OS/SIMULATOR`,默认值为`OS`。
+- `IOS_PLATFORM`,可设置为`OS`(默认值)或`SIMULATOR`。
   - `OS`,构建目标为`arm`架构的iPhone或者iPad等物理设备。
   - `SIMULATOR`,构建目标为`x86`架构的模拟器平台。
 - `IOS_ARCH`,目标架构。针对不同的`IOS_PLATFORM`,可设置的目标架构如下表所示,默认编译所有架构:
diff --git a/doc/mobile/cross_compiling_for_ios_en.md b/doc/mobile/cross_compiling_for_ios_en.md
new file mode 100644
index 0000000000..aa390cd61f
--- /dev/null
+++ b/doc/mobile/cross_compiling_for_ios_en.md
@@ -0,0 +1,120 @@
+# PaddlePaddle Compiling Guide for iOS
+
+This tutorial will walk you through cross compiling the PaddlePaddle library for iOS from source on macOS.
+
+## Preparation
+
+Apple provides Xcode as the cross-compiler and IDE for iOS development. Download it from the App Store or [here](https://developer.apple.com/cn/xcode/). To verify your installation, run the following command:
+
+```bash
+$ xcodebuild -version
+Xcode 9.0
+Build version 9A235
+```
+
+## Cross-compiling configurations
+
+PaddlePaddle provides the cross-compiling toolchain configuration [cmake/cross_compiling/ios.cmake](https://github.com/PaddlePaddle/Paddle/blob/develop/cmake/cross_compiling/ios.cmake), which has some default settings for frequently used compilers.
+
+There are some mandatory environment variables that need to be set before cross compiling PaddlePaddle for iOS:
+
+- `CMAKE_SYSTEM_NAME`, the CMake target platform name, has to be `iOS`. When this variable is set to `iOS`, PaddlePaddle's CMake will compile all the third party dependencies and enforce some parameter values (`WITH_C_API=ON`, `WITH_GPU=OFF`, `WITH_AVX=OFF`, `WITH_PYTHON=OFF`, `WITH_RDMA=OFF`).
+
+- `WITH_C_API`, whether to compile the C-API inference library, has to be `ON`, since the C-API is the only supported interface for inference on iOS.
+- `WITH_SWIG_PY`, has to be `OFF`. Training or inference via swig is not supported on iOS.
+
+Optional environment variables for iOS are:
+
+- `IOS_PLATFORM`, either `OS` (default) or `SIMULATOR`.
+  - `OS`, build targets ARM-based physical devices like iPhone or iPad.
+  - `SIMULATOR`, build targets x86 architecture simulators.
+- `IOS_ARCH`, target architecture. By default, all architecture types will be compiled. If you need to specify the architecture to compile for, please find valid values for different `IOS_PLATFORM` settings from the table below:
+
+  | IOS_PLATFORM | IOS_ARCH             |
+  |--------------|----------------------|
+  | OS           | armv7, armv7s, arm64 |
+  | SIMULATOR    | i386, x86_64         |
+
+- `IOS_DEPLOYMENT_TARGET`, minimum iOS version for deployment, `7.0` by default.
+- `IOS_ENABLE_BITCODE`, whether to enable [Bitcode](https://developer.apple.com/library/content/documentation/IDEs/Conceptual/AppDistributionGuide/AppThinning/AppThinning.html#//apple_ref/doc/uid/TP40012582-CH35-SW3), can be `ON/OFF`, `ON` by default.
+- `IOS_USE_VECLIB_FOR_BLAS`, whether to use the [vecLib](https://developer.apple.com/documentation/accelerate/veclib) framework for BLAS computing, can be `ON/OFF`, `OFF` by default.
+- `IOS_DEVELOPMENT_ROOT`, the path to the `Developer` directory, can be explicitly set with your `/path/to/platform/Developer`. If left blank, PaddlePaddle will automatically pick the corresponding `platform`'s `Developer` directory from your Xcode installation based on your `IOS_PLATFORM` value.
+- `IOS_SDK_ROOT`, the path to the `SDK` root, can be explicitly set with your `/path/to/platform/Developer/SDKs/SDK`. If left blank, PaddlePaddle will pick the latest SDK in the directory of `IOS_DEVELOPMENT_ROOT`.
+
+Other settings:
+
+- `USE_EIGEN_FOR_BLAS`, whether to use Eigen for matrix computing, only effective when `IOS_USE_VECLIB_FOR_BLAS=OFF`. Can be `ON/OFF`, `OFF` by default.
+- `HOST_C/CXX_COMPILER`, the host C/C++ compiler. Uses the value of the environment variable `CC/CXX` by default, or `cc/c++` if `CC/CXX` doesn't exist.
+
+Some typical cmake configurations:
+
+```bash
+cmake -DCMAKE_SYSTEM_NAME=iOS \
+      -DIOS_PLATFORM=OS \
+      -DIOS_ARCH="armv7;arm64" \
+      -DIOS_ENABLE_BITCODE=ON \
+      -DIOS_USE_VECLIB_FOR_BLAS=ON \
+      -DCMAKE_INSTALL_PREFIX=your/path/to/install \
+      -DWITH_C_API=ON \
+      -DWITH_TESTING=OFF \
+      -DWITH_SWIG_PY=OFF \
+      ..
+```
+
+```bash
+cmake -DCMAKE_SYSTEM_NAME=iOS \
+      -DIOS_PLATFORM=SIMULATOR \
+      -DIOS_ARCH="x86_64" \
+      -DIOS_USE_VECLIB_FOR_BLAS=ON \
+      -DCMAKE_INSTALL_PREFIX=your/path/to/install \
+      -DWITH_C_API=ON \
+      -DWITH_TESTING=OFF \
+      -DWITH_SWIG_PY=OFF \
+      ..
+```
+
+You can set other compiling parameters for your own needs, e.g. if you are trying to minimize the library size, set `CMAKE_BUILD_TYPE` to `MinSizeRel`; or if performance is your concern, set `CMAKE_BUILD_TYPE` to `Release`. You can even tune the PaddlePaddle compiling procedure by manually setting `CMAKE_C/CXX_FLAGS` values.
+
+**TIPS for better performance**:
+
+- set `CMAKE_BUILD_TYPE` to `Release`
+- set `IOS_USE_VECLIB_FOR_BLAS` to `ON`
+
+## Compile and install
+
+After the CMake configuration, run the following commands; PaddlePaddle will download and compile the third party dependencies, then compile and install the PaddlePaddle inference library.
+
+```
+$ make
+$ make install
+```
+
+Please note: if you have previously compiled PaddlePaddle in the source directory for other platforms, remove the `third_party` and `build` directories within the source tree with `rm -rf` to ensure that all the third party dependencies and PaddlePaddle itself are freshly compiled with the current CMake configuration.
+
+The `your/path/to/install` directory will contain the following directories after compiling and installing:
+
+- `include`, contains all the C-API header files.
+- `lib`, contains the PaddlePaddle C-API static library.
+- `third_party`, contains all the 3rd party libraries.
+
+Please note: if the PaddlePaddle library needs to support both physical devices and simulators, you will need to compile for each target separately, then merge the resulting libraries into one fat library with `lipo`.
+
+Now you have the PaddlePaddle library compiled and installed; the fat library can be used in deep learning related iOS apps. Please refer to the C-API documentation for usage guides.
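The closing note about merging device and simulator builds can be made concrete with `lipo`. A sketch follows; the build directory names and the `libpaddle_capi_whole.a` library name are assumptions for illustration.

```bash
# Sketch: merge an arm device build and an x86_64 simulator build of the
# C-API static library into a single fat library, then verify its contents.
lipo -create \
  build_ios_os/install/lib/libpaddle_capi_whole.a \
  build_ios_sim/install/lib/libpaddle_capi_whole.a \
  -output libpaddle_capi_whole.a
lipo -info libpaddle_capi_whole.a
```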
diff --git a/doc/mobile/index_en.rst b/doc/mobile/index_en.rst index 3c08d73671..ef421dacad 100644 --- a/doc/mobile/index_en.rst +++ b/doc/mobile/index_en.rst @@ -5,4 +5,5 @@ MOBILE :maxdepth: 1 cross_compiling_for_android_en.md + cross_compiling_for_ios_en.md cross_compiling_for_raspberry_en.md diff --git a/go/pserver/client/c/test/test_cclient.c b/go/pserver/client/c/test/test_cclient.c index 89c4d7f00a..05ec421fff 100644 --- a/go/pserver/client/c/test/test_cclient.c +++ b/go/pserver/client/c/test/test_cclient.c @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include #include diff --git a/paddle/capi/error.cpp b/paddle/capi/error.cpp index 169b65f921..96ce31b45f 100644 --- a/paddle/capi/error.cpp +++ b/paddle/capi/error.cpp @@ -14,7 +14,7 @@ limitations under the License. */ #include "error.h" -const char* paddle_error_string(paddle_error err) { +extern "C" const char* paddle_error_string(paddle_error err) { switch (err) { case kPD_NULLPTR: return "nullptr error"; diff --git a/paddle/capi/error.h b/paddle/capi/error.h index 9d9d0ed63a..2da9e0a3ef 100644 --- a/paddle/capi/error.h +++ b/paddle/capi/error.h @@ -29,9 +29,17 @@ typedef enum { kPD_UNDEFINED_ERROR = -1, } paddle_error; +#ifdef __cplusplus +extern "C" { +#endif + /** * Error string for Paddle API. 
*/ PD_API const char* paddle_error_string(paddle_error err); +#ifdef __cplusplus +} +#endif + #endif diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt index 4b0eff3adb..738684795d 100644 --- a/paddle/framework/CMakeLists.txt +++ b/paddle/framework/CMakeLists.txt @@ -21,6 +21,8 @@ cc_test(variable_test SRCS variable_test.cc) cc_library(scope SRCS scope.cc DEPS glog) cc_test(scope_test SRCS scope_test.cc DEPS scope) +cc_library(data_transform SRCS data_transform.cc DEPS tensor framework_proto) +cc_test(data_transform_test SRCS data_transform_test.cc DEPS data_transform device_context) cc_library(attribute SRCS attribute.cc DEPS framework_proto) cc_test(program_desc_test SRCS program_desc_test.cc DEPS proto_desc @@ -29,12 +31,13 @@ cc_library(op_proto_maker SRCS op_proto_maker.cc DEPS framework_proto attribute) cc_test(op_proto_maker_test SRCS op_proto_maker_test.cc DEPS op_proto_maker) cc_library(op_info SRCS op_info.cc DEPS attribute framework_proto) cc_library(shape_inference SRCS shape_inference.cc DEPS ddim attribute) -cc_library(operator SRCS operator.cc DEPS op_info device_context tensor scope glog shape_inference) -cc_test(operator_test SRCS operator_test.cc DEPS operator op_registry) +cc_library(operator SRCS operator.cc DEPS op_info device_context tensor scope glog + shape_inference data_transform) +cc_test(operator_test SRCS operator_test.cc DEPS operator op_registry init) cc_library(proto_desc SRCS var_desc.cc op_desc.cc block_desc.cc program_desc.cc DEPS shape_inference op_info operator glog) cc_library(op_registry SRCS op_registry.cc DEPS op_proto_maker op_info operator glog proto_desc) -cc_test(op_registry_test SRCS op_registry_test.cc DEPS op_registry) +nv_test(op_registry_test SRCS op_registry_test.cc DEPS op_registry) py_proto_compile(framework_py_proto SRCS framework.proto) # Generate an empty __init__.py to make framework_py_proto as a valid python module. @@ -58,3 +61,10 @@ cc_test(var_type_inference_test SRCS var_type_inference_test.cc DEPS op_registry proto_desc) cc_library(selected_rows SRCS selected_rows.cc DEPS tensor) cc_test(selected_rows_test SRCS selected_rows_test.cc DEPS selected_rows) + +cc_library(threadpool SRCS threadpool.cc) +cc_test(threadpool_test SRCS threadpool_test.cc DEPS threadpool) +cc_library(init SRCS init.cc DEPS gflags device_context place stringpiece) +cc_test(init_test SRCS init_test.cc DEPS init) + +cc_test(op_kernel_type_test SRCS op_kernel_type_test.cc DEPS place device_context framework_proto) diff --git a/paddle/framework/attribute.cc b/paddle/framework/attribute.cc index b1e1793641..b0fd4d2750 100644 --- a/paddle/framework/attribute.cc +++ b/paddle/framework/attribute.cc @@ -19,42 +19,42 @@ limitations under the License. 
*/ namespace paddle { namespace framework { -Attribute GetAttrValue(const OpDesc::Attr& attr_desc) { +Attribute GetAttrValue(const proto::OpDesc::Attr& attr_desc) { switch (attr_desc.type()) { - case framework::AttrType::BOOLEAN: { + case proto::AttrType::BOOLEAN: { return attr_desc.b(); } - case framework::AttrType::INT: { + case proto::AttrType::INT: { return attr_desc.i(); } - case framework::AttrType::FLOAT: { + case proto::AttrType::FLOAT: { return attr_desc.f(); } - case framework::AttrType::STRING: { + case proto::AttrType::STRING: { return attr_desc.s(); } - case framework::AttrType::BOOLEANS: { + case proto::AttrType::BOOLEANS: { std::vector val(attr_desc.bools_size()); for (int i = 0; i < attr_desc.bools_size(); ++i) { val[i] = attr_desc.bools(i); } return val; } - case framework::AttrType::INTS: { + case proto::AttrType::INTS: { std::vector val(attr_desc.ints_size()); for (int i = 0; i < attr_desc.ints_size(); ++i) { val[i] = attr_desc.ints(i); } return val; } - case framework::AttrType::FLOATS: { + case proto::AttrType::FLOATS: { std::vector val(attr_desc.floats_size()); for (int i = 0; i < attr_desc.floats_size(); ++i) { val[i] = attr_desc.floats(i); } return val; } - case framework::AttrType::STRINGS: { + case proto::AttrType::STRINGS: { std::vector val(attr_desc.strings_size()); for (int i = 0; i < attr_desc.strings_size(); ++i) { val[i] = attr_desc.strings(i); diff --git a/paddle/framework/attribute.h b/paddle/framework/attribute.h index 0641907d6f..c1c63d9cb1 100644 --- a/paddle/framework/attribute.h +++ b/paddle/framework/attribute.h @@ -27,12 +27,12 @@ limitations under the License. */ namespace paddle { namespace framework { template -inline AttrType AttrTypeID() { +inline proto::AttrType AttrTypeID() { Attribute tmp = T(); - return static_cast(tmp.which() - 1); + return static_cast(tmp.which() - 1); } -Attribute GetAttrValue(const OpDesc::Attr& attr_desc); +Attribute GetAttrValue(const proto::OpDesc::Attr& attr_desc); class AttrReader { public: diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index a17036c652..eaf13ddcef 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ #include "paddle/framework/backward.h" #include "paddle/operators/net_op.h" @@ -42,7 +42,7 @@ static std::unordered_set& CtrlFlowOps() { static inline std::unique_ptr CreateGradOp( const OperatorBase& op, const std::unordered_set& no_grad_set, std::unordered_map* grad_to_var) { - OpDescBind op_desc; + OpDesc op_desc; op_desc.SetInputMap(op.Inputs()); op_desc.SetOutputMap(op.Outputs()); op_desc.SetType(op.Type()); @@ -53,7 +53,7 @@ static inline std::unique_ptr CreateGradOp( grad_ops.reserve(grad_descs.size()); std::transform(grad_descs.begin(), grad_descs.end(), std::back_inserter(grad_ops), - [](const std::unique_ptr& grad_desc) { + [](const std::unique_ptr& grad_desc) { return OpRegistry::CreateOp(*grad_desc); }); PADDLE_ENFORCE(!grad_ops.empty()); @@ -217,7 +217,7 @@ static std::unique_ptr BackwardRecursive( // If part of input gradient of that operator is not calculated, fill // zero variables to that input gradient. net->AppendOp(OpRegistry::CreateOp("fill_zeros_like", {{"X", {prefix}}}, - {{"Y", {grad_input}}}, + {{"Out", {grad_input}}}, AttributeMap{})); } return false; @@ -296,7 +296,7 @@ static std::string FwdName(const std::string& grad_name) { static void CreateGradVarInBlock( size_t grad_op_start_index, const std::unordered_map& param_name_map, - BlockDescBind* block_desc, + BlockDesc* block_desc, std::unordered_map* grad_var_record) { auto ops = block_desc->AllOps(); for (size_t op_index = grad_op_start_index; op_index < ops.size(); @@ -341,7 +341,7 @@ static void CreateGradVarInBlock( auto* param = block_desc->FindVarRecursive(pname); auto* grad = block_desc->FindVar(arg); if (param == nullptr) { - grad->SetDataType(DataType::FP32); + grad->SetDataType(proto::DataType::FP32); } else { grad->SetDataType(param->GetDataType()); } @@ -350,12 +350,11 @@ static void CreateGradVarInBlock( } } -std::vector> MakeOpGrad( - const OpDescBind* op_desc, std::unordered_set* no_grad_vars, +std::vector> MakeOpGrad( + const OpDesc* op_desc, std::unordered_set* no_grad_vars, std::unordered_map* grad_to_var, - const std::vector& grad_block = - std::vector()) { - std::vector> grad_op_descs; + const std::vector& grad_block = std::vector()) { + std::vector> grad_op_descs; // All input gradients of forwarding operator do not need to calculate. 
const std::vector& inputs = op_desc->InputArgumentNames(); if (AllGradInSet(inputs, *no_grad_vars)) { @@ -386,7 +385,7 @@ std::vector> MakeOpGrad( .Get(op_desc->Type()) .GradOpMaker()(*op_desc, *no_grad_vars, grad_to_var, grad_block); - std::list> pending_fill_zeros_ops; + std::list> pending_fill_zeros_ops; for (auto& desc : grad_op_descs) { for (const std::string& in_name : desc->InputArgumentNames()) { if (no_grad_vars->count(in_name)) { @@ -394,9 +393,9 @@ std::vector> MakeOpGrad( 0, in_name.size() - sizeof(kGradVarSuffix) / sizeof(char) + 1); std::string new_name = prefix + kZeroVarSuffix; desc->Rename(in_name, new_name); - std::unique_ptr fill_zeros_op( - new OpDescBind("fill_zeros_like", {{"X", {prefix}}}, - {{"Y", {new_name}}}, AttributeMap{})); + std::unique_ptr fill_zeros_op( + new OpDesc("fill_zeros_like", {{"X", {prefix}}}, + {{"Out", {new_name}}}, AttributeMap{})); pending_fill_zeros_ops.push_back(std::move(fill_zeros_op)); } } @@ -408,36 +407,35 @@ std::vector> MakeOpGrad( return grad_op_descs; } -static BlockDescBind* CreateStepBlock( - ProgramDescBind& program_desc, - std::unordered_set* no_grad_vars, +static BlockDesc* CreateStepBlock( + ProgramDesc& program_desc, std::unordered_set* no_grad_vars, std::unordered_map* grad_to_var, int step_block_idx); -std::vector> MakeBlockBackward( - ProgramDescBind& program_desc, int block_idx, +std::vector> MakeBlockBackward( + ProgramDesc& program_desc, int block_idx, std::unordered_set* no_grad_vars, std::unordered_map* grad_to_var) { VLOG(5) << "MakeBlockBackward"; - BlockDescBind* cur_block = program_desc.MutableBlock(block_idx); - std::vector op_descs = cur_block->AllOps(); + BlockDesc* cur_block = program_desc.MutableBlock(block_idx); + std::vector op_descs = cur_block->AllOps(); std::unordered_map> dup_out_ops; size_t grad_desc_idx = 0; - std::vector> backward_descs; + std::vector> backward_descs; for (auto it = op_descs.rbegin(); it != op_descs.rend(); ++it) { VLOG(5) << "Making backward " << (*it)->Type() << " op"; - std::vector> op_grads; + std::vector> op_grads; if ((*it)->Type() == "recurrent" || (*it)->Type() == "while") { - int step_block_idx = (*it)->GetBlockAttr("step_block"); - BlockDescBind* backward_block = CreateStepBlock( - program_desc, no_grad_vars, grad_to_var, step_block_idx); + int step_block_idx = (*it)->GetBlockAttr("sub_block"); + BlockDesc* backward_block = CreateStepBlock(program_desc, no_grad_vars, + grad_to_var, step_block_idx); op_grads = MakeOpGrad(*it, no_grad_vars, grad_to_var, {backward_block}); } else if ((*it)->Type() == "conditional_block") { - BlockDescBind* backward_block = + BlockDesc* backward_block = CreateStepBlock(program_desc, no_grad_vars, grad_to_var, - (*it)->GetBlockAttr("block")); + (*it)->GetBlockAttr("sub_block")); op_grads = MakeOpGrad(*it, no_grad_vars, grad_to_var, {backward_block}); } else { op_grads = MakeOpGrad(*it, no_grad_vars, grad_to_var); @@ -463,14 +461,14 @@ std::vector> MakeBlockBackward( } ++grad_desc_idx; } - std::transform( - op_grads.begin(), op_grads.end(), std::back_inserter(backward_descs), - [](std::unique_ptr& ptr) { return std::move(ptr); }); + std::transform(op_grads.begin(), op_grads.end(), + std::back_inserter(backward_descs), + [](std::unique_ptr& ptr) { return std::move(ptr); }); } VLOG(5) << "Appending Sums"; // Check whether some variables are written more than once - std::list>> pending_sum_ops; + std::list>> pending_sum_ops; for (const auto& dup : dup_out_ops) { const std::string& out_name = dup.first; const std::vector dup_op = dup.second; @@ 
-486,18 +484,17 @@ std::vector> MakeBlockBackward( sum_op_inputs.emplace_back(new_name); next_g_name = sum_op_inputs.back(); } - std::unique_ptr sum_op( - new OpDescBind("sum", {{"X", sum_op_inputs}}, {{"Out", {out_name}}}, - AttributeMap{})); + std::unique_ptr sum_op(new OpDesc("sum", {{"X", sum_op_inputs}}, + {{"Out", {out_name}}}, + AttributeMap{})); pending_sum_ops.push_back({dup_op.back(), std::move(sum_op)}); } } - pending_sum_ops.sort( - [](const std::pair>& a, - const std::pair>& b) { - return a.first > b.first; - }); + pending_sum_ops.sort([](const std::pair>& a, + const std::pair>& b) { + return a.first > b.first; + }); for (auto& p : pending_sum_ops) { backward_descs.insert(backward_descs.begin() + p.first + 1, std::move(p.second)); @@ -508,14 +505,13 @@ std::vector> MakeBlockBackward( return backward_descs; } -static BlockDescBind* CreateStepBlock( - ProgramDescBind& program_desc, - std::unordered_set* no_grad_vars, +static BlockDesc* CreateStepBlock( + ProgramDesc& program_desc, std::unordered_set* no_grad_vars, std::unordered_map* grad_to_var, int step_block_idx) { auto backward_block_op_descs = MakeBlockBackward(program_desc, step_block_idx, no_grad_vars, grad_to_var); - BlockDescBind* backward_block = + BlockDesc* backward_block = program_desc.AppendBlock(*program_desc.MutableBlock(step_block_idx)); for (auto& ptr : backward_block_op_descs) { backward_block->AppendAllocatedOp(move(ptr)); @@ -524,7 +520,7 @@ static BlockDescBind* CreateStepBlock( } ParamGradInfoMap AppendBackward( - ProgramDescBind& program_desc, const VarDescBind& target, + ProgramDesc& program_desc, const VarDesc& target, const std::unordered_set& no_grad_vars) { std::unordered_set no_grad_var_names; no_grad_var_names.reserve(no_grad_vars.size() + 1); @@ -541,11 +537,11 @@ ParamGradInfoMap AppendBackward( PADDLE_ENFORCE(is_scalar, "target should be scalar"); VLOG(3) << "backward from loss=" << target.Name() << " data_type=" << target.GetDataType(); - std::unique_ptr fill_one_op( - new OpDescBind("fill_constant", {}, {{"Out", {fill_one_op_out}}}, - {{"shape", std::vector{1}}, - {"value", static_cast(1.0)}, - {"dtype", target.GetDataType()}})); + std::unique_ptr fill_one_op( + new OpDesc("fill_constant", {}, {{"Out", {fill_one_op_out}}}, + {{"shape", std::vector{1}}, + {"value", static_cast(1.0)}, + {"dtype", target.GetDataType()}})); // infer var type of fill_one_op fill_one_op->InferVarType(root_block); diff --git a/paddle/framework/backward.h b/paddle/framework/backward.h index 96154fa82c..69ee380236 100644 --- a/paddle/framework/backward.h +++ b/paddle/framework/backward.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
*/ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once @@ -49,7 +49,7 @@ using ParamGradInfoMap = std::unordered_map; ParamGradInfoMap AppendBackward( - ProgramDescBind& program_desc, const VarDescBind& target, + ProgramDesc& program_desc, const VarDesc& target, const std::unordered_set& no_grad_vars); } // namespace framework diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index 9fe49881d5..692406b1c3 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ #include "paddle/framework/backward.h" @@ -58,13 +58,13 @@ class RowWiseAddGradMaker : public SingleGradOpDescMaker { using SingleGradOpDescMaker::SingleGradOpDescMaker; protected: - std::unique_ptr Apply() const override { - auto grad_op = new OpDescBind(); + std::unique_ptr Apply() const override { + auto grad_op = new OpDesc(); grad_op->SetInput(GradVarName("Out"), OutputGrad("Out")); grad_op->SetOutput(GradVarName("X"), InputGrad("X")); grad_op->SetOutput(GradVarName("b"), InputGrad("b")); grad_op->SetType("rowwise_add_grad"); - return std::unique_ptr(grad_op); + return std::unique_ptr(grad_op); } }; @@ -159,14 +159,14 @@ class FillZeroOpMaker : public OpProtoAndCheckerMaker { FillZeroOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "x"); - AddOutput("Y", "out"); + AddOutput("Out", "out"); AddComment(""); } }; class SumOpMaker : public framework::OpProtoAndCheckerMaker { public: - SumOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) + SumOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "the input tensors of sum operator.").AsDuplicable(); AddOutput("Out", "the output tensor of sum operator."); @@ -190,11 +190,11 @@ class MinusGradOpDescMaker : public GradOpDescMakerBase { public: using GradOpDescMakerBase::GradOpDescMakerBase; - std::vector> operator()() const override { - std::vector> retv; + std::vector> operator()() const override { + std::vector> retv; auto x_g = InputGrad("X"); if (!x_g.empty()) { - auto *op_desc = new OpDescBind(); + auto *op_desc = new OpDesc(); op_desc->SetType("scale"); op_desc->SetInput("X", OutputGrad("Out")); op_desc->SetOutput("Out", x_g); @@ -204,7 +204,7 @@ class MinusGradOpDescMaker : public GradOpDescMakerBase { auto y_g = InputGrad("Y"); if (!y_g.empty()) { - auto *op_desc = new OpDescBind(); + auto *op_desc = new OpDesc(); op_desc->SetType("scale"); op_desc->SetInput("X", OutputGrad("Out")); op_desc->SetOutput("Out", y_g); @@ -430,8 +430,8 @@ TEST(Backward, op_part_of_output_are_not_need) { ASSERT_EQ("fill_zeros_like", fill_zero.Type()); ASSERT_EQ(1UL, fill_zero.Inputs("X").size()); ASSERT_EQ("Z", fill_zero.Input("X")); - ASSERT_EQ(1UL, fill_zero.Outputs("Y").size()); - ASSERT_EQ(std::string("Z") + f::kZeroVarSuffix, fill_zero.Output("Y")); + ASSERT_EQ(1UL, fill_zero.Outputs("Out").size()); + ASSERT_EQ(std::string("Z") + f::kZeroVarSuffix, fill_zero.Output("Out")); auto &d_many_out = *net->ops_[1]; ASSERT_EQ("many_output_op_grad", d_many_out.Type()); @@ -505,25 +505,25 @@ TEST(Backward, linear_net_intermediate_variable_has_no_grad) { } TEST(Backward, simple_single_op) { - f::ProgramDescBind program; - f::BlockDescBind *block = program.MutableBlock(0); + f::ProgramDesc program; + f::BlockDesc *block = program.MutableBlock(0); - f::OpDescBind *op = block->AppendOp(); + f::OpDesc *op = block->AppendOp(); op->SetType("rowwise_add"); op->SetInput("X", {"x"}); op->SetInput("b", {"b"}); op->SetOutput("Out", {"out"}); - auto target = f::VarDescBind("out"); + auto target = f::VarDesc("out"); target.SetShape({1}); auto var_to_grad = AppendBackward(program, target, std::unordered_set{}); ASSERT_EQ(block->AllOps().size(), 3UL); - f::OpDescBind *fill_op = block->AllOps()[1]; + f::OpDesc *fill_op = block->AllOps()[1]; EXPECT_EQ(fill_op->Type(), "fill_constant"); - f::OpDescBind *grad_op = block->AllOps()[2]; + f::OpDesc *grad_op = block->AllOps()[2]; EXPECT_EQ(grad_op->Type(), "rowwise_add_grad"); 
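  // rowwise_add_grad reads only Out@GRAD and writes X@GRAD and b@GRAD,
  // which is why the checks below expect one input name and two output names.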
ASSERT_EQ(grad_op->InputNames().size(), 1UL); ASSERT_EQ(grad_op->OutputNames().size(), 2UL); @@ -543,16 +543,16 @@ TEST(Backward, simple_single_op) { } TEST(Backward, default_attribute) { - f::ProgramDescBind program; - f::BlockDescBind *block = program.MutableBlock(0); - f::OpDescBind *op = block->AppendOp(); + f::ProgramDesc program; + f::BlockDesc *block = program.MutableBlock(0); + f::OpDesc *op = block->AppendOp(); op->SetType("mul"); op->SetInput("X", {"x"}); op->SetInput("Y", {"y"}); op->SetOutput("Out", {"out"}); op->CheckAttrs(); - auto target = f::VarDescBind("out"); + auto target = f::VarDesc("out"); target.SetShape({1}); AppendBackward(program, target, std::unordered_set{}); @@ -560,47 +560,47 @@ TEST(Backward, default_attribute) { EXPECT_EQ(boost::get(op->GetAttr("x_num_col_dims")), 1); EXPECT_EQ(boost::get(op->GetAttr("y_num_col_dims")), 1); - f::OpDescBind *fill_op = block->AllOps()[1]; + f::OpDesc *fill_op = block->AllOps()[1]; EXPECT_EQ(fill_op->Type(), "fill_constant"); - f::OpDescBind *grad_op = block->AllOps()[2]; + f::OpDesc *grad_op = block->AllOps()[2]; ASSERT_EQ(grad_op->Type(), "mul_grad"); EXPECT_EQ(boost::get(grad_op->GetAttr("x_num_col_dims")), 1); EXPECT_EQ(boost::get(grad_op->GetAttr("y_num_col_dims")), 1); } TEST(Backward, simple_mult_op) { - f::ProgramDescBind program; - f::BlockDescBind *block = program.MutableBlock(0); - f::OpDescBind *op1 = block->AppendOp(); + f::ProgramDesc program; + f::BlockDesc *block = program.MutableBlock(0); + f::OpDesc *op1 = block->AppendOp(); op1->SetType("rowwise_add"); op1->SetInput("X", {"x1"}); op1->SetInput("b", {"b1"}); op1->SetOutput("Out", {"out1"}); - f::OpDescBind *op2 = block->AppendOp(); + f::OpDesc *op2 = block->AppendOp(); op2->SetType("mul"); op2->SetInput("X", {"out1"}); op2->SetInput("Y", {"y2"}); op2->SetOutput("Out", {"out2"}); - f::OpDescBind *op3 = block->AppendOp(); + f::OpDesc *op3 = block->AppendOp(); op3->SetType("rowwise_add"); op3->SetInput("X", {"out2"}); op3->SetInput("b", {"b3"}); op3->SetOutput("Out", {"out3"}); - auto target = f::VarDescBind("out3"); + auto target = f::VarDesc("out3"); target.SetShape({1}); size_t forward_len = block->AllOps().size(); auto var_to_grad = AppendBackward(program, target, std::unordered_set{}); ASSERT_EQ(block->AllOps().size(), 6UL + 1); - f::OpDescBind *fill_op = block->AllOps()[forward_len]; + f::OpDesc *fill_op = block->AllOps()[forward_len]; EXPECT_EQ(fill_op->Type(), "fill_constant"); - f::OpDescBind *grad_op1 = block->AllOps()[6]; + f::OpDesc *grad_op1 = block->AllOps()[6]; EXPECT_EQ(grad_op1->Type(), "rowwise_add_grad"); ASSERT_EQ(grad_op1->InputNames().size(), 1UL); ASSERT_EQ(grad_op1->OutputNames().size(), 2UL); @@ -611,7 +611,7 @@ TEST(Backward, simple_mult_op) { EXPECT_EQ(grad_op1->Output(f::GradVarName("b")), std::vector({f::GradVarName("b1")})); - f::OpDescBind *grad_op2 = block->AllOps()[5]; + f::OpDesc *grad_op2 = block->AllOps()[5]; EXPECT_EQ(grad_op2->Type(), "mul_grad"); ASSERT_EQ(grad_op2->InputNames().size(), 4UL); ASSERT_EQ(grad_op2->OutputNames().size(), 2UL); @@ -625,7 +625,7 @@ TEST(Backward, simple_mult_op) { EXPECT_EQ(grad_op2->Output(f::GradVarName("Y")), std::vector({f::GradVarName("y2")})); - f::OpDescBind *grad_op3 = block->AllOps()[4]; + f::OpDesc *grad_op3 = block->AllOps()[4]; EXPECT_EQ(grad_op3->Type(), "rowwise_add_grad"); ASSERT_EQ(grad_op3->InputNames().size(), 1UL); ASSERT_EQ(grad_op3->OutputNames().size(), 2UL); @@ -655,42 +655,42 @@ TEST(Backward, simple_mult_op) { } TEST(Backward, intermedia_var_no_grad) { - f::ProgramDescBind 
program; - f::BlockDescBind *block = program.MutableBlock(0); - f::OpDescBind *op1 = block->AppendOp(); + f::ProgramDesc program; + f::BlockDesc *block = program.MutableBlock(0); + f::OpDesc *op1 = block->AppendOp(); op1->SetType("rowwise_add"); op1->SetInput("X", {"x1"}); op1->SetInput("b", {"b1"}); op1->SetOutput("Out", {"out1"}); - f::OpDescBind *op2 = block->AppendOp(); + f::OpDesc *op2 = block->AppendOp(); op2->SetType("mul"); op2->SetInput("X", {"x2"}); op2->SetInput("Y", {"y2"}); op2->SetOutput("Out", {"out2"}); - f::OpDescBind *op3 = block->AppendOp(); + f::OpDesc *op3 = block->AppendOp(); op3->SetType("rowwise_add"); op3->SetInput("X", {"out2"}); op3->SetInput("b", {"b3"}); op3->SetOutput("Out", {"out3"}); - f::OpDescBind *op4 = block->AppendOp(); + f::OpDesc *op4 = block->AppendOp(); op4->SetType("mul"); op4->SetInput("X", {"out1"}); op4->SetInput("Y", {"out3"}); op4->SetOutput("Out", {"out4"}); - auto target = f::VarDescBind("out4"); + auto target = f::VarDesc("out4"); target.SetShape({1}); size_t forward_len = block->AllOps().size(); auto var_to_grad = AppendBackward(program, target, {"out3"}); ASSERT_EQ(block->AllOps().size(), 7UL); - f::OpDescBind *fill_op = block->AllOps()[forward_len]; + f::OpDesc *fill_op = block->AllOps()[forward_len]; EXPECT_EQ(fill_op->Type(), "fill_constant"); - f::OpDescBind *grad_op1 = block->AllOps()[6]; + f::OpDesc *grad_op1 = block->AllOps()[6]; EXPECT_EQ(grad_op1->Type(), "rowwise_add_grad"); ASSERT_EQ(grad_op1->InputNames().size(), 1UL); ASSERT_EQ(grad_op1->OutputNames().size(), 2UL); @@ -701,7 +701,7 @@ TEST(Backward, intermedia_var_no_grad) { EXPECT_EQ(grad_op1->Output(f::GradVarName("b")), std::vector({f::GradVarName("b1")})); - f::OpDescBind *grad_op4 = block->AllOps()[5]; + f::OpDesc *grad_op4 = block->AllOps()[5]; EXPECT_EQ(grad_op4->Type(), "mul_grad"); ASSERT_EQ(grad_op4->InputNames().size(), 4UL); ASSERT_EQ(grad_op4->OutputNames().size(), 2UL); @@ -726,32 +726,32 @@ TEST(Backward, intermedia_var_no_grad) { } TEST(Backward, var_no_grad) { - f::ProgramDescBind program; - f::BlockDescBind *block = program.MutableBlock(0); - f::OpDescBind *op1 = block->AppendOp(); + f::ProgramDesc program; + f::BlockDesc *block = program.MutableBlock(0); + f::OpDesc *op1 = block->AppendOp(); op1->SetType("mult_in_out"); op1->SetInput("X", {"x1"}); op1->SetInput("H", {"h1"}); op1->SetOutput("Y", {"y1"}); op1->SetOutput("Z", {"z1"}); - f::OpDescBind *op2 = block->AppendOp(); + f::OpDesc *op2 = block->AppendOp(); op2->SetType("mult_in_out"); op2->SetInput("X", {"y1"}); op2->SetInput("H", {"z1"}); op2->SetOutput("Y", {"y2"}); op2->SetOutput("Z", {"z2"}); - auto target = f::VarDescBind("z2"); + auto target = f::VarDesc("z2"); target.SetShape({1}); size_t forward_len = block->AllOps().size(); auto var_to_grad = AppendBackward(program, target, {"z1"}); ASSERT_EQ(block->AllOps().size(), 6UL); - f::OpDescBind *fill_op = block->AllOps()[forward_len]; + f::OpDesc *fill_op = block->AllOps()[forward_len]; EXPECT_EQ(fill_op->Type(), "fill_constant"); - f::OpDescBind *grad_op2 = block->AllOps()[3]; + f::OpDesc *grad_op2 = block->AllOps()[3]; ASSERT_EQ(grad_op2->Type(), "mult_in_out_grad"); ASSERT_EQ(grad_op2->InputNames().size(), 6UL); ASSERT_EQ(grad_op2->OutputNames().size(), 2UL); @@ -767,15 +767,15 @@ TEST(Backward, var_no_grad) { std::vector({f::GradVarName("y1")})); EXPECT_EQ(grad_op2->Output(f::GradVarName("H")), std::vector()); - f::OpDescBind *fill_zero_op = block->AllOps()[4]; + f::OpDesc *fill_zero_op = block->AllOps()[4]; ASSERT_EQ(fill_zero_op->Type(), 
"fill_zeros_like"); ASSERT_EQ(fill_zero_op->InputNames().size(), 1UL); ASSERT_EQ(fill_zero_op->OutputNames().size(), 1UL); EXPECT_EQ(fill_zero_op->Input("X"), std::vector({"z1"})); - EXPECT_EQ(fill_zero_op->Output("Y"), + EXPECT_EQ(fill_zero_op->Output("Out"), std::vector({std::string("z1") + f::kZeroVarSuffix})); - f::OpDescBind *grad_op1 = block->AllOps()[5]; + f::OpDesc *grad_op1 = block->AllOps()[5]; ASSERT_EQ(grad_op1->Type(), "mult_in_out_grad"); ASSERT_EQ(grad_op1->InputNames().size(), 6UL); ASSERT_EQ(grad_op1->OutputNames().size(), 2UL); @@ -803,37 +803,37 @@ TEST(Backward, var_no_grad) { } TEST(Backward, shared_var) { - f::ProgramDescBind program; - f::BlockDescBind *block = program.MutableBlock(0); - f::OpDescBind *op1 = block->AppendOp(); + f::ProgramDesc program; + f::BlockDesc *block = program.MutableBlock(0); + f::OpDesc *op1 = block->AppendOp(); op1->SetType("rowwise_add"); op1->SetInput("X", {"x1"}); op1->SetInput("b", {"b1"}); op1->SetOutput("Out", {"out1"}); - f::OpDescBind *op2 = block->AppendOp(); + f::OpDesc *op2 = block->AppendOp(); op2->SetType("mul"); op2->SetInput("X", {"out1"}); op2->SetInput("Y", {"y2"}); op2->SetOutput("Out", {"out2"}); - f::OpDescBind *op3 = block->AppendOp(); + f::OpDesc *op3 = block->AppendOp(); op3->SetType("rowwise_add"); op3->SetInput("X", {"out1"}); op3->SetInput("b", {"b3"}); op3->SetOutput("Out", {"out3"}); - auto target = f::VarDescBind("out3"); + auto target = f::VarDesc("out3"); target.SetShape({1}); size_t forward_len = block->AllOps().size(); auto var_to_grad = AppendBackward(program, target, std::unordered_set{}); ASSERT_EQ(block->AllOps().size(), 8UL); - f::OpDescBind *fill_op = block->AllOps()[forward_len]; + f::OpDesc *fill_op = block->AllOps()[forward_len]; EXPECT_EQ(fill_op->Type(), "fill_constant"); - f::OpDescBind *grad_op3 = block->AllOps()[4]; + f::OpDesc *grad_op3 = block->AllOps()[4]; ASSERT_EQ(grad_op3->Type(), "rowwise_add_grad"); ASSERT_EQ(grad_op3->InputNames().size(), 1UL); ASSERT_EQ(grad_op3->OutputNames().size(), 2UL); @@ -844,7 +844,7 @@ TEST(Backward, shared_var) { EXPECT_EQ(grad_op3->Output(f::GradVarName("b")), std::vector({f::GradVarName("b3")})); - f::OpDescBind *grad_op4 = block->AllOps()[5]; + f::OpDesc *grad_op4 = block->AllOps()[5]; ASSERT_EQ(grad_op4->Type(), "mul_grad"); ASSERT_EQ(grad_op4->InputNames().size(), 4UL); ASSERT_EQ(grad_op4->OutputNames().size(), 2UL); @@ -858,7 +858,7 @@ TEST(Backward, shared_var) { EXPECT_EQ(grad_op4->Output(f::GradVarName("Y")), std::vector({f::GradVarName("y2")})); - f::OpDescBind *sum_op = block->AllOps()[6]; + f::OpDesc *sum_op = block->AllOps()[6]; ASSERT_EQ(sum_op->Type(), "sum"); ASSERT_EQ(sum_op->InputNames().size(), 1UL); ASSERT_EQ(sum_op->OutputNames().size(), 1UL); @@ -868,7 +868,7 @@ TEST(Backward, shared_var) { EXPECT_EQ(sum_op->Output("Out"), std::vector({f::GradVarName("out1")})); - f::OpDescBind *grad_op1 = block->AllOps()[7]; + f::OpDesc *grad_op1 = block->AllOps()[7]; ASSERT_EQ(grad_op1->Type(), "rowwise_add_grad"); ASSERT_EQ(grad_op1->InputNames().size(), 1UL); ASSERT_EQ(grad_op1->OutputNames().size(), 2UL); @@ -895,19 +895,19 @@ TEST(Backward, shared_var) { } TEST(Backward, half_backward) { - f::ProgramDescBind program; - f::BlockDescBind *block = program.MutableBlock(0); + f::ProgramDesc program; + f::BlockDesc *block = program.MutableBlock(0); auto *op1 = block->AppendOp(); op1->SetType("minus"); op1->SetInput("X", {"a"}); op1->SetInput("Y", {"b"}); op1->SetOutput("Out", {"out"}); - auto target = f::VarDescBind("out"); + auto target = 
f::VarDesc("out"); target.SetShape({1}); size_t forward_len = block->AllOps().size(); auto var_to_grad = AppendBackward(program, target, {"b"}); - f::OpDescBind *fill_op = block->AllOps()[forward_len]; + f::OpDesc *fill_op = block->AllOps()[forward_len]; EXPECT_EQ(fill_op->Type(), "fill_constant"); auto ops = block->AllOps(); ASSERT_EQ(3UL, ops.size()); diff --git a/paddle/framework/block_desc.cc b/paddle/framework/block_desc.cc index 6a7a07d5cf..0668b08ff7 100644 --- a/paddle/framework/block_desc.cc +++ b/paddle/framework/block_desc.cc @@ -19,18 +19,18 @@ limitations under the License. */ namespace paddle { namespace framework { -VarDescBind *BlockDescBind::Var(const std::string &name) { +VarDesc *BlockDesc::Var(const std::string &name) { auto it = vars_.find(name); if (it != vars_.end()) { return it->second.get(); } need_update_ = true; - auto *var = new VarDescBind(name); + auto *var = new VarDesc(name); vars_[name].reset(var); return var; } -VarDescBind *BlockDescBind::FindVar(const std::string &name) const { +VarDesc *BlockDesc::FindVar(const std::string &name) const { auto it = vars_.find(name); if (it == vars_.end()) { return nullptr; @@ -38,11 +38,11 @@ VarDescBind *BlockDescBind::FindVar(const std::string &name) const { return it->second.get(); } -bool BlockDescBind::HasVar(const std::string &name) const { +bool BlockDesc::HasVar(const std::string &name) const { return vars_.find(name) != vars_.end(); } -VarDescBind *BlockDescBind::FindVarRecursive(const std::string &name) const { +VarDesc *BlockDesc::FindVarRecursive(const std::string &name) const { if (name == kEmptyVarName) return nullptr; auto it = vars_.find(name); @@ -53,53 +53,67 @@ VarDescBind *BlockDescBind::FindVarRecursive(const std::string &name) const { return it->second.get(); } -VarDescBind *BlockDescBind::FindRecursiveOrCreateVar( - const std::string &name_bytes) { - VarDescBind *res = FindVarRecursive(name_bytes); +VarDesc *BlockDesc::FindRecursiveOrCreateVar(const std::string &name_bytes) { + VarDesc *res = FindVarRecursive(name_bytes); if (res == nullptr) { res = Var(name_bytes); } return res; } -bool BlockDescBind::HasVarRecursive(const std::string &name) const { +bool BlockDesc::HasVarRecursive(const std::string &name) const { return FindVarRecursive(name) != nullptr; } -std::vector BlockDescBind::AllVars() const { - std::vector res; +std::vector BlockDesc::AllVars() const { + std::vector res; for (const auto &p : vars_) { res.push_back(p.second.get()); } return res; } -OpDescBind *BlockDescBind::AppendOp() { +OpDesc *BlockDesc::AppendOp() { need_update_ = true; - ops_.emplace_back(new OpDescBind()); + ops_.emplace_back(new OpDesc()); return ops_.back().get(); } -void BlockDescBind::AppendAllocatedOp(std::unique_ptr &&op_desc) { +void BlockDesc::AppendAllocatedOp(std::unique_ptr &&op_desc) { need_update_ = true; ops_.emplace_back(std::move(op_desc)); } -OpDescBind *BlockDescBind::PrependOp() { +OpDesc *BlockDesc::PrependOp() { need_update_ = true; - ops_.emplace_front(new OpDescBind()); + ops_.emplace_front(new OpDesc()); return ops_.front().get(); } -std::vector BlockDescBind::AllOps() const { - std::vector res; +void BlockDesc::RemoveOp(size_t s, size_t e) { + if (ops_.begin() + s == ops_.end() || ops_.begin() + e == ops_.end()) { + return; + } + need_update_ = true; + for (auto it = ops_.begin() + s; it != ops_.begin() + e; it++) { + auto names = (*it)->InputArgumentNames(); + for (auto n : names) { + // TODO(typhoonzero): delete vars if no other op use it. 
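+      // Until such a use-count check exists, this loop only logs each input
+      // variable of the erased ops and leaves the variables in the block.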
+ VLOG(3) << "deleting var " << n; + } + } + ops_.erase(ops_.begin() + s, ops_.begin() + e); +} + +std::vector BlockDesc::AllOps() const { + std::vector res; for (const auto &op : ops_) { res.push_back(op.get()); } return res; } -void BlockDescBind::Flush() { +void BlockDesc::Flush() { for (auto &op_desc : ops_) { op_desc->Flush(); } @@ -121,43 +135,43 @@ void BlockDescBind::Flush() { } } -BlockDescBind *BlockDescBind::ParentBlock() const { +BlockDesc *BlockDesc::ParentBlock() const { if (this->desc_->parent_idx() == kNoneBlockIndex) { return nullptr; } return prog_->MutableBlock(static_cast(this->desc_->parent_idx())); } -BlockDesc *BlockDescBind::Proto() { +proto::BlockDesc *BlockDesc::Proto() { Flush(); return desc_; } -BlockDescBind::BlockDescBind(ProgramDescBind *prog, BlockDesc *desc) +BlockDesc::BlockDesc(ProgramDesc *prog, proto::BlockDesc *desc) : prog_(prog), desc_(desc), need_update_(false) { - for (const VarDesc &var_desc : desc_->vars()) { - vars_[var_desc.name()].reset(new VarDescBind(var_desc)); + for (const proto::VarDesc &var_desc : desc_->vars()) { + vars_[var_desc.name()].reset(new VarDesc(var_desc)); } - for (const OpDesc &op_desc : desc_->ops()) { - ops_.emplace_back(new OpDescBind(op_desc, prog)); + for (const proto::OpDesc &op_desc : desc_->ops()) { + ops_.emplace_back(new OpDesc(op_desc, prog)); } } -BlockDescBind::BlockDescBind(const BlockDescBind &other, BlockDesc *desc, - ProgramDescBind *prog) +BlockDesc::BlockDesc(const BlockDesc &other, proto::BlockDesc *desc, + ProgramDesc *prog) : prog_(prog), desc_(desc) { need_update_ = true; for (auto &op : other.ops_) { - ops_.emplace_back(new OpDescBind(*op)); + ops_.emplace_back(new OpDesc(*op)); } for (auto &it : other.vars_) { - auto *var = new VarDescBind(*it.second); + auto *var = new VarDesc(*it.second); vars_[it.first].reset(var); } } -void BlockDescBind::ClearPBOps() { +void BlockDesc::ClearPBOps() { auto ops = this->desc_->mutable_ops(); while (!ops->empty()) { // we do not own the OpDesc, so release the ownership. @@ -165,7 +179,7 @@ void BlockDescBind::ClearPBOps() { } } -void BlockDescBind::ClearPBVars() { +void BlockDesc::ClearPBVars() { auto vars = this->desc_->mutable_vars(); while (!vars->empty()) { // we do not own the VarDesc, so release the ownership. diff --git a/paddle/framework/block_desc.h b/paddle/framework/block_desc.h index 8e967e5378..6c8c81b332 100644 --- a/paddle/framework/block_desc.h +++ b/paddle/framework/block_desc.h @@ -28,20 +28,19 @@ limitations under the License. */ namespace paddle { namespace framework { -class ProgramDescBind; +class ProgramDesc; // Each Protobuf Message, we provide a XXXBind class. In that class, we optimize // read/write speed. Only when we want the protobuf message, the local changes // will be synchronized (by `Sync` method). 
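Before the class itself, a quick usage sketch of the RemoveOp API introduced in block_desc.cc above (a sketch only, assuming a default-constructed ProgramDesc whose root block starts empty, as in the backward tests of this patch):

#include "paddle/framework/block_desc.h"
#include "paddle/framework/program_desc.h"

void PruneFirstOp() {
  paddle::framework::ProgramDesc program;
  paddle::framework::BlockDesc* block = program.MutableBlock(0);
  block->AppendOp()->SetType("mul");
  block->AppendOp()->SetType("sum");
  block->RemoveOp(0, 1);  // erases ops in the half-open range [0, 1)
  // Only the "sum" op remains, i.e. block->OpSize() == 1.
}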
-class BlockDescBind { +class BlockDesc { public: - BlockDescBind(ProgramDescBind *prog, BlockDesc *desc); + BlockDesc(ProgramDesc *prog, proto::BlockDesc *desc); - BlockDescBind(const BlockDescBind &other, BlockDesc *desc, - ProgramDescBind *prog); + BlockDesc(const BlockDesc &other, proto::BlockDesc *desc, ProgramDesc *prog); - ~BlockDescBind() { + ~BlockDesc() { this->ClearPBVars(); this->ClearPBOps(); } @@ -50,15 +49,15 @@ class BlockDescBind { int32_t Parent() const { return desc_->parent_idx(); } - VarDescBind *Var(const std::string &name_bytes); + VarDesc *Var(const std::string &name_bytes); - VarDescBind *FindVar(const std::string &name_bytes) const; + VarDesc *FindVar(const std::string &name_bytes) const; bool HasVar(const std::string &var_name) const; - VarDescBind *FindVarRecursive(const std::string &name_bytes) const; + VarDesc *FindVarRecursive(const std::string &name_bytes) const; - VarDescBind *FindRecursiveOrCreateVar(const std::string &name_bytes); + VarDesc *FindRecursiveOrCreateVar(const std::string &name_bytes); bool HasVarRecursive(const std::string &var_name) const; @@ -70,41 +69,43 @@ class BlockDescBind { return var_names; } - std::vector AllVars() const; + std::vector AllVars() const; - BlockDescBind *ParentBlock() const; + BlockDesc *ParentBlock() const; - OpDescBind *AppendOp(); + OpDesc *AppendOp(); - void AppendAllocatedOp(std::unique_ptr &&op_desc); + void AppendAllocatedOp(std::unique_ptr &&op_desc); - OpDescBind *PrependOp(); + OpDesc *PrependOp(); - std::vector AllOps() const; + void RemoveOp(size_t s, size_t e); + + std::vector AllOps() const; size_t OpSize() const { return ops_.size(); } - OpDescBind *Op(int idx) { return ops_.at(idx).get(); } + OpDesc *Op(int idx) { return ops_.at(idx).get(); } void Flush(); - BlockDesc *Proto(); + proto::BlockDesc *Proto(); - ProgramDescBind *Program() { return this->prog_; } + ProgramDesc *Program() { return this->prog_; } private: void ClearPBOps(); void ClearPBVars(); private: - ProgramDescBind *prog_; // not_own - BlockDesc *desc_; // not_own + ProgramDesc *prog_; // not_own + proto::BlockDesc *desc_; // not_own bool need_update_; - std::deque> ops_; - std::unordered_map> vars_; + std::deque> ops_; + std::unordered_map> vars_; - DISABLE_COPY_AND_ASSIGN(BlockDescBind); + DISABLE_COPY_AND_ASSIGN(BlockDesc); }; } // namespace framework } // namespace paddle diff --git a/paddle/framework/data_layout.h b/paddle/framework/data_layout.h new file mode 100644 index 0000000000..4a8669c3a4 --- /dev/null +++ b/paddle/framework/data_layout.h @@ -0,0 +1,59 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/
+
+#pragma once
+#include "paddle/platform/enforce.h"
+
+#include <iostream>
+#include "paddle/platform/enforce.h"
+
+namespace paddle {
+namespace framework {
+
+enum class DataLayout {
+  kNHWC = 0,
+  kNCHW = 1,
+  kAnyLayout = 2,
+};
+
+inline DataLayout StringToDataLayout(const std::string& str) {
+  if (str == "NHWC" || str == "nhwc") {
+    return DataLayout::kNHWC;
+  } else if (str == "NCHW" || str == "nchw") {
+    return DataLayout::kNCHW;
+  } else {
+    PADDLE_THROW("Unknown storage order string: %s", str);
+  }
+}
+
+inline std::string DataLayoutToString(const DataLayout& data_layout) {
+  switch (data_layout) {
+    case DataLayout::kNHWC:
+      return "NHWC";
+    case DataLayout::kNCHW:
+      return "NCHW";
+    case DataLayout::kAnyLayout:
+      return "ANY_LAYOUT";
+    default:
+      PADDLE_THROW("unknown DataLayout %d", static_cast<int>(data_layout));
+  }
+}
+
+inline std::ostream& operator<<(std::ostream& out, DataLayout l) {
+  out << DataLayoutToString(l);
+  return out;
+}
+
+}  // namespace framework
+}  // namespace paddle
diff --git a/paddle/framework/data_transform.cc b/paddle/framework/data_transform.cc
new file mode 100644
index 0000000000..35f16025a9
--- /dev/null
+++ b/paddle/framework/data_transform.cc
@@ -0,0 +1,26 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/framework/data_transform.h"
+
+namespace paddle {
+namespace framework {
+
+DataTransformFnMap& DataTransformFnMap::Instance() {
+  static DataTransformFnMap data_transform_map;
+  return data_transform_map;
+}
+
+}  // namespace framework
+}  // namespace paddle
diff --git a/paddle/framework/data_transform.h b/paddle/framework/data_transform.h
new file mode 100644
index 0000000000..73f894a3e2
--- /dev/null
+++ b/paddle/framework/data_transform.h
@@ -0,0 +1,109 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
*/ + +#pragma once + +#include +#include +#include + +#include "paddle/framework/op_kernel_type.h" +#include "paddle/framework/tensor.h" +#include "paddle/framework/variable.h" +#include "paddle/platform/device_context.h" +#include "paddle/platform/macros.h" + +namespace paddle { +namespace framework { + +using DataTransformFN = + std::function ctx, + const Variable& in, Variable* out)>; +using KernelTypePair = std::pair; + +struct KernelTypePairHash { + static void HashCombine(const OpKernelType& t, std::size_t* seed) { + OpKernelType::Hash kernel_type_hasher; + (*seed) ^= kernel_type_hasher(t) + 0x9e3779b9 + (*seed << 6) + (*seed >> 2); + } + + size_t operator()(const KernelTypePair& kernel_pair) const { + std::size_t seed = 0; + HashCombine(kernel_pair.first, &seed); + HashCombine(kernel_pair.second, &seed); + return seed; + } +}; + +using DataTransformMap = + std::unordered_map; + +class DataTransformFnMap { + public: + static DataTransformFnMap& Instance(); + + bool Has(const KernelTypePair& key_pair) const { + return map_.find(key_pair) != map_.end(); + } + + void Insert(const OpKernelType& left, const OpKernelType& right, + const DataTransformFN& data_tranform_fn) { + Insert(std::make_pair(left, right), data_tranform_fn); + } + + void Insert(const KernelTypePair& kernel_type_pair, + const DataTransformFN& data_tranform_fn) { + PADDLE_ENFORCE(!Has(kernel_type_pair), + "KernelTypePair %s has been registered", ""); + map_.insert({kernel_type_pair, data_tranform_fn}); + } + + const DataTransformFN& Get(const KernelTypePair& key_pair) const { + auto data_transformer = GetNullable(key_pair); + PADDLE_ENFORCE_NOT_NULL(data_transformer, + "DataTransformFN should not be NULL"); + return *data_transformer; + } + + const DataTransformFN* GetNullable(const KernelTypePair& key_pair) const { + auto it = map_.find(key_pair); + if (it == map_.end()) { + return nullptr; + } else { + return &(it->second); + } + } + + const DataTransformMap& Map() const { return map_; } + + private: + DataTransformFnMap() = default; + DataTransformMap map_; + DISABLE_COPY_AND_ASSIGN(DataTransformFnMap); +}; + +// generate unique name with __LINE__ +// refs https://stackoverflow.com/questions/1597007 +#define TOKENPASTE(x, y) x##y +#define TOKENPASTE2(x, y) TOKENPASTE(x, y) +#define REGISTER_DATA_TRANSFORM_FN(from, to, fn) \ + static int TOKENPASTE2(fn_, __LINE__)() { \ + ::paddle::framework::DataTransformFnMap::Instance().Insert(from, to, fn); \ + return 0; \ + } \ + static int TOKENPASTE2(var_, __LINE__) __attribute__((unused)) = \ + TOKENPASTE2(fn_, __LINE__)() + +} // namespace framework +} // namespace paddle diff --git a/paddle/framework/data_transform_test.cc b/paddle/framework/data_transform_test.cc new file mode 100644 index 0000000000..f93a47eeb5 --- /dev/null +++ b/paddle/framework/data_transform_test.cc @@ -0,0 +1,78 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/framework/data_transform.h" +#include + +namespace paddle { +namespace framework { + +using namespace platform; + +int test_value = 0; + +OpKernelType kernel_type_1(proto::DataType::FP32, CPUPlace(), DataLayout::kNCHW, + LibraryType::kCUDNN); +OpKernelType kernel_type_2(proto::DataType::FP32, CUDAPlace(0), + DataLayout::kNCHW, LibraryType::kCUDNN); +OpKernelType kernel_type_3(proto::DataType::FP16, CUDAPlace(0), + DataLayout::kNCHW, LibraryType::kCUDNN); + +void type1_to_type2(std::vector ctx, + const Variable& in, Variable* out) { + test_value++; +} + +void type2_to_type3(std::vector ctx, + const Variable& in, Variable* out) { + test_value--; +} + +void type1_to_type3(std::vector ctx, + const Variable& in, Variable* out) { + test_value += 2; +} + +} // namespace framework +} // namespace paddle + +namespace frw = paddle::framework; + +REGISTER_DATA_TRANSFORM_FN(frw::kernel_type_1, frw::kernel_type_2, + frw::type1_to_type2); +REGISTER_DATA_TRANSFORM_FN(frw::kernel_type_2, frw::kernel_type_3, + frw::type2_to_type3); +REGISTER_DATA_TRANSFORM_FN(frw::kernel_type_1, frw::kernel_type_3, + frw::type1_to_type3); + +TEST(DataTransform, Register) { + using namespace paddle::framework; + using namespace paddle::platform; + + auto& instance = DataTransformFnMap::Instance(); + ASSERT_EQ(instance.Map().size(), 3UL); + std::vector ctx; + paddle::framework::Variable in; + paddle::framework::Variable out; + + instance.Get(std::make_pair(frw::kernel_type_1, frw::kernel_type_2))(ctx, in, + &out); + ASSERT_EQ(test_value, 1); + instance.Get(std::make_pair(frw::kernel_type_2, frw::kernel_type_3))(ctx, in, + &out); + ASSERT_EQ(test_value, 0); + instance.Get(std::make_pair(frw::kernel_type_1, frw::kernel_type_3))(ctx, in, + &out); + ASSERT_EQ(test_value, 2); +} diff --git a/paddle/framework/data_type.h b/paddle/framework/data_type.h index c54d2d4ddf..6a372ac32e 100644 --- a/paddle/framework/data_type.h +++ b/paddle/framework/data_type.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ #pragma once #include @@ -20,7 +20,8 @@ namespace paddle { namespace framework { -inline DataType ToDataType(std::type_index type) { +inline proto::DataType ToDataType(std::type_index type) { + using namespace paddle::framework::proto; if (typeid(float).hash_code() == type.hash_code()) { return DataType::FP32; } else if (typeid(double).hash_code() == type.hash_code()) { @@ -36,7 +37,8 @@ inline DataType ToDataType(std::type_index type) { } } -inline std::type_index ToTypeIndex(DataType type) { +inline std::type_index ToTypeIndex(proto::DataType type) { + using namespace paddle::framework::proto; switch (type) { case DataType::FP32: return typeid(float); @@ -54,7 +56,8 @@ inline std::type_index ToTypeIndex(DataType type) { } template -inline void VisitDataType(DataType type, Visitor visitor) { +inline void VisitDataType(proto::DataType type, Visitor visitor) { + using namespace paddle::framework::proto; switch (type) { case DataType::FP32: visitor.template operator()(); diff --git a/paddle/framework/ddim_test.cc b/paddle/framework/ddim_test.cc index 756232b1b5..bc259d1f60 100644 --- a/paddle/framework/ddim_test.cc +++ b/paddle/framework/ddim_test.cc @@ -1,3 +1,16 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include #include diff --git a/paddle/framework/details/op_registry.h b/paddle/framework/details/op_registry.h index f91e0e0341..6d50e820b2 100644 --- a/paddle/framework/details/op_registry.h +++ b/paddle/framework/details/op_registry.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ #pragma once @@ -90,7 +90,7 @@ struct OpInfoFiller { template struct OpInfoFiller { void operator()(const char* op_type, OpInfo* info) const { - info->proto_ = new OpProto; + info->proto_ = new proto::OpProto; info->checker_ = new OpAttrChecker(); auto maker = T(info->proto_, info->checker_); maker.Validate(); @@ -106,10 +106,10 @@ template struct OpInfoFiller { void operator()(const char* op_type, OpInfo* info) const { info->grad_op_maker_ = []( - const OpDescBind& fwd_op, + const OpDesc& fwd_op, const std::unordered_set& no_grad_set, std::unordered_map* grad_to_var, - const std::vector& grad_block) { + const std::vector& grad_block) { T maker(fwd_op, no_grad_set, grad_to_var, grad_block); return maker(); }; @@ -119,7 +119,7 @@ struct OpInfoFiller { template struct OpInfoFiller { void operator()(const char* op_type, OpInfo* info) const { - info->infer_var_type_ = [](const OpDescBind& fwd_op, BlockDescBind* block) { + info->infer_var_type_ = [](const OpDesc& fwd_op, BlockDesc* block) { T inference; inference(fwd_op, block); }; diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc index 83aa927c29..997773c168 100644 --- a/paddle/framework/executor.cc +++ b/paddle/framework/executor.cc @@ -33,48 +33,22 @@ namespace framework { const std::string kFeedOpType = "feed"; const std::string kFetchOpType = "fetch"; -Executor::Executor(const std::vector& places) : own_(true) { - PADDLE_ENFORCE_GT(places.size(), 0); - device_contexts_.resize(places.size()); - for (size_t i = 0; i < places.size(); i++) { - if (platform::is_cpu_place(places[i])) { - device_contexts_[i] = new platform::CPUDeviceContext( - boost::get(places[i])); - } else if (platform::is_gpu_place(places[i])) { -#ifdef PADDLE_WITH_CUDA - device_contexts_[i] = new platform::CUDADeviceContext( - boost::get(places[i])); -#else - PADDLE_THROW( - "'GPUPlace' is not supported, Please re-compile with WITH_GPU " - "option"); -#endif - } - } -} - -Executor::~Executor() { - if (own_) { - for (auto& device_context : device_contexts_) { - delete device_context; - } - } -} +Executor::Executor(const platform::Place& place) : place_(place) {} -static void CreateTensor(Variable* var, VarDesc::VarType var_type) { - if (var_type == VarDesc::LOD_TENSOR) { +static void CreateTensor(Variable* var, proto::VarDesc::VarType var_type) { + if (var_type == proto::VarDesc::LOD_TENSOR) { var->GetMutable(); - } else if (var_type == VarDesc::SELECTED_ROWS) { + } else if (var_type == proto::VarDesc::SELECTED_ROWS) { var->GetMutable(); - } else if (var_type == VarDesc::FEED_MINIBATCH) { + } else if (var_type == proto::VarDesc::FEED_MINIBATCH) { var->GetMutable(); - } else if (var_type == VarDesc::FETCH_LIST) { + } else if (var_type == proto::VarDesc::FETCH_LIST) { var->GetMutable(); - } else if (var_type == VarDesc::STEP_SCOPES) { + } else if (var_type == proto::VarDesc::STEP_SCOPES) { var->GetMutable>(); - } else if (var_type == VarDesc::LOD_RANK_TABLE) { + } else if (var_type == proto::VarDesc::LOD_RANK_TABLE) { var->GetMutable(); - } else if (var_type == VarDesc::LOD_TENSOR_ARRAY) { + } else if (var_type == proto::VarDesc::LOD_TENSOR_ARRAY) { var->GetMutable(); } else { PADDLE_THROW( @@ -84,56 +58,54 @@ static void CreateTensor(Variable* var, VarDesc::VarType var_type) { } } -void Executor::Run(const ProgramDescBind& pdesc, Scope* scope, int block_id, - bool create_local_scope) { +void Executor::Run(const ProgramDesc& pdesc, Scope* scope, int block_id, + bool create_local_scope, bool create_vars) { // TODO(tonyyang-svail): // - only runs on the 
first device (i.e. no interdevice communication) // - will change to use multiple blocks for RNN op and Cond Op PADDLE_ENFORCE_LT(static_cast(block_id), pdesc.Size()); auto& block = pdesc.Block(block_id); - auto& device = device_contexts_[0]; Scope* local_scope = scope; - if (create_local_scope) { - local_scope = &scope->NewScope(); - for (auto& var : block.AllVars()) { - if (var->Name() == framework::kEmptyVarName) { - continue; + if (create_vars) { + if (create_local_scope) { + local_scope = &scope->NewScope(); + for (auto& var : block.AllVars()) { + if (var->Name() == framework::kEmptyVarName) { + continue; + } + + if (var->Persistable()) { + auto* ptr = scope->Var(var->Name()); + CreateTensor(ptr, var->GetType()); + VLOG(3) << "Create Variable " << var->Name() + << " global, which pointer is " << ptr; + } else { + auto* ptr = local_scope->Var(var->Name()); + CreateTensor(ptr, var->GetType()); + VLOG(3) << "Create Variable " << var->Name() + << " locally, which pointer is " << ptr; + } } - - if (var->Persistable()) { - auto* ptr = scope->Var(var->Name()); - CreateTensor(ptr, var->GetType()); - VLOG(3) << "Create Variable " << var->Name() - << " global, which pointer is " << ptr; - } else { + } else { + for (auto& var : block.AllVars()) { auto* ptr = local_scope->Var(var->Name()); CreateTensor(ptr, var->GetType()); - VLOG(3) << "Create Variable " << var->Name() - << " locally, which pointer is " << ptr; + VLOG(3) << "Create variable " << var->Name() << ", which pointer is " + << ptr; } - } - } else { - for (auto& var : block.AllVars()) { - auto* ptr = local_scope->Var(var->Name()); - CreateTensor(ptr, var->GetType()); - VLOG(3) << "Create variable " << var->Name() << ", which pointer is " - << ptr; - } - } + } // if (create_local_scope) + } // if (create_vars) for (auto& op_desc : block.AllOps()) { auto op = paddle::framework::OpRegistry::CreateOp(*op_desc); VLOG(3) << op->DebugString(); - op->Run(*local_scope, *device); + op->Run(*local_scope, place_); } if (create_local_scope) { scope->DeleteScope(local_scope); } } -Executor::Executor(const platform::DeviceContext& device) - : device_contexts_({&device}), own_(false) {} - } // namespace framework } // namespace paddle diff --git a/paddle/framework/executor.h b/paddle/framework/executor.h index b745f4f647..d869e18901 100644 --- a/paddle/framework/executor.h +++ b/paddle/framework/executor.h @@ -18,15 +18,18 @@ limitations under the License. 
*/ #include "paddle/framework/program_desc.h" #include "paddle/framework/scope.h" #include "paddle/framework/tensor.h" +#include "paddle/platform/device_context.h" namespace paddle { namespace framework { class Executor { public: - explicit Executor(const std::vector& places); - explicit Executor(const platform::DeviceContext& devices); - ~Executor(); + // TODO(dzhwinter) : Do not rely on this function, it will be removed + explicit Executor(const platform::DeviceContext& device) + : Executor(device.GetPlace()) {} + + explicit Executor(const platform::Place& place); /* @Brief * Runtime evaluation of the given ProgramDesc under certain Scope @@ -35,11 +38,11 @@ class Executor { * ProgramDesc * Scope */ - void Run(const ProgramDescBind&, Scope*, int, bool create_local_scope = true); + void Run(const ProgramDesc&, Scope*, int, bool create_local_scope = true, + bool create_vars = true); private: - std::vector device_contexts_; - bool own_; + const platform::Place place_; }; } // namespace framework diff --git a/paddle/framework/feed_fetch_type.h b/paddle/framework/feed_fetch_type.h index bc4ae440fc..9bc4a90c44 100644 --- a/paddle/framework/feed_fetch_type.h +++ b/paddle/framework/feed_fetch_type.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once #include diff --git a/paddle/framework/framework.proto b/paddle/framework/framework.proto index f1fc4529e1..4f2746e4b8 100644 --- a/paddle/framework/framework.proto +++ b/paddle/framework/framework.proto @@ -14,7 +14,7 @@ limitations under the License. */ syntax = "proto2"; option optimize_for = LITE_RUNTIME; -package paddle.framework; +package paddle.framework.proto; enum AttrType { INT = 0; diff --git a/paddle/framework/grad_op_desc_maker.h b/paddle/framework/grad_op_desc_maker.h index 998186e339..2de5242831 100644 --- a/paddle/framework/grad_op_desc_maker.h +++ b/paddle/framework/grad_op_desc_maker.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
 
-    http://www.apache.org/licenses/LICENSE-2.0
+    http://www.apache.org/licenses/LICENSE-2.0
 
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License. */
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
 
 #pragma once
 #include <string>
@@ -22,21 +22,27 @@
 namespace paddle {
 namespace framework {
 
+/*
+  This functor class is responsible for creating the gradient ops for the given
+  operator fwd_op. After it is called (through operator()), the pairs of
+  (gradient variable, corresponding input variable of fwd_op) will be added to
+  grad_to_var. If an input variable of fwd_op is contained in no_grad_set, its
+  gradient variable will be ignored or kEmptyVarName depending on the template
+  argument DropEmptyIG in the derived classes.
+ */
 class GradOpDescMakerBase {
  public:
   explicit GradOpDescMakerBase(
-      const OpDescBind& fwd_op,
-      const std::unordered_set<std::string>& no_grad_set,
+      const OpDesc& fwd_op, const std::unordered_set<std::string>& no_grad_set,
       std::unordered_map<std::string, std::string>* grad_to_var,
-      const std::vector<BlockDescBind*>& grad_block =
-          std::vector<BlockDescBind*>())
+      const std::vector<BlockDesc*>& grad_block = std::vector<BlockDesc*>())
       : fwd_op_(fwd_op),
         no_grad_set_(no_grad_set),
         grad_to_var_(grad_to_var),
         grad_block_(grad_block) {}
 
   virtual ~GradOpDescMakerBase() = default;
 
-  virtual std::vector<std::unique_ptr<OpDescBind>> operator()() const = 0;
+  virtual std::vector<std::unique_ptr<OpDesc>> operator()() const = 0;
 
  protected:
   std::vector<std::string> InputGrad(const std::string& name,
@@ -58,6 +64,16 @@ class GradOpDescMakerBase {
     if (!drop_empty_grad) {
       return ret_val;
     }
+    PADDLE_ENFORCE_LE(var_names.size(), 1UL,
+                      "BUG from operator developer:"
+                      " for input argument with a list of variables, "
+                      " drop_empty_grad is not allowed because it makes"
+                      " the correspondence between a variable and its gradient"
+                      " ambiguous. Use REGISTER_OP_EX to register the op"
+                      " or call InputGrad(?,false) in GradOpDescMaker."
+ " Op type %s", + fwd_op_.Type()); + std::vector dropped_ret_val; dropped_ret_val.reserve(ret_val.size()); std::copy_if(ret_val.begin(), ret_val.end(), @@ -105,26 +121,26 @@ class GradOpDescMakerBase { std::string ForwardOpType() const { return this->fwd_op_.Type(); } private: - const OpDescBind& fwd_op_; + const OpDesc& fwd_op_; const std::unordered_set& no_grad_set_; std::unordered_map* grad_to_var_; protected: - std::vector grad_block_; + std::vector grad_block_; }; class SingleGradOpDescMaker : public GradOpDescMakerBase { public: using GradOpDescMakerBase::GradOpDescMakerBase; - std::vector> operator()() const { - std::vector> retv; + std::vector> operator()() const { + std::vector> retv; retv.emplace_back(this->Apply()); return retv; } protected: - virtual std::unique_ptr Apply() const = 0; + virtual std::unique_ptr Apply() const = 0; }; template @@ -133,8 +149,8 @@ class DefaultGradOpDescMaker : public SingleGradOpDescMaker { using SingleGradOpDescMaker::SingleGradOpDescMaker; protected: - virtual std::unique_ptr Apply() const { - auto* grad = new OpDescBind(); + virtual std::unique_ptr Apply() const { + auto* grad = new OpDesc(); grad->SetType(this->GradOpType()); for (auto& input_param : this->InputNames()) { @@ -150,7 +166,7 @@ class DefaultGradOpDescMaker : public SingleGradOpDescMaker { grad->SetAttrMap(this->Attrs()); - return std::unique_ptr(grad); + return std::unique_ptr(grad); } virtual std::string GradOpType() const { @@ -161,7 +177,7 @@ class DefaultGradOpDescMaker : public SingleGradOpDescMaker { class EmptyGradOpMaker : public GradOpDescMakerBase { public: using GradOpDescMakerBase::GradOpDescMakerBase; - std::vector> operator()() const override { + std::vector> operator()() const override { return {}; } }; diff --git a/paddle/framework/init.cc b/paddle/framework/init.cc new file mode 100644 index 0000000000..d6601090d5 --- /dev/null +++ b/paddle/framework/init.cc @@ -0,0 +1,79 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ +#include +#include + +#include "paddle/framework/init.h" +#include "paddle/platform/device_context.h" +#include "paddle/platform/place.h" +#include "paddle/string/piece.h" + +namespace paddle { +namespace framework { + +std::once_flag gflags_init_flag; + +// TODO(qijun) move init gflags to init.cc +void InitGflags(std::vector &argv) { + std::call_once(gflags_init_flag, [&]() { + int argc = argv.size(); + char **arr = new char *[argv.size()]; + std::string line; + for (size_t i = 0; i < argv.size(); i++) { + arr[i] = &argv[i][0]; + line += argv[i]; + line += ' '; + } + google::ParseCommandLineFlags(&argc, &arr, true); + VLOG(1) << "Init commandline: " << line; + }); +} + +bool InitDevices(const std::vector &devices) { + // device format + // CPU + // GPU:1 + // TODO(dzhwinter) : add device format annotation for users. 
+ std::vector places; + for (auto &device : devices) { + auto p = string::Piece(device); + if (string::HasPrefix(p, "CPU")) { + places.emplace_back(platform::CPUPlace()); + } else if (string::HasPrefix(p, "GPU")) { +#ifdef PADDLE_WITH_CUDA + auto pos = string::RFind(p, ':', string::Piece::npos); + auto number = device.substr(pos + 1); + places.emplace_back(platform::CUDAPlace(std::stoi(number))); +#else + LOG(WARNING) + << "'GPU' is not supported, Please re-compile with WITH_GPU option"; +#endif + } else { + return false; + } + } + + if (std::find_if(places.begin(), places.end(), + [&](const platform::Place &place) { + return platform::is_cpu_place(place); + }) == places.end()) { + places.emplace_back(platform::CPUPlace()); + LOG(WARNING) << "Not specified CPU device, create CPU by Default."; + } + platform::DeviceContextPool::Create(places); + return true; +} + +} // namespace framework +} // namespace paddle diff --git a/paddle/framework/init.h b/paddle/framework/init.h new file mode 100644 index 0000000000..33907f9eb0 --- /dev/null +++ b/paddle/framework/init.h @@ -0,0 +1,28 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ +#pragma once +#include + +#include "gflags/gflags.h" +#include "glog/logging.h" + +namespace paddle { +namespace framework { + +void InitGflags(std::vector &argv); + +bool InitDevices(const std::vector &devices); + +} // namespace framework +} // namespace paddle diff --git a/paddle/framework/init_test.cc b/paddle/framework/init_test.cc new file mode 100644 index 0000000000..f0788051d4 --- /dev/null +++ b/paddle/framework/init_test.cc @@ -0,0 +1,31 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ +#include "gtest/gtest.h" + +#include "paddle/framework/init.h" + +TEST(Init, InitDevices) { + using paddle::framework::InitDevices; + std::vector ds1 = {"CPU"}; + ASSERT_EQ(InitDevices(ds1), true); + +#ifdef PADDLE_WITH_CUDA + std::vector ds2 = {"CPU", "GPU:0", "GPU:1"}; + ASSERT_EQ(InitDevices(ds2), true); + + // test re-init + std::vector ds3 = {"GPU:0", "GPU:1"}; + ASSERT_EQ(InitDevices(ds3), true); +#endif +} diff --git a/paddle/framework/library_type.h b/paddle/framework/library_type.h new file mode 100644 index 0000000000..7707799cae --- /dev/null +++ b/paddle/framework/library_type.h @@ -0,0 +1,67 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. 
diff --git a/paddle/framework/library_type.h b/paddle/framework/library_type.h
new file mode 100644
index 0000000000..7707799cae
--- /dev/null
+++ b/paddle/framework/library_type.h
@@ -0,0 +1,67 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+
+namespace paddle {
+namespace framework {
+
+// For more details about the design of LibraryType, Please refer to
+// https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/operator_kernel_type.md#library
+
+enum class LibraryType {
+  kPlain = 0,
+  kMKLDNN = 1,
+  kCUDNN = 2,
+};
+
+inline std::string LibraryTypeToString(const LibraryType& library_type) {
+  switch (library_type) {
+    case LibraryType::kPlain:
+      return "PLAIN";
+    case LibraryType::kMKLDNN:
+      return "MKLDNN";
+    case LibraryType::kCUDNN:
+      return "CUDNN";
+    default:
+      PADDLE_THROW("unknown LibraryType %d", static_cast<int>(library_type));
+  }
+}
+
+inline LibraryType StringToLibraryType(const char* ctype) {
+  std::string s(ctype);
+  if (s == std::string("PLAIN")) {
+    return LibraryType::kPlain;
+  } else if (s == std::string("MKLDNN")) {
+    return LibraryType::kMKLDNN;
+  } else if (s == std::string("CUDNN")) {
+    return LibraryType::kCUDNN;
+    // To be compatible with register macro.
+    // CPU, CUDA, PLAIN are same library type.
+  } else if (s == std::string("CPU")) {
+    return LibraryType::kPlain;
+  } else if (s == std::string("CUDA")) {
+    return LibraryType::kPlain;
+  } else {
+    PADDLE_THROW("Unknown LibraryType %s", s.c_str());
+  }
+}
+
+inline std::ostream& operator<<(std::ostream& out, LibraryType l) {
+  out << LibraryTypeToString(l);
+  return out;
+}
+
+}  // namespace
+}  // framework
diff --git a/paddle/framework/lod_rank_table.cc b/paddle/framework/lod_rank_table.cc
index 1c2fba70c8..704bce2a0e 100644
--- a/paddle/framework/lod_rank_table.cc
+++ b/paddle/framework/lod_rank_table.cc
@@ -1,16 +1,16 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
 
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
 
-   http://www.apache.org/licenses/LICENSE-2.0
+    http://www.apache.org/licenses/LICENSE-2.0
 
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License. */
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
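One detail of StringToLibraryType in library_type.h above is easy to miss: the device names "CPU" and "CUDA" deliberately collapse to kPlain so the kernel-registration macros can pass their DEVICE_TYPE token straight through. A self-contained check of that mapping, assuming only the header above:

```cpp
#include <cassert>

#include "paddle/framework/library_type.h"

int main() {
  using paddle::framework::LibraryType;
  using paddle::framework::LibraryTypeToString;
  using paddle::framework::StringToLibraryType;

  // The three real library types round-trip through their string names.
  assert(StringToLibraryType("PLAIN") == LibraryType::kPlain);
  assert(StringToLibraryType("MKLDNN") == LibraryType::kMKLDNN);
  assert(LibraryTypeToString(LibraryType::kCUDNN) == "CUDNN");

  // Register-macro compatibility: device names map to the plain library.
  assert(StringToLibraryType("CPU") == LibraryType::kPlain);
  assert(StringToLibraryType("CUDA") == LibraryType::kPlain);
  return 0;
}
```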
*/ #include "paddle/framework/lod_rank_table.h" @@ -46,4 +46,13 @@ void LoDRankTable::Reset(const LoD& lod, size_t level) { } } // namespace framework + +std::ostream& operator<<(std::ostream& out, + const framework::LoDRankTable& table) { + out << "NumOfSequence " << table.items().size() << "\n"; + for (auto& each_item : table.items()) { + out << "\tSeq #" << each_item.index << ", Len=" << each_item.length << "\n"; + } + return out; +} } // namespace paddle diff --git a/paddle/framework/lod_rank_table.h b/paddle/framework/lod_rank_table.h index 9faa3a4d7b..df188709e9 100644 --- a/paddle/framework/lod_rank_table.h +++ b/paddle/framework/lod_rank_table.h @@ -1,18 +1,19 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once +#include #include "paddle/framework/lod_tensor.h" namespace paddle { @@ -52,4 +53,8 @@ class LoDRankTable { }; } // namespace framework + +std::ostream& operator<<(std::ostream& out, + const framework::LoDRankTable& table); + } // namespace paddle diff --git a/paddle/framework/lod_tensor.cc b/paddle/framework/lod_tensor.cc index fdf6de4bab..f8a3be9a82 100644 --- a/paddle/framework/lod_tensor.cc +++ b/paddle/framework/lod_tensor.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
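The operator<< added for LoDRankTable above makes rank tables printable while debugging. A small usage sketch; it assumes the table orders its items by sequence length, longest first, which is what a rank table exists to do:

```cpp
#include <iostream>

#include "paddle/framework/lod_rank_table.h"

void DumpExample() {
  paddle::framework::LoD lod{{0, 4, 6}};  // two sequences, lengths 4 and 2
  paddle::framework::LoDRankTable table;
  table.Reset(lod, /*level=*/0);

  // Expected output, given the format of the new operator<<:
  //   NumOfSequence 2
  //       Seq #0, Len=4
  //       Seq #1, Len=2
  std::cout << table;
}
```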
*/ #include "paddle/framework/lod_tensor.h" #include "paddle/framework/data_type.h" @@ -197,7 +197,7 @@ void SerializeToStream(std::ostream &os, const LoDTensor &tensor, { // the 2nd field, tensor description // int32_t size // void* protobuf message - framework::TensorDesc desc; + proto::TensorDesc desc; desc.set_data_type(framework::ToDataType(tensor.type())); auto dims = framework::vectorize(tensor.dims()); auto *pb_dims = desc.mutable_dims(); @@ -224,7 +224,7 @@ void SerializeToStream(std::ostream &os, const LoDTensor &tensor, while (size != 0) { size_t size_to_write = std::min(kBufSize, static_cast(size)); memory::Copy(cpu, buf.get(), - boost::get(tensor.place()), + boost::get(tensor.place()), reinterpret_cast(data), size_to_write, gpu_dev_ctx.stream()); gpu_dev_ctx.Wait(); @@ -262,7 +262,7 @@ void DeserializeFromStream(std::istream &is, LoDTensor *tensor) { uint32_t version; is.read(reinterpret_cast(&version), sizeof(version)); PADDLE_ENFORCE_EQ(version, 0U, "Only version 0 is supported"); - framework::TensorDesc desc; + proto::TensorDesc desc; { // int32_t size // proto buffer int32_t size; @@ -281,16 +281,16 @@ void DeserializeFromStream(std::istream &is, LoDTensor *tensor) { void *buf; platform::Place cpu = platform::CPUPlace(); switch (desc.data_type()) { - case framework::FP32: + case proto::FP32: buf = tensor->mutable_data(cpu); break; - case framework::FP64: + case proto::FP64: buf = tensor->mutable_data(cpu); break; - case framework::INT32: + case proto::INT32: buf = tensor->mutable_data(cpu); break; - case framework::INT64: + case proto::INT64: buf = tensor->mutable_data(cpu); break; default: diff --git a/paddle/framework/lod_tensor.h b/paddle/framework/lod_tensor.h index 9411c96aea..147db3ab08 100644 --- a/paddle/framework/lod_tensor.h +++ b/paddle/framework/lod_tensor.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once @@ -184,6 +184,18 @@ LoDTensor LodExpand(const LoDTensor& source, const LoD& lod, size_t level, return tensor; } +// Get the absolute offset of a lod[start_level][start_idx:end_idx] and +// relative length of details for every levels(i.e., [start_level: ]). 
+//
+// For example,
+//  lod = [[0, 3, 4, 8], [0, 9, 10, 11, 13, 17, 19, 22, 24]]
+//  start_level = 0
+//  start_idx = 1
+//  end_idx = 3
+//
+// Returns:
+//  LoD = [[1, 4], [2, 4, 2, 3, 2]]
+//  pair = {11, 24}
 std::pair<LoD, std::pair<size_t, size_t>> GetSubLoDAndAbsoluteOffset(
     const LoD& lod, size_t start_idx, size_t end_idx, size_t start_level);
diff --git a/paddle/framework/lod_tensor_array.h b/paddle/framework/lod_tensor_array.h
index 13f0608d24..4a8e7f4fa5 100644
--- a/paddle/framework/lod_tensor_array.h
+++ b/paddle/framework/lod_tensor_array.h
@@ -1,16 +1,16 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
 
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
 
-   http://www.apache.org/licenses/LICENSE-2.0
+    http://www.apache.org/licenses/LICENSE-2.0
 
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License. */
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
 
 #pragma once
 #include <vector>
diff --git a/paddle/framework/lod_tensor_test.cu b/paddle/framework/lod_tensor_test.cu
index 5b90fbfca7..e8508ad265 100644
--- a/paddle/framework/lod_tensor_test.cu
+++ b/paddle/framework/lod_tensor_test.cu
@@ -27,7 +27,7 @@ __global__ void test(size_t* a, int size) {
 
 TEST(LoDTensor, LoDInGPU) {
   paddle::framework::LoDTensor lod_tensor;
-  paddle::platform::GPUPlace place(0);
+  paddle::platform::CUDAPlace place(0);
 
   paddle::framework::LoD src_lod;
   src_lod.push_back(std::vector<size_t>{0, 2, 4, 6, 8, 10, 12, 14});
diff --git a/paddle/framework/op_desc.cc b/paddle/framework/op_desc.cc
index 7ba1e3e4e3..b361e64438 100644
--- a/paddle/framework/op_desc.cc
+++ b/paddle/framework/op_desc.cc
@@ -25,12 +25,11 @@ limitations under the License.
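Backing up to the GetSubLoDAndAbsoluteOffset comment in lod_tensor.h above, the quoted numbers can be re-derived by hand. A stand-alone snippet that redoes the arithmetic, assuming only that an LoD is a vector of offset vectors:

```cpp
#include <cassert>
#include <cstddef>
#include <vector>

int main() {
  std::vector<std::vector<size_t>> lod = {
      {0, 3, 4, 8}, {0, 9, 10, 11, 13, 17, 19, 22, 24}};

  // The level-0 slice [start_idx=1, end_idx=3) covers offsets [3, 8).
  size_t begin = lod[0][1];  // 3
  size_t end = lod[0][3];    // 8

  // Mapping [3, 8) through level 1 yields the absolute pair {11, 24}.
  assert(lod[1][begin] == 11 && lod[1][end] == 24);

  // Relative lengths: level 0 gives {1, 4} ...
  assert(lod[0][2] - lod[0][1] == 1 && lod[0][3] - lod[0][2] == 4);
  // ... and level 1 gives {2, 4, 2, 3, 2} from consecutive differences.
  assert(lod[1][4] - lod[1][3] == 2 && lod[1][8] - lod[1][7] == 2);
  return 0;
}
```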
*/ namespace paddle { namespace framework { -class OpDescBind; -class BlockDescBind; +class OpDesc; +class BlockDesc; class CompileTimeInferShapeContext : public InferShapeContext { public: - CompileTimeInferShapeContext(const OpDescBind &op, - const BlockDescBind &block); + CompileTimeInferShapeContext(const OpDesc &op, const BlockDesc &block); bool HasInput(const std::string &name) const override; @@ -58,11 +57,11 @@ class CompileTimeInferShapeContext : public InferShapeContext { PADDLE_ENFORCE_LT(j, Outputs(out).size()); auto *in_var = block_.FindVarRecursive(Inputs(in)[i]); auto *out_var = block_.FindVarRecursive(Outputs(out)[j]); - if (in_var->GetType() != VarDesc::LOD_TENSOR) { + if (in_var->GetType() != proto::VarDesc::LOD_TENSOR) { VLOG(3) << "input " << in << " is not LodTensor"; return; } - PADDLE_ENFORCE_EQ(in_var->GetType(), VarDesc::LOD_TENSOR, + PADDLE_ENFORCE_EQ(in_var->GetType(), proto::VarDesc::LOD_TENSOR, "The %d-th output of Output(%s) must be LoDTensor.", j, out); out_var->SetLoDLevel(in_var->GetLodLevel()); @@ -70,19 +69,18 @@ class CompileTimeInferShapeContext : public InferShapeContext { bool IsRuntime() const override; protected: - VarDesc::VarType GetVarType(const std::string &name) const override; + proto::VarDesc::VarType GetVarType(const std::string &name) const override; DDim GetDim(const std::string &name) const override; void SetDim(const std::string &name, const DDim &dim) override; - const OpDescBind &op_; - const BlockDescBind &block_; + const OpDesc &op_; + const BlockDesc &block_; }; -OpDescBind::OpDescBind(const std::string &type, const VariableNameMap &inputs, - const VariableNameMap &outputs, - const AttributeMap &attrs) { +OpDesc::OpDesc(const std::string &type, const VariableNameMap &inputs, + const VariableNameMap &outputs, const AttributeMap &attrs) { desc_.set_type(type); inputs_ = inputs; outputs_ = outputs; @@ -90,12 +88,12 @@ OpDescBind::OpDescBind(const std::string &type, const VariableNameMap &inputs, need_update_ = true; } -OpDescBind::OpDescBind(const OpDesc &desc, ProgramDescBind *prog) +OpDesc::OpDesc(const proto::OpDesc &desc, ProgramDesc *prog) : desc_(desc), need_update_(false) { // restore inputs_ int input_size = desc_.inputs_size(); for (int i = 0; i < input_size; ++i) { - const OpDesc::Var &var = desc_.inputs(i); + const proto::OpDesc::Var &var = desc_.inputs(i); std::vector &args = inputs_[var.parameter()]; int argu_size = var.arguments_size(); args.reserve(argu_size); @@ -106,7 +104,7 @@ OpDescBind::OpDescBind(const OpDesc &desc, ProgramDescBind *prog) // restore outputs_ int output_size = desc_.outputs_size(); for (int i = 0; i < output_size; ++i) { - const OpDesc::Var &var = desc_.outputs(i); + const proto::OpDesc::Var &var = desc_.outputs(i); std::vector &args = outputs_[var.parameter()]; int argu_size = var.arguments_size(); args.reserve(argu_size); @@ -115,9 +113,9 @@ OpDescBind::OpDescBind(const OpDesc &desc, ProgramDescBind *prog) } } // restore attrs_ - for (const OpDesc::Attr &attr : desc_.attrs()) { + for (const proto::OpDesc::Attr &attr : desc_.attrs()) { std::string attr_name = attr.name(); - if (attr.type() != AttrType::BLOCK) { + if (attr.type() != proto::AttrType::BLOCK) { attrs_[attr_name] = GetAttrValue(attr); } else { auto bid = attr.block_idx(); @@ -126,20 +124,19 @@ OpDescBind::OpDescBind(const OpDesc &desc, ProgramDescBind *prog) } } -OpDesc *OpDescBind::Proto() { +proto::OpDesc *OpDesc::Proto() { Flush(); return &desc_; } -const std::vector &OpDescBind::Input( - const std::string &name) const { +const 
std::vector &OpDesc::Input(const std::string &name) const { auto it = inputs_.find(name); PADDLE_ENFORCE(it != inputs_.end(), "Input %s cannot be found in Op %s", name, Type()); return it->second; } -std::vector OpDescBind::InputArgumentNames() const { +std::vector OpDesc::InputArgumentNames() const { std::vector retv; for (auto &ipt : this->inputs_) { retv.insert(retv.end(), ipt.second.begin(), ipt.second.end()); @@ -147,21 +144,20 @@ std::vector OpDescBind::InputArgumentNames() const { return retv; } -void OpDescBind::SetInput(const std::string ¶m_name, - const std::vector &args) { +void OpDesc::SetInput(const std::string ¶m_name, + const std::vector &args) { need_update_ = true; inputs_[param_name] = args; } -const std::vector &OpDescBind::Output( - const std::string &name) const { +const std::vector &OpDesc::Output(const std::string &name) const { auto it = outputs_.find(name); PADDLE_ENFORCE(it != outputs_.end(), "Output %s cannot be found in Op %s", name, Type()); return it->second; } -std::vector OpDescBind::OutputArgumentNames() const { +std::vector OpDesc::OutputArgumentNames() const { std::vector retv; for (auto &ipt : this->outputs_) { retv.insert(retv.end(), ipt.second.begin(), ipt.second.end()); @@ -169,19 +165,19 @@ std::vector OpDescBind::OutputArgumentNames() const { return retv; } -void OpDescBind::SetOutput(const std::string ¶m_name, - const std::vector &args) { +void OpDesc::SetOutput(const std::string ¶m_name, + const std::vector &args) { need_update_ = true; this->outputs_[param_name] = args; } -AttrType OpDescBind::GetAttrType(const std::string &name) const { +proto::AttrType OpDesc::GetAttrType(const std::string &name) const { auto it = attrs_.find(name); PADDLE_ENFORCE(it != attrs_.end(), "Attribute %s is not found", name); - return static_cast(it->second.which() - 1); + return static_cast(it->second.which() - 1); } -std::vector OpDescBind::AttrNames() const { +std::vector OpDesc::AttrNames() const { std::vector retv; retv.reserve(attrs_.size()); for (auto &attr : attrs_) { @@ -190,41 +186,39 @@ std::vector OpDescBind::AttrNames() const { return retv; } -void OpDescBind::SetAttr(const std::string &name, const Attribute &v) { +void OpDesc::SetAttr(const std::string &name, const Attribute &v) { this->attrs_[name] = v; need_update_ = true; } -void OpDescBind::SetBlockAttr(const std::string &name, BlockDescBind &block) { +void OpDesc::SetBlockAttr(const std::string &name, BlockDesc &block) { this->attrs_[name] = █ need_update_ = true; } -void OpDescBind::SetAttrMap( +void OpDesc::SetAttrMap( const std::unordered_map &attr_map) { attrs_ = attr_map; need_update_ = true; } -Attribute OpDescBind::GetAttr(const std::string &name) const { +Attribute OpDesc::GetAttr(const std::string &name) const { auto it = attrs_.find(name); PADDLE_ENFORCE(it != attrs_.end(), "Attribute %s is not found", name); return it->second; } -int OpDescBind::GetBlockAttr(const std::string &name) const { +int OpDesc::GetBlockAttr(const std::string &name) const { auto it = attrs_.find(name); PADDLE_ENFORCE(it != attrs_.end(), "Attribute %s is not found", name); - return boost::get(it->second)->ID(); + return boost::get(it->second)->ID(); } -const std::unordered_map &OpDescBind::GetAttrMap() - const { +const std::unordered_map &OpDesc::GetAttrMap() const { return attrs_; } -void OpDescBind::Rename(const std::string &old_name, - const std::string &new_name) { +void OpDesc::Rename(const std::string &old_name, const std::string &new_name) { for (auto &input : inputs_) { std::replace(input.second.begin(), 
input.second.end(), old_name, new_name); } @@ -235,8 +229,8 @@ void OpDescBind::Rename(const std::string &old_name, need_update_ = true; } -void OpDescBind::RenameOutput(const std::string &old_name, - const std::string &new_name) { +void OpDesc::RenameOutput(const std::string &old_name, + const std::string &new_name) { for (auto &output : outputs_) { std::replace(output.second.begin(), output.second.end(), old_name, new_name); @@ -244,8 +238,8 @@ void OpDescBind::RenameOutput(const std::string &old_name, need_update_ = true; } -void OpDescBind::RenameInput(const std::string &old_name, - const std::string &new_name) { +void OpDesc::RenameInput(const std::string &old_name, + const std::string &new_name) { for (auto &input : inputs_) { std::replace(input.second.begin(), input.second.end(), old_name, new_name); } @@ -253,8 +247,8 @@ void OpDescBind::RenameInput(const std::string &old_name, } struct SetAttrDescVisitor : public boost::static_visitor { - explicit SetAttrDescVisitor(OpDesc::Attr *attr) : attr_(attr) {} - mutable OpDesc::Attr *attr_; + explicit SetAttrDescVisitor(proto::OpDesc::Attr *attr) : attr_(attr) {} + mutable proto::OpDesc::Attr *attr_; void operator()(int v) const { attr_->set_i(v); } void operator()(float v) const { attr_->set_f(v); } void operator()(const std::string &v) const { attr_->set_s(v); } @@ -272,11 +266,13 @@ struct SetAttrDescVisitor : public boost::static_visitor { void operator()(const std::vector &v) const { VectorToRepeated(v, attr_->mutable_bools()); } - void operator()(BlockDesc *desc) const { attr_->set_block_idx(desc->idx()); } + void operator()(proto::BlockDesc *desc) const { + attr_->set_block_idx(desc->idx()); + } void operator()(boost::blank) const { PADDLE_THROW("Unexpected branch"); } }; -void OpDescBind::Flush() { +void OpDesc::Flush() { if (need_update_) { this->desc_.mutable_inputs()->Clear(); for (auto &ipt : inputs_) { @@ -297,7 +293,7 @@ void OpDescBind::Flush() { auto *attr_desc = desc_.add_attrs(); attr_desc->set_name(attr.first); attr_desc->set_type( - static_cast(attr.second.which() - 1)); + static_cast(attr.second.which() - 1)); SetAttrDescVisitor visitor(attr_desc); boost::apply_visitor(visitor, attr.second); } @@ -328,7 +324,7 @@ static void InitInferShapeFuncs() { }); } -void OpDescBind::CheckAttrs() { +void OpDesc::CheckAttrs() { PADDLE_ENFORCE(!Type().empty(), "CheckAttr() can not be called before type is setted."); auto *checker = OpInfoMap::Instance().Get(Type()).Checker(); @@ -340,7 +336,7 @@ void OpDescBind::CheckAttrs() { checker->Check(attrs_); } -void OpDescBind::InferShape(const BlockDescBind &block) const { +void OpDesc::InferShape(const BlockDesc &block) const { VLOG(3) << "CompileTime infer shape on " << Type(); InitInferShapeFuncs(); auto &infer_shape = OpInfoMap::Instance().Get(this->Type()).infer_shape_; @@ -363,7 +359,7 @@ void OpDescBind::InferShape(const BlockDescBind &block) const { infer_shape(&ctx); } -void OpDescBind::InferVarType(BlockDescBind *block) const { +void OpDesc::InferVarType(BlockDesc *block) const { auto &info = OpInfoMap::Instance().Get(this->Type()); if (info.infer_var_type_) { info.infer_var_type_(*this, block); @@ -375,14 +371,14 @@ void OpDescBind::InferVarType(BlockDescBind *block) const { for (auto &out_pair : this->outputs_) { for (auto &out_var_name : out_pair.second) { block->FindRecursiveOrCreateVar(out_var_name) - ->SetType(VarDesc::LOD_TENSOR); + ->SetType(proto::VarDesc::LOD_TENSOR); } } } } CompileTimeInferShapeContext::CompileTimeInferShapeContext( - const OpDescBind &op, const 
BlockDescBind &block) + const OpDesc &op, const BlockDesc &block) : op_(op), block_(block) {} bool CompileTimeInferShapeContext::HasInput(const std::string &name) const { @@ -484,7 +480,7 @@ void CompileTimeInferShapeContext::SetDim(const std::string &name, } bool CompileTimeInferShapeContext::IsRuntime() const { return false; } -VarDesc::VarType CompileTimeInferShapeContext::GetVarType( +proto::VarDesc::VarType CompileTimeInferShapeContext::GetVarType( const std::string &name) const { return block_.FindVarRecursive(name)->GetType(); } diff --git a/paddle/framework/op_desc.h b/paddle/framework/op_desc.h index da032319af..93d4a88f3c 100644 --- a/paddle/framework/op_desc.h +++ b/paddle/framework/op_desc.h @@ -23,19 +23,19 @@ limitations under the License. */ namespace paddle { namespace framework { -class BlockDescBind; -class ProgramDescBind; +class BlockDesc; +class ProgramDesc; -class OpDescBind { +class OpDesc { public: - OpDescBind() {} + OpDesc() {} - OpDescBind(const std::string &type, const VariableNameMap &inputs, - const VariableNameMap &outputs, const AttributeMap &attrs); + OpDesc(const std::string &type, const VariableNameMap &inputs, + const VariableNameMap &outputs, const AttributeMap &attrs); - OpDescBind(const OpDesc &desc, ProgramDescBind *prog); + OpDesc(const proto::OpDesc &desc, ProgramDesc *prog); - OpDesc *Proto(); + proto::OpDesc *Proto(); std::string Type() const { return desc_.type(); } @@ -59,13 +59,13 @@ class OpDescBind { return attrs_.find(name) != attrs_.end(); } - AttrType GetAttrType(const std::string &name) const; + proto::AttrType GetAttrType(const std::string &name) const; std::vector AttrNames() const; void SetAttr(const std::string &name, const Attribute &v); - void SetBlockAttr(const std::string &name, BlockDescBind &block); + void SetBlockAttr(const std::string &name, BlockDesc &block); Attribute GetAttr(const std::string &name) const; @@ -107,9 +107,9 @@ class OpDescBind { void CheckAttrs(); - void InferShape(const BlockDescBind &block) const; + void InferShape(const BlockDesc &block) const; - void InferVarType(BlockDescBind *block) const; + void InferVarType(BlockDesc *block) const; void MarkAsTarget() { desc_.set_is_target(true); } @@ -126,8 +126,10 @@ class OpDescBind { return ret_val; } - OpDesc desc_; + proto::OpDesc desc_; + // input arg name => output variable names VariableNameMap inputs_; + // output arg name => output variable names VariableNameMap outputs_; AttributeMap attrs_; diff --git a/paddle/framework/op_info.cc b/paddle/framework/op_info.cc index 81ba29797c..b520108109 100644 --- a/paddle/framework/op_info.cc +++ b/paddle/framework/op_info.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
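With the Bind suffix gone, OpDesc is now the single compile-time description of an operator. A sketch of the usual build-check-flush cycle through the renamed API in op_desc.h above; the op type, parameter names, and attribute are invented for illustration:

```cpp
#include "paddle/framework/op_desc.h"

void BuildIllustrativeOp(paddle::framework::OpDesc* op) {
  op->SetType("scale");         // must name a registered operator
  op->SetInput("X", {"x0"});    // parameter name -> argument variable names
  op->SetOutput("Out", {"out0"});
  op->SetAttr("scale", 2.0f);

  op->CheckAttrs();  // runs the registered OpAttrChecker over attrs_
  // Proto() triggers Flush(), which serializes inputs_/outputs_/attrs_
  // into the wrapped proto::OpDesc and clears need_update_.
  op->Proto();
}
```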
*/ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/framework/op_info.h" diff --git a/paddle/framework/op_info.h b/paddle/framework/op_info.h index d3b1a3b5fa..d9b89f9cac 100644 --- a/paddle/framework/op_info.h +++ b/paddle/framework/op_info.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once #include @@ -34,7 +34,7 @@ class InferShapeBase { struct OpInfo { OpCreator creator_; GradOpMakerFN grad_op_maker_; - OpProto* proto_{nullptr}; + proto::OpProto* proto_{nullptr}; OpAttrChecker* checker_{nullptr}; InferVarTypeFN infer_var_type_; InferShapeFN infer_shape_; @@ -43,7 +43,7 @@ struct OpInfo { return proto_ != nullptr && checker_ != nullptr; } - const OpProto& Proto() const { + const proto::OpProto& Proto() const { PADDLE_ENFORCE_NOT_NULL(proto_, "Operator Proto has not been registered"); PADDLE_ENFORCE(proto_->IsInitialized(), "Operator Proto must be initialized in op info"); diff --git a/paddle/framework/op_kernel_type.h b/paddle/framework/op_kernel_type.h new file mode 100644 index 0000000000..97b542e345 --- /dev/null +++ b/paddle/framework/op_kernel_type.h @@ -0,0 +1,82 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/
+
+#pragma once
+
+#include "paddle/framework/data_layout.h"
+#include "paddle/framework/data_type.h"
+#include "paddle/framework/library_type.h"
+#include "paddle/platform/device_context.h"
+#include "paddle/platform/place.h"
+
+namespace paddle {
+namespace framework {
+
+struct OpKernelType {
+  struct Hash {
+    size_t operator()(const OpKernelType& key) const {
+      int place = key.place_.which() + (1 << LEFT_SHIFT);
+      int data_type =
+          static_cast<int>(key.data_type_) + (1 << (LEFT_SHIFT + 1));
+      int data_layout =
+          static_cast<int>(key.data_layout_) + (1 << (LEFT_SHIFT + 2));
+      int library_type =
+          static_cast<int>(key.library_type_) + (1 << (LEFT_SHIFT + 3));
+      std::hash<int> hasher;
+      return hasher(place + data_type + data_layout + library_type);
+    }
+  };
+
+  // place, data_type, library_type kinds less than 2^8
+  constexpr static int LEFT_SHIFT = 8;
+
+  proto::DataType data_type_;
+  DataLayout data_layout_;
+  platform::Place place_;
+  LibraryType library_type_;
+
+  OpKernelType(proto::DataType data_type, platform::Place place,
+               DataLayout data_layout = DataLayout::kAnyLayout,
+               LibraryType library_type = LibraryType::kPlain)
+      : data_type_(data_type),
+        data_layout_(data_layout),
+        place_(place),
+        library_type_(library_type) {}
+
+  OpKernelType(proto::DataType data_type,
+               const platform::DeviceContext& dev_ctx,
+               DataLayout data_layout = DataLayout::kAnyLayout,
+               LibraryType library_type = LibraryType::kPlain)
+      : data_type_(data_type),
+        data_layout_(data_layout),
+        place_(dev_ctx.GetPlace()),
+        library_type_(library_type) {}
+
+  bool operator==(const OpKernelType& o) const {
+    return platform::places_are_same_class(place_, o.place_) &&
+           data_type_ == o.data_type_ && data_layout_ == o.data_layout_ &&
+           library_type_ == o.library_type_;
+  }
+};
+
+inline std::ostream& operator<<(std::ostream& os,
+                                const OpKernelType& kernel_key) {
+  os << "data_type[" << kernel_key.data_type_ << "]:data_layout["
+     << kernel_key.data_layout_ << "]:place[" << kernel_key.place_
+     << "]:library_type[" << kernel_key.library_type_ << "]";
+  return os;
+}
+
+}  // namespace framework
+}  // namespace paddle
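The Hash functor above offsets each field into its own power-of-two band before summing, which keeps the small enum fields from trivially colliding, though the sum is not collision-free in general. A stand-alone replication of the arithmetic; the field values are examples (FP32 == 5 matches the ToString test that follows):

```cpp
#include <functional>
#include <iostream>

int main() {
  constexpr int kLeftShift = 8;  // mirrors OpKernelType::LEFT_SHIFT

  int place = 0 + (1 << kLeftShift);            // which() index of the place
  int data_type = 5 + (1 << (kLeftShift + 1));  // e.g. proto::DataType::FP32
  int data_layout = 0 + (1 << (kLeftShift + 2));
  int library_type = 0 + (1 << (kLeftShift + 3));

  std::hash<int> hasher;
  std::cout << hasher(place + data_type + data_layout + library_type) << "\n";
  return 0;
}
```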
diff --git a/paddle/framework/op_kernel_type_test.cc b/paddle/framework/op_kernel_type_test.cc
new file mode 100644
index 0000000000..dd04840500
--- /dev/null
+++ b/paddle/framework/op_kernel_type_test.cc
@@ -0,0 +1,51 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/framework/op_kernel_type.h"
+#include <gtest/gtest.h>
+#include <sstream>
+
+TEST(OpKernelType, ToString) {
+  using OpKernelType = paddle::framework::OpKernelType;
+  using DataType = paddle::framework::proto::DataType;
+  using CPUPlace = paddle::platform::CPUPlace;
+  using DataLayout = paddle::framework::DataLayout;
+  using LibraryType = paddle::framework::LibraryType;
+
+  OpKernelType op_kernel_type(DataType::FP32, CPUPlace(), DataLayout::kNCHW,
+                              LibraryType::kCUDNN);
+
+  std::ostringstream stream;
+  stream << op_kernel_type;
+  ASSERT_EQ(
+      stream.str(),
+      "data_type[5]:data_layout[NCHW]:place[CPUPlace]:library_type[CUDNN]");
+}
+
+TEST(OpKernelType, Hash) {
+  using OpKernelType = paddle::framework::OpKernelType;
+  using DataType = paddle::framework::proto::DataType;
+  using CPUPlace = paddle::platform::CPUPlace;
+  using CUDAPlace = paddle::platform::CUDAPlace;
+  using DataLayout = paddle::framework::DataLayout;
+  using LibraryType = paddle::framework::LibraryType;
+
+  OpKernelType op_kernel_type_1(DataType::FP32, CPUPlace(), DataLayout::kNCHW,
+                                LibraryType::kCUDNN);
+  OpKernelType op_kernel_type_2(DataType::FP32, CUDAPlace(0),
+                                DataLayout::kNCHW, LibraryType::kCUDNN);
+
+  OpKernelType::Hash hasher;
+  ASSERT_NE(hasher(op_kernel_type_1), hasher(op_kernel_type_2));
+}
diff --git a/paddle/framework/op_proto_maker.h b/paddle/framework/op_proto_maker.h
index 44e8ab1689..efd3a5ca53 100644
--- a/paddle/framework/op_proto_maker.h
+++ b/paddle/framework/op_proto_maker.h
@@ -22,6 +22,8 @@ namespace framework {
 // this class not only make proto but also init attribute checkers.
 class OpProtoAndCheckerMaker {
  public:
+  using OpProto = proto::OpProto;
+  using OpAttrChecker = framework::OpAttrChecker;
   OpProtoAndCheckerMaker(OpProto* proto, OpAttrChecker* op_checker)
       : proto_(proto), op_checker_(op_checker) {}
 
@@ -80,7 +82,7 @@ class OpProtoAndCheckerMaker {
 
 class NOPMaker : public OpProtoAndCheckerMaker {
  public:
-  NOPMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker)
+  NOPMaker(OpProto* proto, framework::OpAttrChecker* op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {}
 };
 
diff --git a/paddle/framework/op_proto_maker_test.cc b/paddle/framework/op_proto_maker_test.cc
index 988a14cf4d..f16cb6fa3a 100644
--- a/paddle/framework/op_proto_maker_test.cc
+++ b/paddle/framework/op_proto_maker_test.cc
@@ -18,7 +18,7 @@ limitations under the License.
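Because OpProtoAndCheckerMaker now aliases proto::OpProto, concrete makers can drop the proto:: qualifier, as NOPMaker above does. A hypothetical maker written in the new style; every name in it is invented:

```cpp
#include "paddle/framework/op_proto_maker.h"

class IllustrativeOpMaker : public paddle::framework::OpProtoAndCheckerMaker {
 public:
  IllustrativeOpMaker(OpProto* proto, OpAttrChecker* op_checker)
      : OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X", "input of the illustrative op");
    AddOutput("Out", "output of the illustrative op");
    AddAttr<float>("scale", "scaling factor").SetDefault(1.0f);
    AddComment("Illustration only; never registered.");
  }
};
```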
*/ class TestAttrProtoMaker : public paddle::framework::OpProtoAndCheckerMaker { public: - TestAttrProtoMaker(paddle::framework::OpProto* proto, + TestAttrProtoMaker(paddle::framework::proto::OpProto* proto, paddle::framework::OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddAttr("scale", "scale of test op"); @@ -27,7 +27,7 @@ class TestAttrProtoMaker : public paddle::framework::OpProtoAndCheckerMaker { }; TEST(ProtoMaker, DuplicatedAttr) { - paddle::framework::OpProto op_proto; + paddle::framework::proto::OpProto op_proto; paddle::framework::OpAttrChecker op_checker; auto proto_maker = TestAttrProtoMaker(&op_proto, &op_checker); ASSERT_THROW(proto_maker.Validate(), paddle::platform::EnforceNotMet); @@ -35,7 +35,7 @@ TEST(ProtoMaker, DuplicatedAttr) { class TestInOutProtoMaker : public paddle::framework::OpProtoAndCheckerMaker { public: - TestInOutProtoMaker(paddle::framework::OpProto* proto, + TestInOutProtoMaker(paddle::framework::proto::OpProto* proto, paddle::framework::OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("input", "input of test op"); @@ -44,7 +44,7 @@ class TestInOutProtoMaker : public paddle::framework::OpProtoAndCheckerMaker { }; TEST(ProtoMaker, DuplicatedInOut) { - paddle::framework::OpProto op_proto; + paddle::framework::proto::OpProto op_proto; paddle::framework::OpAttrChecker op_checker; auto proto_maker = TestInOutProtoMaker(&op_proto, &op_checker); ASSERT_THROW(proto_maker.Validate(), paddle::platform::EnforceNotMet); diff --git a/paddle/framework/op_registry.cc b/paddle/framework/op_registry.cc index 8dedd873aa..dfa151316d 100644 --- a/paddle/framework/op_registry.cc +++ b/paddle/framework/op_registry.cc @@ -31,7 +31,8 @@ std::unique_ptr OpRegistry::CreateOp( } static VariableNameMap ConvertOpDescVarsToVarNameMap( - const google::protobuf::RepeatedPtrField& op_desc_vars) { + const google::protobuf::RepeatedPtrField& + op_desc_vars) { VariableNameMap ret_val; for (auto& var : op_desc_vars) { auto& var_names = ret_val[var.parameter()]; @@ -43,9 +44,10 @@ static VariableNameMap ConvertOpDescVarsToVarNameMap( return ret_val; } -std::unique_ptr OpRegistry::CreateOp(const OpDesc& op_desc) { +std::unique_ptr OpRegistry::CreateOp( + const proto::OpDesc& op_desc) { VLOG(1) << "CreateOp directly from OpDesc is deprecated. It should only be" - "used in unit tests. Use CreateOp(const OpDescBind& op_desc) " + "used in unit tests. 
Use CreateOp(const OpDesc& op_desc) " "instead."; VariableNameMap inputs = ConvertOpDescVarsToVarNameMap(op_desc.inputs()); VariableNameMap outputs = ConvertOpDescVarsToVarNameMap(op_desc.outputs()); @@ -57,7 +59,7 @@ std::unique_ptr OpRegistry::CreateOp(const OpDesc& op_desc) { return CreateOp(op_desc.type(), inputs, outputs, attrs); } -std::unique_ptr OpRegistry::CreateOp(const OpDescBind& op_desc) { +std::unique_ptr OpRegistry::CreateOp(const OpDesc& op_desc) { return CreateOp(op_desc.Type(), op_desc.Inputs(), op_desc.Outputs(), op_desc.GetAttrMap()); } diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h index b29238432b..bdaa259181 100644 --- a/paddle/framework/op_registry.h +++ b/paddle/framework/op_registry.h @@ -61,25 +61,14 @@ struct OperatorRegistrar : public Registrar { class OpRegistry { public: - template - static void RegisterOp(const std::string& op_type, - const std::string& grad_op_type) { - OperatorRegistrar reg(op_type.c_str()); - reg.info.grad_op_type_ = grad_op_type; - // register gradient op - if (!grad_op_type.empty()) { - OperatorRegistrar grad_reg(grad_op_type.c_str()); - } - } - static std::unique_ptr CreateOp(const std::string& type, const VariableNameMap& inputs, const VariableNameMap& outputs, AttributeMap attrs); - static std::unique_ptr CreateOp(const OpDesc& op_desc); + static std::unique_ptr CreateOp(const proto::OpDesc& op_desc); - static std::unique_ptr CreateOp(const OpDescBind& op_desc); + static std::unique_ptr CreateOp(const OpDesc& op_desc); }; template @@ -90,30 +79,31 @@ struct OpKernelRegistrarFunctor { using KERNEL_TYPE = typename std::tuple_element>::type; - void operator()(const char* op_type) const { + void operator()(const char* op_type, const char* library_type) const { using T = typename KERNEL_TYPE::ELEMENT_TYPE; - OpKernelType key(ToDataType(std::type_index(typeid(T))), PlaceType()); + OpKernelType key(ToDataType(std::type_index(typeid(T))), PlaceType(), + DataLayout::kAnyLayout, StringToLibraryType(library_type)); OperatorWithKernel::AllOpKernels()[op_type][key].reset(new KERNEL_TYPE); constexpr auto size = std::tuple_size>::value; OpKernelRegistrarFunctor func; - func(op_type); + func(op_type, library_type); } }; template struct OpKernelRegistrarFunctor { - void operator()(const char* op_type) const {} + void operator()(const char* op_type, const char* library_type) const {} }; // User can register many kernel in one place. The data type could be different. template class OpKernelRegistrar : public Registrar { public: - explicit OpKernelRegistrar(const char* op_type) { + explicit OpKernelRegistrar(const char* op_type, const char* library_type) { OpKernelRegistrarFunctor func; - func(op_type); + func(op_type, library_type); } }; @@ -126,6 +116,14 @@ class OpKernelRegistrar : public Registrar { __test_global_namespace_##uniq_name##__>::value, \ msg) +/* + The variadic arguments should be class types derived from one of the + following classes: + OpProtoAndCheckerMaker + GradOpDescMakerBase + VarTypeInference + InferShapeBase +*/ #define REGISTER_OPERATOR(op_type, op_class, ...) \ STATIC_ASSERT_GLOBAL_NAMESPACE( \ __reg_op__##op_type, \ @@ -144,20 +142,29 @@ class OpKernelRegistrar : public Registrar { } /** - * Macro to register Operator. + * Macro to register Operator. When the input is duplicable, you should + * use REGISTER_OP_EX with deop_empty_grad=false instead. 
*/ -#define REGISTER_OP(op_type, op_class, op_maker_class, grad_op_type, \ - grad_op_class) \ - REGISTER_OPERATOR(grad_op_type, grad_op_class); \ - class _GradOpDescMaker_##grad_op_type##_ \ - : public ::paddle::framework::DefaultGradOpDescMaker { \ - using ::paddle::framework::DefaultGradOpDescMaker< \ - true>::DefaultGradOpDescMaker; \ - \ - protected: \ - virtual std::string GradOpType() const { return #grad_op_type; } \ - }; \ - REGISTER_OPERATOR(op_type, op_class, _GradOpDescMaker_##grad_op_type##_, \ +#define REGISTER_OP(op_type, op_class, op_maker_class, grad_op_type, \ + grad_op_class) \ + REGISTER_OP_EX(op_type, op_class, op_maker_class, grad_op_type, \ + grad_op_class, true) + +// When an argument is duplicable, we need to use this version. +// Perhaps we can omit DropEmptyIG template parameter and +// only have one version of REGISTER_OP. +#define REGISTER_OP_EX(op_type, op_class, op_maker_class, grad_op_type, \ + grad_op_class, drop_empty_grad) \ + REGISTER_OPERATOR(grad_op_type, grad_op_class); \ + class _GradOpDescMaker_##grad_op_type##_ \ + : public ::paddle::framework::DefaultGradOpDescMaker { \ + using ::paddle::framework::DefaultGradOpDescMaker< \ + drop_empty_grad>::DefaultGradOpDescMaker; \ + \ + protected: \ + virtual std::string GradOpType() const { return #grad_op_type; } \ + }; \ + REGISTER_OPERATOR(op_type, op_class, _GradOpDescMaker_##grad_op_type##_, \ op_maker_class); #define REGISTER_OP_WITH_KERNEL(op_type, ...) \ @@ -175,14 +182,15 @@ class OpKernelRegistrar : public Registrar { __reg_op_kernel_##op_type##_##DEVICE_TYPE##__, \ "REGISTER_OP_KERNEL must be called in global namespace"); \ static ::paddle::framework::OpKernelRegistrar \ - __op_kernel_registrar_##op_type##_##DEVICE_TYPE##__(#op_type); \ + __op_kernel_registrar_##op_type##_##DEVICE_TYPE##__(#op_type, \ + #DEVICE_TYPE); \ int TouchOpKernelRegistrar_##op_type##_##DEVICE_TYPE() { \ __op_kernel_registrar_##op_type##_##DEVICE_TYPE##__.Touch(); \ return 0; \ } #define REGISTER_OP_CUDA_KERNEL(op_type, ...) \ - REGISTER_OP_KERNEL(op_type, CUDA, ::paddle::platform::GPUPlace, __VA_ARGS__) + REGISTER_OP_KERNEL(op_type, CUDA, ::paddle::platform::CUDAPlace, __VA_ARGS__) #define REGISTER_OP_CPU_KERNEL(op_type, ...) \ REGISTER_OP_KERNEL(op_type, CPU, ::paddle::platform::CPUPlace, __VA_ARGS__) diff --git a/paddle/framework/op_registry_test.cc b/paddle/framework/op_registry_test.cc index b860fe6cac..cef530c6e6 100644 --- a/paddle/framework/op_registry_test.cc +++ b/paddle/framework/op_registry_test.cc @@ -1,3 +1,17 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
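The REGISTER_OP / REGISTER_OP_EX split above exists because DefaultGradOpDescMaker<true> drops empty input-gradient names, which breaks ops whose inputs are duplicable. A hypothetical registration that keeps them; the op, class, and namespace names are invented:

```cpp
// A sum over a duplicable input list must keep empty input gradients so
// that gradient outputs stay aligned with the inputs, hence the final
// drop_empty_grad argument is false.
REGISTER_OP_EX(illustrative_sum, ops::IllustrativeSumOp,
               ops::IllustrativeSumOpMaker, illustrative_sum_grad,
               ops::IllustrativeSumGradOp, /*drop_empty_grad=*/false);
```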
*/ + #include "paddle/framework/op_registry.h" #include @@ -8,8 +22,7 @@ namespace framework { class CosineOp : public OperatorBase { public: using OperatorBase::OperatorBase; - void Run(const Scope& scope, - const platform::DeviceContext& dev_ctx) const override {} + void Run(const Scope& scope, const platform::Place& place) const override {} }; class CosineOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker { @@ -28,8 +41,7 @@ class CosineOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker { class MyTestOp : public OperatorBase { public: using OperatorBase::OperatorBase; - void Run(const Scope& scope, - const platform::DeviceContext& dev_ctx) const override {} + void Run(const Scope& scope, const platform::Place& place) const override {} }; class MyTestOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker { @@ -51,7 +63,7 @@ class MyTestOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker { static void BuildVar(const std::string& param_name, std::initializer_list arguments, - paddle::framework::OpDesc::Var* var) { + paddle::framework::proto::OpDesc::Var* var) { var->set_parameter(param_name); for (auto& arg_name : arguments) { var->add_arguments(arg_name); @@ -63,7 +75,7 @@ REGISTER_OP_WITHOUT_GRADIENT(my_test_op, paddle::framework::MyTestOp, paddle::framework::MyTestOpProtoAndCheckerMaker); TEST(OpRegistry, CreateOp) { - paddle::framework::OpDesc op_desc; + paddle::framework::proto::OpDesc op_desc; op_desc.set_type("cos_sim"); BuildVar("input", {"aa"}, op_desc.add_inputs()); BuildVar("output", {"bb"}, op_desc.add_outputs()); @@ -71,26 +83,26 @@ TEST(OpRegistry, CreateOp) { float scale = 3.3; auto attr = op_desc.mutable_attrs()->Add(); attr->set_name("scale"); - attr->set_type(paddle::framework::AttrType::FLOAT); + attr->set_type(paddle::framework::proto::AttrType::FLOAT); attr->set_f(scale); auto op = paddle::framework::OpRegistry::CreateOp(op_desc); paddle::framework::Scope scope; - paddle::platform::CPUDeviceContext dev_ctx; - op->Run(scope, dev_ctx); + paddle::platform::CPUPlace cpu_place; + op->Run(scope, cpu_place); float scale_get = op->Attr("scale"); ASSERT_EQ(scale_get, scale); } TEST(OpRegistry, IllegalAttr) { - paddle::framework::OpDesc op_desc; + paddle::framework::proto::OpDesc op_desc; op_desc.set_type("cos_sim"); BuildVar("input", {"aa"}, op_desc.add_inputs()); BuildVar("output", {"bb"}, op_desc.add_outputs()); auto attr = op_desc.mutable_attrs()->Add(); attr->set_name("scale"); - attr->set_type(paddle::framework::AttrType::FLOAT); + attr->set_type(paddle::framework::proto::AttrType::FLOAT); attr->set_f(-2.0); bool caught = false; @@ -108,7 +120,7 @@ TEST(OpRegistry, IllegalAttr) { } TEST(OpRegistry, DefaultValue) { - paddle::framework::OpDesc op_desc; + paddle::framework::proto::OpDesc op_desc; op_desc.set_type("cos_sim"); BuildVar("input", {"aa"}, op_desc.add_inputs()); BuildVar("output", {"bb"}, op_desc.add_outputs()); @@ -117,13 +129,13 @@ TEST(OpRegistry, DefaultValue) { auto op = paddle::framework::OpRegistry::CreateOp(op_desc); paddle::framework::Scope scope; - paddle::platform::CPUDeviceContext dev_ctx; - op->Run(scope, dev_ctx); + paddle::platform::CPUPlace cpu_place; + op->Run(scope, cpu_place); ASSERT_EQ(op->Attr("scale"), 1.0); } TEST(OpRegistry, CustomChecker) { - paddle::framework::OpDesc op_desc; + paddle::framework::proto::OpDesc op_desc; op_desc.set_type("my_test_op"); BuildVar("input", {"ii"}, op_desc.add_inputs()); BuildVar("output", {"oo"}, op_desc.add_outputs()); @@ -145,7 +157,7 @@ TEST(OpRegistry, CustomChecker) { // set 'test_attr' set to 
an illegal value auto attr = op_desc.mutable_attrs()->Add(); attr->set_name("test_attr"); - attr->set_type(paddle::framework::AttrType::INT); + attr->set_type(paddle::framework::proto::AttrType::INT); attr->set_i(3); caught = false; try { @@ -164,12 +176,12 @@ TEST(OpRegistry, CustomChecker) { op_desc.mutable_attrs()->Clear(); attr = op_desc.mutable_attrs()->Add(); attr->set_name("test_attr"); - attr->set_type(paddle::framework::AttrType::INT); + attr->set_type(paddle::framework::proto::AttrType::INT); attr->set_i(4); auto op = paddle::framework::OpRegistry::CreateOp(op_desc); - paddle::platform::CPUDeviceContext dev_ctx; + paddle::platform::CPUPlace cpu_place; paddle::framework::Scope scope; - op->Run(scope, dev_ctx); + op->Run(scope, cpu_place); int test_attr = op->Attr("test_attr"); ASSERT_EQ(test_attr, 4); } @@ -184,3 +196,71 @@ TEST(OperatorRegistrar, Test) { using namespace paddle::framework; OperatorRegistrar reg("cos"); } + +namespace paddle { +namespace framework { + +class OpKernelTestMaker : public OpProtoAndCheckerMaker { + public: + OpKernelTestMaker(OpProto* proto, OpAttrChecker* op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddComment("NoGradOp, same input output. no Grad"); + } +}; + +class OpWithKernelTest : public OperatorWithKernel { + public: + using OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(InferShapeContext* ctx) const override {} + + framework::OpKernelType GetActualKernelType( + const framework::ExecutionContext& ctx) const override { + return framework::OpKernelType(proto::DataType::FP32, ctx.device_context()); + } +}; + +template +class OpKernelTest : public paddle::framework::OpKernel { + public: + void Compute(const paddle::framework::ExecutionContext& ctx) const {} +}; + +} // namespace framework +} // namespace paddle + +REGISTER_OP_WITHOUT_GRADIENT(op_with_kernel, + paddle::framework::OpWithKernelTest, + paddle::framework::OpKernelTestMaker); +REGISTER_OP_CPU_KERNEL( + op_with_kernel, + paddle::framework::OpKernelTest); + +REGISTER_OP_CUDA_KERNEL(op_with_kernel, + paddle::framework::OpKernelTest< + paddle::platform::CUDADeviceContext, float>); + +TEST(OperatorRegistrar, CPU) { + paddle::framework::proto::OpDesc op_desc; + paddle::platform::CPUPlace cpu_place; + paddle::framework::Scope scope; + + op_desc.set_type("op_with_kernel"); + auto op = paddle::framework::OpRegistry::CreateOp(op_desc); + + op->Run(scope, cpu_place); +} + +#ifdef PADDLE_WITH_CUDA +TEST(OperatorRegistrar, CUDA) { + paddle::framework::proto::OpDesc op_desc; + paddle::platform::CUDAPlace cuda_place(0); + paddle::framework::Scope scope; + + op_desc.set_type("op_with_kernel"); + auto op = paddle::framework::OpRegistry::CreateOp(op_desc); + + op->Run(scope, cuda_place); +} +#endif diff --git a/paddle/framework/operator.cc b/paddle/framework/operator.cc index e83d754783..886f73e7b8 100644 --- a/paddle/framework/operator.cc +++ b/paddle/framework/operator.cc @@ -12,10 +12,13 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
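The two registrar tests above pin down the new calling convention: Run takes a Place, and the operator borrows the matching DeviceContext from the DeviceContextPool, so the pool must exist before any op runs. A condensed sketch of the minimal sequence; InitDevices is the call the updated operator tests add for exactly this reason:

```cpp
#include "paddle/framework/init.h"
#include "paddle/framework/op_registry.h"

void RunOnCPU(const paddle::framework::proto::OpDesc& op_desc) {
  // Populates the DeviceContextPool that Run() borrows from.
  paddle::framework::InitDevices({"CPU"});

  auto op = paddle::framework::OpRegistry::CreateOp(op_desc);
  paddle::framework::Scope scope;
  op->Run(scope, paddle::platform::CPUPlace());  // was Run(scope, dev_ctx)
}
```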
*/ -#include "paddle/framework/operator.h" #include #include + +#include "paddle/framework/data_transform.h" +#include "paddle/framework/executor.h" #include "paddle/framework/lod_tensor_array.h" +#include "paddle/framework/operator.h" #include "paddle/framework/shape_inference.h" #include "paddle/framework/var_type.h" @@ -240,12 +243,6 @@ std::vector ExecutionContext::MultiOutput( return res; } -std::ostream& operator<<(std::ostream& os, const OpKernelType& kernel_key) { - os << "place[" << kernel_key.place_ << "]:data_type[" << kernel_key.data_type_ - << "]"; - return os; -} - bool OpSupportGPU(const std::string& op_type) { auto& all_kernels = OperatorWithKernel::AllOpKernels(); auto it = all_kernels.find(op_type); @@ -377,7 +374,7 @@ class RuntimeInferShapeContext : public InferShapeContext { } } - VarDesc::VarType GetVarType(const std::string& name) const override { + proto::VarDesc::VarType GetVarType(const std::string& name) const override { auto* var = scope_.FindVar(name); return ToVarType(var->Type()); } @@ -388,11 +385,11 @@ class RuntimeInferShapeContext : public InferShapeContext { }; void OperatorWithKernel::Run(const Scope& scope, - const platform::DeviceContext& dev_ctx) const { + const platform::Place& place) const { RuntimeInferShapeContext infer_shape_ctx(*this, scope); this->InferShape(&infer_shape_ctx); - - ExecutionContext ctx(*this, scope, dev_ctx); + platform::DeviceContextPool& pool = platform::DeviceContextPool::Get(); + auto dev_ctx = pool.Borrow(place); // check if op[type] has kernel registered. auto& all_op_kernels = AllOpKernels(); @@ -404,20 +401,62 @@ void OperatorWithKernel::Run(const Scope& scope, // check if op[type] have kernel for kernel_key OpKernelMap& kernels = kernels_iter->second; - auto kernel_key = GetKernelType(ctx); - auto kernel_iter = kernels.find(kernel_key); + + ExecutionContext ctx(*this, scope, *dev_ctx); + auto actual_kernel_key = GetActualKernelType(ctx); + auto expected_kernel_key = GetExpectedKernelType(actual_kernel_key); + auto kernel_iter = kernels.find(expected_kernel_key); if (kernel_iter == kernels.end()) { - PADDLE_THROW("The operator %s does not support %s", type_, kernel_key); + PADDLE_THROW("The operator %s does not support %s", type_, + expected_kernel_key); } - kernel_iter->second->Compute(ctx); + if (actual_kernel_key == expected_kernel_key) { + kernel_iter->second->Compute(ctx); + } else { + Scope& op_scope = scope.NewScope(); + auto input_vars = this->InputVars(); + for (auto var_name : input_vars) { + op_scope.Var(var_name); + } + + // TODO(qijun) get appropriate DeviceContext from DeviceContext pool + platform::DeviceContext* trans_dev_ctx = nullptr; + std::vector trans_dev_ctx_vec{trans_dev_ctx}; + + // TODO(qijun) get appropriate DataTransformFN from global map + framework::DataTransformFN trans_fun = nullptr; + + // Wait for transform starting + dev_ctx->Wait(); + + for (auto var_name : input_vars) { + trans_fun(trans_dev_ctx_vec, *(scope.FindVar(var_name)), + op_scope.FindVar(var_name)); + } + // Wait for data transform finishing + for (auto ctx : trans_dev_ctx_vec) { + ctx->Wait(); + } + + // Create a new ExecutionContext + ExecutionContext op_ctx(*this, op_scope, *dev_ctx); + kernel_iter->second->Compute(op_ctx); + } } -OpKernelType OperatorWithKernel::GetKernelType( + +OpKernelType OperatorWithKernel::GetActualKernelType( const ExecutionContext& ctx) const { return OpKernelType(IndicateDataType(ctx), ctx.GetPlace()); } -DataType OperatorWithKernel::IndicateDataType( + +OpKernelType 
OperatorWithKernel::GetExpectedKernelType( + const OpKernelType& actual_kernel_type) const { + return actual_kernel_type; +} + +proto::DataType OperatorWithKernel::IndicateDataType( const ExecutionContext& ctx) const { auto& scope = ctx.scope(); int data_type = -1; @@ -443,7 +482,7 @@ DataType OperatorWithKernel::IndicateDataType( } } PADDLE_ENFORCE(data_type != -1, "DataType should be indicated by input"); - return static_cast(data_type); + return static_cast(data_type); } } // namespace framework diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h index e60dbfc313..d0a9b643d5 100644 --- a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -23,15 +23,14 @@ limitations under the License. */ #include "glog/logging.h" // For VLOG #include "paddle/framework/attribute.h" #include "paddle/framework/block_desc.h" -#include "paddle/framework/data_type.h" #include "paddle/framework/framework.pb.h" #include "paddle/framework/lod_tensor.h" #include "paddle/framework/op_info.h" +#include "paddle/framework/op_kernel_type.h" #include "paddle/framework/scope.h" #include "paddle/framework/selected_rows.h" #include "paddle/framework/tensor.h" #include "paddle/platform/device_context.h" -#include "paddle/platform/place.h" #include "paddle/platform/variant.h" #include "paddle/utils/Error.h" @@ -53,6 +52,11 @@ constexpr char kGradVarSuffix[] = "@GRAD"; /// Variables with this suffix are supposed to be filled up with zeros. constexpr char kZeroVarSuffix[] = "@ZERO"; +// define some kernel hint +const std::string kUseCPU = "use_cpu"; +const std::string kUseCUDNN = "use_cudnn"; +const std::string kUseMKLDNN = "use_mkldnn"; + inline std::string GradVarName(const std::string& var_name) { return var_name + kGradVarSuffix; } @@ -83,8 +87,10 @@ class OperatorBase { virtual std::string DebugString() const; /// Net will call this function to Run an op. - virtual void Run(const Scope& scope, - const platform::DeviceContext& dev_ctx) const = 0; + virtual void Run(const Scope& scope, const platform::Place& place) const = 0; + + // FIXME(typhoonzero): this is only used for recv_op to stop event_loop. 
+ virtual void Stop() {} virtual bool IsNetOp() const { return false; } @@ -159,8 +165,7 @@ class OperatorBase { class NOP : public OperatorBase { public: using OperatorBase::OperatorBase; - void Run(const Scope& scope, - const platform::DeviceContext& dev_ctx) const override {} + void Run(const Scope& scope, const platform::Place& place) const override {} std::unique_ptr Clone() const override { return std::unique_ptr(new NOP(*this)); } @@ -345,33 +350,6 @@ class OpKernel : public OpKernelBase { using ELEMENT_TYPE = T; }; -struct OpKernelType { - struct Hash { - std::hash hash_; - size_t operator()(const OpKernelType& key) const { - int place = key.place_.which(); - int data_type = static_cast(key.data_type_); - int pre_hash = data_type << NUM_PLACE_TYPE_LIMIT_IN_BIT | - (place & ((1 << NUM_PLACE_TYPE_LIMIT_IN_BIT) - 1)); - return hash_(pre_hash); - } - }; - - platform::Place place_; - DataType data_type_; - - OpKernelType(DataType data_type, platform::Place place) - : place_(place), data_type_(data_type) {} - - OpKernelType(DataType data_type, const platform::DeviceContext& dev_ctx) - : place_(dev_ctx.GetPlace()), data_type_(data_type) {} - - bool operator==(const OpKernelType& o) const { - return platform::places_are_same_class(place_, o.place_) && - data_type_ == o.data_type_; - } -}; - class OperatorWithKernel : public OperatorBase { public: using OpKernelMap = @@ -382,8 +360,7 @@ class OperatorWithKernel : public OperatorBase { const VariableNameMap& outputs, const AttributeMap& attrs) : OperatorBase(type, inputs, outputs, attrs) {} - void Run(const Scope& scope, - const platform::DeviceContext& dev_ctx) const final; + void Run(const Scope& scope, const platform::Place& place) const final; static std::unordered_map& AllOpKernels() { @@ -404,16 +381,16 @@ class OperatorWithKernel : public OperatorBase { } protected: - virtual OpKernelType GetKernelType(const ExecutionContext& ctx) const; + virtual OpKernelType GetActualKernelType(const ExecutionContext& ctx) const; + virtual OpKernelType GetExpectedKernelType( + const OpKernelType& actual_kernel_type) const; private: // indicate kernel DataType by input data. Defaultly all input data must be // same. - DataType IndicateDataType(const ExecutionContext& ctx) const; + proto::DataType IndicateDataType(const ExecutionContext& ctx) const; }; -std::ostream& operator<<(std::ostream& os, const OpKernelType& kernel_key); - extern bool OpSupportGPU(const std::string& op_type); } // namespace framework diff --git a/paddle/framework/operator_test.cc b/paddle/framework/operator_test.cc index b678178454..4d38a7ada9 100644 --- a/paddle/framework/operator_test.cc +++ b/paddle/framework/operator_test.cc @@ -11,11 +11,12 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
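The pair of hooks declared above splits kernel selection in two: GetActualKernelType reports what the inputs imply, while GetExpectedKernelType says what the op actually wants (the default returns its argument unchanged); when the two disagree, Run takes the data-transform branch added in operator.cc. A sketch of an op overriding only the first hook; the class name is invented:

```cpp
#include "paddle/framework/operator.h"

class IllustrativeOp : public paddle::framework::OperatorWithKernel {
 public:
  using OperatorWithKernel::OperatorWithKernel;

 protected:
  void InferShape(paddle::framework::InferShapeContext* ctx) const override {}

  // Pin the kernel to FP32 whatever the inputs hold; layout and library
  // fall back to kAnyLayout / kPlain via OpKernelType's defaults.
  paddle::framework::OpKernelType GetActualKernelType(
      const paddle::framework::ExecutionContext& ctx) const override {
    return paddle::framework::OpKernelType(
        paddle::framework::proto::DataType::FP32, ctx.GetPlace());
  }
};
```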
*/ - -#include "paddle/framework/operator.h" #include "gtest/gtest.h" + +#include "paddle/framework/init.h" #include "paddle/framework/op_info.h" #include "paddle/framework/op_registry.h" +#include "paddle/framework/operator.h" namespace paddle { namespace framework { @@ -27,8 +28,7 @@ class OpWithoutKernelTest : public OperatorBase { OpWithoutKernelTest(const std::string& type, const VariableNameMap& inputs, const VariableNameMap& outputs, const AttributeMap& attrs) : OperatorBase(type, inputs, outputs, attrs), x(1) {} - void Run(const Scope& scope, - const platform::DeviceContext& dev_ctx) const override { + void Run(const Scope& scope, const platform::Place& place) const override { ++op_run_num; ASSERT_EQ(static_cast(inputs_.size()), 1); ASSERT_EQ(static_cast(outputs_.size()), 1); @@ -41,10 +41,9 @@ class OpWithoutKernelTest : public OperatorBase { int x{0}; }; -class OpeWithoutKernelTestProtoAndCheckerMaker : public OpProtoAndCheckerMaker { +class OpWithoutKernelCheckerMaker : public OpProtoAndCheckerMaker { public: - OpeWithoutKernelTestProtoAndCheckerMaker(OpProto* proto, - OpAttrChecker* op_checker) + OpWithoutKernelCheckerMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("input", "input of test op"); AddOutput("output", "output of test op"); @@ -58,35 +57,36 @@ class OpeWithoutKernelTestProtoAndCheckerMaker : public OpProtoAndCheckerMaker { static void BuildVar(const std::string& param_name, std::initializer_list arguments, - paddle::framework::OpDesc::Var* var) { + paddle::framework::proto::OpDesc::Var* var) { var->set_parameter(param_name); for (auto& arg_name : arguments) { *var->mutable_arguments()->Add() = arg_name; } } -REGISTER_OP_WITHOUT_GRADIENT( - test_operator, paddle::framework::OpWithoutKernelTest, - paddle::framework::OpeWithoutKernelTestProtoAndCheckerMaker); +REGISTER_OP_WITHOUT_GRADIENT(test_operator, + paddle::framework::OpWithoutKernelTest, + paddle::framework::OpWithoutKernelCheckerMaker); TEST(OperatorBase, all) { - paddle::framework::OpDesc op_desc; + paddle::framework::InitDevices({"CPU"}); + paddle::framework::proto::OpDesc op_desc; op_desc.set_type("test_operator"); BuildVar("input", {"IN1"}, op_desc.add_inputs()); BuildVar("output", {"OUT1"}, op_desc.add_outputs()); auto attr = op_desc.mutable_attrs()->Add(); attr->set_name("scale"); - attr->set_type(paddle::framework::AttrType::FLOAT); + attr->set_type(paddle::framework::proto::AttrType::FLOAT); attr->set_f(3.14); - paddle::platform::CPUDeviceContext device_context; + paddle::platform::CPUPlace cpu_place; paddle::framework::Scope scope; auto op = paddle::framework::OpRegistry::CreateOp(op_desc); scope.Var("OUT1"); ASSERT_EQ(paddle::framework::op_run_num, 0); - op->Run(scope, device_context); + op->Run(scope, cpu_place); ASSERT_EQ(paddle::framework::op_run_num, 1); } @@ -114,8 +114,8 @@ class OpWithKernelTest : public OperatorWithKernel { protected: void InferShape(framework::InferShapeContext* ctx) const override {} - OpKernelType GetKernelType(const ExecutionContext& ctx) const override { - return OpKernelType(DataType::FP32, ctx.GetPlace()); + OpKernelType GetActualKernelType(const ExecutionContext& ctx) const override { + return OpKernelType(proto::DataType::FP32, ctx.GetPlace()); } }; @@ -123,7 +123,6 @@ template class CPUKernelTest : public OpKernel { public: void Compute(const ExecutionContext& ctx) const { - std::cout << "this is cpu kernel" << std::endl; std::cout << ctx.op().DebugString() << std::endl; cpu_kernel_run_num++; 
ASSERT_EQ(ctx.op().Input("x"), "IN1"); @@ -195,22 +194,23 @@ REGISTER_OP_CPU_KERNEL(op_with_kernel, // test with single input TEST(OpKernel, all) { - paddle::framework::OpDesc op_desc; + paddle::framework::InitDevices({"CPU"}); + paddle::framework::proto::OpDesc op_desc; op_desc.set_type("op_with_kernel"); BuildVar("x", {"IN1"}, op_desc.add_inputs()); BuildVar("y", {"OUT1"}, op_desc.add_outputs()); auto attr = op_desc.mutable_attrs()->Add(); attr->set_name("scale"); - attr->set_type(paddle::framework::AttrType::FLOAT); + attr->set_type(paddle::framework::proto::AttrType::FLOAT); attr->set_f(3.14); - paddle::platform::CPUDeviceContext cpu_device_context; + paddle::platform::CPUPlace cpu_place; paddle::framework::Scope scope; auto op = paddle::framework::OpRegistry::CreateOp(op_desc); ASSERT_EQ(paddle::framework::cpu_kernel_run_num, 0); - op->Run(scope, cpu_device_context); + op->Run(scope, cpu_place); ASSERT_EQ(paddle::framework::cpu_kernel_run_num, 1); } @@ -224,7 +224,9 @@ REGISTER_OP_CPU_KERNEL(op_multi_inputs_with_kernel, TEST(OpKernel, multi_inputs) { using namespace paddle::framework; - OpDesc op_desc; + paddle::framework::InitDevices({"CPU"}); + proto::OpDesc op_desc; + op_desc.set_type("op_multi_inputs_with_kernel"); BuildVar("xs", {"x0", "x1", "x2"}, op_desc.add_inputs()); BuildVar("k", {"k0"}, op_desc.add_inputs()); @@ -232,10 +234,10 @@ TEST(OpKernel, multi_inputs) { auto attr = op_desc.mutable_attrs()->Add(); attr->set_name("scale"); - attr->set_type(paddle::framework::AttrType::FLOAT); + attr->set_type(paddle::framework::proto::AttrType::FLOAT); attr->set_f(3.14); - paddle::platform::CPUDeviceContext cpu_device_context; + paddle::platform::CPUPlace cpu_place; paddle::framework::Scope scope; scope.Var("x0")->GetMutable(); scope.Var("x1")->GetMutable(); @@ -245,7 +247,7 @@ TEST(OpKernel, multi_inputs) { scope.Var("y1")->GetMutable(); auto op = paddle::framework::OpRegistry::CreateOp(op_desc); - op->Run(scope, cpu_device_context); + op->Run(scope, cpu_place); } class OperatorClone : public paddle::framework::OperatorBase { @@ -257,10 +259,11 @@ class OperatorClone : public paddle::framework::OperatorBase { const paddle::framework::AttributeMap& attrs) : OperatorBase(type, inputs, outputs, attrs) {} void Run(const paddle::framework::Scope& scope, - const paddle::platform::DeviceContext& dev_ctx) const override {} + const paddle::platform::Place& place) const override {} }; TEST(Operator, Clone) { + paddle::framework::InitDevices({"CPU"}); OperatorClone a("ABC", paddle::framework::VariableNameMap{}, paddle::framework::VariableNameMap{}, paddle::framework::AttributeMap{}); diff --git a/paddle/framework/program_desc.cc b/paddle/framework/program_desc.cc index 4af8d94563..b5d9e5e385 100644 --- a/paddle/framework/program_desc.cc +++ b/paddle/framework/program_desc.cc @@ -18,49 +18,49 @@ limitations under the License. 
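To make the refactored operator contract concrete, here is a minimal sketch of a kernel-less operator written against the new interface. `EchoOp` is hypothetical (not part of this diff) and assumes the `operator.h` header as modified above. As the updated tests show, callers must now invoke `InitDevices({"CPU"})` once before running operators, since device contexts are resolved from a global pool rather than passed in.

```c++
#include "paddle/framework/init.h"
#include "paddle/framework/operator.h"

namespace paddle {
namespace framework {

// Hypothetical operator: Run() now takes a Place instead of a
// DeviceContext, so the operator (or the framework) resolves the
// device context itself at execution time.
class EchoOp : public OperatorBase {
 public:
  using OperatorBase::OperatorBase;

  void Run(const Scope& scope, const platform::Place& place) const override {
    if (platform::is_gpu_place(place)) {
      // ... launch GPU work via the pooled device context ...
    }
  }

  std::unique_ptr<OperatorBase> Clone() const override {
    return std::unique_ptr<OperatorBase>(new EchoOp(*this));
  }
};

}  // namespace framework
}  // namespace paddle
```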
*/ namespace paddle { namespace framework { -BlockDescBind *ProgramDescBind::AppendBlock(const BlockDescBind &parent) { +BlockDesc *ProgramDesc::AppendBlock(const BlockDesc &parent) { auto *b = desc_.add_blocks(); b->set_parent_idx(parent.ID()); b->set_idx(desc_.blocks_size() - 1); - blocks_.emplace_back(new BlockDescBind(this, b)); + blocks_.emplace_back(new BlockDesc(this, b)); return blocks_.back().get(); } -ProgramDesc *ProgramDescBind::Proto() { +proto::ProgramDesc *ProgramDesc::Proto() { for (auto &block : blocks_) { block->Flush(); } return &desc_; } -ProgramDescBind::ProgramDescBind() { +ProgramDesc::ProgramDesc() { auto *block = desc_.mutable_blocks()->Add(); block->set_idx(kRootBlockIndex); block->set_parent_idx(kNoneBlockIndex); - blocks_.emplace_back(new BlockDescBind(this, block)); + blocks_.emplace_back(new BlockDesc(this, block)); } -ProgramDescBind::ProgramDescBind(const ProgramDescBind &o) { +ProgramDesc::ProgramDesc(const ProgramDesc &o) { desc_ = o.desc_; for (int i = 0; i < desc_.blocks_size(); ++i) { auto *block = desc_.mutable_blocks(i); - blocks_.emplace_back(new BlockDescBind(*o.blocks_[i], block, this)); + blocks_.emplace_back(new BlockDesc(*o.blocks_[i], block, this)); } } -ProgramDescBind::ProgramDescBind(const ProgramDesc &desc) { +ProgramDesc::ProgramDesc(const proto::ProgramDesc &desc) { desc_ = desc; for (auto &block_desc : *desc_.mutable_blocks()) { - blocks_.emplace_back(new BlockDescBind(this, &block_desc)); + blocks_.emplace_back(new BlockDesc(this, &block_desc)); } } -ProgramDescBind::ProgramDescBind(const std::string &binary_str) { +ProgramDesc::ProgramDesc(const std::string &binary_str) { PADDLE_ENFORCE(desc_.ParseFromString(binary_str), "Fail to parse program_desc from binary string."); for (auto &block_desc : *desc_.mutable_blocks()) { - blocks_.emplace_back(new BlockDescBind(this, &block_desc)); + blocks_.emplace_back(new BlockDesc(this, &block_desc)); } } diff --git a/paddle/framework/program_desc.h b/paddle/framework/program_desc.h index b1cb086de4..15a962bb69 100644 --- a/paddle/framework/program_desc.h +++ b/paddle/framework/program_desc.h @@ -23,32 +23,32 @@ limitations under the License. 
*/ namespace paddle { namespace framework { -class BlockDescBind; +class BlockDesc; -class ProgramDescBind { +class ProgramDesc { public: - ProgramDescBind(); + ProgramDesc(); - explicit ProgramDescBind(const ProgramDesc &desc); + explicit ProgramDesc(const proto::ProgramDesc &desc); - ProgramDescBind(const ProgramDescBind &o); + ProgramDesc(const ProgramDesc &o); - explicit ProgramDescBind(const std::string &binary_str); + explicit ProgramDesc(const std::string &binary_str); - BlockDescBind *AppendBlock(const BlockDescBind &parent); + BlockDesc *AppendBlock(const BlockDesc &parent); - BlockDescBind *MutableBlock(size_t idx) { return blocks_[idx].get(); } + BlockDesc *MutableBlock(size_t idx) { return blocks_[idx].get(); } - const BlockDescBind &Block(size_t idx) const { return *blocks_[idx]; } + const BlockDesc &Block(size_t idx) const { return *blocks_[idx]; } size_t Size() const { return blocks_.size(); } - ProgramDesc *Proto(); + proto::ProgramDesc *Proto(); private: - ProgramDesc desc_; + proto::ProgramDesc desc_; - std::vector> blocks_; + std::vector> blocks_; }; } // namespace framework } // namespace paddle diff --git a/paddle/framework/program_desc_test.cc b/paddle/framework/program_desc_test.cc index 83e7286e0e..59947c9f21 100644 --- a/paddle/framework/program_desc_test.cc +++ b/paddle/framework/program_desc_test.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
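For reference, a short sketch of the renamed `ProgramDesc` API in use, mirroring the round trip exercised by the test that follows; it assumes the `program_desc.h` declared above.

```c++
#include <string>

#include "paddle/framework/program_desc.h"

using paddle::framework::ProgramDesc;

void RoundTrip() {
  ProgramDesc program;                    // owns a proto::ProgramDesc with a root block
  auto* block = program.MutableBlock(0);  // a BlockDesc (formerly BlockDescBind)
  auto* x = block->Var("X");
  x->SetType(paddle::framework::proto::VarDesc_VarType_LOD_TENSOR);
  x->SetDataType(paddle::framework::proto::FP32);
  x->SetShape({1000, 784});

  std::string binary;
  program.Proto()->SerializeToString(&binary);  // Proto() flushes every block first

  ProgramDesc restored(binary);  // reparse from the serialized bytes
}
```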
*/ #include "paddle/framework/program_desc.h" #include "gtest/gtest.h" @@ -19,18 +19,18 @@ namespace paddle { namespace framework { TEST(ProgramDesc, copy_ctor) { - ProgramDescBind program; + ProgramDesc program; auto* global_block = program.MutableBlock(0); auto* x = global_block->Var("X"); - x->SetType(VarDesc_VarType_LOD_TENSOR); + x->SetType(proto::VarDesc_VarType_LOD_TENSOR); x->SetLoDLevel(0); - x->SetDataType(FP32); + x->SetDataType(proto::FP32); x->SetShape({1000, 784}); auto* y = global_block->Var("Y"); - y->SetType(VarDesc_VarType_LOD_TENSOR); + y->SetType(proto::VarDesc_VarType_LOD_TENSOR); y->SetLoDLevel(0); - y->SetDataType(FP32); + y->SetDataType(proto::FP32); y->SetShape({784, 100}); auto* op = global_block->AppendOp(); @@ -39,15 +39,15 @@ TEST(ProgramDesc, copy_ctor) { op->SetInput("Y", {y->Name()}); auto* out = global_block->Var("Out"); - out->SetType(VarDesc_VarType_LOD_TENSOR); + out->SetType(proto::VarDesc_VarType_LOD_TENSOR); op->SetOutput("Y", {out->Name()}); - ProgramDescBind program_copy(program); + ProgramDesc program_copy(program); auto* global_block_copy = program_copy.MutableBlock(0); ASSERT_NE(global_block, global_block_copy); - auto assert_same_var = [&](const std::string& name, VarDescBind* var_before) { + auto assert_same_var = [&](const std::string& name, VarDesc* var_before) { ASSERT_TRUE(global_block_copy->HasVar(name)); auto* copy = global_block_copy->Var(name); ASSERT_NE(copy, var_before); @@ -81,18 +81,18 @@ TEST(ProgramDesc, copy_ctor) { } TEST(ProgramDescBind, serialize_and_deserialize) { - ProgramDescBind program_origin; + ProgramDesc program_origin; auto* global_block = program_origin.MutableBlock(0); auto* x = global_block->Var("X"); - x->SetType(VarDesc_VarType_LOD_TENSOR); + x->SetType(proto::VarDesc_VarType_LOD_TENSOR); x->SetLoDLevel(0); - x->SetDataType(FP32); + x->SetDataType(proto::FP32); x->SetShape({1000, 784}); auto* y = global_block->Var("Y"); - y->SetType(VarDesc_VarType_LOD_TENSOR); + y->SetType(proto::VarDesc_VarType_LOD_TENSOR); y->SetLoDLevel(0); - y->SetDataType(FP32); + y->SetDataType(proto::FP32); y->SetShape({784, 100}); auto* op = global_block->AppendOp(); @@ -101,17 +101,17 @@ TEST(ProgramDescBind, serialize_and_deserialize) { op->SetInput("Y", {y->Name()}); auto* out = global_block->Var("Out"); - out->SetType(VarDesc_VarType_LOD_TENSOR); + out->SetType(proto::VarDesc_VarType_LOD_TENSOR); op->SetOutput("Y", {out->Name()}); std::string binary_str; program_origin.Proto()->SerializeToString(&binary_str); - ProgramDescBind program_restored(binary_str); + ProgramDesc program_restored(binary_str); auto* global_block_restored = program_restored.MutableBlock(0); ASSERT_NE(global_block, global_block_restored); - auto assert_same_var = [&](const std::string& name, VarDescBind* var_before) { + auto assert_same_var = [&](const std::string& name, VarDesc* var_before) { ASSERT_TRUE(global_block_restored->HasVar(name)); auto* restored = global_block_restored->Var(name); ASSERT_NE(restored, var_before); diff --git a/paddle/framework/prune.cc b/paddle/framework/prune.cc index da76052eb4..25eb813ffb 100644 --- a/paddle/framework/prune.cc +++ b/paddle/framework/prune.cc @@ -29,7 +29,7 @@ const std::string kFetchOpType = "fetch"; const std::string kDropOutOpType = "dropout"; const std::string kBatchNormOpType = "batch_norm"; -bool HasDependentVar(const OpDesc& op_desc, +bool HasDependentVar(const proto::OpDesc& op_desc, const std::set& dependent_vars) { for (auto& var : op_desc.outputs()) { for (auto& argu : var.arguments()) { @@ -41,14 +41,15 
@@ bool HasDependentVar(const OpDesc& op_desc, return false; } -bool IsTarget(const OpDesc& op_desc) { +bool IsTarget(const proto::OpDesc& op_desc) { if (op_desc.has_is_target()) { return op_desc.is_target(); } return false; } -void prune_impl(const ProgramDesc& input, ProgramDesc* output, int block_id) { +void prune_impl(const proto::ProgramDesc& input, proto::ProgramDesc* output, + int block_id) { // TODO(tonyyang-svail): // - will change to use multiple blocks for RNN op and Cond Op @@ -104,12 +105,12 @@ void prune_impl(const ProgramDesc& input, ProgramDesc* output, int block_id) { } // TODO(fengjiayi): Prune() could be inplaced to avoid unnecessary copies -void Prune(const ProgramDesc& input, ProgramDesc* output) { +void Prune(const proto::ProgramDesc& input, proto::ProgramDesc* output) { prune_impl(input, output, 0); } -void inference_optimize_impl(const ProgramDesc& input, ProgramDesc* output, - int block_id) { +void inference_optimize_impl(const proto::ProgramDesc& input, + proto::ProgramDesc* output, int block_id) { *output = input; auto* op_field = output->mutable_blocks(block_id)->mutable_ops(); for (auto& op_desc : *op_field) { @@ -125,7 +126,8 @@ void inference_optimize_impl(const ProgramDesc& input, ProgramDesc* output, } } -void InferenceOptimize(const ProgramDesc& input, ProgramDesc* output) { +void InferenceOptimize(const proto::ProgramDesc& input, + proto::ProgramDesc* output) { inference_optimize_impl(input, output, 0); } diff --git a/paddle/framework/prune.h b/paddle/framework/prune.h index 23db014894..593292523d 100644 --- a/paddle/framework/prune.h +++ b/paddle/framework/prune.h @@ -20,9 +20,10 @@ limitations under the License. */ namespace paddle { namespace framework { -void Prune(const ProgramDesc& input, ProgramDesc* output); +void Prune(const proto::ProgramDesc& input, proto::ProgramDesc* output); -void InferenceOptimize(const ProgramDesc& input, ProgramDesc* output); +void InferenceOptimize(const proto::ProgramDesc& input, + proto::ProgramDesc* output); } // namespace framework } // namespace paddle diff --git a/paddle/framework/prune_test.cc b/paddle/framework/prune_test.cc index f21df37a29..d76c5abca9 100644 --- a/paddle/framework/prune_test.cc +++ b/paddle/framework/prune_test.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
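To illustrate the proto-level `Prune` entry point declared above, here is a small sketch in the same style as the tests that follow; the `PruneToTarget` helper is hypothetical.

```c++
#include "paddle/framework/program_desc.h"
#include "paddle/framework/prune.h"

namespace f = paddle::framework;

// Mark one op in the root block as a target, then prune away every op the
// target does not (transitively) depend on.
void PruneToTarget(f::ProgramDesc* program, int op_index) {
  f::proto::ProgramDesc* pdesc = program->Proto();
  pdesc->mutable_blocks(0)->mutable_ops(op_index)->set_is_target(true);

  f::proto::ProgramDesc pruned;
  f::Prune(*pdesc, &pruned);  // Prune now operates purely on proto messages
}
```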
*/ #include "paddle/framework/prune.h" @@ -29,12 +29,12 @@ namespace ops = paddle::operators; void AddOp(const std::string &type, const f::VariableNameMap &inputs, const f::VariableNameMap &outputs, f::AttributeMap attrs, - paddle::framework::BlockDescBind *block) { + paddle::framework::BlockDesc *block) { // insert output for (auto kv : outputs) { for (auto v : kv.second) { auto var = block->Var(v); - var->SetDataType(paddle::framework::DataType::FP32); + var->SetDataType(paddle::framework::proto::DataType::FP32); } } @@ -51,26 +51,26 @@ void AddOp(const std::string &type, const f::VariableNameMap &inputs, } TEST(Prune, one_operator) { - f::ProgramDescBind program; - f::BlockDescBind *block = program.MutableBlock(0); + f::ProgramDesc program; + f::BlockDesc *block = program.MutableBlock(0); AddOp("one_one", {{"input", {"a"}}}, {{"output", {"b"}}}, f::AttributeMap{}, block); - f::ProgramDesc *pdesc = program.Proto(); - f::ProgramDesc pruned; + f::proto::ProgramDesc *pdesc = program.Proto(); + f::proto::ProgramDesc pruned; - Prune(*pdesc, &pruned); + f::Prune(*pdesc, &pruned); PADDLE_ENFORCE_EQ(pruned.blocks(0).ops_size(), 0); pdesc->mutable_blocks(0)->mutable_ops(0)->set_is_target(true); - Prune(*pdesc, &pruned); + f::Prune(*pdesc, &pruned); PADDLE_ENFORCE_EQ(pruned.blocks(0).ops_size(), 1); } TEST(Prune, forward) { - f::ProgramDescBind program; - f::BlockDescBind *block = program.MutableBlock(0); + f::ProgramDesc program; + f::BlockDesc *block = program.MutableBlock(0); AddOp("one_one", {{"input", {"a"}}}, {{"output", {"b"}}}, f::AttributeMap{}, block); @@ -81,19 +81,19 @@ TEST(Prune, forward) { AddOp("one_one", {{"input", {"d"}}}, {{"output", {"e"}}}, f::AttributeMap{}, block); - f::ProgramDesc *pdesc = program.Proto(); + f::proto::ProgramDesc *pdesc = program.Proto(); for (int i = 0; i < pdesc->blocks(0).ops_size(); ++i) { - f::ProgramDesc pruned; + f::proto::ProgramDesc pruned; pdesc->mutable_blocks(0)->mutable_ops(i)->set_is_target(true); - Prune(*pdesc, &pruned); + f::Prune(*pdesc, &pruned); PADDLE_ENFORCE_EQ(pruned.blocks(0).ops_size(), i + 1); } } TEST(Prune, multi_input_op) { - f::ProgramDescBind program; - f::BlockDescBind *block = program.MutableBlock(0); + f::ProgramDesc program; + f::BlockDesc *block = program.MutableBlock(0); AddOp("one_one", {{"input", {"a0"}}}, {{"output", {"b0"}}}, f::AttributeMap{}, block); @@ -104,17 +104,17 @@ TEST(Prune, multi_input_op) { AddOp("three_one", {{"input", {"b0", "b1", "b2"}}}, {{"output", {"c"}}}, f::AttributeMap{}, block); - f::ProgramDesc *pdesc = program.Proto(); + f::proto::ProgramDesc *pdesc = program.Proto(); pdesc->mutable_blocks(0)->mutable_ops(3)->set_is_target(true); - f::ProgramDesc pruned; - Prune(*pdesc, &pruned); + f::proto::ProgramDesc pruned; + f::Prune(*pdesc, &pruned); PADDLE_ENFORCE_EQ(pruned.blocks(0).ops_size(), 4); } TEST(Prune, multi_output_op) { - f::ProgramDescBind program; - f::BlockDescBind *block = program.MutableBlock(0); + f::ProgramDesc program; + f::BlockDesc *block = program.MutableBlock(0); AddOp("one_two", {{"input", {"a"}}}, {{"output", {"b", "c"}}}, f::AttributeMap{}, block); @@ -123,17 +123,17 @@ TEST(Prune, multi_output_op) { AddOp("one_one", {{"input", {"c"}}}, {{"output", {"c1"}}}, f::AttributeMap{}, block); - f::ProgramDesc *pdesc = program.Proto(); + f::proto::ProgramDesc *pdesc = program.Proto(); pdesc->mutable_blocks(0)->mutable_ops(2)->set_is_target(true); - f::ProgramDesc pruned; - Prune(*pdesc, &pruned); + f::proto::ProgramDesc pruned; + f::Prune(*pdesc, &pruned); 
PADDLE_ENFORCE_EQ(pruned.blocks(0).ops_size(), 2); } TEST(Prune, multi_target) { - f::ProgramDescBind program; - f::BlockDescBind *block = program.MutableBlock(0); + f::ProgramDesc program; + f::BlockDesc *block = program.MutableBlock(0); AddOp("one_two", {{"input", {"a"}}}, {{"output", {"b", "c"}}}, f::AttributeMap{}, block); @@ -142,11 +142,11 @@ TEST(Prune, multi_target) { AddOp("one_one", {{"input", {"c"}}}, {{"output", {"c1"}}}, f::AttributeMap{}, block); - f::ProgramDesc *pdesc = program.Proto(); + f::proto::ProgramDesc *pdesc = program.Proto(); pdesc->mutable_blocks(0)->mutable_ops(1)->set_is_target(true); pdesc->mutable_blocks(0)->mutable_ops(2)->set_is_target(true); - f::ProgramDesc pruned; - Prune(*pdesc, &pruned); + f::proto::ProgramDesc pruned; + f::Prune(*pdesc, &pruned); PADDLE_ENFORCE_EQ(pruned.blocks(0).ops_size(), 3); } diff --git a/paddle/framework/scope.cc b/paddle/framework/scope.cc index 656736e238..0c01d605bc 100644 --- a/paddle/framework/scope.cc +++ b/paddle/framework/scope.cc @@ -74,17 +74,9 @@ void Scope::DropKids() { kids_.clear(); } -std::vector Scope::GetAllNames(bool recursive) const { - std::vector known_vars(vars_.size()); - - if (recursive) { - for (auto& kid : kids_) { - auto kid_vars = kid->GetAllNames(); - for (auto& p : kid_vars) { - known_vars.emplace_back(p); - } - } - } +std::vector Scope::LocalVarNames() const { + std::vector known_vars; + known_vars.reserve(this->vars_.size()); for (auto& p : vars_) { known_vars.emplace_back(p.first); } diff --git a/paddle/framework/scope.h b/paddle/framework/scope.h index 56e815db54..10143326df 100644 --- a/paddle/framework/scope.h +++ b/paddle/framework/scope.h @@ -66,7 +66,7 @@ class Scope { void DropKids(); // enumerate all the variables current contains. - std::vector GetAllNames(bool recursive = false) const; + std::vector LocalVarNames() const; // Rename variable to a new name void Rename(const std::string& origin_name, diff --git a/paddle/framework/scope_test.cc b/paddle/framework/scope_test.cc index f738d5ba9e..0f5b86061d 100644 --- a/paddle/framework/scope_test.cc +++ b/paddle/framework/scope_test.cc @@ -61,7 +61,7 @@ TEST(Scope, GetAllNames) { Variable* v = s.Var("a"); EXPECT_EQ(&s, s.FindScope(v)); - std::vector ans = s.GetAllNames(); + std::vector ans = s.LocalVarNames(); std::string str; for (auto& var : ans) { str += var; diff --git a/paddle/framework/shape_inference.cc b/paddle/framework/shape_inference.cc index 7dac1cfd5e..e53cc0cdab 100644 --- a/paddle/framework/shape_inference.cc +++ b/paddle/framework/shape_inference.cc @@ -1,16 +1,16 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
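The `GetAllNames` to `LocalVarNames` rename also drops the recursive mode, so enumeration is now strictly per-scope. A minimal sketch of the new call, assuming the `scope.h` above:

```c++
#include <string>
#include <vector>

#include "paddle/framework/scope.h"

// LocalVarNames() lists only the variables owned by this scope; child
// scopes are never traversed (the old `recursive` flag is gone).
void DumpScopeVars(const paddle::framework::Scope& scope) {
  std::vector<std::string> names = scope.LocalVarNames();
  for (const std::string& name : names) {
    // ... inspect or log `name` ...
  }
}
```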
*/ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/framework/shape_inference.h" #include "grad_op_desc_maker.h" #include "paddle/framework/operator.h" @@ -57,17 +57,17 @@ void InferShapeContext::SetDims(const std::vector &names, SetDim(names[i], dims[i]); } } -std::vector InferShapeContext::GetInputsVarType( +std::vector InferShapeContext::GetInputsVarType( const std::string &name) const { return GetVarTypes(Inputs(name)); } -std::vector InferShapeContext::GetOutputsVarType( +std::vector InferShapeContext::GetOutputsVarType( const std::string &name) const { return GetVarTypes(Outputs(name)); } -std::vector InferShapeContext::GetVarTypes( +std::vector InferShapeContext::GetVarTypes( const std::vector &names) const { - std::vector retv; + std::vector retv; retv.resize(names.size()); std::transform(names.begin(), names.end(), retv.begin(), std::bind(std::mem_fn(&InferShapeContext::GetVarType), this, diff --git a/paddle/framework/shape_inference.h b/paddle/framework/shape_inference.h index 46f2ea84b4..f93319d8f2 100644 --- a/paddle/framework/shape_inference.h +++ b/paddle/framework/shape_inference.h @@ -27,8 +27,9 @@ class InferShapeContext { virtual bool HasInput(const std::string &name) const = 0; virtual bool HasOutput(const std::string &name) const = 0; - std::vector GetInputsVarType(const std::string &name) const; - std::vector GetOutputsVarType( + std::vector GetInputsVarType( + const std::string &name) const; + std::vector GetOutputsVarType( const std::string &name) const; virtual bool HasInputs(const std::string &name) const = 0; @@ -65,10 +66,10 @@ class InferShapeContext { std::vector GetDims( const std::vector &names) const; - std::vector GetVarTypes( + std::vector GetVarTypes( const std::vector &names) const; - virtual VarDesc::VarType GetVarType(const std::string &name) const = 0; + virtual proto::VarDesc::VarType GetVarType(const std::string &name) const = 0; }; } // namespace framework diff --git a/paddle/framework/tensor.cc b/paddle/framework/tensor.cc index ea7b2a1f7b..f922e60624 100644 --- a/paddle/framework/tensor.cc +++ b/paddle/framework/tensor.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
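Since variable types are now reported as `proto::VarDesc::VarType`, type checks inside shape inference pick up the `proto::` qualifier as well. A hedged sketch (the helper is hypothetical, and an input slot named "X" is assumed):

```c++
#include <vector>

#include "paddle/framework/shape_inference.h"

namespace paddle {
namespace framework {

// Returns true if the first variable bound to input slot "X" is a LoDTensor.
inline bool FirstInputIsLoDTensor(const InferShapeContext& ctx) {
  std::vector<proto::VarDesc::VarType> types = ctx.GetInputsVarType("X");
  return !types.empty() && types[0] == proto::VarDesc::LOD_TENSOR;
}

}  // namespace framework
}  // namespace paddle
```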
*/ #include "paddle/framework/tensor.h" diff --git a/paddle/framework/tensor.h b/paddle/framework/tensor.h index 6a0c5133c9..b9f6884f7c 100644 --- a/paddle/framework/tensor.h +++ b/paddle/framework/tensor.h @@ -20,12 +20,12 @@ limitations under the License. */ #include #include +#include "paddle/framework/data_layout.h" #include "paddle/framework/ddim.h" #include "paddle/memory/memory.h" #include "paddle/platform/device_context.h" #include "paddle/platform/enforce.h" #include "paddle/platform/place.h" -#include "unsupported/Eigen/CXX11/Tensor" namespace paddle { @@ -115,6 +115,10 @@ class Tensor { inline void check_memory_size() const; + inline DataLayout layout() const { return layout_; } + + inline void set_layout(const DataLayout layout) { layout_ = layout; } + private: friend class LoDTensor; @@ -173,6 +177,19 @@ class Tensor { DDim dims_; + /** + * @brief the layout of memory block, default is NCHW. + * + * @note the memory allocation order, describe how weight/data is stored + * For example, in 4-D Tensor(rank=4), there are three commonly + * used layout. They are + * NCHW, NHWC, CHWN. + * N,C,H,W for respectively the batch size, the number of + * feature maps, the height. + */ + + DataLayout layout_ = DataLayout::kNHWC; + /** * @brief A PlaceHolder may be shared by more than one tensor. * diff --git a/paddle/framework/tensor.md b/paddle/framework/tensor.md index 7a80816d8e..0a27ac9bb6 100644 --- a/paddle/framework/tensor.md +++ b/paddle/framework/tensor.md @@ -71,7 +71,7 @@ private: ``` ```c++ -typedef boost::variant Place; +typedef boost::variant Place; typedef boost::variant, Dim<2>, Dim<3>, Dim<4>, Dim<5>, Dim<6>, Dim<7>, Dim<8>, Dim<9>> DDimVar; typedef boost::variant< diff --git a/paddle/framework/tensor_impl.h b/paddle/framework/tensor_impl.h index aba1f9f093..6c6f298edc 100644 --- a/paddle/framework/tensor_impl.h +++ b/paddle/framework/tensor_impl.h @@ -125,11 +125,11 @@ inline void* Tensor::mutable_data(platform::Place place, std::type_index type) { boost::get(place), size, type)); } else if (platform::is_gpu_place(place)) { #ifndef PADDLE_WITH_CUDA - PADDLE_THROW("'GPUPlace' is not supported in CPU only device."); + PADDLE_THROW("'CUDAPlace' is not supported in CPU only device."); } #else - holder_.reset(new PlaceholderImpl( - boost::get(place), size, type)); + holder_.reset(new PlaceholderImpl( + boost::get(place), size, type)); } #endif offset_ = 0; @@ -165,6 +165,7 @@ inline Tensor Tensor::Slice(int begin_idx, int end_idx) const { size_t base = numel() / dims_[0]; Tensor dst; dst.holder_ = holder_; + dst.set_layout(layout_); DDim dst_dims = dims_; dst_dims[0] = end_idx - begin_idx; dst.Resize(dst_dims); diff --git a/paddle/framework/tensor_test.cc b/paddle/framework/tensor_test.cc index ceca64365a..ca76a9fcb9 100644 --- a/paddle/framework/tensor_test.cc +++ b/paddle/framework/tensor_test.cc @@ -80,20 +80,20 @@ TEST(Tensor, MutableData) { float* p1 = nullptr; float* p2 = nullptr; // initialization - p1 = src_tensor.mutable_data(make_ddim({1, 2, 3}), GPUPlace()); + p1 = src_tensor.mutable_data(make_ddim({1, 2, 3}), CUDAPlace()); EXPECT_NE(p1, nullptr); // set src_tensor a new dim with large size // momery is supposed to be re-allocated - p2 = src_tensor.mutable_data(make_ddim({3, 4}), GPUPlace()); + p2 = src_tensor.mutable_data(make_ddim({3, 4}), CUDAPlace()); EXPECT_NE(p2, nullptr); EXPECT_NE(p1, p2); // set src_tensor a new dim with same size // momery block is supposed to be unchanged - p1 = src_tensor.mutable_data(make_ddim({2, 2, 3}), GPUPlace()); + p1 = 
src_tensor.mutable_data(make_ddim({2, 2, 3}), CUDAPlace()); EXPECT_EQ(p1, p2); // set src_tensor a new dim with smaller size // momery block is supposed to be unchanged - p2 = src_tensor.mutable_data(make_ddim({2, 2}), GPUPlace()); + p2 = src_tensor.mutable_data(make_ddim({2, 2}), CUDAPlace()); EXPECT_EQ(p1, p2); } #endif @@ -130,7 +130,7 @@ TEST(Tensor, ShareDataWith) { { Tensor src_tensor; Tensor dst_tensor; - src_tensor.mutable_data(make_ddim({2, 3, 4}), GPUPlace()); + src_tensor.mutable_data(make_ddim({2, 3, 4}), CUDAPlace()); dst_tensor.ShareDataWith(src_tensor); ASSERT_EQ(src_tensor.data(), dst_tensor.data()); } @@ -166,7 +166,7 @@ TEST(Tensor, Slice) { #ifdef PADDLE_WITH_CUDA { Tensor src_tensor; - src_tensor.mutable_data(make_ddim({6, 9}), GPUPlace()); + src_tensor.mutable_data(make_ddim({6, 9}), CUDAPlace()); Tensor slice_tensor = src_tensor.Slice(2, 6); DDim slice_dims = slice_tensor.dims(); ASSERT_EQ(arity(slice_dims), 2); @@ -176,11 +176,11 @@ TEST(Tensor, Slice) { uintptr_t src_data_address = reinterpret_cast(src_tensor.data()); uintptr_t src_mutable_data_address = reinterpret_cast( - src_tensor.mutable_data(src_tensor.dims(), GPUPlace())); + src_tensor.mutable_data(src_tensor.dims(), CUDAPlace())); uintptr_t slice_data_address = reinterpret_cast(slice_tensor.data()); uintptr_t slice_mutable_data_address = reinterpret_cast( - slice_tensor.mutable_data(slice_tensor.dims(), GPUPlace())); + slice_tensor.mutable_data(slice_tensor.dims(), CUDAPlace())); EXPECT_EQ(src_data_address, src_mutable_data_address); EXPECT_EQ(slice_data_address, slice_mutable_data_address); EXPECT_EQ(src_data_address + 9 * 2 * sizeof(double), slice_data_address); @@ -200,3 +200,12 @@ TEST(Tensor, ReshapeToMatrix) { ASSERT_EQ(res.dims()[0], 2 * 3); ASSERT_EQ(res.dims()[1], 4 * 9); } + +TEST(Tensor, Layout) { + using namespace paddle::framework; + using namespace paddle::platform; + Tensor src; + ASSERT_EQ(src.layout(), DataLayout::kNHWC); + src.set_layout(DataLayout::kAnyLayout); + ASSERT_EQ(src.layout(), DataLayout::kAnyLayout); +} diff --git a/paddle/framework/tensor_util.h b/paddle/framework/tensor_util.h index 4e34b90d57..ea4e4f22ea 100644 --- a/paddle/framework/tensor_util.h +++ b/paddle/framework/tensor_util.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
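A short sketch of the new layout plumbing, assuming `data_layout.h` defines `kNHWC`, `kNCHW`, and `kAnyLayout` as used elsewhere in this diff:

```c++
#include "paddle/framework/tensor.h"

void LayoutExample() {
  paddle::framework::Tensor t;
  // The member initializer in this diff is DataLayout::kNHWC (the doc comment
  // says the default is NCHW, but the initializer is what takes effect, as
  // TEST(Tensor, Layout) asserts).
  paddle::framework::DataLayout before = t.layout();
  t.set_layout(paddle::framework::DataLayout::kNCHW);

  // Slice() copies the layout to the slice, and CopyFrom() propagates it to
  // the destination tensor, so downstream consumers see a consistent tag.
  (void)before;
}
```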
*/ #pragma once #include "paddle/framework/tensor.h" @@ -33,6 +33,7 @@ inline void CopyFrom(const Tensor& src, const platform::Place& dst_place, src.check_memory_size(); dst->Resize(src.dims()); + dst->set_layout(src.layout()); auto src_place = src.place(); auto src_ptr = src.data(); @@ -47,11 +48,11 @@ inline void CopyFrom(const Tensor& src, const platform::Place& dst_place, #ifdef PADDLE_WITH_CUDA else if (platform::is_gpu_place(src_place) && // NOLINT platform::is_cpu_place(dst_place)) { - auto src_gpu_place = boost::get(src_place); + auto src_gpu_place = boost::get(src_place); auto dst_cpu_place = boost::get(dst_place); auto ctx_place = ctx.GetPlace(); PADDLE_ENFORCE(platform::is_gpu_place(ctx_place)); - auto ctx_gpu_place = boost::get(ctx_place); + auto ctx_gpu_place = boost::get(ctx_place); PADDLE_ENFORCE_EQ(src_gpu_place, ctx_gpu_place); memory::Copy( dst_cpu_place, dst_ptr, src_gpu_place, src_ptr, size, @@ -59,21 +60,21 @@ inline void CopyFrom(const Tensor& src, const platform::Place& dst_place, } else if (platform::is_cpu_place(src_place) && platform::is_gpu_place(dst_place)) { auto src_cpu_place = boost::get(src_place); - auto dst_gpu_place = boost::get(dst_place); + auto dst_gpu_place = boost::get(dst_place); auto ctx_place = ctx.GetPlace(); PADDLE_ENFORCE(platform::is_gpu_place(ctx_place)); - auto ctx_gpu_place = boost::get(ctx_place); + auto ctx_gpu_place = boost::get(ctx_place); PADDLE_ENFORCE_EQ(dst_gpu_place, ctx_gpu_place); memory::Copy( dst_gpu_place, dst_ptr, src_cpu_place, src_ptr, size, reinterpret_cast(ctx).stream()); } else if (platform::is_gpu_place(src_place) && platform::is_gpu_place(dst_place)) { - auto src_gpu_place = boost::get(src_place); - auto dst_gpu_place = boost::get(dst_place); + auto src_gpu_place = boost::get(src_place); + auto dst_gpu_place = boost::get(dst_place); auto ctx_place = ctx.GetPlace(); PADDLE_ENFORCE(platform::is_gpu_place(ctx_place)); - auto ctx_gpu_place = boost::get(ctx_place); + auto ctx_gpu_place = boost::get(ctx_place); PADDLE_ENFORCE_EQ(src_gpu_place, ctx_gpu_place); memory::Copy( dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size, @@ -82,6 +83,29 @@ inline void CopyFrom(const Tensor& src, const platform::Place& dst_place, #endif } +/** + * @brief CopyFrom support CPU <-> CPU + */ +inline void CopyFrom(const Tensor& src, const platform::Place& dst_place, + Tensor* dst) { + src.check_memory_size(); + dst->Resize(src.dims()); + dst->set_layout(src.layout()); + + auto src_place = src.place(); + auto src_ptr = src.data(); + + auto dst_ptr = dst->mutable_data(dst_place, src.type()); + + auto size = src.numel() * SizeOfType(src.type()); + + PADDLE_ENFORCE(platform::is_cpu_place(src_place) && + platform::is_cpu_place(dst_place)); + + memory::Copy(boost::get(dst_place), dst_ptr, + boost::get(src_place), src_ptr, size); +} + /** * @brief Copy the content of an external vector to a tensor. 
* @@ -108,13 +132,28 @@ inline void CopyFromVector(const std::vector& src, #ifdef PADDLE_WITH_CUDA else if (platform::is_gpu_place(dst_place)) { // NOLINT memory::Copy( - boost::get(dst_place), dst_ptr, src_place, src_ptr, + boost::get(dst_place), dst_ptr, src_place, src_ptr, size, reinterpret_cast(ctx).stream()); } #endif } +/** + * @brief CopyFromVector CPU vector -> CPU Tensor + */ +template +inline void CopyFromVector(const std::vector& src, Tensor* dst) { + platform::CPUPlace dst_place = platform::CPUPlace(); + auto src_ptr = static_cast(src.data()); + platform::CPUPlace src_place; + dst->Resize({static_cast(src.size())}); + auto dst_ptr = static_cast(dst->mutable_data(dst_place)); + auto size = src.size() * sizeof(T); + + memory::Copy(dst_place, dst_ptr, src_place, src_ptr, size); +} + /** * @brief Copy the content of a tensor to a vector * @@ -141,12 +180,30 @@ inline void CopyToVector(const Tensor& src, const platform::DeviceContext& ctx, #ifdef PADDLE_WITH_CUDA else if (platform::is_gpu_place(src.place())) { // NOLINT memory::Copy( - dst_place, dst_ptr, boost::get(src.place()), + dst_place, dst_ptr, boost::get(src.place()), src_ptr, size, reinterpret_cast(ctx).stream()); } #endif } +/** + * @brief CopyToVector CPUTensor <-> CPU Vector + */ +template +inline void CopyToVector(const Tensor& src, std::vector* dst) { + auto src_ptr = static_cast(src.data()); + auto size = src.numel() * sizeof(T); + + platform::CPUPlace dst_place; + dst->resize(src.numel()); + auto dst_ptr = static_cast(dst->data()); + + PADDLE_ENFORCE(platform::is_cpu_place(src.place())); + + memory::Copy(dst_place, dst_ptr, boost::get(src.place()), + src_ptr, size); +} + } // namespace framework } // namespace paddle diff --git a/paddle/framework/tensor_util_test.cc b/paddle/framework/tensor_util_test.cc index 03a70de182..f388c19f28 100644 --- a/paddle/framework/tensor_util_test.cc +++ b/paddle/framework/tensor_util_test.cc @@ -17,6 +17,7 @@ namespace paddle { namespace framework { + TEST(CopyFrom, Tensor) { Tensor src_tensor; Tensor dst_tensor; @@ -27,9 +28,10 @@ TEST(CopyFrom, Tensor) { int arr[9] = {1, 2, 3, 4, 5, 6, 7, 8, 9}; memcpy(src_ptr, arr, 9 * sizeof(int)); + src_tensor.set_layout(DataLayout::kAnyLayout); auto cpu_place = new platform::CPUPlace(); - CopyFrom(src_tensor, *cpu_place, cpu_ctx, &dst_tensor); + CopyFrom(src_tensor, *cpu_place, &dst_tensor); const int* dst_ptr = dst_tensor.data(); ASSERT_NE(src_ptr, dst_ptr); @@ -37,14 +39,18 @@ TEST(CopyFrom, Tensor) { EXPECT_EQ(src_ptr[i], dst_ptr[i]); } + EXPECT_TRUE(dst_tensor.layout() == src_tensor.layout()); + Tensor slice_tensor = src_tensor.Slice(1, 2); - CopyFrom(slice_tensor, *cpu_place, cpu_ctx, &dst_tensor); + CopyFrom(slice_tensor, *cpu_place, &dst_tensor); const int* slice_ptr = slice_tensor.data(); dst_ptr = dst_tensor.data(); ASSERT_NE(dst_ptr, slice_ptr); for (size_t i = 0; i < 3; ++i) { EXPECT_EQ(dst_ptr[i], slice_ptr[i]); } + EXPECT_TRUE(dst_tensor.layout() == src_tensor.layout()); + #ifdef PADDLE_WITH_CUDA { Tensor src_tensor; @@ -58,7 +64,7 @@ TEST(CopyFrom, Tensor) { memcpy(src_ptr, arr, 9 * sizeof(int)); // CPU Tensor to GPU Tensor - auto gpu_place = new platform::GPUPlace(0); + auto gpu_place = new platform::CUDAPlace(0); platform::CUDADeviceContext gpu_ctx(*gpu_place); CopyFrom(src_tensor, *gpu_place, gpu_ctx, &gpu_tensor); @@ -90,6 +96,8 @@ TEST(CopyFrom, Tensor) { for (size_t i = 0; i < 3; ++i) { EXPECT_EQ(dst_ptr[i], slice_ptr[i]); } + + EXPECT_TRUE(dst_tensor.layout() == src_tensor.layout()); } #endif } @@ -104,8 +112,7 @@ 
TEST(CopyFromVector, Tensor) { // Copy to CPU Tensor cpu_tensor.Resize(make_ddim({3, 3})); auto cpu_place = new paddle::platform::CPUPlace(); - CPUDeviceContext cpu_ctx(*cpu_place); - CopyFromVector(src_vec, cpu_ctx, &cpu_tensor); + CopyFromVector(src_vec, &cpu_tensor); // Compare Tensors const int* cpu_ptr = cpu_tensor.data(); @@ -117,7 +124,7 @@ TEST(CopyFromVector, Tensor) { src_vec.erase(src_vec.begin(), src_vec.begin() + 5); cpu_tensor.Resize(make_ddim({2, 2})); - CopyFromVector(src_vec, cpu_ctx, &cpu_tensor); + CopyFromVector(src_vec, &cpu_tensor); cpu_ptr = cpu_tensor.data(); src_ptr = src_vec.data(); ASSERT_NE(src_ptr, cpu_ptr); @@ -143,7 +150,7 @@ TEST(CopyFromVector, Tensor) { // Copy to GPUTensor gpu_tensor.Resize(make_ddim({3, 3})); - auto gpu_place = new paddle::platform::GPUPlace(); + auto gpu_place = new paddle::platform::CUDAPlace(); CUDADeviceContext gpu_ctx(*gpu_place); CopyFromVector(src_vec, gpu_ctx, &gpu_tensor); // Copy from GPU to CPU tensor for comparison @@ -198,9 +205,8 @@ TEST(CopyToVector, Tensor) { } CPUPlace place; - CPUDeviceContext cpu_ctx(place); std::vector dst; - CopyToVector(src, cpu_ctx, &dst); + CopyToVector(src, &dst); for (int i = 0; i < 3 * 3; ++i) { EXPECT_EQ(src_ptr[i], dst[i]); @@ -210,7 +216,7 @@ TEST(CopyToVector, Tensor) { { std::vector src_vec = {1, 2, 3, 4, 5, 6, 7, 8, 9}; Tensor gpu_tensor; - GPUPlace place; + CUDAPlace place; CUDADeviceContext gpu_ctx(place); CopyFromVector(src_vec, gpu_ctx, &gpu_tensor); diff --git a/paddle/framework/threadpool.cc b/paddle/framework/threadpool.cc new file mode 100644 index 0000000000..109a7e7dc4 --- /dev/null +++ b/paddle/framework/threadpool.cc @@ -0,0 +1,24 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/framework/threadpool.h" + +namespace paddle { +namespace framework { + +std::unique_ptr ThreadPool::threadpool(nullptr); +std::once_flag ThreadPool::init_flag; + +} // namespace framework +} // namespace paddle diff --git a/paddle/framework/threadpool.h b/paddle/framework/threadpool.h new file mode 100644 index 0000000000..5f6b2d458f --- /dev/null +++ b/paddle/framework/threadpool.h @@ -0,0 +1,156 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
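The context-free overloads added to `tensor_util.h` make CPU-to-CPU copies possible without constructing a device context, which is exactly what the updated tests exercise. A minimal usage sketch:

```c++
#include <vector>

#include "paddle/framework/tensor.h"
#include "paddle/framework/tensor_util.h"

void CpuRoundTrip() {
  std::vector<int> src = {1, 2, 3, 4};

  // CPU vector -> CPU tensor: no CPUDeviceContext argument needed anymore.
  paddle::framework::Tensor t;
  paddle::framework::CopyFromVector(src, &t);

  // CPU tensor -> CPU vector; the overload enforces a CPU placement.
  std::vector<int> dst;
  paddle::framework::CopyToVector(t, &dst);
}
```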
*/ + +#pragma once + +#include <condition_variable> +#include <functional> +#include <mutex> +#include <queue> +#include <thread> + +#include "paddle/platform/enforce.h" + +namespace paddle { +namespace framework { + +typedef std::function<void()> Task; + +class ThreadPool { + public: + /** + * @brief Get an instance of the thread pool; the number of threads + * is set to the number of hardware thread contexts. + */ + static ThreadPool* GetInstance() { + std::call_once(init_flag, &ThreadPool::Init); + return threadpool.get(); + } + + ~ThreadPool() { + { + // notify all threads to stop running + running_ = false; + scheduled_.notify_all(); + } + + for (auto& t : threads_) { + t->join(); + t.reset(nullptr); + } + } + + int GetNumThreads() const { return num_threads_; } + + int GetAvailable() { + std::unique_lock<std::mutex> lock(mutex_); + return available_; + } + + /** + * @brief Push a function onto the queue; it will be scheduled and + * executed once a thread is available. + * @param[in] fn the task to be pushed into the task queue. + */ + void Run(const Task& fn) { + std::unique_lock<std::mutex> lock(mutex_); + tasks_.push(fn); + lock.unlock(); + scheduled_.notify_one(); + } + + /** + * @brief Wait until all the tasks are completed. + */ + void Wait() { + std::unique_lock<std::mutex> lock(mutex_); + completed_.wait(lock, [=] { return Done() == true; }); + } + + private: + DISABLE_COPY_AND_ASSIGN(ThreadPool); + + explicit ThreadPool(int num_threads) + : num_threads_(num_threads), available_(num_threads), running_(true) { + threads_.resize(num_threads); + for (auto& thread : threads_) { + // TODO(Yancey1989): bind each thread to a specific CPU core + thread.reset(new std::thread(std::bind(&ThreadPool::TaskLoop, this))); + } + } + + /** + * @brief If the task queue is empty and available_ + * is equal to the number of threads, it means that + * all tasks are completed. + * + * Note: this function is not thread-safe. + * + * @return true if all tasks are completed. + */ + bool Done() { return tasks_.empty() && available_ == num_threads_; } + + void TaskLoop() { + while (running_) { + std::unique_lock<std::mutex> lock(mutex_); + scheduled_.wait(lock, [=] { return !tasks_.empty() || !running_; }); + + if (!running_) { + break; + } + // pop a task from the task queue + auto task = tasks_.front(); + tasks_.pop(); + + --available_; + lock.unlock(); + + // run the task + task(); + + { + std::unique_lock<std::mutex> lock(mutex_); + ++available_; + if (Done()) { + completed_.notify_all(); + } + } + } + } + + static void Init() { + if (threadpool.get() == nullptr) { + // TODO(Yancey1989): make the maximum number of threads configurable + int num_threads = std::thread::hardware_concurrency(); + PADDLE_ENFORCE_GT(num_threads, 0); + threadpool.reset(new ThreadPool(num_threads)); + } + } + + private: + static std::unique_ptr<ThreadPool> threadpool; + static std::once_flag init_flag; + + int num_threads_; + int available_; + bool running_; + std::queue<Task> tasks_; + std::vector<std::unique_ptr<std::thread>> threads_; + std::mutex mutex_; + std::condition_variable scheduled_; + std::condition_variable completed_; +}; + +} // namespace framework +} // namespace paddle diff --git a/paddle/framework/threadpool_test.cc b/paddle/framework/threadpool_test.cc new file mode 100644 index 0000000000..012d92a5ed --- /dev/null +++ b/paddle/framework/threadpool_test.cc @@ -0,0 +1,56 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include +#include + +#include "threadpool.h" + +namespace framework = paddle::framework; + +void do_sum(framework::ThreadPool* pool, std::atomic& sum, int cnt) { + for (int i = 0; i < cnt; ++i) { + pool->Run([&sum]() { sum.fetch_add(1); }); + } +} + +TEST(ThreadPool, ConcurrentInit) { + framework::ThreadPool* pool; + int concurrent_cnt = 50; + std::vector threads; + for (int i = 0; i < concurrent_cnt; ++i) { + std::thread t([&pool]() { pool = framework::ThreadPool::GetInstance(); }); + threads.push_back(std::move(t)); + } + for (auto& t : threads) { + t.join(); + } +} + +TEST(ThreadPool, ConcurrentStart) { + framework::ThreadPool* pool = framework::ThreadPool::GetInstance(); + std::atomic sum(0); + std::vector threads; + int concurrent_cnt = 50; + // sum = (n * (n + 1)) / 2 + for (int i = 1; i <= concurrent_cnt; ++i) { + std::thread t(do_sum, pool, std::ref(sum), i); + threads.push_back(std::move(t)); + } + for (auto& t : threads) { + t.join(); + } + pool->Wait(); + EXPECT_EQ(sum, ((concurrent_cnt + 1) * concurrent_cnt) / 2); +} diff --git a/paddle/framework/type_defs.h b/paddle/framework/type_defs.h index baeb98c9bd..d834d34375 100644 --- a/paddle/framework/type_defs.h +++ b/paddle/framework/type_defs.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
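Beyond the unit tests above, typical use of the new singleton pool looks like the sketch below (the task bodies are placeholders):

```c++
#include "paddle/framework/threadpool.h"

void RunTasks() {
  // Lazily created singleton, sized to std::thread::hardware_concurrency().
  paddle::framework::ThreadPool* pool =
      paddle::framework::ThreadPool::GetInstance();

  for (int i = 0; i < 8; ++i) {
    pool->Run([i]() {
      (void)i;  // ... work item i ...
    });
  }
  pool->Wait();  // blocks until the queue drains and all workers are idle
}
```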
*/ #pragma once #include @@ -25,11 +25,9 @@ namespace paddle { namespace framework { class OperatorBase; -class OpDescBind; -class BlockDescBind; -class BlockDesc; +class OpDesc; class InferShapeContext; -class BlockDescBind; +class BlockDesc; using VariableNameMap = std::map>; @@ -37,7 +35,7 @@ using VariableNameMap = std::map>; using Attribute = boost::variant, std::vector, std::vector, bool, - std::vector, BlockDescBind*>; + std::vector, BlockDesc*>; using AttributeMap = std::unordered_map; @@ -45,13 +43,13 @@ using OpCreator = std::function; -using GradOpMakerFN = std::function>( - const OpDescBind&, const std::unordered_set& /*no_grad_set*/, +using GradOpMakerFN = std::function>( + const OpDesc&, const std::unordered_set& /*no_grad_set*/, std::unordered_map* /*grad_to_var*/, - const std::vector& grad_block)>; + const std::vector& grad_block)>; -using InferVarTypeFN = std::function; +using InferVarTypeFN = + std::function; using InferShapeFN = std::function; diff --git a/paddle/framework/var_desc.cc b/paddle/framework/var_desc.cc index 0babec29f6..bd8973eeb3 100644 --- a/paddle/framework/var_desc.cc +++ b/paddle/framework/var_desc.cc @@ -18,30 +18,32 @@ limitations under the License. */ namespace paddle { namespace framework { -VarDesc::VarType VarDescBind::GetType() const { return desc_.type(); } +proto::VarDesc::VarType VarDesc::GetType() const { return desc_.type(); } -void VarDescBind::SetType(VarDesc::VarType type) { desc_.set_type(type); } +void VarDesc::SetType(proto::VarDesc::VarType type) { desc_.set_type(type); } -void VarDescBind::SetShape(const std::vector &dims) { +void VarDesc::SetShape(const std::vector &dims) { VectorToRepeated(dims, mutable_tensor_desc()->mutable_dims()); } -void VarDescBind::SetDataType(DataType data_type) { +void VarDesc::SetDataType(proto::DataType data_type) { mutable_tensor_desc()->set_data_type(data_type); } -std::vector VarDescBind::Shape() const { +std::vector VarDesc::Shape() const { return RepeatedToVector(tensor_desc().dims()); } -DataType VarDescBind::GetDataType() const { return tensor_desc().data_type(); } +proto::DataType VarDesc::GetDataType() const { + return tensor_desc().data_type(); +} -void VarDescBind::SetLoDLevel(int32_t lod_level) { +void VarDesc::SetLoDLevel(int32_t lod_level) { switch (desc_.type()) { - case VarDesc::LOD_TENSOR: + case proto::VarDesc::LOD_TENSOR: desc_.mutable_lod_tensor()->set_lod_level(lod_level); break; - case VarDesc::LOD_TENSOR_ARRAY: + case proto::VarDesc::LOD_TENSOR_ARRAY: desc_.mutable_tensor_array()->set_lod_level(lod_level); break; default: @@ -50,11 +52,11 @@ void VarDescBind::SetLoDLevel(int32_t lod_level) { } } -int32_t VarDescBind::GetLodLevel() const { +int32_t VarDesc::GetLodLevel() const { switch (desc_.type()) { - case VarDesc::LOD_TENSOR: + case proto::VarDesc::LOD_TENSOR: return desc_.lod_tensor().lod_level(); - case VarDesc::LOD_TENSOR_ARRAY: + case proto::VarDesc::LOD_TENSOR_ARRAY: return desc_.tensor_array().lod_level(); default: PADDLE_THROW("Tensor type=%d does not support LoDLevel", @@ -62,29 +64,29 @@ int32_t VarDescBind::GetLodLevel() const { } } -const TensorDesc &VarDescBind::tensor_desc() const { +const proto::TensorDesc &VarDesc::tensor_desc() const { PADDLE_ENFORCE(desc_.has_type(), "invoke TensorDesc must after set type"); switch (desc_.type()) { - case VarDesc::SELECTED_ROWS: + case proto::VarDesc::SELECTED_ROWS: return desc_.selected_rows(); - case VarDesc::LOD_TENSOR: + case proto::VarDesc::LOD_TENSOR: return desc_.lod_tensor().tensor(); - case VarDesc::LOD_TENSOR_ARRAY: + 
case proto::VarDesc::LOD_TENSOR_ARRAY: return desc_.tensor_array().tensor(); default: PADDLE_THROW("Unexpected branch."); } } -TensorDesc *VarDescBind::mutable_tensor_desc() { +proto::TensorDesc *VarDesc::mutable_tensor_desc() { PADDLE_ENFORCE(desc_.has_type(), "invoke MutableTensorDesc must after set type"); switch (desc_.type()) { - case VarDesc::SELECTED_ROWS: + case proto::VarDesc::SELECTED_ROWS: return desc_.mutable_selected_rows(); - case VarDesc::LOD_TENSOR: + case proto::VarDesc::LOD_TENSOR: return desc_.mutable_lod_tensor()->mutable_tensor(); - case VarDesc::LOD_TENSOR_ARRAY: + case proto::VarDesc::LOD_TENSOR_ARRAY: return desc_.mutable_tensor_array()->mutable_tensor(); default: PADDLE_THROW("Unexpected branch."); diff --git a/paddle/framework/var_desc.h b/paddle/framework/var_desc.h index 5cf4608944..4fd2abe7fb 100644 --- a/paddle/framework/var_desc.h +++ b/paddle/framework/var_desc.h @@ -53,44 +53,44 @@ inline void VectorToRepeated(const std::vector &vec, } } -class VarDescBind { +class VarDesc { public: - explicit VarDescBind(const std::string &name) { + explicit VarDesc(const std::string &name) { desc_.set_name(name); - desc_.set_type(VarDesc::LOD_TENSOR); + desc_.set_type(proto::VarDesc::LOD_TENSOR); } - explicit VarDescBind(const VarDesc &desc) : desc_(desc) {} + explicit VarDesc(const proto::VarDesc &desc) : desc_(desc) {} - VarDesc *Proto() { return &desc_; } + proto::VarDesc *Proto() { return &desc_; } std::string Name() const { return desc_.name(); } void SetShape(const std::vector &dims); - void SetDataType(DataType data_type); + void SetDataType(proto::DataType data_type); std::vector Shape() const; - DataType GetDataType() const; + proto::DataType GetDataType() const; void SetLoDLevel(int32_t lod_level); int32_t GetLodLevel() const; - VarDesc::VarType GetType() const; + proto::VarDesc::VarType GetType() const; - void SetType(VarDesc::VarType type); + void SetType(proto::VarDesc::VarType type); bool Persistable() const { return desc_.persistable(); } void SetPersistable(bool persistable) { desc_.set_persistable(persistable); } private: - const TensorDesc &tensor_desc() const; - TensorDesc *mutable_tensor_desc(); + const proto::TensorDesc &tensor_desc() const; + proto::TensorDesc *mutable_tensor_desc(); - VarDesc desc_; + proto::VarDesc desc_; }; } // namespace framework } // namespace paddle diff --git a/paddle/framework/var_type.h b/paddle/framework/var_type.h index 0f19870bec..0e6ea8dc69 100644 --- a/paddle/framework/var_type.h +++ b/paddle/framework/var_type.h @@ -1,16 +1,16 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
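With the `Bind` suffix gone, building a variable description reads as below; a sketch assuming the `var_desc.h` above (`proto::DataType::FP32` is the enum value the tests in this diff use):

```c++
#include "paddle/framework/var_desc.h"

void DescribeWeight() {
  // The constructor defaults the type to proto::VarDesc::LOD_TENSOR.
  paddle::framework::VarDesc w("W");
  w.SetDataType(paddle::framework::proto::DataType::FP32);
  w.SetShape({784, 100});
  w.SetLoDLevel(0);

  // The raw protobuf message now lives in the proto:: namespace.
  paddle::framework::proto::VarDesc* raw = w.Proto();
  (void)raw;
}
```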
*/ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once #include "paddle/framework/framework.pb.h" @@ -20,15 +20,15 @@ namespace paddle { namespace framework { -inline VarDesc::VarType ToVarType(std::type_index type) { +inline proto::VarDesc::VarType ToVarType(std::type_index type) { if (type.hash_code() == typeid(LoDTensor).hash_code()) { - return VarDesc_VarType_LOD_TENSOR; + return proto::VarDesc_VarType_LOD_TENSOR; } else if (type.hash_code() == typeid(LoDRankTable).hash_code()) { - return VarDesc_VarType_LOD_RANK_TABLE; + return proto::VarDesc_VarType_LOD_RANK_TABLE; } else if (type.hash_code() == typeid(LoDTensorArray).hash_code()) { - return VarDesc_VarType_LOD_TENSOR_ARRAY; + return proto::VarDesc_VarType_LOD_TENSOR_ARRAY; } else if (type.hash_code() == typeid(SelectedRows).hash_code()) { - return VarDesc_VarType_SELECTED_ROWS; + return proto::VarDesc_VarType_SELECTED_ROWS; } else { PADDLE_THROW("ToVarType:Unsupported type %s", type.name()); } @@ -37,16 +37,16 @@ inline VarDesc::VarType ToVarType(std::type_index type) { template inline void VisitVarType(const Variable& var, Visitor visitor) { switch (ToVarType(var.Type())) { - case VarDesc_VarType_LOD_TENSOR: + case proto::VarDesc_VarType_LOD_TENSOR: visitor(var.Get()); return; - case VarDesc_VarType_LOD_RANK_TABLE: + case proto::VarDesc_VarType_LOD_RANK_TABLE: visitor(var.Get()); return; - case VarDesc_VarType_LOD_TENSOR_ARRAY: + case proto::VarDesc_VarType_LOD_TENSOR_ARRAY: visitor(var.Get()); return; - case VarDesc_VarType_SELECTED_ROWS: + case proto::VarDesc_VarType_SELECTED_ROWS: visitor(var.Get()); return; default: diff --git a/paddle/framework/var_type_inference.h b/paddle/framework/var_type_inference.h index 32abbeb334..6c11f2fee7 100644 --- a/paddle/framework/var_type_inference.h +++ b/paddle/framework/var_type_inference.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
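The framework hunks above all apply one mechanical rename: the `Bind` suffix is dropped from the C++ wrapper classes (`OpDescBind` becomes `OpDesc`, `BlockDescBind` becomes `BlockDesc`, `VarDescBind` becomes `VarDesc`), and the protobuf-generated types they wrap are now reached through the nested `proto::` namespace. The angle-bracketed arguments of the `var.Get()` calls in `VisitVarType` were lost in extraction; they follow directly from the adjacent `typeid` checks. A minimal caller-side sketch under those assumptions:

```cpp
#include "paddle/framework/var_desc.h"
#include "paddle/framework/var_type.h"

namespace paddle {
namespace framework {

// Visitor whose overloads receive the results of var.Get<LoDTensor>(),
// var.Get<LoDRankTable>(), var.Get<LoDTensorArray>() and
// var.Get<SelectedRows>(), i.e. the template arguments dropped above.
struct ShapeDumper {
  void operator()(const LoDTensor& t) { /* inspect t.dims() */ }
  void operator()(const LoDRankTable&) {}
  void operator()(const LoDTensorArray&) {}
  void operator()(const SelectedRows&) {}
};

void Example(const Variable& var) {
  VarDesc w("fc_0.w");                    // was: VarDescBind w("fc_0.w");
  w.SetType(proto::VarDesc::LOD_TENSOR);  // enum is now proto::-qualified
  w.SetShape({64, 128});
  VisitVarType(var, ShapeDumper());       // dispatches through ToVarType()
}

}  // namespace framework
}  // namespace paddle
```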
*/ #pragma once #include "paddle/framework/type_defs.h" @@ -21,8 +21,7 @@ namespace framework { class VarTypeInference { public: virtual ~VarTypeInference() {} - virtual void operator()(const OpDescBind& op_desc, - BlockDescBind* block) const = 0; + virtual void operator()(const OpDesc& op_desc, BlockDesc* block) const = 0; }; } // namespace framework diff --git a/paddle/framework/var_type_inference_test.cc b/paddle/framework/var_type_inference_test.cc index 9035e63fa4..fa6018b1c5 100644 --- a/paddle/framework/var_type_inference_test.cc +++ b/paddle/framework/var_type_inference_test.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/framework/var_type_inference.h" #include "gtest/gtest.h" @@ -33,17 +33,16 @@ class SumOpMaker : public OpProtoAndCheckerMaker { class SumOpVarTypeInference : public VarTypeInference { public: - void operator()(const OpDescBind &op_desc, - BlockDescBind *block) const override { + void operator()(const OpDesc &op_desc, BlockDesc *block) const override { auto &inputs = op_desc.Input("X"); - auto default_var_type = VarDesc::SELECTED_ROWS; + auto default_var_type = proto::VarDesc::SELECTED_ROWS; bool any_input_is_lod_tensor = std::any_of( inputs.begin(), inputs.end(), [block](const std::string &name) { - return block->Var(name)->GetType() == VarDesc::LOD_TENSOR; + return block->Var(name)->GetType() == proto::VarDesc::LOD_TENSOR; }); if (any_input_is_lod_tensor) { - default_var_type = VarDesc::LOD_TENSOR; + default_var_type = proto::VarDesc::LOD_TENSOR; } auto out_var_name = op_desc.Output("Out").front(); @@ -62,43 +61,43 @@ namespace paddle { namespace framework { TEST(InferVarType, sum_op) { - ProgramDescBind prog; + ProgramDesc prog; auto *op = prog.MutableBlock(0)->AppendOp(); op->SetType("sum"); op->SetInput("X", {"test_a", "test_b", "test_c"}); op->SetOutput("Out", {"test_out"}); - prog.MutableBlock(0)->Var("test_a")->SetType(VarDesc::SELECTED_ROWS); - prog.MutableBlock(0)->Var("test_b")->SetType(VarDesc::SELECTED_ROWS); - prog.MutableBlock(0)->Var("test_c")->SetType(VarDesc::SELECTED_ROWS); + prog.MutableBlock(0)->Var("test_a")->SetType(proto::VarDesc::SELECTED_ROWS); + prog.MutableBlock(0)->Var("test_b")->SetType(proto::VarDesc::SELECTED_ROWS); + prog.MutableBlock(0)->Var("test_c")->SetType(proto::VarDesc::SELECTED_ROWS); prog.MutableBlock(0)->Var("test_out"); op->InferVarType(prog.MutableBlock(0)); - ASSERT_EQ(VarDesc::SELECTED_ROWS, + 
ASSERT_EQ(proto::VarDesc::SELECTED_ROWS, prog.MutableBlock(0)->Var("test_out")->GetType()); - prog.MutableBlock(0)->Var("test_b")->SetType(VarDesc::LOD_TENSOR); + prog.MutableBlock(0)->Var("test_b")->SetType(proto::VarDesc::LOD_TENSOR); op->InferVarType(prog.MutableBlock(0)); - ASSERT_EQ(VarDesc::LOD_TENSOR, + ASSERT_EQ(proto::VarDesc::LOD_TENSOR, prog.MutableBlock(0)->Var("test_out")->GetType()); } TEST(InferVarType, sum_op_without_infer_var_type) { - ProgramDescBind prog; + ProgramDesc prog; auto *op = prog.MutableBlock(0)->AppendOp(); op->SetType("sum_without_infer_var_type"); op->SetInput("X", {"test2_a", "test2_b", "test2_c"}); op->SetOutput("Out", {"test2_out"}); - prog.MutableBlock(0)->Var("test2_a")->SetType(VarDesc::SELECTED_ROWS); - prog.MutableBlock(0)->Var("test2_b")->SetType(VarDesc::SELECTED_ROWS); - prog.MutableBlock(0)->Var("test2_c")->SetType(VarDesc::SELECTED_ROWS); + prog.MutableBlock(0)->Var("test2_a")->SetType(proto::VarDesc::SELECTED_ROWS); + prog.MutableBlock(0)->Var("test2_b")->SetType(proto::VarDesc::SELECTED_ROWS); + prog.MutableBlock(0)->Var("test2_c")->SetType(proto::VarDesc::SELECTED_ROWS); prog.MutableBlock(0)->Var("test2_out"); op->InferVarType(prog.MutableBlock(0)); - ASSERT_EQ(VarDesc_VarType_LOD_TENSOR, + ASSERT_EQ(proto::VarDesc_VarType_LOD_TENSOR, prog.MutableBlock(0)->Var("test2_out")->GetType()); } diff --git a/paddle/function/GemmConvOp.cpp b/paddle/function/GemmConvOp.cpp index 8d34eee886..de7b70e271 100644 --- a/paddle/function/GemmConvOp.cpp +++ b/paddle/function/GemmConvOp.cpp @@ -126,6 +126,11 @@ public: inputData += inputChannels * inputHeight * inputWidth; outputData += outputChannels * outputHeight * outputWidth; } +#ifdef PADDLE_MOBILE_INFERENCE + if (Device == DEVICE_TYPE_CPU) { + memory_.reset(); + } +#endif } }; diff --git a/paddle/gserver/layers/ROIPoolLayer.cpp b/paddle/gserver/layers/ROIPoolLayer.cpp index 2c8256b91c..7d7c30b4d8 100644 --- a/paddle/gserver/layers/ROIPoolLayer.cpp +++ b/paddle/gserver/layers/ROIPoolLayer.cpp @@ -84,12 +84,15 @@ void ROIPoolLayer::forward(PassType passType) { size_t poolChannelOffset = pooledHeight_ * pooledWidth_; real* outputData = outputValue->getData(); - Matrix::resizeOrCreate(maxIdxs_, - numROIs, - channels_ * pooledHeight_ * pooledWidth_, - false, - false); - real* argmaxData = maxIdxs_->getData(); + real* argmaxData = nullptr; + if (passType != PASS_TEST) { + Matrix::resizeOrCreate(maxIdxs_, + numROIs, + channels_ * pooledHeight_ * pooledWidth_, + false, + false); + argmaxData = maxIdxs_->getData(); + } for (size_t n = 0; n < numROIs; ++n) { // the first five elememts of each RoI should be: @@ -128,14 +131,18 @@ void ROIPoolLayer::forward(PassType passType) { bool isEmpty = (hend <= hstart) || (wend <= wstart); size_t poolIndex = ph * pooledWidth_ + pw; outputData[poolIndex] = isEmpty ? 
0 : -FLT_MAX; - argmaxData[poolIndex] = -1; + if (argmaxData) { + argmaxData[poolIndex] = -1; + } for (size_t h = hstart; h < hend; ++h) { for (size_t w = wstart; w < wend; ++w) { size_t index = h * width_ + w; if (batchData[index] > outputData[poolIndex]) { outputData[poolIndex] = batchData[index]; - argmaxData[poolIndex] = index; + if (argmaxData) { + argmaxData[poolIndex] = index; + } } } } @@ -143,7 +150,9 @@ void ROIPoolLayer::forward(PassType passType) { } batchData += channelOffset; outputData += poolChannelOffset; - argmaxData += poolChannelOffset; + if (argmaxData) { + argmaxData += poolChannelOffset; + } } bottomROIs += roiOffset; } diff --git a/paddle/gserver/layers/SequenceToBatch.cpp b/paddle/gserver/layers/SequenceToBatch.cpp index 5fa7b6f488..6b769378d2 100644 --- a/paddle/gserver/layers/SequenceToBatch.cpp +++ b/paddle/gserver/layers/SequenceToBatch.cpp @@ -171,12 +171,31 @@ void SequenceToBatch::sequence2BatchCopy(Matrix &batch, hl_sequence2batch_copy( batchData, seqData, idxData, seqWidth, batchCount, seq2batch); } else { - for (int i = 0; i < batchCount; ++i) { - if (seq2batch) { + if (seq2batch) { +#ifdef PADDLE_USE_MKLML + const int blockMemSize = 8 * 1024; + const int blockSize = blockMemSize / sizeof(real); +#pragma omp parallel for collapse(2) + for (int i = 0; i < batchCount; ++i) { + for (int j = 0; j < seqWidth; j += blockSize) { + memcpy(batch.rowBuf(i) + j, + sequence.rowBuf(idxData[i]) + j, + (j + blockSize > seqWidth) ? (seqWidth - j) * sizeof(real) + : blockMemSize); + } + } +#else + for (int i = 0; i < batchCount; ++i) { memcpy(batch.rowBuf(i), sequence.rowBuf(idxData[i]), seqWidth * sizeof(real)); - } else { + } +#endif + } else { +#ifdef PADDLE_USE_MKLML +#pragma omp parallel for +#endif + for (int i = 0; i < batchCount; ++i) { memcpy(sequence.rowBuf(idxData[i]), batch.rowBuf(i), seqWidth * sizeof(real)); diff --git a/paddle/math/float16.h b/paddle/math/float16.h index 76ad3a0123..efebbce504 100644 --- a/paddle/math/float16.h +++ b/paddle/math/float16.h @@ -79,7 +79,7 @@ public: #ifdef PADDLE_CUDA_FP16 HOSTDEVICE inline explicit float16(const half& h) { #if CUDA_VERSION >= 9000 - x = reinterpret_cast<__half_raw*>(&h)->x; + x = reinterpret_cast<__half_raw*>(const_cast(&h))->x; #else x = h.x; #endif // CUDA_VERSION >= 9000 @@ -145,7 +145,7 @@ public: #ifdef PADDLE_CUDA_FP16 HOSTDEVICE inline float16& operator=(const half& rhs) { #if CUDA_VERSION >= 9000 - x = reinterpret_cast<__half_raw*>(&rhs)->x; + x = reinterpret_cast<__half_raw*>(const_cast(&rhs))->x; #else x = rhs.x; #endif diff --git a/paddle/math/tests/test_matrixCompare.cpp b/paddle/math/tests/test_matrixCompare.cpp index 7e5a1db44a..afb8d9d599 100644 --- a/paddle/math/tests/test_matrixCompare.cpp +++ b/paddle/math/tests/test_matrixCompare.cpp @@ -244,7 +244,7 @@ TEST(Matrix, unary) { LOG(WARNING) << "This version of PaddlePaddle was not built with LAPACK" << "support so we cannot test matrix inverse. 
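In the `float16.h` hunk above, extraction ate the template argument of `const_cast`. Reconstructing from the surrounding types, the patched constructor should read as below: since CUDA 9 hides `half`'s storage behind `__half_raw`, and `&h` is a pointer-to-const, constness has to be stripped before the `reinterpret_cast` is legal.

```cpp
// Assumed reconstruction of the CUDA 9 branch (h is a const half&):
HOSTDEVICE inline explicit float16(const half& h) {
#if CUDA_VERSION >= 9000
  // reinterpret_cast may not drop const, so cast it away explicitly first.
  x = reinterpret_cast<__half_raw*>(const_cast<half*>(&h))->x;
#else
  x = h.x;  // pre-CUDA-9 half still exposes its raw bits directly
#endif
}
```

The same reconstruction applies to the `operator=(const half& rhs)` hunk next to it.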
To test " << "matrix inverse, please install LAPACKE " - << "and MKL/Openblas/ATLAS, and re-build PaddlePaddle."; + << "and MKL/Openblas, and re-build PaddlePaddle."; #endif } } diff --git a/paddle/memory/README.md b/paddle/memory/README.md index 6cb003c50b..7cf61d089b 100644 --- a/paddle/memory/README.md +++ b/paddle/memory/README.md @@ -12,13 +12,13 @@ p = memory::Alloc(platform::CPUPlace(), 4*1024); To allocate 4KB memory on the 3rd GPU: ```cpp -p = memory::Alloc(platform::GPUPlace(2), 4*1024); +p = memory::Alloc(platform::CUDAPlace(2), 4*1024); ``` To free memory and check the so-far used amount of memory on a place: ```cpp -auto pl = platform::GPUPlace(0); +auto pl = platform::CUDAPlace(0); p = memory::Alloc(pl, 4*1024); cout << memory::Used(pl); memory::Free(pl, p); @@ -36,7 +36,7 @@ template size_t Used(Place); } // namespace memory ``` -These function templates have specializations on either `platform::CPUPlace` or `platform::GPUPlace`: +These function templates have specializations on either `platform::CPUPlace` or `platform::CUDAPlace`: ```cpp template<> @@ -49,7 +49,7 @@ and ```cpp template<> -void Alloc(GPUPlace p, size_t size) { +void Alloc(CUDAPlace p, size_t size) { return GetGPUBuddyAllocator(p.id)->Alloc(size); } ``` @@ -122,7 +122,7 @@ There are two implementations of `Context`: 1. [`CPUContext`](https://github.com/caffe2/caffe2/blob/v0.7.0/caffe2/core/context.h#L105), whose [`New` method](https://github.com/caffe2/caffe2/blob/v0.7.0/caffe2/core/context.h#L131) calls [`g_cpu_allocator.get()->New(size_t)`](https://github.com/caffe2/caffe2/blob/v0.7.0/caffe2/core/context.cc#L15) to allocate the memory. -1. [`CUDAContext`](https://github.com/caffe2/caffe2/blob/v0.7.0/caffe2/core/context_gpu.h#L99), which has a data member [`int gpu_id_`](https://github.com/caffe2/caffe2/blob/v0.7.0/caffe2/core/context_gpu.h#L202). This looks very similar to class `majel::GPUPlace`, who also has an `int id_` data member. `CUDAContext::New(size_t)` calls [`g_cub_allocator->DeviceAllocate(&ptr, nbytes)`](https://github.com/caffe2/caffe2/blob/v0.7.0/caffe2/core/context_gpu.cu#L355) to allocate the memory. +1. [`CUDAContext`](https://github.com/caffe2/caffe2/blob/v0.7.0/caffe2/core/context_gpu.h#L99), which has a data member [`int gpu_id_`](https://github.com/caffe2/caffe2/blob/v0.7.0/caffe2/core/context_gpu.h#L202). This looks very similar to class `majel::CUDAPlace`, who also has an `int id_` data member. `CUDAContext::New(size_t)` calls [`g_cub_allocator->DeviceAllocate(&ptr, nbytes)`](https://github.com/caffe2/caffe2/blob/v0.7.0/caffe2/core/context_gpu.cu#L355) to allocate the memory. ### Majel diff --git a/paddle/memory/detail/buddy_allocator.cc b/paddle/memory/detail/buddy_allocator.cc index 64ee538038..2bc2c06a15 100644 --- a/paddle/memory/detail/buddy_allocator.cc +++ b/paddle/memory/detail/buddy_allocator.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/memory/detail/buddy_allocator.h" #include "glog/logging.h" diff --git a/paddle/memory/detail/buddy_allocator.h b/paddle/memory/detail/buddy_allocator.h index 9c41378483..4e0135dd65 100644 --- a/paddle/memory/detail/buddy_allocator.h +++ b/paddle/memory/detail/buddy_allocator.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once diff --git a/paddle/memory/detail/memory_block.cc b/paddle/memory/detail/memory_block.cc index fc40993208..f50eceba09 100644 --- a/paddle/memory/detail/memory_block.cc +++ b/paddle/memory/detail/memory_block.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/memory/detail/memory_block.h" #include "paddle/memory/detail/meta_cache.h" diff --git a/paddle/memory/detail/memory_block.h b/paddle/memory/detail/memory_block.h index a5168b519f..a4ca51b31b 100644 --- a/paddle/memory/detail/memory_block.h +++ b/paddle/memory/detail/memory_block.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once diff --git a/paddle/memory/detail/meta_cache.cc b/paddle/memory/detail/meta_cache.cc index 7e2f92b00c..2bacca7510 100644 --- a/paddle/memory/detail/meta_cache.cc +++ b/paddle/memory/detail/meta_cache.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/memory/detail/meta_cache.h" #include "glog/logging.h" diff --git a/paddle/memory/detail/meta_cache.h b/paddle/memory/detail/meta_cache.h index cf58156442..db8ffd49ae 100644 --- a/paddle/memory/detail/meta_cache.h +++ b/paddle/memory/detail/meta_cache.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once diff --git a/paddle/memory/detail/meta_data.cc b/paddle/memory/detail/meta_data.cc index 70c5c1f439..dc57d4d237 100644 --- a/paddle/memory/detail/meta_data.cc +++ b/paddle/memory/detail/meta_data.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/memory/detail/meta_data.h" diff --git a/paddle/memory/detail/meta_data.h b/paddle/memory/detail/meta_data.h index 628cf1f2e3..6b83c42eb8 100644 --- a/paddle/memory/detail/meta_data.h +++ b/paddle/memory/detail/meta_data.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
*/ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once diff --git a/paddle/memory/detail/system_allocator.cc b/paddle/memory/detail/system_allocator.cc index 6a815a1b57..509250debc 100644 --- a/paddle/memory/detail/system_allocator.cc +++ b/paddle/memory/detail/system_allocator.cc @@ -19,6 +19,7 @@ limitations under the License. */ #include // for malloc and free #include // for mlock and munlock +#include // for std::max #include "gflags/gflags.h" @@ -28,7 +29,7 @@ limitations under the License. */ // of memory available to the system for paging. So, by default, we // should set false to use_pinned_memory. DEFINE_bool(use_pinned_memory, true, "If set, allocate cpu pinned memory."); - +DECLARE_double(fraction_of_gpu_memory_to_use); namespace paddle { namespace memory { namespace detail { @@ -77,45 +78,20 @@ void* GPUAllocator::Alloc(size_t& index, size_t size) { // CUDA documentation doesn't explain if cudaMalloc returns nullptr // if size is 0. We just make sure it does. if (size <= 0) return nullptr; - - size_t available = 0; - size_t capacity = 0; - paddle::platform::GpuMemoryUsage(available, capacity); - - // Reserve memory for page tables, etc. - size_t reserving = 0.05 * capacity + paddle::platform::GpuMinChunkSize(); - size_t usable = available > reserving ? available - reserving : 0; - - // If remaining size no less than expected size, using general - // cudaMalloc to allocate GPU memory. - void* p = 0; - if (size <= usable) { - cudaError_t result = cudaMalloc(&p, size); - if (result == cudaSuccess) { - index = 0; - gpu_alloc_size_ += size; - return p; - } - } - - // If remaining size less than expected size or cudaMalloc failed, - // cudaMallocHost will be considered as a fallback allocator. - // - // NOTE: here, we use GpuMaxAllocSize() as the maximum memory size - // of host fallback allocation. Allocates too much would reduce - // the amount of memory available to the underlying system for paging. - usable = paddle::platform::GpuMaxAllocSize() - fallback_alloc_size_; - - if (size > usable) return nullptr; - - cudaError_t result = cudaMallocHost(&p, size); + void* p; + cudaError_t result = cudaMalloc(&p, size); if (result == cudaSuccess) { - index = 1; - fallback_alloc_size_ += size; + index = 0; + gpu_alloc_size_ += size; return p; + } else { + LOG(WARNING) + << "Cannot malloc " << size / 1024.0 / 1024.0 + << " MB GPU memory. Please shrink FLAGS_fraction_of_gpu_memory_to_use " + "environment variable to a lower value. 
Current value is " + << FLAGS_fraction_of_gpu_memory_to_use; + return nullptr; } - - return nullptr; } void GPUAllocator::Free(void* p, size_t size, size_t index) { diff --git a/paddle/memory/memcpy.cc b/paddle/memory/memcpy.cc index 1df88a6da9..b46141aafd 100644 --- a/paddle/memory/memcpy.cc +++ b/paddle/memory/memcpy.cc @@ -28,31 +28,25 @@ void Copy(platform::CPUPlace, void* dst, #ifdef PADDLE_WITH_CUDA template <> -void Copy(platform::CPUPlace dst_place, - void* dst, - platform::GPUPlace src_place, - const void* src, size_t num, - cudaStream_t stream) { +void Copy( + platform::CPUPlace dst_place, void* dst, platform::CUDAPlace src_place, + const void* src, size_t num, cudaStream_t stream) { platform::SetDeviceId(src_place.device); platform::GpuMemcpyAsync(dst, src, num, cudaMemcpyDeviceToHost, stream); } template <> -void Copy(platform::GPUPlace dst_place, - void* dst, - platform::CPUPlace src_place, - const void* src, size_t num, - cudaStream_t stream) { +void Copy( + platform::CUDAPlace dst_place, void* dst, platform::CPUPlace src_place, + const void* src, size_t num, cudaStream_t stream) { platform::SetDeviceId(dst_place.device); platform::GpuMemcpyAsync(dst, src, num, cudaMemcpyHostToDevice, stream); } template <> -void Copy(platform::GPUPlace dst_place, - void* dst, - platform::GPUPlace src_place, - const void* src, size_t num, - cudaStream_t stream) { +void Copy( + platform::CUDAPlace dst_place, void* dst, platform::CUDAPlace src_place, + const void* src, size_t num, cudaStream_t stream) { if (dst_place == src_place) { platform::SetDeviceId(src_place.device); platform::GpuMemcpyAsync(dst, src, num, cudaMemcpyDeviceToDevice, stream); @@ -62,33 +56,6 @@ void Copy(platform::GPUPlace dst_place, } } -template <> -void Copy(platform::CPUPlace dst_place, - void* dst, - platform::GPUPlace src_place, - const void* src, size_t num) { - platform::SetDeviceId(src_place.device); - platform::GpuMemcpySync(dst, src, num, cudaMemcpyDeviceToHost); -} - -template <> -void Copy(platform::GPUPlace dst_place, - void* dst, - platform::CPUPlace src_place, - const void* src, size_t num) { - platform::SetDeviceId(dst_place.device); - platform::GpuMemcpySync(dst, src, num, cudaMemcpyHostToDevice); -} - -template <> -void Copy(platform::GPUPlace dst_place, - void* dst, - platform::GPUPlace src_place, - const void* src, size_t num) { - platform::SetDeviceId(dst_place.device); - platform::GpuMemcpySync(dst, src, num, cudaMemcpyDeviceToDevice); -} - #endif } // namespace memory diff --git a/paddle/memory/memory.cc b/paddle/memory/memory.cc index 9cafdfda75..c4bb6baee7 100644 --- a/paddle/memory/memory.cc +++ b/paddle/memory/memory.cc @@ -83,12 +83,12 @@ BuddyAllocator* GetGPUBuddyAllocator(int gpu_id) { } template <> -size_t Used(platform::GPUPlace place) { +size_t Used(platform::CUDAPlace place) { return GetGPUBuddyAllocator(place.device)->Used(); } template <> -void* Alloc(platform::GPUPlace place, size_t size) { +void* Alloc(platform::CUDAPlace place, size_t size) { auto* buddy_allocator = GetGPUBuddyAllocator(place.device); auto* ptr = buddy_allocator->Alloc(size); if (ptr == nullptr) { @@ -101,14 +101,14 @@ void* Alloc(platform::GPUPlace place, size_t size) { LOG(WARNING) << "total " << total; LOG(WARNING) << "GpuMinChunkSize " << platform::GpuMinChunkSize(); LOG(WARNING) << "GpuMaxChunkSize " << platform::GpuMaxChunkSize(); - LOG(WARNING) << "GPU memory used: " << Used(place); + LOG(WARNING) << "GPU memory used: " << Used(place); platform::SetDeviceId(cur_dev); } return ptr; } template <> -void 
Free(platform::GPUPlace place, void* p) { +void Free(platform::CUDAPlace place, void* p) { GetGPUBuddyAllocator(place.device)->Free(p); } diff --git a/paddle/memory/memory_test.cc b/paddle/memory/memory_test.cc index 2444931e26..f476bf7126 100644 --- a/paddle/memory/memory_test.cc +++ b/paddle/memory/memory_test.cc @@ -82,7 +82,7 @@ TEST(BuddyAllocator, CPUMultAlloc) { #ifdef PADDLE_WITH_CUDA -size_t align(size_t size, paddle::platform::GPUPlace place) { +size_t align(size_t size, paddle::platform::CUDAPlace place) { size += sizeof(paddle::memory::detail::Metadata); size_t alignment = paddle::platform::GpuMinChunkSize(); size_t remaining = size % alignment; @@ -94,7 +94,7 @@ TEST(BuddyAllocator, GPUAllocation) { EXPECT_EQ(p, nullptr); - paddle::platform::GPUPlace gpu(0); + paddle::platform::CUDAPlace gpu(0); p = paddle::memory::Alloc(gpu, 4096); EXPECT_NE(p, nullptr); @@ -103,7 +103,7 @@ TEST(BuddyAllocator, GPUAllocation) { } TEST(BuddyAllocator, GPUMultAlloc) { - paddle::platform::GPUPlace gpu; + paddle::platform::CUDAPlace gpu; std::unordered_map ps; diff --git a/paddle/operators/accuracy_op.cc b/paddle/operators/accuracy_op.cc index 76da21c472..d7baa6e905 100644 --- a/paddle/operators/accuracy_op.cc +++ b/paddle/operators/accuracy_op.cc @@ -53,7 +53,7 @@ class AccuracyOp : public framework::OperatorWithKernel { } protected: - framework::OpKernelType GetKernelType( + framework::OpKernelType GetActualKernelType( const framework::ExecutionContext &ctx) const override { return framework::OpKernelType( framework::ToDataType(ctx.Input("Out")->type()), @@ -63,8 +63,7 @@ class AccuracyOp : public framework::OperatorWithKernel { class AccuracyOpMaker : public framework::OpProtoAndCheckerMaker { public: - AccuracyOpMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + AccuracyOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { // TODO(typhoonzero): support both inference value and indices. 
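Across the memory hunks above, `platform::GPUPlace` is renamed to `platform::CUDAPlace` (the explicit specialization lists in `memcpy.cc`, e.g. `Copy<platform::CPUPlace, platform::CUDAPlace>`, lost their angle brackets in extraction), and `GPUAllocator::Alloc` gives up the pinned-host fallback: a failed `cudaMalloc` now logs a hint about `FLAGS_fraction_of_gpu_memory_to_use` and returns `nullptr`. A caller-side sketch of the renamed API under those assumptions:

```cpp
#include "paddle/memory/memory.h"
#include "paddle/platform/place.h"

// Allocate and release 4 MB on device 0 with the renamed place type.
void Demo() {
  paddle::platform::CUDAPlace gpu(0);  // was: platform::GPUPlace gpu(0);
  void* p = paddle::memory::Alloc(gpu, 4 << 20);
  if (p == nullptr) {
    // The allocator already warned that FLAGS_fraction_of_gpu_memory_to_use
    // may need lowering; there is no silent cudaMallocHost fallback anymore.
    return;
  }
  paddle::memory::Free(gpu, p);
}
```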
AddInput("Out", "The network output of topk (inferences)"); diff --git a/paddle/operators/accuracy_op.cu b/paddle/operators/accuracy_op.cu index 539a935302..0aadd5af41 100644 --- a/paddle/operators/accuracy_op.cu +++ b/paddle/operators/accuracy_op.cu @@ -26,7 +26,7 @@ template __global__ void AccuracyCudaKernel(const int N, const int D, const int64_t* Xdata, const int64_t* labeldata, int* correct_data, - float* accuracy) { + float* accuracy, int* total_data) { int count = 0; __shared__ int total[BlockSize]; @@ -47,6 +47,7 @@ __global__ void AccuracyCudaKernel(const int N, const int D, if (threadIdx.x == 0) { *correct_data = result; *accuracy = static_cast(result) / static_cast(N); + *total_data = N; } } @@ -55,7 +56,7 @@ class AccuracyOpCUDAKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), - "It must use GPUPlace."); + "It must use CUDAPlace."); auto* inference = ctx.Input("Out"); auto* indices = ctx.Input("Indices"); auto* label = ctx.Input("Label"); @@ -80,22 +81,11 @@ class AccuracyOpCUDAKernel : public framework::OpKernel { if (num_samples == 0) { return; } - platform::GpuMemcpyAsync(total_data, &num_samples, sizeof(int), - cudaMemcpyHostToDevice, stream); AccuracyCudaKernel< PADDLE_CUDA_NUM_THREADS><<<1, PADDLE_CUDA_NUM_THREADS, 0, stream>>>( num_samples, infer_width, indices_data, label_data, correct_data, - accuracy_data); - - int d_num_samples, d_num_correct; - float d_accuracy; - platform::GpuMemcpyAsync(&d_num_correct, correct_data, sizeof(int), - cudaMemcpyDeviceToHost, stream); - platform::GpuMemcpyAsync(&d_num_samples, total_data, sizeof(int), - cudaMemcpyDeviceToHost, stream); - platform::GpuMemcpyAsync(&d_accuracy, accuracy_data, sizeof(float), - cudaMemcpyDeviceToHost, stream); + accuracy_data, total_data); } }; diff --git a/paddle/operators/activation_op.cc b/paddle/operators/activation_op.cc index 63490f0ec9..4188858a90 100644 --- a/paddle/operators/activation_op.cc +++ b/paddle/operators/activation_op.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ #include "paddle/operators/activation_op.h" @@ -22,8 +22,8 @@ class ActivationOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext *ctx) const override { - ctx->SetOutputDim("Y", ctx->GetInputDim("X")); - ctx->ShareLoD("X", /*->*/ "Y"); + ctx->SetOutputDim("Out", ctx->GetInputDim("X")); + ctx->ShareLoD("X", /*->*/ "Out"); } }; @@ -32,21 +32,20 @@ class ActivationOpGrad : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext *ctx) const override { - ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("Y")); + ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("Out")); } }; class SigmoidOpMaker : public framework::OpProtoAndCheckerMaker { public: - SigmoidOpMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + SigmoidOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : framework::OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input of Sigmoid operator"); - AddOutput("Y", "Output of Sigmoid operator"); + AddOutput("Out", "Output of Sigmoid operator"); AddComment(R"DOC( Sigmoid Activation Operator -$$y = \frac{1}{1 + e^{-x}}$$ +$$out = \frac{1}{1 + e^{-x}}$$ )DOC"); } @@ -54,15 +53,14 @@ $$y = \frac{1}{1 + e^{-x}}$$ class LogSigmoidOpMaker : public framework::OpProtoAndCheckerMaker { public: - LogSigmoidOpMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + LogSigmoidOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : framework::OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input of LogSigmoid operator"); - AddOutput("Y", "Output of LogSigmoid operator"); + AddOutput("Out", "Output of LogSigmoid operator"); AddComment(R"DOC( Logsigmoid Activation Operator -$$y = \log \frac{1}{1 + e^{-x}}$$ +$$out = \log \frac{1}{1 + e^{-x}}$$ )DOC"); } @@ -70,14 +68,14 @@ $$y = \log \frac{1}{1 + e^{-x}}$$ class ExpOpMaker : public framework::OpProtoAndCheckerMaker { public: - ExpOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + ExpOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : framework::OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input of Exp operator"); - AddOutput("Y", "Output of Exp operator"); + AddOutput("Out", "Output of Exp operator"); AddComment(R"DOC( Exp Activation Operator. -$y = e^x$ +$out = e^x$ )DOC"); } @@ -85,14 +83,14 @@ $y = e^x$ class ReluOpMaker : public framework::OpProtoAndCheckerMaker { public: - ReluOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + ReluOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : framework::OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input of Relu operator"); - AddOutput("Y", "Output of Relu operator"); + AddOutput("Out", "Output of Relu operator"); AddComment(R"DOC( Relu Activation Operator. 
-$y = \max(x, 0)$ +$out = \max(x, 0)$ )DOC"); } @@ -100,16 +98,15 @@ $y = \max(x, 0)$ class LeakyReluOpMaker : public framework::OpProtoAndCheckerMaker { public: - LeakyReluOpMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + LeakyReluOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : framework::OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input of LeakyRelu operator"); - AddOutput("Y", "Output of LeakyRelu operator"); + AddOutput("Out", "Output of LeakyRelu operator"); AddAttr("alpha", "The small negative slope").SetDefault(0.02f); AddComment(R"DOC( LeakyRelu Activation Operator. -$y = \max(x, \alpha * x)$ +$out = \max(x, \alpha * x)$ )DOC"); } @@ -117,17 +114,16 @@ $y = \max(x, \alpha * x)$ class SoftShrinkOpMaker : public framework::OpProtoAndCheckerMaker { public: - SoftShrinkOpMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + SoftShrinkOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : framework::OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input of Softshrink operator"); - AddOutput("Y", "Output of Softshrink operator"); + AddOutput("Out", "Output of Softshrink operator"); AddAttr("lambda", "non-negative offset").SetDefault(0.5f); AddComment(R"DOC( Softshrink Activation Operator. $$ -y = \begin{cases} +out = \begin{cases} x - \lambda, \text{if } x > \lambda \\ x + \lambda, \text{if } x < -\lambda \\ 0, \text{otherwise} @@ -140,14 +136,14 @@ $$ class TanhOpMaker : public framework::OpProtoAndCheckerMaker { public: - TanhOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + TanhOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : framework::OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input of Tanh operator"); - AddOutput("Y", "Output of Tanh operator"); + AddOutput("Out", "Output of Tanh operator"); AddComment(R"DOC( Tanh Activation Operator. -$$y = \frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}$$ +$$out = \frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}$$ )DOC"); } @@ -155,15 +151,14 @@ $$y = \frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}$$ class TanhShrinkOpMaker : public framework::OpProtoAndCheckerMaker { public: - TanhShrinkOpMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + TanhShrinkOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : framework::OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input of TanhShrink operator"); - AddOutput("Y", "Output of TanhShrink operator"); + AddOutput("Out", "Output of TanhShrink operator"); AddComment(R"DOC( TanhShrink Activation Operator. -$$y = x - \frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}$$ +$$out = x - \frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}$$ )DOC"); } @@ -171,18 +166,17 @@ $$y = x - \frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}$$ class HardShrinkOpMaker : public framework::OpProtoAndCheckerMaker { public: - HardShrinkOpMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + HardShrinkOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : framework::OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input of HardShrink operator"); - AddOutput("Y", "Output of HardShrink operator"); + AddOutput("Out", "Output of HardShrink operator"); AddAttr("threshold", "The value of threshold for HardShrink") .SetDefault(0.5f); AddComment(R"DOC( HardShrink Activation Operator. 
$$ -y = \begin{cases} +out = \begin{cases} x, \text{if } x > \lambda \\ x, \text{if } x < -\lambda \\ 0, \text{otherwise} @@ -195,14 +189,14 @@ $$ class SqrtOpMaker : public framework::OpProtoAndCheckerMaker { public: - SqrtOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + SqrtOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : framework::OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input of Sqrt operator"); - AddOutput("Y", "Output of Sqrt operator"); + AddOutput("Out", "Output of Sqrt operator"); AddComment(R"DOC( Sqrt Activation Operator. -$y = \sqrt{x}$ +$out = \sqrt{x}$ )DOC"); } @@ -210,14 +204,14 @@ $y = \sqrt{x}$ class AbsOpMaker : public framework::OpProtoAndCheckerMaker { public: - AbsOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + AbsOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : framework::OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input of Abs operator"); - AddOutput("Y", "Output of Abs operator"); + AddOutput("Out", "Output of Abs operator"); AddComment(R"DOC( Abs Activation Operator. -$y = |x|$ +$out = |x|$ )DOC"); } @@ -225,14 +219,14 @@ $y = |x|$ class CeilOpMaker : public framework::OpProtoAndCheckerMaker { public: - CeilOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + CeilOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : framework::OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input of Ceil operator"); - AddOutput("Y", "Output of Ceil operator"); + AddOutput("Out", "Output of Ceil operator"); AddComment(R"DOC( Ceil Activation Operator. -$y = ceil(x)$ +$out = ceil(x)$ )DOC"); } @@ -240,14 +234,14 @@ $y = ceil(x)$ class FloorOpMaker : public framework::OpProtoAndCheckerMaker { public: - FloorOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + FloorOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : framework::OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input of Floor operator"); - AddOutput("Y", "Output of Floor operator"); + AddOutput("Out", "Output of Floor operator"); AddComment(R"DOC( Floor Activation Operator. -$y = floor(x)$ +$out = floor(x)$ )DOC"); } @@ -255,14 +249,14 @@ $y = floor(x)$ class RoundOpMaker : public framework::OpProtoAndCheckerMaker { public: - RoundOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + RoundOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : framework::OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input of Round operator"); - AddOutput("Y", "Output of Round operator"); + AddOutput("Out", "Output of Round operator"); AddComment(R"DOC( Round Activation Operator. -$y = [x]$ +$out = [x]$ )DOC"); } @@ -270,15 +264,14 @@ $y = [x]$ class ReciprocalOpMaker : public framework::OpProtoAndCheckerMaker { public: - ReciprocalOpMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + ReciprocalOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : framework::OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input of Reciprocal operator"); - AddOutput("Y", "Output of Reciprocal operator"); + AddOutput("Out", "Output of Reciprocal operator"); AddComment(R"DOC( Reciprocal Activation Operator. 
-$$y = \frac{1}{x}$$ +$$out = \frac{1}{x}$$ )DOC"); } @@ -286,14 +279,14 @@ $$y = \frac{1}{x}$$ class LogOpMaker : public framework::OpProtoAndCheckerMaker { public: - LogOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + LogOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : framework::OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input of Log operator"); - AddOutput("Y", "Output of Log operator"); + AddOutput("Out", "Output of Log operator"); AddComment(R"DOC( Log Activation Operator. -$y = \ln(x)$ +$out = \ln(x)$ Natural logarithm of x. @@ -303,14 +296,14 @@ Natural logarithm of x. class SquareOpMaker : public framework::OpProtoAndCheckerMaker { public: - SquareOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + SquareOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : framework::OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input of Square operator"); - AddOutput("Y", "Output of Square operator"); + AddOutput("Out", "Output of Square operator"); AddComment(R"DOC( Square Activation Operator. -$y = x^2$ +$out = x^2$ )DOC"); } @@ -318,15 +311,14 @@ $y = x^2$ class SoftplusOpMaker : public framework::OpProtoAndCheckerMaker { public: - SoftplusOpMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + SoftplusOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : framework::OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input of Softplus operator"); - AddOutput("Y", "Output of Softplus operator"); + AddOutput("Out", "Output of Softplus operator"); AddComment(R"DOC( Softplus Activation Operator. -$y = \ln(1 + e^{x})$ +$out = \ln(1 + e^{x})$ )DOC"); } @@ -334,15 +326,14 @@ $y = \ln(1 + e^{x})$ class SoftsignOpMaker : public framework::OpProtoAndCheckerMaker { public: - SoftsignOpMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + SoftsignOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : framework::OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input of Softsign operator"); - AddOutput("Y", "Output of Softsign operator"); + AddOutput("Out", "Output of Softsign operator"); AddComment(R"DOC( Softsign Activation Operator. -$$y = \frac{x}{1 + |x|}$$ +$$out = \frac{x}{1 + |x|}$$ )DOC"); } @@ -350,10 +341,10 @@ $$y = \frac{x}{1 + |x|}$$ class BReluOpMaker : public framework::OpProtoAndCheckerMaker { public: - BReluOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + BReluOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : framework::OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input of BRelu operator"); - AddOutput("Y", "Output of BRelu operator"); + AddOutput("Out", "Output of BRelu operator"); AddAttr("t_min", "The min marginal value of BRelu") .SetDefault(static_cast(0)); AddAttr("t_max", "The max marginal value of BRelu") @@ -361,7 +352,7 @@ class BReluOpMaker : public framework::OpProtoAndCheckerMaker { AddComment(R"DOC( BRelu Activation Operator. 
-$y = \max(\min(x, t_{min}), t_{max})$ +$out = \max(\min(x, t_{min}), t_{max})$ )DOC"); } @@ -369,17 +360,16 @@ $y = \max(\min(x, t_{min}), t_{max})$ class SoftReluOpMaker : public framework::OpProtoAndCheckerMaker { public: - SoftReluOpMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + SoftReluOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : framework::OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input of SoftRelu operator"); - AddOutput("Y", "Output of SoftRelu operator"); + AddOutput("Out", "Output of SoftRelu operator"); AddAttr("threshold", "The threshold value of SoftRelu") .SetDefault(40.0f); AddComment(R"DOC( SoftRelu Activation Operator. -$y = \ln(1 + \exp(\max(\min(x, threshold), threshold))$ +$out = \ln(1 + \exp(\max(\min(x, threshold), threshold))$ )DOC"); } @@ -387,10 +377,10 @@ $y = \ln(1 + \exp(\max(\min(x, threshold), threshold))$ class ELUOpMaker : public framework::OpProtoAndCheckerMaker { public: - ELUOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + ELUOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : framework::OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input of ELU operator"); - AddOutput("Y", "Output of ELU operator"); + AddOutput("Out", "Output of ELU operator"); AddAttr("alpha", "The alpha value of ELU").SetDefault(1.0f); AddComment(R"DOC( ELU Activation Operator. @@ -398,7 +388,7 @@ ELU Activation Operator. Applies the following element-wise computation on the input according to https://arxiv.org/abs/1511.07289. -$y = \max(0, x) + \min(0, \alpha * (e^x - 1))$ +$out = \max(0, x) + \min(0, \alpha * (e^x - 1))$ )DOC"); } @@ -406,16 +396,16 @@ $y = \max(0, x) + \min(0, \alpha * (e^x - 1))$ class Relu6OpMaker : public framework::OpProtoAndCheckerMaker { public: - Relu6OpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + Relu6OpMaker(OpProto *proto, OpAttrChecker *op_checker) + : framework::OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input of Relu6 operator"); - AddOutput("Y", "Output of Relu6 operator"); + AddOutput("Out", "Output of Relu6 operator"); AddAttr("threshold", "The threshold value of Relu6") .SetDefault(6.0f); AddComment(R"DOC( Relu6 Activation Operator. -$y = \min(\max(0, x), 6)$ +$out = \min(\max(0, x), 6)$ )DOC"); } @@ -423,15 +413,15 @@ $y = \min(\max(0, x), 6)$ class PowOpMaker : public framework::OpProtoAndCheckerMaker { public: - PowOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + PowOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : framework::OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input of Pow operator"); - AddOutput("Y", "Output of Pow operator"); + AddOutput("Out", "Output of Pow operator"); AddAttr("factor", "The exponential factor of Pow").SetDefault(1.0f); AddComment(R"DOC( Pow Activation Operator. 
-$y = x^{factor}$ +$out = x^{factor}$ )DOC"); } @@ -439,10 +429,10 @@ $y = x^{factor}$ class STanhOpMaker : public framework::OpProtoAndCheckerMaker { public: - STanhOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + STanhOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : framework::OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input of STanh operator"); - AddOutput("Y", "Output of STanh operator"); + AddOutput("Out", "Output of STanh operator"); AddAttr("scale_a", "The scale parameter of a for the input") .SetDefault(2.0f / 3.0f); AddAttr("scale_b", "The scale parameter of b for the input") @@ -450,7 +440,7 @@ class STanhOpMaker : public framework::OpProtoAndCheckerMaker { AddComment(R"DOC( STanh Activation Operator. -$$y = b * \frac{e^{a * x} - e^{-a * x}}{e^{a * x} + e^{-a * x}}$$ +$$out = b * \frac{e^{a * x} - e^{-a * x}}{e^{a * x} + e^{-a * x}}$$ )DOC"); } @@ -458,18 +448,17 @@ $$y = b * \frac{e^{a * x} - e^{-a * x}}{e^{a * x} + e^{-a * x}}$$ class ThresholdedReluOpMaker : public framework::OpProtoAndCheckerMaker { public: - ThresholdedReluOpMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + ThresholdedReluOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : framework::OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input of ThresholdedRelu operator"); - AddOutput("Y", "Output of ThresholdedRelu operator"); + AddOutput("Out", "Output of ThresholdedRelu operator"); AddAttr("threshold", "The threshold location of activation") .SetDefault(1.0f); AddComment(R"DOC( ThresholdedRelu Activation Operator. $$ -y = \begin{cases} +out = \begin{cases} x, \text{if } x > threshold \\ 0, \text{otherwise} \end{cases} @@ -481,11 +470,10 @@ $$ class HardSigmoidOpMaker : public framework::OpProtoAndCheckerMaker { public: - HardSigmoidOpMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + HardSigmoidOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : framework::OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input of HardSigmoid operator"); - AddOutput("Y", "Output of HardSigmoid operator"); + AddOutput("Out", "Output of HardSigmoid operator"); AddAttr("slope", "Slope for linear approximation of sigmoid") .SetDefault(0.2f); AddAttr("offset", "Offset for linear approximation of sigmoid") @@ -496,7 +484,7 @@ HardSigmoid Activation Operator. Segment-wise linear approximation of sigmoid(https://arxiv.org/abs/1603.00391), which is much faster than sigmoid. -$y = \max(0, \min(1, slope * x + shift))$ +$out = \max(0, \min(1, slope * x + shift))$ The slope should be positive. The offset can be either positive or negative. The default slope and shift are set according to the above reference. @@ -508,15 +496,15 @@ It is recommended to use the defaults for this activation. class SwishOpMaker : public framework::OpProtoAndCheckerMaker { public: - SwishOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + SwishOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : framework::OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input of Swish operator"); - AddOutput("Y", "Output of Swish operator"); + AddOutput("Out", "Output of Swish operator"); AddAttr("beta", "Constant beta of swish operator").SetDefault(1.0f); AddComment(R"DOC( Swish Activation Operator. 
-$$y = \frac{x}{1 + e^{- \beta x}}$$ +$$out = \frac{x}{1 + e^{- \beta x}}$$ )DOC"); } diff --git a/paddle/operators/activation_op.cu b/paddle/operators/activation_op.cu index 856d3fc35d..b9ccdf639c 100644 --- a/paddle/operators/activation_op.cu +++ b/paddle/operators/activation_op.cu @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #define EIGEN_USE_GPU #include "paddle/operators/activation_op.h" diff --git a/paddle/operators/activation_op.h b/paddle/operators/activation_op.h index 75eefca8b8..0885f7c570 100644 --- a/paddle/operators/activation_op.h +++ b/paddle/operators/activation_op.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ #pragma once #include "paddle/framework/eigen.h" @@ -27,11 +27,11 @@ class ActivationKernel void Compute(const framework::ExecutionContext& context) const override { auto* X = context.Input("X"); - auto* Y = context.Output("Y"); - Y->mutable_data(context.GetPlace()); + auto* Out = context.Output("Out"); + Out->mutable_data(context.GetPlace()); auto x = framework::EigenVector::Flatten(*X); - auto y = framework::EigenVector::Flatten(*Y); + auto out = framework::EigenVector::Flatten(*Out); auto* place = context.template device_context().eigen_device(); Functor functor; @@ -40,7 +40,7 @@ class ActivationKernel for (auto& attr : attrs) { *attr.second = context.Attr(attr.first); } - functor(*place, x, y); + functor(*place, x, out); } }; @@ -51,14 +51,15 @@ class ActivationGradKernel using T = typename Functor::ELEMENT_TYPE; void Compute(const framework::ExecutionContext& context) const override { auto* X = context.Input("X"); - auto* Y = context.Input("Y"); - auto* dY = context.Input(framework::GradVarName("Y")); + auto* Out = context.Input("Out"); + auto* dOut = + context.Input(framework::GradVarName("Out")); auto* dX = context.Output(framework::GradVarName("X")); dX->mutable_data(context.GetPlace()); - auto dy = framework::EigenVector::Flatten(*dY); + auto dout = framework::EigenVector::Flatten(*dOut); auto x = framework::EigenVector::Flatten(*X); - auto y = framework::EigenVector::Flatten(*Y); + auto out = framework::EigenVector::Flatten(*Out); auto dx = framework::EigenVector::Flatten(*dX); auto* place = context.template device_context().eigen_device(); @@ -67,7 +68,7 @@ class ActivationGradKernel for (auto& attr : attrs) { *attr.second = context.Attr(attr.first); } - functor(*place, x, y, dy, dx); + functor(*place, x, out, dout, dx); } }; @@ -83,17 +84,18 @@ struct BaseActivationFunctor { // sigmoid(x) = 1 / (1 + exp(-x)) template struct SigmoidFunctor : public BaseActivationFunctor { - template - void operator()(Device d, X x, Y y) const { - y.device(d) = static_cast(1) / (static_cast(1) + (-x).exp()); + template + void operator()(Device d, X x, Out out) const { + out.device(d) = static_cast(1) / (static_cast(1) + (-x).exp()); } }; template struct SigmoidGradFunctor : public BaseActivationFunctor { - template - void operator()(Device d, X x, Y y, dY dy, dX dx) const { - dx.device(d) = dy * y * (static_cast(1) - y); + template + void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + dx.device(d) = dout * out * (static_cast(1) - out); } }; @@ -101,7 +103,7 @@ struct SigmoidGradFunctor : public BaseActivationFunctor { // For numerical stability, we can use the log-sum-exp trick: // https://hips.seas.harvard.edu/blog/2013/01/09/computing-log-sum-exp/ // We can rewrite the above equation as: -// y = -log( exp(0) + exp(-x)) [since exp(0) = 1] +// out = -log( exp(0) + exp(-x)) [since exp(0) = 1] // = -log( exp(max(-x, 0) - max(-x, 0)) + exp(-x + max(-x, 0) - max(-x, 0))) // = -log( exp(max(-x, 0)) * exp(-max(-x, 0)) - exp(max(-x, 0)) * exp(-x - // max(-x, 0))) @@ -112,10 +114,10 @@ struct SigmoidGradFunctor : public BaseActivationFunctor { // + exp(-x - max(-x, 0)))) template struct LogSigmoidFunctor : public BaseActivationFunctor { - template - void operator()(Device d, X x, Y y) const { + template + void operator()(Device d, X x, Out out) const { auto temp = (-x).cwiseMax(static_cast(0)); // temp = max(-x, 0) - y.device(d) = -temp - (((-temp).exp() + (-x - temp).exp()).log()); + out.device(d) = -temp - (((-temp).exp() + (-x - temp).exp()).log()); } }; @@ -124,62 +126,66 @@ 
struct LogSigmoidFunctor : public BaseActivationFunctor { // exp(-x - max(-x, 0))) template struct LogSigmoidGradFunctor : public BaseActivationFunctor { - template - void operator()(Device d, X x, Y y, dY dy, dX dx) const { + template + void operator()(Device d, X x, Out out, dOut dout, dX dx) const { auto temp = (-x).cwiseMax(static_cast(0)); // temp = max(-x, 0) dx.device(d) = - dy * ((-x - temp).exp() / ((-temp).exp() + (-x - temp).exp())); + dout * ((-x - temp).exp() / ((-temp).exp() + (-x - temp).exp())); } }; // exp(x) = e^x template struct ExpFunctor : public BaseActivationFunctor { - template - void operator()(Device d, X x, Y y) const { - y.device(d) = x.exp(); + template + void operator()(Device d, X x, Out out) const { + out.device(d) = x.exp(); } }; template struct ExpGradFunctor : public BaseActivationFunctor { - template - void operator()(Device d, X x, Y y, dY dy, dX dx) const { - dx.device(d) = dy * y; + template + void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + dx.device(d) = dout * out; } }; // relu(x) = max(x, 0) template struct ReluFunctor : public BaseActivationFunctor { - template - void operator()(Device d, X x, Y y) const { - y.device(d) = x.cwiseMax(static_cast(0)); + template + void operator()(Device d, X x, Out out) const { + out.device(d) = x.cwiseMax(static_cast(0)); } }; template struct ReluGradFunctor : public BaseActivationFunctor { - template - void operator()(Device d, X x, Y y, dY dy, dX dx) const { - dx.device(d) = dy * (x > static_cast(0)).template cast(); + template + void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + dx.device(d) = dout * (x > static_cast(0)).template cast(); } }; // tanh(x) = (exp(x) - exp(-x)) / (exp(x) + exp(-x)) template struct TanhFunctor : public BaseActivationFunctor { - template - void operator()(Device d, X x, Y y) const { - y.device(d) = x.tanh(); + template + void operator()(Device d, X x, Out out) const { + out.device(d) = x.tanh(); } }; template struct TanhGradFunctor : public BaseActivationFunctor { - template - void operator()(Device d, X x, Y y, dY dy, dX dx) const { - dx.device(d) = dy * (static_cast(1) - y * y); + template + void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + dx.device(d) = dout * (static_cast(1) - out * out); } }; @@ -187,17 +193,18 @@ struct TanhGradFunctor : public BaseActivationFunctor { // where tanh(x) = (exp(x) - exp(-x)) / (exp(x) + exp(-x)) template struct TanhShrinkFunctor : public BaseActivationFunctor { - template - void operator()(Device d, X x, Y y) const { - y.device(d) = x - x.tanh(); + template + void operator()(Device d, X x, Out out) const { + out.device(d) = x - x.tanh(); } }; template struct TanhShrinkGradFunctor : public BaseActivationFunctor { - template - void operator()(Device d, X x, Y y, dY dy, dX dx) const { - dx.device(d) = dy * (x.tanh() * x.tanh()); + template + void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + dx.device(d) = dout * (x.tanh() * x.tanh()); } }; @@ -210,11 +217,11 @@ struct HardShrinkFunctor : public BaseActivationFunctor { typename BaseActivationFunctor::AttrPair GetAttrs() { return {{"threshold", &threshold}}; } - template - void operator()(Device d, X x, Y y) const { + template + void operator()(Device d, X x, Out out) const { auto temp1 = (x < static_cast(threshold * -1)).template cast().eval(); auto temp2 = (x > static_cast(threshold)).template cast().eval(); - y.device(d) = x * (temp1 + temp2); + out.device(d) = x * (temp1 + temp2); } }; @@ -226,11 +233,12 @@ struct 
HardShrinkGradFunctor : public BaseActivationFunctor { return {{"threshold", &threshold}}; } - template - void operator()(Device d, X x, Y y, dY dy, dX dx) const { + template + void operator()(Device d, X x, Out out, dOut dout, dX dx) const { auto temp1 = (x < static_cast(threshold * -1)).template cast().eval(); auto temp2 = (x > static_cast(threshold)).template cast().eval(); - dx.device(d) = dy * (temp1 + temp2).template cast(); + dx.device(d) = dout * (temp1 + temp2).template cast(); } }; @@ -243,12 +251,12 @@ struct SoftShrinkFunctor : public BaseActivationFunctor { return {{"lambda", &lambda}}; } - template - void operator()(Device d, X x, Y y) const { + template + void operator()(Device d, X x, Out out) const { auto lambdaT = static_cast(lambda); auto temp1 = (x > lambdaT).template cast().eval(); auto temp2 = (x < -lambdaT).template cast().eval(); - y.device(d) = temp1 * (x - lambdaT) + temp2 * (x + lambdaT); + out.device(d) = temp1 * (x - lambdaT) + temp2 * (x + lambdaT); } }; @@ -258,46 +266,49 @@ struct SoftShrinkGradFunctor : public BaseActivationFunctor { typename BaseActivationFunctor::AttrPair GetAttrs() { return {{"lambda", &lambda}}; } - template - void operator()(Device d, X x, Y y, dY dy, dX dx) const { + template + void operator()(Device d, X x, Out out, dOut dout, dX dx) const { auto lambdaT = static_cast(lambda); auto temp1 = (x > lambdaT).template cast().eval(); auto temp2 = (x < -lambdaT).template cast().eval(); - dx.device(d) = dy * (temp1 + temp2).template cast(); + dx.device(d) = dout * (temp1 + temp2).template cast(); } }; // sqrt(x) = x^(1/2) template struct SqrtFunctor : public BaseActivationFunctor { - template - void operator()(Device d, X x, Y y) const { - y.device(d) = x.sqrt(); + template + void operator()(Device d, X x, Out out) const { + out.device(d) = x.sqrt(); } }; template struct SqrtGradFunctor : public BaseActivationFunctor { - template - void operator()(Device d, X x, Y y, dY dy, dX dx) const { - const Y y_conj = Eigen::numext::conj(y); - dx.device(d) = static_cast(0.5) * dy / y_conj; + template + void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + const Out out_conj = Eigen::numext::conj(out); + dx.device(d) = static_cast(0.5) * dout / out_conj; } }; // ceil(x) = ceiling(x) template struct CeilFunctor : public BaseActivationFunctor { - template - void operator()(Device d, X x, Y y) const { - y.device(d) = x.ceil(); + template + void operator()(Device d, X x, Out out) const { + out.device(d) = x.ceil(); } }; template struct ZeroGradFunctor : public BaseActivationFunctor { - template - void operator()(Device d, X x, Y y, dY dy, dX dx) const { + template + void operator()(Device d, X x, Out out, dOut dout, dX dx) const { dx.device(d) = static_cast(0) / x; } }; @@ -305,86 +316,90 @@ struct ZeroGradFunctor : public BaseActivationFunctor { // floor(x) = flooring(x) template struct FloorFunctor : public BaseActivationFunctor { - template - void operator()(Device d, X x, Y y) const { - y.device(d) = x.ceil(); + template + void operator()(Device d, X x, Out out) const { + out.device(d) = x.floor(); } }; // round(x) = [x] template struct RoundFunctor : public BaseActivationFunctor { - template - void operator()(Device d, X x, Y y) const { - y.device(d) = x.round(); + template + void operator()(Device d, X x, Out out) const { + out.device(d) = x.round(); } }; // abs(x) = |x| template struct AbsFunctor : public BaseActivationFunctor { - template - void 
operator()(Device d, X x, Out out) const { + out.device(d) = x.abs(); } }; template struct AbsGradFunctor : public BaseActivationFunctor { - template - void operator()(Device d, X x, Y y, dY dy, dX dx) const { - dx.device(d) = dy * x.sign(); + template + void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + dx.device(d) = dout * x.sign(); } }; // reciprocal(x) = 1 / x template struct ReciprocalFunctor : public BaseActivationFunctor { - template - void operator()(Device d, X x, Y y) const { - y.device(d) = static_cast(1) / x; + template + void operator()(Device d, X x, Out out) const { + out.device(d) = static_cast(1) / x; } }; template struct ReciprocalGradFunctor : public BaseActivationFunctor { - template - void operator()(Device d, X x, Y y, dY dy, dX dx) const { - dx.device(d) = dy * static_cast(-1) * y * y; + template + void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + dx.device(d) = dout * static_cast(-1) * out * out; } }; // log(x) = natural logarithm of x template struct LogFunctor : public BaseActivationFunctor { - template - void operator()(Device d, X x, Y y) const { - y.device(d) = x.log(); + template + void operator()(Device d, X x, Out out) const { + out.device(d) = x.log(); } }; template struct LogGradFunctor : public BaseActivationFunctor { - template - void operator()(Device d, X x, Y y, dY dy, dX dx) const { - dx.device(d) = dy * (static_cast(1) / x); + template + void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + dx.device(d) = dout * (static_cast(1) / x); } }; // square(x) = x^2 template struct SquareFunctor : public BaseActivationFunctor { - template - void operator()(Device d, X x, Y y) const { - y.device(d) = x.square(); + template + void operator()(Device d, X x, Out out) const { + out.device(d) = x.square(); } }; template struct SquareGradFunctor : public BaseActivationFunctor { - template - void operator()(Device d, X x, Y y, dY dy, dX dx) const { - dx.device(d) = dy * static_cast(2) * x; + template + void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + dx.device(d) = dout * static_cast(2) * x; } }; @@ -399,9 +414,9 @@ struct BReluFunctor : public BaseActivationFunctor { return {{"t_min", &t_min}, {"t_max", &t_max}}; } - template - void operator()(Device d, X x, Y y) const { - y.device(d) = + template + void operator()(Device d, X x, Out out) const { + out.device(d) = x.cwiseMax(static_cast(t_min)).cwiseMin(static_cast(t_max)); } }; @@ -413,9 +428,10 @@ struct BReluGradFunctor : public BaseActivationFunctor { typename BaseActivationFunctor::AttrPair GetAttrs() { return {{"t_min", &t_min}, {"t_max", &t_max}}; } - template - void operator()(Device d, X x, Y y, dY dy, dX dx) const { - dx.device(d) = dy * + template + void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + dx.device(d) = dout * ((x > static_cast(t_min)) * (x < static_cast(t_max))) .template cast(); } @@ -430,9 +446,9 @@ struct Relu6Functor : public BaseActivationFunctor { return {{"threshold", &threshold}}; } - template - void operator()(Device d, X x, Y y) const { - y.device(d) = + template + void operator()(Device d, X x, Out out) const { + out.device(d) = x.cwiseMax(static_cast(0)).cwiseMin(static_cast(threshold)); } }; @@ -443,9 +459,10 @@ struct Relu6GradFunctor : public BaseActivationFunctor { typename BaseActivationFunctor::AttrPair GetAttrs() { return {{"threshold", &threshold}}; } - template - void operator()(Device d, X x, Y y, dY dy, dX dx) const { - dx.device(d) = dy * + template + void operator()(Device d, X x, 
Out out, dOut dout, dX dx) const { + dx.device(d) = dout * ((x > static_cast(0)) * (x < static_cast(threshold))) .template cast(); } @@ -458,10 +475,10 @@ struct Relu6GradFunctor : public BaseActivationFunctor { // Then: softplus(x) = max(x, 0) + log(exp(-max(x, 0)) + exp(x - max(x, 0))) template struct SoftplusFunctor : public BaseActivationFunctor { - template - void operator()(Device d, X x, Y y) { + template + void operator()(Device d, X x, Out out) { auto temp = x.cwiseMax(static_cast(0)); // temp = max(x, 0) - y.device(d) = temp + (((-temp).exp() + (x - temp).exp()).log()); + out.device(d) = temp + (((-temp).exp() + (x - temp).exp()).log()); } }; @@ -471,19 +488,21 @@ struct SoftplusFunctor : public BaseActivationFunctor { // exp(x - max(x, 0))) template struct SoftplusGradFunctor : public BaseActivationFunctor { - template - void operator()(Device d, X x, Y y, dY dy, dX dx) { + template + void operator()(Device d, X x, Out out, dOut dout, dX dx) { auto temp = x.cwiseMax(static_cast(0)); // temp = max(x, 0) - dx.device(d) = dy * ((x - temp).exp() / ((-temp).exp() + (x - temp).exp())); + dx.device(d) = + dout * ((x - temp).exp() / ((-temp).exp() + (x - temp).exp())); } }; // softsign(x) = x / (1 + |x|) template struct SoftsignFunctor : public BaseActivationFunctor { - template - void operator()(Device d, X x, Y y) { - y.device(d) = x / (static_cast(1) + x.abs()); + template + void operator()(Device d, X x, Out out) { + out.device(d) = x / (static_cast(1) + x.abs()); } }; @@ -491,10 +510,11 @@ struct SoftsignFunctor : public BaseActivationFunctor { // Taken from https://en.wikipedia.org/wiki/Activation_function template struct SoftsignGradFunctor : public BaseActivationFunctor { - template - void operator()(Device d, X x, Y y, dY dy, dX dx) { + template + void operator()(Device d, X x, Out out, dOut dout, dX dx) { dx.device(d) = - dy * (static_cast(1) / (static_cast(1) + x.abs()).square()); + dout * (static_cast(1) / (static_cast(1) + x.abs()).square()); } }; @@ -505,11 +525,11 @@ struct SoftReluFunctor : public BaseActivationFunctor { return {{"threshold", &threshold}}; } - template - void operator()(Device d, X x, Y y) const { + template + void operator()(Device d, X x, Out out) const { auto tmp = static_cast(threshold); auto temp = x.cwiseMax(-tmp).cwiseMin(tmp); - y.device(d) = (static_cast(1) + temp.exp()).log(); + out.device(d) = (static_cast(1) + temp.exp()).log(); } }; @@ -519,11 +539,12 @@ struct SoftReluGradFunctor : public BaseActivationFunctor { typename BaseActivationFunctor::AttrPair GetAttrs() { return {{"threshold", &threshold}}; } - template - void operator()(Device d, X x, Y y, dY dy, dX dx) const { + template + void operator()(Device d, X x, Out out, dOut dout, dX dx) const { auto tmp = static_cast(threshold); auto temp = ((x > -tmp) * (x < tmp)).template cast().eval(); - dx.device(d) = dy * (static_cast(1) - (-y).exp()) * temp; + dx.device(d) = dout * (static_cast(1) - (-out).exp()) * temp; } }; @@ -534,9 +555,9 @@ struct LeakyReluFunctor : public BaseActivationFunctor { return {{"alpha", &alpha}}; } - template - void operator()(Device d, X x, Y y) const { - y.device(d) = x.cwiseMax(static_cast(alpha) * x); + template + void operator()(Device d, X x, Out out) const { + out.device(d) = x.cwiseMax(static_cast(alpha) * x); } }; @@ -546,12 +567,13 @@ struct LeakyReluGradFunctor : public BaseActivationFunctor { typename BaseActivationFunctor::AttrPair GetAttrs() { return {{"alpha", &alpha}}; } - template - void operator()(Device d, X x, Y y, dY dy, dX dx) const { + 
template + void operator()(Device d, X x, Out out, dOut dout, dX dx) const { auto temp1 = static_cast(alpha) * (x < static_cast(0)).template cast().eval(); auto temp2 = (x >= static_cast(0)).template cast().eval(); - dx.device(d) = dy * (temp1 + temp2).template cast(); + dx.device(d) = dout * (temp1 + temp2).template cast(); } }; @@ -562,11 +584,11 @@ struct ELUFunctor : public BaseActivationFunctor { return {{"alpha", &alpha}}; } - template - void operator()(Device d, X x, Y y) const { - y.device(d) = x.cwiseMax(static_cast(0)) + - (static_cast(alpha) * (x.exp() - static_cast(1))) - .cwiseMin(static_cast(0)); + template + void operator()(Device d, X x, Out out) const { + out.device(d) = x.cwiseMax(static_cast(0)) + + (static_cast(alpha) * (x.exp() - static_cast(1))) + .cwiseMin(static_cast(0)); } }; @@ -576,10 +598,11 @@ struct ELUGradFunctor : public BaseActivationFunctor { typename BaseActivationFunctor::AttrPair GetAttrs() { return {{"alpha", &alpha}}; } - template - void operator()(Device d, X x, Y y, dY dy, dX dx) const { - dx.device(d) = dy * (x > static_cast(0)).template cast() + - dy * (y + static_cast(alpha)) * + template + void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + dx.device(d) = dout * (x > static_cast(0)).template cast() + + dout * (out + static_cast(alpha)) * (x < static_cast(0)).template cast(); } }; @@ -591,9 +614,9 @@ struct PowFunctor : public BaseActivationFunctor { typename BaseActivationFunctor::AttrPair GetAttrs() { return {{"factor", &factor}}; } - template - void operator()(Device d, X x, Y y) const { - y.device(d) = x.pow(static_cast(factor)); + template + void operator()(Device d, X x, Out out) const { + out.device(d) = x.pow(static_cast(factor)); } }; @@ -603,9 +626,10 @@ struct PowGradFunctor : public BaseActivationFunctor { typename BaseActivationFunctor::AttrPair GetAttrs() { return {{"factor", &factor}}; } - template - void operator()(Device d, X x, Y y, dY dy, dX dx) const { - dx.device(d) = dy * static_cast(factor) * + template + void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + dx.device(d) = dout * static_cast(factor) * x.pow(static_cast(factor - static_cast(1))); } }; @@ -618,9 +642,9 @@ struct STanhFunctor : public BaseActivationFunctor { return {{"scale_a", &scale_a}, {"scale_b", &scale_b}}; } - template - void operator()(Device d, X x, Y y) const { - y.device(d) = + template + void operator()(Device d, X x, Out out) const { + out.device(d) = static_cast(scale_b) * (static_cast(scale_a) * x).tanh(); } }; @@ -633,12 +657,13 @@ struct STanhGradFunctor : public BaseActivationFunctor { return {{"scale_a", &scale_a}, {"scale_b", &scale_b}}; } - template - void operator()(Device d, X x, Y y, dY dy, dX dx) const { + template + void operator()(Device d, X x, Out out, dOut dout, dX dx) const { auto a = static_cast(scale_a); auto b = static_cast(scale_b); auto temp = (a * x).tanh() * (a * x).tanh(); - dx.device(d) = dy * a * b * (static_cast(1) - temp); + dx.device(d) = dout * a * b * (static_cast(1) - temp); } }; @@ -649,10 +674,10 @@ struct ThresholdedReluFunctor : public BaseActivationFunctor { return {{"threshold", &threshold}}; } - template - void operator()(Device d, X x, Y y) const { + template + void operator()(Device d, X x, Out out) const { auto th = static_cast(threshold); - y.device(d) = (x > th).template cast() * x; + out.device(d) = (x > th).template cast() * x; } }; @@ -663,10 +688,11 @@ struct ThresholdedReluGradFunctor : public BaseActivationFunctor { return {{"threshold", &threshold}}; } - template - 
void operator()(Device d, X x, Y y, dY dy, dX dx) const { + template + void operator()(Device d, X x, Out out, dOut dout, dX dx) const { auto th = static_cast(threshold); - dx.device(d) = dy * (x > th).template cast(); + dx.device(d) = dout * (x > th).template cast(); } }; @@ -678,10 +704,11 @@ struct HardSigmoidFunctor : public BaseActivationFunctor { return {{"slope", &slope}, {"offset", &offset}}; } - template - void operator()(Device d, X x, Y y) const { + template + void operator()(Device d, X x, Out out) const { auto temp = x * static_cast(slope) + static_cast(offset); - y.device(d) = temp.cwiseMax(static_cast(0)).cwiseMin(static_cast(1)); + out.device(d) = + temp.cwiseMax(static_cast(0)).cwiseMin(static_cast(1)); } }; @@ -693,12 +720,13 @@ struct HardSigmoidGradFunctor : public BaseActivationFunctor { return {{"slope", &slope}, {"offset", &offset}}; } - template - void operator()(Device d, X x, Y y, dY dy, dX dx) const { - dx.device(d) = - dy * - ((y > static_cast(0)) * (y < static_cast(1))).template cast() * - static_cast(slope); + template + void operator()(Device d, X x, Out out, dOut dout, dX dx) const { + dx.device(d) = dout * + ((out > static_cast(0)) * (out < static_cast(1))) + .template cast() * + static_cast(slope); } }; @@ -709,9 +737,9 @@ struct SwishFunctor : public BaseActivationFunctor { return {{"beta", &beta}}; } - template - void operator()(Device d, X x, Y y) const { - y.device(d) = x / (static_cast(1) + (static_cast(-beta) * x).exp()); + template + void operator()(Device d, X x, Out out) const { + out.device(d) = x / (static_cast(1) + (static_cast(-beta) * x).exp()); } }; @@ -722,12 +750,13 @@ struct SwishGradFunctor : public BaseActivationFunctor { return {{"beta", &beta}}; } - template - void operator()(Device d, X x, Y y, dY dy, dX dx) const { + template + void operator()(Device d, X x, Out out, dOut dout, dX dx) const { auto temp1 = static_cast(1) / (static_cast(1) + (static_cast(-beta) * x).exp()); - auto temp2 = temp1 * (static_cast(1) - (beta * y)); - dx.device(d) = dy * ((beta * y) + temp2); + auto temp2 = temp1 * (static_cast(1) - (beta * out)); + dx.device(d) = dout * ((beta * out) + temp2); } }; diff --git a/paddle/operators/adadelta_op.cc b/paddle/operators/adadelta_op.cc index 507811e7b5..d8a9491c82 100644 --- a/paddle/operators/adadelta_op.cc +++ b/paddle/operators/adadelta_op.cc @@ -59,8 +59,7 @@ class AdadeltaOp : public framework::OperatorWithKernel { class AdadeltaOpMaker : public framework::OpProtoAndCheckerMaker { public: - AdadeltaOpMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + AdadeltaOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("Param", "(Tensor) Input parameter"); AddInput("Grad", "(Tensor) Input gradient"); diff --git a/paddle/operators/adadelta_op.cu b/paddle/operators/adadelta_op.cu index eee2d0a2f5..91294a0d5d 100644 --- a/paddle/operators/adadelta_op.cu +++ b/paddle/operators/adadelta_op.cu @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #define EIGEN_USE_GPU #include "paddle/operators/adadelta_op.h" diff --git a/paddle/operators/adagrad_op.cc b/paddle/operators/adagrad_op.cc index 5d00716316..052c793a01 100644 --- a/paddle/operators/adagrad_op.cc +++ b/paddle/operators/adagrad_op.cc @@ -59,8 +59,7 @@ class AdagradOp : public framework::OperatorWithKernel { class AdagradOpMaker : public framework::OpProtoAndCheckerMaker { public: - AdagradOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker) + AdagradOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("Param", "(Tensor) Input parameter"); AddInput("Grad", "(Tensor) Input gradient"); diff --git a/paddle/operators/adagrad_op.cu b/paddle/operators/adagrad_op.cu index 585b2d9289..75bc7affd6 100644 --- a/paddle/operators/adagrad_op.cu +++ b/paddle/operators/adagrad_op.cu @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ #define EIGEN_USE_GPU #include "paddle/operators/adagrad_op.h" diff --git a/paddle/operators/adam_op.cc b/paddle/operators/adam_op.cc index cf6ef6dd53..03527de936 100644 --- a/paddle/operators/adam_op.cc +++ b/paddle/operators/adam_op.cc @@ -73,7 +73,7 @@ class AdamOp : public framework::OperatorWithKernel { class AdamOpMaker : public framework::OpProtoAndCheckerMaker { public: - AdamOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) + AdamOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("Param", "(Tensor) Input parameter"); AddInput("Grad", "(Tensor) Input gradient"); diff --git a/paddle/operators/adam_op.cu b/paddle/operators/adam_op.cu index c135b37378..94f840c188 100644 --- a/paddle/operators/adam_op.cu +++ b/paddle/operators/adam_op.cu @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #define EIGEN_USE_GPU #include "paddle/operators/adam_op.h" diff --git a/paddle/operators/adam_op.h b/paddle/operators/adam_op.h index 45157842a6..c4e2c8bb88 100644 --- a/paddle/operators/adam_op.h +++ b/paddle/operators/adam_op.h @@ -13,59 +13,113 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" +#include // for sqrt in CPU and CUDA #include "paddle/framework/op_registry.h" +#include "paddle/operators/detail/safe_ref.h" +#include "paddle/platform/for_range.h" namespace paddle { namespace operators { +template +struct AdamFunctor { + T beta1_; + T beta2_; + T epsilon_; + + const T* beta1_pow_; + const T* beta2_pow_; + const T* moment1_; + T* moment1_out_; + const T* moment2_; + T* moment2_out_; + const T* lr_; + const T* grad_; + const T* param_; + T* param_out_; + + AdamFunctor(T beta1, T beta2, T epsilon, const T* beta1_pow, + const T* beta2_pow, const T* mom1, T* mom1_out, const T* mom2, + T* mom2_out, const T* lr, const T* grad, const T* param, + T* param_out) + : beta1_(beta1), + beta2_(beta2), + epsilon_(epsilon), + beta1_pow_(beta1_pow), + beta2_pow_(beta2_pow), + moment1_(mom1), + moment1_out_(mom1_out), + moment2_(mom2), + moment2_out_(mom2_out), + lr_(lr), + grad_(grad), + param_(param), + param_out_(param_out) {} + + inline HOSTDEVICE void operator()(size_t i) const { + // Merge all memory access together. 
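+ // Per-element Adam step, with the bias correction of the running
+ // moments folded into the learning rate:
+ //   lr_t = lr * sqrt(1 - beta2^t) / (1 - beta1^t)
+ //   m_t  = beta1 * m_{t-1} + (1 - beta1) * g
+ //   v_t  = beta2 * v_{t-1} + (1 - beta2) * g^2
+ //   p_t  = p_{t-1} - lr_t * m_t / (sqrt(v_t) + epsilon)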
+ T g = grad_[i]; + T mom1 = moment1_[i]; + T mom2 = moment2_[i]; + T lr = *lr_; + T beta1_pow = *beta1_pow_; + T beta2_pow = *beta2_pow_; + T p = param_[i]; + + // Calculation + lr *= sqrt(1 - beta2_pow) / (1 - beta1_pow); + mom1 = beta1_ * mom1 + (1 - beta1_) * g; + mom2 = beta2_ * mom2 + (1 - beta2_) * g * g; + p -= lr * (mom1 / (sqrt(mom2) + epsilon_)); + + // Write back to global memory + moment1_out_[i] = mom1; + moment2_out_[i] = mom2; + param_out_[i] = p; + } +}; + template class AdamOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { - auto param_out_tensor = ctx.Output("ParamOut"); - auto moment1_out_tensor = ctx.Output("Moment1Out"); - auto moment2_out_tensor = ctx.Output("Moment2Out"); - - param_out_tensor->mutable_data(ctx.GetPlace()); - moment1_out_tensor->mutable_data(ctx.GetPlace()); - moment2_out_tensor->mutable_data(ctx.GetPlace()); + using paddle::framework::LoDTensor; + using paddle::operators::detail::Ref; T beta1 = static_cast(ctx.Attr("beta1")); T beta2 = static_cast(ctx.Attr("beta2")); T epsilon = static_cast(ctx.Attr("epsilon")); + auto& param = Ref(ctx.Input("Param"), "Must set Param"); + auto& grad = Ref(ctx.Input("Grad"), "Must set Grad"); + auto& mom1 = Ref(ctx.Input("Moment1"), "Must set Moment1"); + auto& mom2 = Ref(ctx.Input("Moment2"), "Must set Moment2"); + auto& lr = + Ref(ctx.Input("LearningRate"), "Must set LearningRate"); + + auto& beta1_pow = + Ref(ctx.Input("Beta1Pow"), "Must set Beta1Pow"); + auto& beta2_pow = + Ref(ctx.Input("Beta2Pow"), "Must set Beta2Pow"); + + auto& param_out = + Ref(ctx.Output("ParamOut"), "Must set ParamOut"); + auto& mom1_out = + Ref(ctx.Output("Moment1Out"), "Must set Moment1Out"); + auto& mom2_out = + Ref(ctx.Output("Moment2Out"), "Must set Moment2Out"); - auto param = framework::EigenVector::Flatten( - *ctx.Input("Param")); - auto grad = framework::EigenVector::Flatten( - *ctx.Input("Grad")); - auto moment1 = framework::EigenVector::Flatten( - *ctx.Input("Moment1")); - auto moment2 = framework::EigenVector::Flatten( - *ctx.Input("Moment2")); - auto lr = framework::EigenVector::Flatten( - *ctx.Input("LearningRate")); - auto beta1_pow = framework::EigenVector::Flatten( - *ctx.Input("Beta1Pow")); - auto beta2_pow = framework::EigenVector::Flatten( - *ctx.Input("Beta2Pow")); - auto param_out = framework::EigenVector::Flatten(*param_out_tensor); - auto moment1_out = framework::EigenVector::Flatten(*moment1_out_tensor); - auto moment2_out = framework::EigenVector::Flatten(*moment2_out_tensor); - auto* place = ctx.template device_context().eigen_device(); - - moment1_out.device(*place) = beta1 * moment1 + (1 - beta1) * grad; - moment2_out.device(*place) = beta2 * moment2 + (1 - beta2) * grad.square(); - - // All of these are tensors of 1 element - auto lr_t = lr * (1 - beta2_pow).sqrt() / (1 - beta1_pow); - // Eigen does not support automatic broadcast - // Get dimensions of moment vector to broadcast lr_t - Eigen::DSizes m_dsize(moment1_out_tensor->numel()); - param_out.device(*place) = - param - - lr_t.broadcast(m_dsize) * - (moment1_out / (moment2_out.sqrt() + epsilon)); + AdamFunctor functor(beta1, beta2, epsilon, beta1_pow.template data(), + beta2_pow.template data(), + mom1.template data(), + mom1_out.template mutable_data(ctx.GetPlace()), + mom2.template data(), + mom2_out.template mutable_data(ctx.GetPlace()), + lr.template data(), grad.template data(), + param.template data(), + param_out.template mutable_data(ctx.GetPlace())); + platform::ForRange 
for_range( + static_cast(ctx.device_context()), param.numel()); + for_range(functor); } }; diff --git a/paddle/operators/adamax_op.cc b/paddle/operators/adamax_op.cc index 49ce497bb7..3b0b714184 100644 --- a/paddle/operators/adamax_op.cc +++ b/paddle/operators/adamax_op.cc @@ -67,7 +67,7 @@ class AdamaxOp : public framework::OperatorWithKernel { class AdamaxOpMaker : public framework::OpProtoAndCheckerMaker { public: - AdamaxOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) + AdamaxOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("Param", "(Tensor) Input parameter"); AddInput("Grad", "(Tensor) Input gradient"); diff --git a/paddle/operators/adamax_op.cu b/paddle/operators/adamax_op.cu index 2d143905c4..8f87bb2867 100644 --- a/paddle/operators/adamax_op.cu +++ b/paddle/operators/adamax_op.cu @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #define EIGEN_USE_GPU #include "paddle/operators/adamax_op.h" diff --git a/paddle/operators/array_operator.h b/paddle/operators/array_operator.h index 1f2b4fdb4b..060ffac827 100644 --- a/paddle/operators/array_operator.h +++ b/paddle/operators/array_operator.h @@ -1,20 +1,21 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ #pragma once #include "paddle/framework/lod_tensor_array.h" #include "paddle/framework/op_registry.h" +#include "paddle/platform/device_context.h" namespace paddle { namespace operators { @@ -27,11 +28,16 @@ class ArrayOp : public framework::OperatorBase { protected: size_t GetOffset(const framework::Scope &scope, - const platform::DeviceContext &dev_ctx) const { + const platform::Place &place) const { auto *i = scope.FindVar(Input("I")); PADDLE_ENFORCE(i != nullptr, "I must be set"); auto &i_tensor = i->Get(); PADDLE_ENFORCE_EQ(i_tensor.numel(), 1); + + // get device context from pool + platform::DeviceContextPool &pool = platform::DeviceContextPool::Get(); + auto &dev_ctx = *pool.Borrow(place); + size_t offset; if (platform::is_gpu_place(i_tensor.place())) { // FIXME: Avoid copy from GPU to CPU diff --git a/paddle/operators/array_to_lod_tensor_op.cc b/paddle/operators/array_to_lod_tensor_op.cc index faeba7f3ed..0aa04c268b 100644 --- a/paddle/operators/array_to_lod_tensor_op.cc +++ b/paddle/operators/array_to_lod_tensor_op.cc @@ -1,21 +1,23 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ #include + #include "paddle/framework/lod_rank_table.h" #include "paddle/framework/lod_tensor_array.h" #include "paddle/framework/op_registry.h" #include "paddle/memory/memcpy.h" +#include "paddle/platform/device_context.h" namespace paddle { namespace operators { @@ -30,7 +32,7 @@ class ArrayToLoDTensorOp : public framework::OperatorBase { const framework::AttributeMap &attrs) : OperatorBase(type, inputs, outputs, attrs) {} void Run(const framework::Scope &scope, - const platform::DeviceContext &dev_ctx) const override { + const platform::Place &dev_place) const override { auto &x = scope.FindVar(Input("X"))->Get(); auto &rank_table = scope.FindVar(Input("RankTable"))->Get(); @@ -103,6 +105,10 @@ class ArrayToLoDTensorOp : public framework::OperatorBase { continue; } auto slice = out->Slice(out_offset, out_offset + len); + + platform::DeviceContextPool &pool = platform::DeviceContextPool::Get(); + auto &dev_ctx = *pool.Borrow(place); + framework::CopyFrom(x[x_idx].Slice(start_offset, end_offset), place, dev_ctx, &slice); out_offset += len; @@ -114,8 +120,7 @@ class ArrayToLoDTensorOp : public framework::OperatorBase { class ArrayToLoDTensorOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - ArrayToLoDTensorOpProtoMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + ArrayToLoDTensorOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "(std::vector) A vector of tensors that is going to " @@ -150,14 +155,14 @@ class ArrayToLoDTensorGradMaker : public framework::SingleGradOpDescMaker { using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; protected: - std::unique_ptr Apply() const override { - auto *grad_op = new framework::OpDescBind(); + std::unique_ptr Apply() const override { + auto *grad_op = new framework::OpDesc(); grad_op->SetType("lod_tensor_to_array"); grad_op->SetInput("X", OutputGrad("Out")); grad_op->SetInput("RankTable", Input("RankTable")); grad_op->SetOutput("Out", InputGrad("X")); grad_op->SetAttrMap(Attrs()); - return std::unique_ptr(grad_op); + return std::unique_ptr(grad_op); } }; diff --git a/paddle/operators/assign_op.cc b/paddle/operators/assign_op.cc index 0a37f18729..0560040509 100644 --- a/paddle/operators/assign_op.cc +++ b/paddle/operators/assign_op.cc @@ -1,20 +1,21 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ #include "paddle/framework/data_type.h" #include "paddle/framework/op_registry.h" #include "paddle/framework/var_type.h" +#include "paddle/platform/device_context.h" namespace paddle { namespace operators { @@ -71,7 +72,7 @@ class AssignOp : public framework::OperatorBase { const framework::AttributeMap &attrs) : OperatorBase(type, inputs, outputs, attrs) {} void Run(const framework::Scope &scope, - const platform::DeviceContext &dev_ctx) const override { + const platform::Place &place) const override { auto *x = scope.FindVar(Input("X")); if (x == nullptr) { return; @@ -80,14 +81,17 @@ class AssignOp : public framework::OperatorBase { PADDLE_ENFORCE( out != nullptr, "The Output(Out) should not be null if the Input(X) is set."); + + platform::DeviceContextPool &pool = platform::DeviceContextPool::Get(); + auto &dev_ctx = *pool.Borrow(place); + framework::VisitVarType(*x, AssignFunctor(out, dev_ctx)); } }; class AssignOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - AssignOpProtoMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + AssignOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "(LoDTensor, SelectedRows or LoDTensorArray) The input variable " @@ -109,8 +113,8 @@ class AssignInferShape : public framework::InferShapeBase { void operator()(framework::InferShapeContext *context) const override { if (context->HasInput("X")) { auto type = context->GetInputsVarType("X")[0]; - if (type == framework::VarDesc_VarType_SELECTED_ROWS || - type == framework::VarDesc_VarType_LOD_TENSOR) { + if (type == framework::proto::VarDesc_VarType_SELECTED_ROWS || + type == framework::proto::VarDesc_VarType_LOD_TENSOR) { context->SetOutputDim("Out", context->GetInputDim("X")); } } @@ -122,12 +126,12 @@ class AssignGradMaker : public framework::SingleGradOpDescMaker { using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; protected: - std::unique_ptr Apply() const override { - auto *op = new framework::OpDescBind(); + std::unique_ptr Apply() const override { + auto *op = new framework::OpDesc(); op->SetType("assign"); op->SetInput("X", OutputGrad("Out")); op->SetOutput("Out", InputGrad("X")); - return std::unique_ptr(op); + return std::unique_ptr(op); } }; diff --git a/paddle/operators/auc_op.cc b/paddle/operators/auc_op.cc index 6c3f67ec32..c16bc11931 100644 --- a/paddle/operators/auc_op.cc +++ b/paddle/operators/auc_op.cc @@ -39,7 +39,7 @@ class AucOp : public framework::OperatorWithKernel { } protected: - framework::OpKernelType GetKernelType( + framework::OpKernelType GetActualKernelType( const framework::ExecutionContext &ctx) const override { return framework::OpKernelType( framework::ToDataType(ctx.Input("Out")->type()), @@ -49,7 +49,7 @@ class AucOp : public framework::OperatorWithKernel { class AucOpMaker : public framework::OpProtoAndCheckerMaker { public: - AucOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) + AucOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("Out", "A floating point 2D tensor, values are in the range [0, 1]." diff --git a/paddle/operators/batch_norm_op.cc b/paddle/operators/batch_norm_op.cc index 94a972b7ab..98db28ddee 100644 --- a/paddle/operators/batch_norm_op.cc +++ b/paddle/operators/batch_norm_op.cc @@ -13,12 +13,14 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "paddle/operators/batch_norm_op.h" +#include "paddle/framework/data_layout.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; using LoDTensor = framework::LoDTensor; +using DataLayout = framework::DataLayout; template using EigenArrayMap = @@ -48,10 +50,6 @@ class BatchNormOp : public framework::OperatorWithKernel { PADDLE_ENFORCE(ctx->HasOutput("SavedMean"), ""); PADDLE_ENFORCE(ctx->HasOutput("SavedVariance"), ""); - const float epsilon = ctx->Attrs().Get("epsilon"); - PADDLE_ENFORCE_GE(epsilon, 0.0, "epsilon should be larger than 0"); - PADDLE_ENFORCE_LE(epsilon, 0.001, "epsilon should not be too large"); - // make sure Mean/MeanOut and Variance/VarianceOut share memory in Python PADDLE_ENFORCE_EQ(ctx->Inputs("Mean")[0], ctx->Outputs("MeanOut")[0], "Mean and MeanOut should share the same memory"); @@ -60,15 +58,15 @@ class BatchNormOp : public framework::OperatorWithKernel { "Variance and VarianceOut should share the same memory"); const auto x_dims = ctx->GetInputDim("X"); - const TensorFormat tensor_format = - StringToTensorFormat(ctx->Attrs().Get("tensor_format")); + const DataLayout data_layout = framework::StringToDataLayout( + ctx->Attrs().Get("data_layout")); PADDLE_ENFORCE(x_dims.size() >= 2 && x_dims.size() <= 5, "Input X must have 2 to 5 dimensions."); const int C = - (tensor_format == TensorFormat::NCHW ? x_dims[1] - : x_dims[x_dims.size() - 1]); + (data_layout == DataLayout::kNCHW ? x_dims[1] + : x_dims[x_dims.size() - 1]); PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale").size(), 1UL); PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale")[0], C); @@ -85,13 +83,17 @@ class BatchNormOp : public framework::OperatorWithKernel { class BatchNormOpMaker : public framework::OpProtoAndCheckerMaker { public: - BatchNormOpMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + BatchNormOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddAttr("is_test", "").SetDefault(false); AddAttr("momentum", "").SetDefault(0.9); - AddAttr("epsilon", "").SetDefault(1e-5); - AddAttr("tensor_format", "").SetDefault("NCHW"); + AddAttr("epsilon", "") + .SetDefault(1e-5) + .AddCustomChecker([](const float &epsilon) { + PADDLE_ENFORCE(epsilon >= 0.0f && epsilon <= 0.001f, + "'epsilon' should be between 0.0 and 0.001."); + }); + AddAttr("data_layout", "").SetDefault("NCHW"); AddInput("X", "The input tensor"); AddInput("Scale", "Scale is a 1-dimensional tensor of size C " @@ -142,9 +144,9 @@ class BatchNormKernel const float epsilon = ctx.Attr("epsilon"); const float momentum = ctx.Attr("momentum"); const bool is_test = ctx.Attr("is_test"); - const std::string tensor_format_str = - ctx.Attr("tensor_format"); - const TensorFormat tensor_format = StringToTensorFormat(tensor_format_str); + const std::string data_layout_str = ctx.Attr("data_layout"); + const DataLayout data_layout = + framework::StringToDataLayout(data_layout_str); const auto *x = ctx.Input("X"); const auto &x_dims = x->dims(); @@ -152,8 +154,8 @@ class BatchNormKernel "The Input dim size should be between 2 and 5"); const int N = x_dims[0]; const int C = - (tensor_format == TensorFormat::NCHW ? x_dims[1] - : x_dims[x_dims.size() - 1]); + (data_layout == DataLayout::kNCHW ? 
x_dims[1] + : x_dims[x_dims.size() - 1]); const int sample_size = x->numel() / N / C; auto *y = ctx.Output("Y"); @@ -178,8 +180,8 @@ class BatchNormKernel saved_mean_e.setZero(); saved_variance_e.setZero(); - switch (tensor_format) { - case TensorFormat::NCHW: { + switch (data_layout) { + case DataLayout::kNCHW: { ConstEigenArrayMap x_arr(x->data(), sample_size, N * C); for (int nc = 0; nc < N * C; ++nc) { saved_mean_e(nc % C) += x_arr.col(nc).sum(); @@ -192,7 +194,7 @@ class BatchNormKernel saved_variance_e /= N * sample_size; break; } - case TensorFormat::NHWC: { + case DataLayout::kNHWC: { ConstEigenArrayMap x_arr(x->data(), C, N * sample_size); for (int i = 0; i < N * sample_size; ++i) { saved_mean_e += x_arr.col(i); @@ -206,7 +208,7 @@ class BatchNormKernel break; } default: - PADDLE_THROW("Unknown storage order: %s", tensor_format_str); + PADDLE_THROW("Unknown storage order: %s", data_layout_str); } EigenVectorArrayMap running_mean_arr( @@ -248,8 +250,8 @@ class BatchNormKernel Eigen::Array new_bias = bias_arr - mean_arr * inv_std * scale_arr; - switch (tensor_format) { - case TensorFormat::NCHW: { + switch (data_layout) { + case DataLayout::kNCHW: { EigenArrayMap y_arr(y->mutable_data(ctx.GetPlace()), sample_size, N * C); ConstEigenArrayMap x_arr(x->data(), sample_size, N * C); @@ -258,7 +260,7 @@ class BatchNormKernel } break; } - case TensorFormat::NHWC: { + case DataLayout::kNHWC: { EigenArrayMap(y->mutable_data(ctx.GetPlace()), C, N * sample_size) = (ConstEigenArrayMap(x->data(), C, N * sample_size).colwise() * @@ -268,7 +270,7 @@ class BatchNormKernel break; } default: - PADDLE_THROW("Unknown storage order: %d", tensor_format); + PADDLE_THROW("Unknown storage order: %d", data_layout); } } }; @@ -291,11 +293,11 @@ class BatchNormGradOp : public framework::OperatorWithKernel { PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("Bias")), ""); const auto x_dims = ctx->GetInputDim("X"); - const TensorFormat tensor_format = - StringToTensorFormat(ctx->Attrs().Get("tensor_format")); + const DataLayout data_layout = framework::StringToDataLayout( + ctx->Attrs().Get("data_layout")); const int C = - (tensor_format == TensorFormat::NCHW ? x_dims[1] - : x_dims[x_dims.size() - 1]); + (data_layout == DataLayout::kNCHW ? x_dims[1] + : x_dims[x_dims.size() - 1]); ctx->SetOutputDim(framework::GradVarName("X"), x_dims); ctx->SetOutputDim(framework::GradVarName("Scale"), {C}); @@ -303,7 +305,7 @@ class BatchNormGradOp : public framework::OperatorWithKernel { } protected: - framework::OpKernelType GetKernelType( + framework::OpKernelType GetActualKernelType( const framework::ExecutionContext &ctx) const override { const auto *var = ctx.InputVar(framework::GradVarName("Y")); if (var == nullptr) { @@ -334,9 +336,9 @@ class BatchNormGradKernel const auto *saved_mean = ctx.Input("SavedMean"); // SavedVariance have been reverted in forward operator const auto *saved_inv_variance = ctx.Input("SavedVariance"); - const std::string tensor_format_str = - ctx.Attr("tensor_format"); - const TensorFormat tensor_format = StringToTensorFormat(tensor_format_str); + const std::string data_layout_str = ctx.Attr("data_layout"); + const DataLayout data_layout = + framework::StringToDataLayout(data_layout_str); // Get the size for each dimension. // NCHW [batch_size, in_channels, in_height, in_width] @@ -345,8 +347,8 @@ class BatchNormGradKernel "The Input dim size should be between 2 and 5"); const int N = x_dims[0]; const int C = - (tensor_format == TensorFormat::NCHW ? 
x_dims[1] - : x_dims[x_dims.size() - 1]); + (data_layout == DataLayout::kNCHW ? x_dims[1] + : x_dims[x_dims.size() - 1]); const int sample_size = x->numel() / N / C; ConstEigenVectorArrayMap scale_arr(scale->data(), C); @@ -377,8 +379,8 @@ class BatchNormGradKernel const auto scale_inv_var_nhw = scale_arr * inv_var_arr / (N * sample_size); - switch (tensor_format) { - case TensorFormat::NCHW: { + switch (data_layout) { + case DataLayout::kNCHW: { ConstEigenArrayMap x_arr(x->data(), sample_size, N * C); ConstEigenArrayMap d_y_arr(d_y->data(), sample_size, N * C); EigenArrayMap d_x_arr(d_x->mutable_data(ctx.GetPlace()), @@ -401,7 +403,7 @@ class BatchNormGradKernel } break; } - case TensorFormat::NHWC: { + case DataLayout::kNHWC: { ConstEigenArrayMap x_arr(x->data(), C, N * sample_size); ConstEigenArrayMap d_y_arr(d_y->data(), C, N * sample_size); EigenArrayMap d_x_arr(d_x->mutable_data(ctx.GetPlace()), C, @@ -426,7 +428,7 @@ class BatchNormGradKernel break; } default: - PADDLE_THROW("Unknown storage order: %s", tensor_format_str); + PADDLE_THROW("Unknown storage order: %s", data_layout_str); } } }; diff --git a/paddle/operators/batch_norm_op.cu.cc b/paddle/operators/batch_norm_op.cu.cc index c7adc3d80e..3d17725ab4 100644 --- a/paddle/operators/batch_norm_op.cu.cc +++ b/paddle/operators/batch_norm_op.cu.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/operators/batch_norm_op.h" +#include "paddle/framework/data_layout.h" #include #include "paddle/operators/math/math_function.h" @@ -22,12 +23,12 @@ namespace paddle { namespace operators { using Tensor = framework::Tensor; +using DataLayout = framework::DataLayout; template using CudnnDataType = platform::CudnnDataType; -void ExtractNCWHD(const framework::DDim &dims, - const TensorFormat &tensor_format, int *N, int *C, int *H, - int *W, int *D) { +void ExtractNCWHD(const framework::DDim &dims, const DataLayout &data_layout, + int *N, int *C, int *H, int *W, int *D) { *N = dims[0]; if (dims.size() == 2) { *C = dims[1]; @@ -35,13 +36,13 @@ void ExtractNCWHD(const framework::DDim &dims, *W = 1; *D = 1; } else { - *C = tensor_format == TensorFormat::NCHW ? dims[1] : dims[dims.size() - 1]; - *H = tensor_format == TensorFormat::NCHW ? dims[2] : dims[1]; + *C = data_layout == DataLayout::kNCHW ? dims[1] : dims[dims.size() - 1]; + *H = data_layout == DataLayout::kNCHW ? dims[2] : dims[1]; *W = dims.size() > 3 - ? (tensor_format == TensorFormat::NCHW ? dims[3] : dims[2]) + ? (data_layout == DataLayout::kNCHW ? dims[3] : dims[2]) : 1; *D = dims.size() > 4 - ? (tensor_format == TensorFormat::NCHW ? dims[4] : dims[3]) + ? (data_layout == DataLayout::kNCHW ? dims[4] : dims[3]) : 1; } } @@ -52,13 +53,13 @@ class BatchNormKernel public: void Compute(const framework::ExecutionContext &ctx) const override { PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), - "It must use GPUPlace."); + "It must use CUDAPlace."); double epsilon = static_cast(ctx.Attr("epsilon")); const float momentum = ctx.Attr("momentum"); const bool is_test = ctx.Attr("is_test"); - const std::string tensor_format_str = - ctx.Attr("tensor_format"); - const TensorFormat tensor_format = StringToTensorFormat(tensor_format_str); + const std::string data_layout_str = ctx.Attr("data_layout"); + const DataLayout data_layout = + framework::StringToDataLayout(data_layout_str); // Get the size for each dimension. 
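// ExtractNCWHD above resolves the dimension order for either layout: // kNCHW takes C from dims[1], kNHWC from dims[dims.size() - 1], with // H, W and D shifted accordingly. The expected input shape is: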
// NCHW [batch_size, in_channels, in_height, in_width] @@ -67,7 +68,7 @@ class BatchNormKernel PADDLE_ENFORCE(x_dims.size() >= 2 && x_dims.size() <= 5, "The Input dim size should be between 2 and 5"); int N, C, H, W, D; - ExtractNCWHD(x_dims, tensor_format, &N, &C, &H, &W, &D); + ExtractNCWHD(x_dims, data_layout, &N, &C, &H, &W, &D); // ------------------- cudnn descriptors --------------------- cudnnTensorDescriptor_t data_desc_; @@ -93,7 +94,7 @@ class BatchNormKernel VLOG(1) << "Setting descriptors."; std::vector dims; std::vector strides; - if (tensor_format == TensorFormat::NCHW) { + if (data_layout == DataLayout::kNCHW) { dims = {N, C, H, W, D}; strides = {C * H * W * D, H * W * D, W * D, D, 1}; } else { @@ -178,11 +179,11 @@ class BatchNormGradKernel public: void Compute(const framework::ExecutionContext &ctx) const override { PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), - "It must use GPUPlace."); + "It must use CUDAPlace."); double epsilon = static_cast(ctx.Attr("epsilon")); - const std::string tensor_format_str = - ctx.Attr("tensor_format"); - const TensorFormat tensor_format = StringToTensorFormat(tensor_format_str); + const std::string data_layout_str = ctx.Attr("data_layout"); + const DataLayout data_layout = + framework::StringToDataLayout(data_layout_str); const auto *x = ctx.Input("X"); const auto *d_y = ctx.Input(framework::GradVarName("Y")); const auto *scale = ctx.Input("Scale"); @@ -192,7 +193,7 @@ class BatchNormGradKernel PADDLE_ENFORCE(x_dims.size() >= 2 && x_dims.size() <= 5, "The Input dim size should be between 2 and 5"); int N, C, H, W, D; - ExtractNCWHD(x_dims, tensor_format, &N, &C, &H, &W, &D); + ExtractNCWHD(x_dims, data_layout, &N, &C, &H, &W, &D); PADDLE_ENFORCE_EQ(scale->dims().size(), 1UL); PADDLE_ENFORCE_EQ(scale->dims()[0], C); @@ -219,7 +220,7 @@ class BatchNormGradKernel std::vector dims; std::vector strides; - if (tensor_format == TensorFormat::NCHW) { + if (data_layout == DataLayout::kNCHW) { dims = {N, C, H, W, D}; strides = {C * H * W * D, H * W * D, W * D, D, 1}; } else { diff --git a/paddle/operators/batch_norm_op.h b/paddle/operators/batch_norm_op.h index 8d99b68647..a817ef41fc 100644 --- a/paddle/operators/batch_norm_op.h +++ b/paddle/operators/batch_norm_op.h @@ -19,21 +19,6 @@ limitations under the License. */ namespace paddle { namespace operators { -enum TensorFormat { - NHWC = 0, - NCHW = 1, -}; - -inline TensorFormat StringToTensorFormat(const std::string& str) { - if (str == "NHWC" || str == "nhwc") { - return TensorFormat::NHWC; - } else if (str == "NCHW" || str == "nchw") { - return TensorFormat::NCHW; - } else { - PADDLE_THROW("Unknown storage order string: %s", str); - } -} - template class BatchNormKernel : public framework::OpKernel { public: diff --git a/paddle/operators/beam_search_decode_op.cc b/paddle/operators/beam_search_decode_op.cc index c796a0c5d0..52c28e7f53 100644 --- a/paddle/operators/beam_search_decode_op.cc +++ b/paddle/operators/beam_search_decode_op.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. 
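For reference, the framework::DataLayout helper that replaces the per-operator TensorFormat enum removed above can be sketched as follows. This is a minimal reconstruction from the hunks in this diff; the kAnyLayout member and the exact contents of paddle/framework/data_layout.h are assumptions.

    #include <string>
    #include "paddle/platform/enforce.h"

    namespace paddle {
    namespace framework {

    // Mirrors the removed TensorFormat enum, with the kNHWC/kNCHW spelling
    // used throughout the hunks above.
    enum class DataLayout {
      kNHWC = 0,
      kNCHW = 1,
      kAnyLayout = 2,  // assumed: a catch-all member for layout-agnostic ops
    };

    inline DataLayout StringToDataLayout(const std::string& str) {
      if (str == "NHWC" || str == "nhwc") {
        return DataLayout::kNHWC;
      } else if (str == "NCHW" || str == "nchw") {
        return DataLayout::kNCHW;
      }
      PADDLE_THROW("Unknown storage order string: %s", str);
    }

    }  // namespace framework
    }  // namespace paddle
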
*/ #include "paddle/operators/beam_search_decode_op.h" +#include "paddle/platform/device_context.h" namespace paddle { namespace operators { @@ -55,7 +56,10 @@ class BeamSearchDecodeOp : public framework::OperatorBase { const framework::AttributeMap& attrs) : OperatorBase(type, inputs, outputs, attrs) {} void Run(const framework::Scope& scope, - const platform::DeviceContext& dev_ctx) const override { + const platform::Place& dev_place) const override { + platform::DeviceContextPool& pool = platform::DeviceContextPool::Get(); + auto& dev_ctx = *pool.Borrow(dev_place); + framework::ExecutionContext ctx(*this, scope, dev_ctx); const LoDTensorArray* ids = ctx.Input("Ids"); @@ -83,9 +87,8 @@ class BeamSearchDecodeOp : public framework::OperatorBase { class BeamSearchDecodeOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - BeamSearchDecodeOpProtoMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + BeamSearchDecodeOpProtoMaker(OpProto* proto, OpAttrChecker* op_checker) + : framework::OpProtoAndCheckerMaker(proto, op_checker) { AddInput("Ids", "(LodTensorArray)" "score of the candidate words in each step"); @@ -120,13 +123,13 @@ class BeamSearchDecodeInferShape : public framework::InferShapeBase { class BeamSearchDecodeInferVarType : public framework::VarTypeInference { public: - void operator()(const framework::OpDescBind& op_desc, - framework::BlockDescBind* block) const override { + void operator()(const framework::OpDesc& op_desc, + framework::BlockDesc* block) const override { for (auto& o : op_desc.Output("SentenceIds")) { - block->Var(o)->SetType(framework::VarDesc::LOD_TENSOR); + block->Var(o)->SetType(framework::proto::VarDesc::LOD_TENSOR); } for (auto& o : op_desc.Output("SentenceScores")) { - block->Var(o)->SetType(framework::VarDesc::LOD_TENSOR); + block->Var(o)->SetType(framework::proto::VarDesc::LOD_TENSOR); } } }; diff --git a/paddle/operators/beam_search_op.cc b/paddle/operators/beam_search_op.cc index 8c3e2a303f..2e0513b37a 100644 --- a/paddle/operators/beam_search_op.cc +++ b/paddle/operators/beam_search_op.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
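The BeamSearchDecodeOp hunk above illustrates the new OperatorBase::Run contract that recurs throughout this diff: operators now receive a bare platform::Place and borrow the matching DeviceContext from a global pool, instead of being handed a context directly. The pattern, extracted for clarity with the names used in the hunks:

    void Run(const framework::Scope& scope,
             const platform::Place& dev_place) const override {
      // Look up the device context that owns this place; the pool replaces
      // the DeviceContext& parameter of the old signature.
      platform::DeviceContextPool& pool = platform::DeviceContextPool::Get();
      auto& dev_ctx = *pool.Borrow(dev_place);

      framework::ExecutionContext ctx(*this, scope, dev_ctx);
      // ... the operator body uses ctx / dev_ctx exactly as before ...
    }
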
*/ #include "paddle/operators/beam_search_op.h" @@ -153,8 +153,7 @@ bool BeamSearch::NextItemSet(std::vector *items) { class BeamSearchProtoAndCheckerMaker : public framework::OpProtoAndCheckerMaker { public: - BeamSearchProtoAndCheckerMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + BeamSearchProtoAndCheckerMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { // inputs and outputs stored in proto AddInput("pre_ids", "ids in previous step"); diff --git a/paddle/operators/beam_search_op.h b/paddle/operators/beam_search_op.h index cc556bfe42..08b551ef9b 100644 --- a/paddle/operators/beam_search_op.h +++ b/paddle/operators/beam_search_op.h @@ -189,7 +189,7 @@ class BeamSearchOp : public framework::OperatorBase { } void Run(const framework::Scope& scope, - const platform::DeviceContext& dev_ctx) const override { + const platform::Place& dev_place) const override { LOG(INFO) << "run beam search op"; auto ids_var = scope.FindVar(Input("ids")); auto scores_var = scope.FindVar(Input("scores")); diff --git a/paddle/operators/bilinear_tensor_product_op.cc b/paddle/operators/bilinear_tensor_product_op.cc index 217fd52366..7640147a12 100644 --- a/paddle/operators/bilinear_tensor_product_op.cc +++ b/paddle/operators/bilinear_tensor_product_op.cc @@ -65,8 +65,7 @@ class BilinearTensorProductOp : public framework::OperatorWithKernel { class BilinearTensorProductOpMaker : public framework::OpProtoAndCheckerMaker { public: - BilinearTensorProductOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker) + BilinearTensorProductOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "The first input of bilinear_tensor_product operator."); AddInput("Y", "The second input of bilinear_tensor_product operator."); diff --git a/paddle/operators/cast_op.cc b/paddle/operators/cast_op.cc index 42bff69a1e..446976edaf 100644 --- a/paddle/operators/cast_op.cc +++ b/paddle/operators/cast_op.cc @@ -1,16 +1,16 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ #include "paddle/operators/cast_op.h" #include "paddle/framework/op_registry.h" @@ -20,8 +20,7 @@ namespace operators { class CastOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - CastOpProtoMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + CastOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "The input tensor of cast op"); AddOutput("Out", "The output tensor of cast op"); @@ -53,14 +52,14 @@ class CastOpGradMaker : public framework::SingleGradOpDescMaker { using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; protected: - std::unique_ptr Apply() const override { - auto grad = new framework::OpDescBind(); + std::unique_ptr Apply() const override { + auto grad = new framework::OpDesc(); grad->SetType("cast"); grad->SetInput("X", OutputGrad("Out")); grad->SetOutput("Out", InputGrad("X")); grad->SetAttr("out_dtype", GetAttr("in_dtype")); grad->SetAttr("in_dtype", GetAttr("out_dtype")); - return std::unique_ptr(grad); + return std::unique_ptr(grad); } }; @@ -74,4 +73,5 @@ REGISTER_OP_WITH_KERNEL(cast, ops::CastOpGradMaker, ops::CastOpInferShape, REGISTER_OP_CPU_KERNEL(cast, ops::CastOpKernel, ops::CastOpKernel, ops::CastOpKernel, - ops::CastOpKernel); + ops::CastOpKernel, + ops::CastOpKernel); diff --git a/paddle/operators/cast_op.cu b/paddle/operators/cast_op.cu index 4681deaa62..d68bbe6e39 100644 --- a/paddle/operators/cast_op.cu +++ b/paddle/operators/cast_op.cu @@ -1,16 +1,16 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/cast_op.h" @@ -19,4 +19,5 @@ using CastOpKernel = paddle::operators::CastOpKernel; REGISTER_OP_CUDA_KERNEL(cast, CastOpKernel, CastOpKernel, - CastOpKernel, CastOpKernel); + CastOpKernel, CastOpKernel, + CastOpKernel); diff --git a/paddle/operators/cast_op.h b/paddle/operators/cast_op.h index a6773f13a8..9f39d91edd 100644 --- a/paddle/operators/cast_op.h +++ b/paddle/operators/cast_op.h @@ -1,16 +1,16 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
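The CastOpGradMaker hunk above encodes a simple identity: the gradient of a cast is the upstream gradient cast back to the source dtype, so the grad maker just emits a second cast op with in_dtype and out_dtype swapped. Restated with the template arguments that the surrounding text elides:

    std::unique_ptr<framework::OpDesc> Apply() const override {
      auto* grad = new framework::OpDesc();
      grad->SetType("cast");
      // d(cast(X)) flows from Out's gradient back into X's gradient ...
      grad->SetInput("X", OutputGrad("Out"));
      grad->SetOutput("Out", InputGrad("X"));
      // ... with the dtypes reversed relative to the forward op.
      grad->SetAttr("out_dtype", GetAttr("in_dtype"));
      grad->SetAttr("in_dtype", GetAttr("out_dtype"));
      return std::unique_ptr<framework::OpDesc>(grad);
    }
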
- You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once @@ -55,7 +55,7 @@ class CastOpKernel : public framework::OpKernel { auto* in = context.Input("X"); auto* out = context.Output("Out"); framework::VisitDataType( - static_cast(context.Attr("out_dtype")), + static_cast(context.Attr("out_dtype")), CastOpFunctor( in, out, context.template device_context())); } diff --git a/paddle/operators/chunk_eval_op.cc b/paddle/operators/chunk_eval_op.cc index 94127ab33e..a040404266 100644 --- a/paddle/operators/chunk_eval_op.cc +++ b/paddle/operators/chunk_eval_op.cc @@ -32,6 +32,13 @@ class ChunkEvalOp : public framework::OperatorWithKernel { "Output(Recall) of ChunkEvalOp should not be null."); PADDLE_ENFORCE(ctx->HasOutput("F1-Score"), "Output(F1-Score) of ChunkEvalOp should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("NumInferChunks"), + "Output(NumInferChunks) of ChunkEvalOp should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("NumLabelChunks"), + "Output(NumLabelChunks) of ChunkEvalOp should not be null."); + PADDLE_ENFORCE( + ctx->HasOutput("NumCorrectChunks"), + "Output(NumCorrectChunks) of ChunkEvalOp should not be null."); auto inference_dim = ctx->GetInputDim("Inference"); auto label_dim = ctx->GetInputDim("Label"); @@ -42,20 +49,22 @@ class ChunkEvalOp : public framework::OperatorWithKernel { ctx->SetOutputDim("Precision", {1}); ctx->SetOutputDim("Recall", {1}); ctx->SetOutputDim("F1-Score", {1}); + ctx->SetOutputDim("NumInferChunks", {1}); + ctx->SetOutputDim("NumLabelChunks", {1}); + ctx->SetOutputDim("NumCorrectChunks", {1}); } protected: - framework::OpKernelType GetKernelType( + framework::OpKernelType GetActualKernelType( const framework::ExecutionContext &ctx) const override { - return framework::OpKernelType(framework::DataType::FP32, + return framework::OpKernelType(framework::proto::DataType::FP32, ctx.device_context()); } }; class ChunkEvalOpMaker : public framework::OpProtoAndCheckerMaker { public: - ChunkEvalOpMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + ChunkEvalOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("Inference", "(Tensor, default: Tensor). " @@ -70,6 +79,16 @@ class ChunkEvalOpMaker : public framework::OpProtoAndCheckerMaker { "sensitivity) of chunks on the given mini-batch."); AddOutput("F1-Score", "(float). The evaluated F1-Score on the given mini-batch."); + AddOutput("NumInferChunks", + "(int64_t). The number of chunks in Inference on the given " + "mini-batch."); + AddOutput( + "NumLabelChunks", + "(int64_t). 
The number of chunks in Label on the given mini-batch."); + AddOutput( + "NumCorrectChunks", + "(int64_t). The number of chunks both in Inference and Label on the " + "given mini-batch."); AddAttr("num_chunk_types", "(int). The number of chunk type. See below for details."); AddAttr( diff --git a/paddle/operators/chunk_eval_op.h b/paddle/operators/chunk_eval_op.h index 9cd758a825..74ab435c86 100644 --- a/paddle/operators/chunk_eval_op.h +++ b/paddle/operators/chunk_eval_op.h @@ -111,9 +111,7 @@ class ChunkEvalKernel : public framework::OpKernel { std::vector label_segments; std::vector output_segments; std::set excluded_chunk_types; - int64_t num_output_segments = 0; - int64_t num_label_segments = 0; - int64_t num_correct = 0; + if (context.Attr("chunk_scheme") == "IOB") { num_tag_types = 2; tag_begin = 0; @@ -151,12 +149,24 @@ class ChunkEvalKernel : public framework::OpKernel { auto* precision = context.Output("Precision"); auto* recall = context.Output("Recall"); auto* f1 = context.Output("F1-Score"); + auto* num_infer_chunks = context.Output("NumInferChunks"); + auto* num_label_chunks = context.Output("NumLabelChunks"); + auto* num_correct_chunks = context.Output("NumCorrectChunks"); const int64_t* inference_data = inference->data(); const int64_t* label_data = label->data(); T* precision_data = precision->mutable_data(context.GetPlace()); T* racall_data = recall->mutable_data(context.GetPlace()); T* f1_data = f1->mutable_data(context.GetPlace()); + int64_t* num_infer_chunks_data = + num_infer_chunks->mutable_data(context.GetPlace()); + int64_t* num_label_chunks_data = + num_label_chunks->mutable_data(context.GetPlace()); + int64_t* num_correct_chunks_data = + num_correct_chunks->mutable_data(context.GetPlace()); + *num_infer_chunks_data = 0; + *num_label_chunks_data = 0; + *num_correct_chunks_data = 0; auto lod = label->lod(); PADDLE_ENFORCE_EQ(lod.size(), 1UL, "Only support one level sequence now."); @@ -166,17 +176,23 @@ class ChunkEvalKernel : public framework::OpKernel { for (int i = 0; i < num_sequences; ++i) { int seq_length = lod[0][i + 1] - lod[0][i]; EvalOneSeq(inference_data + lod[0][i], label_data + lod[0][i], seq_length, - output_segments, label_segments, num_output_segments, - num_label_segments, num_correct, num_chunk_types, - num_tag_types, other_chunk_type, tag_begin, tag_inside, - tag_end, tag_single, excluded_chunk_types); + output_segments, label_segments, *num_infer_chunks_data, + *num_label_chunks_data, *num_correct_chunks_data, + num_chunk_types, num_tag_types, other_chunk_type, tag_begin, + tag_inside, tag_end, tag_single, excluded_chunk_types); } - *precision_data = !num_output_segments ? 0 : static_cast(num_correct) / - num_output_segments; - *racall_data = !num_label_segments ? 0 : static_cast(num_correct) / - num_label_segments; - *f1_data = !num_correct ? 0 : 2 * (*precision_data) * (*racall_data) / - ((*precision_data) + (*racall_data)); + *precision_data = !(*num_infer_chunks_data) + ? 0 + : static_cast(*num_correct_chunks_data) / + (*num_infer_chunks_data); + *racall_data = !(*num_label_chunks_data) + ? 0 + : static_cast(*num_correct_chunks_data) / + (*num_label_chunks_data); + *f1_data = !(*num_correct_chunks_data) + ? 
0 + : 2 * (*precision_data) * (*racall_data) / + ((*precision_data) + (*racall_data)); } void EvalOneSeq(const int64_t* output, const int64_t* label, int length, diff --git a/paddle/operators/clip_by_norm_op.cc b/paddle/operators/clip_by_norm_op.cc index 0b7975a63f..b90921d79b 100644 --- a/paddle/operators/clip_by_norm_op.cc +++ b/paddle/operators/clip_by_norm_op.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/clip_by_norm_op.h" @@ -37,8 +37,7 @@ class ClipByNormOp : public framework::OperatorWithKernel { class ClipByNormOpMaker : public framework::OpProtoAndCheckerMaker { public: - ClipByNormOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker) + ClipByNormOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "(Tensor) The input of clip_by_norm op." diff --git a/paddle/operators/clip_by_norm_op.cu b/paddle/operators/clip_by_norm_op.cu index acd7543823..cbf8fa4413 100644 --- a/paddle/operators/clip_by_norm_op.cu +++ b/paddle/operators/clip_by_norm_op.cu @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
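The chunk_eval hunks above thread three new integer outputs through the kernel and derive the metrics from them. The arithmetic, restated as a self-contained helper (a hypothetical function, with the same zero-denominator guards as the kernel):

    #include <cstdint>

    struct ChunkMetrics {
      double precision;  // correct / inferred
      double recall;     // correct / labeled
      double f1;         // harmonic mean of the two
    };

    ChunkMetrics ComputeChunkMetrics(int64_t num_infer_chunks,
                                     int64_t num_label_chunks,
                                     int64_t num_correct_chunks) {
      ChunkMetrics m{0.0, 0.0, 0.0};
      if (num_infer_chunks) {
        m.precision =
            static_cast<double>(num_correct_chunks) / num_infer_chunks;
      }
      if (num_label_chunks) {
        m.recall = static_cast<double>(num_correct_chunks) / num_label_chunks;
      }
      if (num_correct_chunks) {  // implies precision > 0 and recall > 0
        m.f1 = 2 * m.precision * m.recall / (m.precision + m.recall);
      }
      return m;
    }
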
*/ #include "paddle/operators/clip_by_norm_op.h" diff --git a/paddle/operators/clip_by_norm_op.h b/paddle/operators/clip_by_norm_op.h index d8db1566b0..87956a707c 100644 --- a/paddle/operators/clip_by_norm_op.h +++ b/paddle/operators/clip_by_norm_op.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once diff --git a/paddle/operators/clip_op.cc b/paddle/operators/clip_op.cc index 6092212de4..573bb9c7df 100644 --- a/paddle/operators/clip_op.cc +++ b/paddle/operators/clip_op.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/clip_op.h" @@ -38,7 +38,7 @@ class ClipOp : public framework::OperatorWithKernel { template class ClipOpMaker : public framework::OpProtoAndCheckerMaker { public: - ClipOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) + ClipOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "(Tensor)The input of clip op." diff --git a/paddle/operators/clip_op.cu b/paddle/operators/clip_op.cu index bb7dcc671a..5ccbc96434 100644 --- a/paddle/operators/clip_op.cu +++ b/paddle/operators/clip_op.cu @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/clip_op.h" diff --git a/paddle/operators/clip_op.h b/paddle/operators/clip_op.h index 0c40797410..51db185dff 100644 --- a/paddle/operators/clip_op.h +++ b/paddle/operators/clip_op.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once diff --git a/paddle/operators/compare_op.cc b/paddle/operators/compare_op.cc index bf7e883681..44665b7872 100644 --- a/paddle/operators/compare_op.cc +++ b/paddle/operators/compare_op.cc @@ -1,16 +1,16 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
*/ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/compare_op.h" #include "paddle/framework/op_registry.h" @@ -20,8 +20,7 @@ namespace operators { template class CompareOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - CompareOpProtoMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + CompareOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { OpComment comment; AddInput("X", @@ -67,9 +66,9 @@ class CompareOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - framework::OpKernelType GetKernelType( + framework::OpKernelType GetActualKernelType( const framework::ExecutionContext &ctx) const override { - framework::OpKernelType kt = OperatorWithKernel::GetKernelType(ctx); + framework::OpKernelType kt = OperatorWithKernel::GetActualKernelType(ctx); // CompareOp kernel's device type is decided by input tensor place kt.place_ = ctx.Input("X")->place(); return kt; diff --git a/paddle/operators/compare_op.cu b/paddle/operators/compare_op.cu index 596a878bcf..26049271be 100644 --- a/paddle/operators/compare_op.cu +++ b/paddle/operators/compare_op.cu @@ -1,16 +1,16 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/compare_op.h" diff --git a/paddle/operators/compare_op.h b/paddle/operators/compare_op.h index a56536e155..567e89c0a7 100644 --- a/paddle/operators/compare_op.h +++ b/paddle/operators/compare_op.h @@ -1,16 +1,16 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once #include diff --git a/paddle/operators/concat_op.cc b/paddle/operators/concat_op.cc index cf522d6921..32b61edfd0 100644 --- a/paddle/operators/concat_op.cc +++ b/paddle/operators/concat_op.cc @@ -58,7 +58,7 @@ class ConcatOp : public framework::OperatorWithKernel { class ConcatOpMaker : public framework::OpProtoAndCheckerMaker { public: - ConcatOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) + ConcatOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input tensors of concat operator.").AsDuplicable(); AddOutput("Out", "Output tensor of concat operator."); @@ -98,8 +98,8 @@ class ConcatOpGrad : public framework::OperatorWithKernel { } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP(concat, ops::ConcatOp, ops::ConcatOpMaker, concat_grad, - ops::ConcatOpGrad) +REGISTER_OP_EX(concat, ops::ConcatOp, ops::ConcatOpMaker, concat_grad, + ops::ConcatOpGrad, false) REGISTER_OP_CPU_KERNEL(concat, ops::ConcatKernel) REGISTER_OP_CPU_KERNEL(concat_grad, diff --git a/paddle/operators/cond_op.cc b/paddle/operators/cond_op.cc index b809bdc3a0..455fbd8ca3 100644 --- a/paddle/operators/cond_op.cc +++ b/paddle/operators/cond_op.cc @@ -13,9 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "paddle/operators/cond_op.h" - #include "paddle/operators/gather.h" #include "paddle/operators/scatter.h" +#include "paddle/platform/device_context.h" namespace paddle { namespace operators { @@ -193,20 +193,22 @@ void CondOp::MergeDataFromSubnet(const framework::Scope& scope, } } -void CondOp::Run(const Scope& scope, - const platform::DeviceContext& dev_ctx) const { +void CondOp::Run(const Scope& scope, const platform::Place& place) const { + // get device context from pool + platform::DeviceContextPool& pool = platform::DeviceContextPool::Get(); + auto& dev_ctx = *pool.Borrow(place); + PrepareDataForSubnet(scope, dev_ctx); std::vector& sub_scopes = GetSubScopes(scope); for (int i = 0; i < BRANCH_NUM; ++i) { - sub_net_op_[i]->Run(*sub_scopes[i], dev_ctx); + sub_net_op_[i]->Run(*sub_scopes[i], place); } MergeDataFromSubnet(scope, dev_ctx); } class CondOpProtoAndCheckerMaker : public framework::OpProtoAndCheckerMaker { public: - CondOpProtoAndCheckerMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker) + CondOpProtoAndCheckerMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("Cond", "The condition, which is a bool vector"); AddInput("Xs", "Inputs of Subnets").AsDuplicable(); diff --git a/paddle/operators/cond_op.h b/paddle/operators/cond_op.h index 93121fb31b..7dcdc47e0b 100644 --- a/paddle/operators/cond_op.h +++ b/paddle/operators/cond_op.h @@ -78,7 +78,7 @@ class CondOp : public framework::OperatorBase { } void Run(const framework::Scope& scope, - const platform::DeviceContext& dev_ctx) const override; + const platform::Place& place) const override; private: const int TRUE_BRANCH = 0; diff --git a/paddle/operators/conditional_block_op.cc b/paddle/operators/conditional_block_op.cc index 03c58a7eab..3cae61a438 100644 --- a/paddle/operators/conditional_block_op.cc +++ b/paddle/operators/conditional_block_op.cc @@ -1,16 +1,16 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ #include #include "paddle/framework/executor.h" #include "paddle/framework/op_registry.h" @@ -51,7 +51,7 @@ class ConditionalBlockOp : public ConditionalOp { const framework::AttributeMap &attrs) : ConditionalOp(type, inputs, outputs, attrs) {} void Run(const framework::Scope &scope, - const platform::DeviceContext &dev_ctx) const override { + const platform::Place &dev_place) const override { auto xs = InputTensors(scope); bool need_run = std::all_of( xs.begin(), xs.end(), @@ -65,8 +65,8 @@ class ConditionalBlockOp : public ConditionalOp { scopes->front() = &scope.NewScope(); auto &cur_scope = *scopes->front(); - auto *block = Attr("block"); - framework::Executor exec(dev_ctx); + framework::Executor exec(dev_place); + auto *block = Attr("sub_block"); exec.Run(*block->Program(), &cur_scope, block->ID(), false); } } @@ -74,8 +74,7 @@ class ConditionalBlockOp : public ConditionalOp { class ConditionalBlockOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - ConditionalBlockOpProtoMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + ConditionalBlockOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "The conditional variable of this operator. If X is empty, the " @@ -87,8 +86,8 @@ class ConditionalBlockOpProtoMaker : public framework::OpProtoAndCheckerMaker { "(std::vector) The step scope of conditional block. To " "unify the conditional block, rnn and while op, the type of " "scope is std::vector"); - AddAttr( - "block", "The step block of conditional block operator"); + AddAttr( + "sub_block", "The step block of conditional block operator"); AddComment(R"DOC(Conditional block operator Run the sub-block if X is not empty. Params is the other inputs and Out is the @@ -105,7 +104,7 @@ class ConditionalBlockGradOp : public ConditionalOp { const framework::AttributeMap &attrs) : ConditionalOp(type, inputs, outputs, attrs) {} void Run(const framework::Scope &scope, - const platform::DeviceContext &dev_ctx) const override { + const platform::Place &dev_place) const override { auto xs = this->InputTensors(scope); bool need_run = std::all_of( xs.begin(), xs.end(), @@ -117,21 +116,21 @@ class ConditionalBlockGradOp : public ConditionalOp { auto &scopes = scope_var->Get>(); framework::Scope &cur_scope = *scopes[0]; - auto *block = Attr("block"); - framework::Executor exec(dev_ctx); + framework::Executor exec(dev_place); + auto *block = Attr("sub_block"); exec.Run(*block->Program(), &cur_scope, block->ID(), false); - AssignLocalGradientToGlobal(dev_ctx, cur_scope, Inputs("Params"), + AssignLocalGradientToGlobal(dev_place, cur_scope, Inputs("Params"), Outputs(framework::GradVarName("Params"))); - AssignLocalGradientToGlobal(dev_ctx, cur_scope, Inputs("X"), + AssignLocalGradientToGlobal(dev_place, cur_scope, Inputs("X"), Outputs(framework::GradVarName("X"))); } } private: void AssignLocalGradientToGlobal( - const platform::DeviceContext &dev_ctx, const framework::Scope &cur_scope, + const platform::Place &place, const framework::Scope &cur_scope, const std::vector &p_names, const std::vector &pg_names) const { for (size_t i = 0; i < p_names.size(); ++i) { @@ -145,7 +144,7 @@ class ConditionalBlockGradOp : public ConditionalOp { auto assign = framework::OpRegistry::CreateOp( "assign", {{"X", {new_in_grad_name}}}, {{"Out", {out_grad_name}}}, framework::AttributeMap{}); - assign->Run(cur_scope, dev_ctx); + assign->Run(cur_scope, place); cur_scope.Rename(new_in_grad_name, in_grad_name); } } @@ -171,18 
+170,19 @@ class ConditionalBlockGradMaker : public framework::SingleGradOpDescMaker { using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; protected: - std::unique_ptr Apply() const override { - auto grad_op = new framework::OpDescBind(); + std::unique_ptr Apply() const override { + auto grad_op = new framework::OpDesc(); grad_op->SetType("conditional_block_grad"); grad_op->SetInput("X", Input("X")); grad_op->SetInput("Params", Input("Params")); grad_op->SetInput("Out", Output("Out")); grad_op->SetInput(framework::GradVarName("Out"), OutputGrad("Out")); grad_op->SetInput("Scope", Output("Scope")); - grad_op->SetOutput(framework::GradVarName("X"), InputGrad("X")); - grad_op->SetOutput(framework::GradVarName("Params"), InputGrad("Params")); - grad_op->SetBlockAttr("block", *this->grad_block_[0]); - return std::unique_ptr(grad_op); + grad_op->SetOutput(framework::GradVarName("X"), InputGrad("X", false)); + grad_op->SetOutput(framework::GradVarName("Params"), + InputGrad("Params", false)); + grad_op->SetBlockAttr("sub_block", *this->grad_block_[0]); + return std::unique_ptr(grad_op); } }; diff --git a/paddle/operators/conv_cudnn_op.cc b/paddle/operators/conv_cudnn_op.cc index 008bf01885..84d9ce1973 100644 --- a/paddle/operators/conv_cudnn_op.cc +++ b/paddle/operators/conv_cudnn_op.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/conv_op.h" @@ -19,8 +19,7 @@ namespace operators { class CudnnConv2DOpMaker : public Conv2DOpMaker { public: - CudnnConv2DOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker) + CudnnConv2DOpMaker(OpProto* proto, OpAttrChecker* op_checker) : Conv2DOpMaker(proto, op_checker) { AddAttr("workspace_size_MB", "workspace size for cudnn, in MB, " @@ -34,8 +33,7 @@ class CudnnConv2DOpMaker : public Conv2DOpMaker { class CudnnConv3DOpMaker : public Conv3DOpMaker { public: - CudnnConv3DOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker) + CudnnConv3DOpMaker(OpProto* proto, OpAttrChecker* op_checker) : Conv3DOpMaker(proto, op_checker) { AddAttr("workspace_size_MB", "workspace size for cudnn, in MB, " diff --git a/paddle/operators/conv_cudnn_op.cu.cc b/paddle/operators/conv_cudnn_op.cu.cc index 3da0a9001a..0aa7dd48ca 100644 --- a/paddle/operators/conv_cudnn_op.cu.cc +++ b/paddle/operators/conv_cudnn_op.cu.cc @@ -1,16 +1,16 @@ -/* Copyright (c) 2016 PaddlePaddle Authors All Rights Reserve. 
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/framework/eigen.h" #include "paddle/framework/op_registry.h" @@ -36,7 +36,7 @@ class CudnnConvOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), - "It must use GPUPlace."); + "It must use CUDAPlace."); auto* input = ctx.Input("Input"); auto* filter = ctx.Input("Filter"); auto* output = ctx.Output("Output"); @@ -130,7 +130,7 @@ class CudnnConvOpKernel : public framework::OpKernel { handle, cudnn_input_desc, cudnn_filter_desc, cudnn_conv_desc, cudnn_output_desc, algo, &workspace_size_in_bytes)); // Allocate on GPU memory - platform::GPUPlace gpu = boost::get(ctx.GetPlace()); + platform::CUDAPlace gpu = boost::get(ctx.GetPlace()); cudnn_workspace = paddle::memory::Alloc(gpu, workspace_size_in_bytes); // ------------------- cudnn conv forward --------------------- T alpha = 1.0f, beta = 0.0f; @@ -151,7 +151,7 @@ class CudnnConvGradOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), - "It must use GPUPlace."); + "It must use CUDAPlace."); auto input = ctx.Input("Input"); auto filter = ctx.Input("Filter"); auto output_grad = ctx.Input(framework::GradVarName("Output")); @@ -277,7 +277,7 @@ class CudnnConvGradOpKernel : public framework::OpKernel { // ------------------- cudnn conv workspace --------------------- // Already on GPU void* cudnn_workspace = nullptr; - platform::GPUPlace gpu = boost::get(ctx.GetPlace()); + platform::CUDAPlace gpu = boost::get(ctx.GetPlace()); cudnn_workspace = paddle::memory::Alloc(gpu, workspace_size_in_bytes); // ------------------- cudnn conv backward data --------------------- T alpha = 1.0f, beta = 0.0f; @@ -315,6 +315,10 @@ class CudnnConvGradOpKernel : public framework::OpKernel { } // namespace operators } // namespace paddle +REGISTER_OP_KERNEL(conv2d, CUDNN, paddle::platform::CUDAPlace, + paddle::operators::CudnnConvOpKernel, + paddle::operators::CudnnConvOpKernel); + REGISTER_OP_CUDA_KERNEL(conv2d_cudnn, paddle::operators::CudnnConvOpKernel, paddle::operators::CudnnConvOpKernel); diff --git a/paddle/operators/conv_op.cc b/paddle/operators/conv_op.cc index 7ef805fd44..ab52a41b53 100644 --- a/paddle/operators/conv_op.cc +++ b/paddle/operators/conv_op.cc @@ -1,16 +1,16 @@ /* Copyright (c) 
2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/conv_op.h" @@ -66,8 +66,7 @@ void ConvOp::InferShape(framework::InferShapeContext* ctx) const { ctx->SetOutputDim("Output", framework::make_ddim(output_shape)); } -Conv2DOpMaker::Conv2DOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker) +Conv2DOpMaker::Conv2DOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput( "Input", @@ -138,8 +137,7 @@ $$ )DOC"); } -Conv3DOpMaker::Conv3DOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker) +Conv3DOpMaker::Conv3DOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput( "Input", diff --git a/paddle/operators/conv_op.cu.cc b/paddle/operators/conv_op.cu.cc index 38615a8bef..4f942444f3 100644 --- a/paddle/operators/conv_op.cu.cc +++ b/paddle/operators/conv_op.cu.cc @@ -1,16 +1,16 @@ -/* Copyright (c) 2016 PaddlePaddle Authors All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/conv_op.h" diff --git a/paddle/operators/conv_op.h b/paddle/operators/conv_op.h index 749258183b..83786e2329 100644 --- a/paddle/operators/conv_op.h +++ b/paddle/operators/conv_op.h @@ -50,14 +50,12 @@ inline bool IsExpand(std::vector& filter_dim, // operator implementations can reuse the code. 
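ConvOp::InferShape above builds output_shape one spatial dimension at a time; the rule it applies is the standard convolution output-size formula. A sketch, with a hypothetical helper name and an assumed signature:

    // output = floor((input + 2*padding - dilated_filter) / stride) + 1,
    // where the dilated filter spans dilation*(filter-1)+1 input elements.
    inline int ConvOutputSize(int input_size, int filter_size, int dilation,
                              int padding, int stride) {
      const int dilated_filter_size = dilation * (filter_size - 1) + 1;
      return (input_size + 2 * padding - dilated_filter_size) / stride + 1;
    }
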
class Conv2DOpMaker : public framework::OpProtoAndCheckerMaker { public: - Conv2DOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker); + Conv2DOpMaker(OpProto* proto, OpAttrChecker* op_checker); }; class Conv3DOpMaker : public framework::OpProtoAndCheckerMaker { public: - Conv3DOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker); + Conv3DOpMaker(OpProto* proto, OpAttrChecker* op_checker); }; class ConvOp : public framework::OperatorWithKernel { @@ -261,8 +259,12 @@ class GemmConvGradKernel : public framework::OpKernel { if (input_grad) { input_grad->mutable_data(context.GetPlace()); - set_zero(dev_ctx, input_grad, static_cast(0)); + // if is_expand is false, the operation of set_zero is unnecessary, + // because math::matmul will reset input_grad. + if (is_expand) { + set_zero(dev_ctx, input_grad, static_cast(0)); + } math::Col2VolFunctor col2vol; math::Col2ImFunctor col2im; diff --git a/paddle/operators/conv_shift_op.cc b/paddle/operators/conv_shift_op.cc index a4150a5664..106b68a0a0 100644 --- a/paddle/operators/conv_shift_op.cc +++ b/paddle/operators/conv_shift_op.cc @@ -1,16 +1,16 @@ -/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/conv_shift_op.h" #include "paddle/framework/eigen.h" @@ -75,8 +75,7 @@ class ConvShiftGradOp : public framework::OperatorWithKernel { class ConvShiftOpMaker : public framework::OpProtoAndCheckerMaker { public: - ConvShiftOpMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + ConvShiftOpMaker(OpProto *proto, OpAttrChecker *op_checker) : framework::OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "(Tensor, default Tensor), a 2-D tensor with shape B x M, " diff --git a/paddle/operators/conv_shift_op.cu b/paddle/operators/conv_shift_op.cu index f7ca82ce26..cf7abc196e 100644 --- a/paddle/operators/conv_shift_op.cu +++ b/paddle/operators/conv_shift_op.cu @@ -1,16 +1,16 @@ -/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
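The GemmConvGradKernel hunk above skips set_zero because math::matmul overwrites, rather than accumulates into, input_grad on the non-expanded path; explicit zeroing is only needed when the im2col/vol2col expansion runs. The IsExpand test, whose declaration opens the conv_op.h hunk above with its template arguments elided, amounts to the following sketch (indexing assumes the [out_c, in_c, spatial...] filter layout):

    #include <vector>

    inline bool IsExpand(const std::vector<int64_t>& filter_dim,
                         const std::vector<int>& strides,
                         const std::vector<int>& paddings,
                         const std::vector<int>& dilations) {
      bool filter_1 = true, strides_1 = true;
      bool padding_0 = true, dilation_1 = true;
      for (size_t j = 0; j < strides.size(); ++j) {
        filter_1 = filter_1 && (static_cast<int>(filter_dim[j + 2]) == 1);
        strides_1 = strides_1 && (strides[j] == 1);
        padding_0 = padding_0 && (paddings[j] == 0);
        dilation_1 = dilation_1 && (dilations[j] == 1);
      }
      // Expansion (and the zeroing above) is needed unless the filter is 1x1
      // with unit stride, zero padding and unit dilation.
      return !(filter_1 && strides_1 && padding_0 && dilation_1);
    }
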
+You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/conv_shift_op.h" #include "paddle/operators/math/math_function.h" diff --git a/paddle/operators/conv_shift_op.h b/paddle/operators/conv_shift_op.h index 1a70b38a0d..6781d87ef0 100644 --- a/paddle/operators/conv_shift_op.h +++ b/paddle/operators/conv_shift_op.h @@ -1,16 +1,16 @@ -/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once #include "paddle/framework/op_registry.h" diff --git a/paddle/operators/conv_transpose_cudnn_op.cc b/paddle/operators/conv_transpose_cudnn_op.cc index 4cb6a2ccff..2e5333a265 100644 --- a/paddle/operators/conv_transpose_cudnn_op.cc +++ b/paddle/operators/conv_transpose_cudnn_op.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
*/ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/conv_transpose_op.h" @@ -19,11 +19,8 @@ namespace operators { class CudnnConv2DTransposeOpMaker : public Conv2DTransposeOpMaker { public: - CudnnConv2DTransposeOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker) + CudnnConv2DTransposeOpMaker(OpProto* proto, OpAttrChecker* op_checker) : Conv2DTransposeOpMaker(proto, op_checker) { - AddAttr>("dilations", "dilations of convolution operator.") - .SetDefault({1, 1}); AddAttr("workspace_size_MB", "workspace size for cudnn, in MB, " "workspace is a section of GPU memory which will be " @@ -36,11 +33,8 @@ class CudnnConv2DTransposeOpMaker : public Conv2DTransposeOpMaker { class CudnnConv3DTransposeOpMaker : public Conv3DTransposeOpMaker { public: - CudnnConv3DTransposeOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker) + CudnnConv3DTransposeOpMaker(OpProto* proto, OpAttrChecker* op_checker) : Conv3DTransposeOpMaker(proto, op_checker) { - AddAttr>("dilations", "dilations of convolution operator.") - .SetDefault({1, 1, 1}); AddAttr("workspace_size_MB", "workspace size for cudnn, in MB, " "workspace is a section of GPU memory which will be " diff --git a/paddle/operators/conv_transpose_cudnn_op.cu.cc b/paddle/operators/conv_transpose_cudnn_op.cu.cc index f0297f6c40..fc37776ba1 100644 --- a/paddle/operators/conv_transpose_cudnn_op.cu.cc +++ b/paddle/operators/conv_transpose_cudnn_op.cu.cc @@ -1,16 +1,16 @@ -/* Copyright (c) 2016 PaddlePaddle Authors All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ #include "paddle/framework/eigen.h" #include "paddle/framework/op_registry.h" @@ -35,7 +35,7 @@ class CudnnConvTransposeOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), - "It must use GPUPlace."); + "It must use CUDAPlace."); auto* input = ctx.Input("Input"); auto* filter = ctx.Input("Filter"); auto* output = ctx.Output("Output"); @@ -100,7 +100,7 @@ class CudnnConvTransposeOpKernel : public framework::OpKernel { cudnn_output_desc, algo, &workspace_size_in_bytes)); // Allocate on GPU memory - platform::GPUPlace gpu = boost::get(ctx.GetPlace()); + platform::CUDAPlace gpu = boost::get(ctx.GetPlace()); cudnn_workspace = paddle::memory::Alloc(gpu, workspace_size_in_bytes); // ------------------- cudnn conv transpose forward --------------------- @@ -120,7 +120,7 @@ class CudnnConvTransposeGradOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), - "It must use GPUPlace."); + "It must use CUDAPlace."); auto input = ctx.Input("Input"); auto filter = ctx.Input("Filter"); auto output_grad = ctx.Input(framework::GradVarName("Output")); @@ -201,7 +201,7 @@ class CudnnConvTransposeGradOpKernel : public framework::OpKernel { // ------------------- cudnn conv workspace --------------------- // Already on GPU void* cudnn_workspace = nullptr; - platform::GPUPlace gpu = boost::get(ctx.GetPlace()); + platform::CUDAPlace gpu = boost::get(ctx.GetPlace()); cudnn_workspace = paddle::memory::Alloc(gpu, workspace_size_in_bytes); // ------------------- cudnn conv backward data --------------------- // FIXME(typhoonzero): template type T may not be the same as cudnn call. diff --git a/paddle/operators/conv_transpose_op.cc b/paddle/operators/conv_transpose_op.cc index ca063e94bb..74636d138f 100644 --- a/paddle/operators/conv_transpose_op.cc +++ b/paddle/operators/conv_transpose_op.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
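The workspace handling in the two kernels above follows cuDNN's usual three-step pattern: query the scratch size for the chosen algorithm, allocate that much device memory (memory::Alloc on the CUDAPlace), then pass the buffer to the execution call. A self-contained sketch with stand-in functions (the cudnnGet*WorkspaceSize and execution calls are mocked, not real cuDNN signatures):

#include <cstdio>
#include <cstdlib>

// Stand-ins for the cuDNN size query and execution calls.
size_t QueryWorkspaceSize() { return static_cast<size_t>(64) << 20; }
void RunConvTranspose(void* workspace, size_t bytes) {
  std::printf("conv transpose using %zu scratch bytes\n", bytes);
}

int main() {
  size_t bytes = QueryWorkspaceSize();   // 1. ask how much scratch is needed
  void* workspace = std::malloc(bytes);  // 2. allocate (memory::Alloc in the kernel)
  RunConvTranspose(workspace, bytes);    // 3. hand the buffer to the library call
  std::free(workspace);
  return 0;
}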
*/ #include "paddle/operators/conv_transpose_op.h" @@ -29,6 +29,7 @@ void ConvTransposeOp::InferShape(framework::InferShapeContext* ctx) const { auto filter_dims = ctx->GetInputDim("Filter"); std::vector<int> strides = ctx->Attrs().Get<std::vector<int>>("strides"); std::vector<int> paddings = ctx->Attrs().Get<std::vector<int>>("paddings"); + std::vector<int> dilations = ctx->Attrs().Get<std::vector<int>>("dilations"); PADDLE_ENFORCE(in_dims.size() == 4 || in_dims.size() == 5, "ConvTransposeOp input should be 4-D or 5-D tensor."); @@ -41,20 +42,24 @@ PADDLE_ENFORCE_EQ(paddings.size(), strides.size(), "ConvTransposeOp paddings dimension and strides " "dimension should be the same."); + PADDLE_ENFORCE_EQ(paddings.size(), dilations.size(), + "ConvTransposeOp paddings dimension and dilations " + "dimension should be the same."); PADDLE_ENFORCE_EQ(in_dims[1], filter_dims[0], "In ConvTransposeOp, the input channel should be the same " "as the number of filters."); std::vector<int64_t> output_shape({in_dims[0], filter_dims[1]}); for (size_t i = 0; i < strides.size(); ++i) { + auto filter_extent = dilations[i] * (filter_dims[i + 2] - 1) + 1; output_shape.push_back((in_dims[i + 2] - 1) * strides[i] - 2 * paddings[i] + - filter_dims[i + 2]); + filter_extent); } ctx->SetOutputDim("Output", framework::make_ddim(output_shape)); } -Conv2DTransposeOpMaker::Conv2DTransposeOpMaker( - framework::OpProto* proto, framework::OpAttrChecker* op_checker) +Conv2DTransposeOpMaker::Conv2DTransposeOpMaker(OpProto* proto, + OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput( "Input", @@ -73,6 +78,12 @@ Conv2DTransposeOpMaker::Conv2DTransposeOpMaker( AddOutput("Output", "(Tensor) The output tensor of convolution transpose operator. " "The format of output tensor is also NCHW."); + + AddAttr<std::vector<int>>("dilations", + "(vector<int> default:{1, 1}), the " + "dilations(h_dilation, w_dilation) of convolution " + "transpose operator.") + .SetDefault({1, 1}); AddAttr<std::vector<int>>( "strides", "(vector<int> default:{1, 1}), the strides(h_stride, w_stride) of " @@ -87,7 +98,7 @@ Convolution2D Transpose Operator. The convolution transpose operation calculates the output based on the input, filter -and strides, paddings, groups parameters. The size of each dimension of the +and dilations, strides, paddings, groups parameters. The size of each dimension of the parameters is checked in the infer-shape. Input(Input) and output(Output) are in NCHW format. Where N is batchsize, C is the number of channels, H is the height of the feature, and W is the width of the feature. @@ -112,8 +123,8 @@ Example: )DOC"); } -Conv3DTransposeOpMaker::Conv3DTransposeOpMaker( - framework::OpProto* proto, framework::OpAttrChecker* op_checker) +Conv3DTransposeOpMaker::Conv3DTransposeOpMaker(OpProto* proto, + OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("Input", "(Tensor) The input tensor of convolution transpose operator."
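The filter_extent term above is what extends the shape inference to dilated filters: a kernel of size k with dilation d covers d * (k - 1) + 1 input positions, so each output extent becomes (in - 1) * stride - 2 * pad + d * (k - 1) + 1, which reduces to the old formula when d == 1. A small standalone check of the arithmetic:

#include <cassert>

// Output extent of a transposed convolution along one spatial axis.
int ConvTransposeOutputSize(int in, int k, int stride, int pad, int dilation) {
  int filter_extent = dilation * (k - 1) + 1;
  return (in - 1) * stride - 2 * pad + filter_extent;
}

int main() {
  assert(ConvTransposeOutputSize(8, 3, 2, 1, 1) == 15);  // dilation 1: old formula
  assert(ConvTransposeOutputSize(8, 3, 2, 1, 2) == 17);  // dilation 2 widens k from 3 to 5
  return 0;
}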
@@ -136,6 +147,13 @@ Conv3DTransposeOpMaker::Conv3DTransposeOpMaker( "Where N is batch size, C is " "the number of channels, D is the depth of the feature, H is the " "height of the feature, and W is the width of the feature."); + + AddAttr<std::vector<int>>( + "dilations", + "(vector<int> default:{1, 1, 1}), the " + "dilations(d_dilation, h_dilation, w_dilation) of convolution " + "transpose operator.") + .SetDefault({1, 1, 1}); AddAttr<std::vector<int>>("strides", "(vector<int> default:{1, 1, 1}), the " "strides{d_stride, h_stride, w_stride} of " @@ -149,7 +167,7 @@ Convolution3D Transpose Operator. The convolution transpose operation calculates the output based on the input, filter -and strides, paddings, groups parameters. The size of each dimension of the +and dilations, strides, paddings, groups parameters. The size of each dimension of the parameters is checked in the infer-shape. Input(Input) and output(Output) are in NCDHW format. Where N is batch size, C is the number of channels, D is the depth of the feature, H is the height of the feature, diff --git a/paddle/operators/conv_transpose_op.cu.cc b/paddle/operators/conv_transpose_op.cu.cc index b91ebd7922..f1d827c606 100644 --- a/paddle/operators/conv_transpose_op.cu.cc +++ b/paddle/operators/conv_transpose_op.cu.cc @@ -1,16 +1,16 @@ -/* Copyright (c) 2016 PaddlePaddle Authors All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/conv_transpose_op.h" diff --git a/paddle/operators/conv_transpose_op.h b/paddle/operators/conv_transpose_op.h index 80600b5361..4c8f8a8067 100644 --- a/paddle/operators/conv_transpose_op.h +++ b/paddle/operators/conv_transpose_op.h @@ -30,14 +30,12 @@ using DDim = framework::DDim; // operator implementations can reuse the code.
class Conv2DTransposeOpMaker : public framework::OpProtoAndCheckerMaker { public: - Conv2DTransposeOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker); + Conv2DTransposeOpMaker(OpProto* proto, OpAttrChecker* op_checker); }; class Conv3DTransposeOpMaker : public framework::OpProtoAndCheckerMaker { public: - Conv3DTransposeOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker); + Conv3DTransposeOpMaker(OpProto* proto, OpAttrChecker* op_checker); }; class ConvTransposeOp : public framework::OperatorWithKernel { @@ -63,6 +61,7 @@ class GemmConvTransposeKernel : public framework::OpKernel { std::vector<int> strides = context.Attr<std::vector<int>>("strides"); std::vector<int> paddings = context.Attr<std::vector<int>>("paddings"); + std::vector<int> dilations = context.Attr<std::vector<int>>("dilations"); // groups will always be disabled in conv2dtranspose. const int batch_size = static_cast<int>(input->dims()[0]); @@ -115,7 +114,6 @@ class GemmConvTransposeKernel : public framework::OpKernel { math::Col2ImFunctor col2im; math::Col2VolFunctor col2vol; - std::vector<int> dilations({1, 1, 1}); // convolution transpose: gemm + col2im or col2vol (similar to conv-backward // on input) @@ -167,6 +165,7 @@ class GemmConvTransposeGradKernel : public framework::OpKernel { std::vector<int> strides = context.Attr<std::vector<int>>("strides"); std::vector<int> paddings = context.Attr<std::vector<int>>("paddings"); + std::vector<int> dilations = context.Attr<std::vector<int>>("dilations"); const int batch_size = static_cast<int>(input->dims()[0]); @@ -221,11 +220,9 @@ class GemmConvTransposeGradKernel : public framework::OpKernel { math::Im2ColFunctor im2col; math::Vol2ColFunctor vol2col; - std::vector<int> dilations({1, 1, 1}); if (input_grad) { input_grad->mutable_data<T>(context.GetPlace()); - set_zero(dev_ctx, input_grad, static_cast<T>(0)); } if (filter_grad) { // filter size (m, c, k_h, k_w) filter_grad->mutable_data<T>(context.GetPlace()); diff --git a/paddle/operators/cos_sim_op.cc b/paddle/operators/cos_sim_op.cc index ab9cf745e3..80e0780030 100644 --- a/paddle/operators/cos_sim_op.cc +++ b/paddle/operators/cos_sim_op.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License.
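Both GEMM kernels above now read dilations from the operator's attributes instead of a hard-coded {1, 1, 1}, and the gradient kernel drops its set_zero because the im2col/vol2col plus GEMM path writes input_grad outright. The attribute default keeps previously serialized graphs working; a toy sketch of that default-with-override lookup (hypothetical attribute map, not Paddle's AttrChecker):

#include <map>
#include <string>
#include <vector>

// A missing "dilations" entry falls back to the registered default,
// mirroring AddAttr(...).SetDefault({1, 1, 1}).
std::vector<int> GetDilations(
    const std::map<std::string, std::vector<int>>& attrs, size_t rank) {
  auto it = attrs.find("dilations");
  return it == attrs.end() ? std::vector<int>(rank, 1) : it->second;
}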
*/ #include "paddle/operators/cos_sim_op.h" @@ -62,7 +62,7 @@ class CosSimOp : public framework::OperatorWithKernel { class CosSimOpMaker : public framework::OpProtoAndCheckerMaker { public: - CosSimOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) + CosSimOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "The 1st input of cos_sim op."); AddInput("Y", "The 2nd input of cos_sim op."); @@ -162,7 +162,7 @@ struct CosSimDyFunctor { dy_(dy), cols_(static_cast(cols)) {} - inline void operator()(size_t offset) const { + inline HOSTDEVICE void operator()(size_t offset) const { auto xy_norm_prod = x_norm_[offset] * y_norm_[0]; auto dz = dz_[offset]; auto z = z_[offset]; diff --git a/paddle/operators/cos_sim_op.cu b/paddle/operators/cos_sim_op.cu index eacac68bac..88f49c1b14 100644 --- a/paddle/operators/cos_sim_op.cu +++ b/paddle/operators/cos_sim_op.cu @@ -1,19 +1,20 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #define EIGEN_USE_GPU #include "paddle/operators/cos_sim_op.h" +#include "paddle/platform/cuda_helper.h" namespace paddle { namespace operators { @@ -31,7 +32,7 @@ struct CosSimDyFunctor { dy_(dy), cols_(static_cast(cols)) {} - inline void operator()(size_t offset) const { + inline HOSTDEVICE void operator()(size_t offset) const { auto xy_norm_prod = x_norm_[offset] * y_norm_[0]; auto dz = dz_[offset]; auto z = z_[offset]; @@ -43,7 +44,8 @@ struct CosSimDyFunctor { for (size_t i = 0; i < cols_; ++i) { T dy = dz * (x[i] * reciprocal_xy_norm_prod - z * y_[i] * reciprocal_y_norm_square); - paddle::paddleAtomicAdd(dy_ + i, dy) + // platform::CudaAtomicAdd(dy_ + i, dy); + dy_[i] += dy; } } diff --git a/paddle/operators/cos_sim_op.h b/paddle/operators/cos_sim_op.h index 8b2a06a41b..bb7c893a29 100644 --- a/paddle/operators/cos_sim_op.h +++ b/paddle/operators/cos_sim_op.h @@ -1,40 +1,27 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once #include "paddle/framework/op_registry.h" -#include "paddle/operators/elementwise_op_function.h" +#include "paddle/operators/math/math_function.h" +#include "paddle/platform/for_range.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; -template <typename T> -struct CosSimDyFunctor { - CosSimDyFunctor(const T* x_norm, const T* y_norm, const T* x, const T* y, - const T* z, const T* dz, T* dy, int cols); - inline void operator()(size_t) const; -}; - -template <typename Callback> -static void ForEachZip(size_t num, Callback callback) { - for (size_t i = 0; i < num; ++i) { - callback(i); - } -} - template <typename T, bool same_row> struct CosSimFunctor { CosSimFunctor(const T* x, const T* y, T* x_norm, T* y_norm, T* z, int cols) @@ -50,10 +37,13 @@ struct CosSimFunctor { T xx = 0, xy = 0, yy = 0; if (same_row) { auto* y = y_ + cols_ * offset; + T temp_x, temp_y; for (size_t i = 0; i < cols_; ++i) { - xx += x[i] * x[i]; - yy += y[i] * y[i]; - xy += x[i] * y[i]; + temp_x = x[i]; + temp_y = y[i]; + xx += temp_x * temp_x; + yy += temp_y * temp_y; + xy += temp_x * temp_y; } xx = sqrt(xx); yy = sqrt(yy); @@ -61,14 +51,17 @@ x_norm_[offset] = xx; z_[offset] = xy / (xx * yy); } else { // This could be written in a better way.
+ T temp_x, temp_y; for (size_t i = 0; i < cols_; ++i) { - xx += x[i] * x[i]; - yy += y_[i] * y_[i]; // only need - xy += x[i] * y_[i]; + temp_x = x[i]; + temp_y = y_[i]; + xx += temp_x * temp_x; + yy += temp_y * temp_y; // only need + xy += temp_x * temp_y; } xx = sqrt(xx); yy = sqrt(yy); - y_norm_[0] = yy; + if (offset == 0) y_norm_[0] = yy; x_norm_[offset] = xx; z_[offset] = xy / (xx * yy); } @@ -105,12 +98,16 @@ class CosSimKernel : public framework::OpKernel { CosSimFunctor<T, true> functor( in_x->data<T>(), in_y->data<T>(), out_x_norm->data<T>(), out_y_norm->data<T>(), out_z->data<T>(), cols); - ForEachZip(rows_x, functor); + platform::ForRange<DeviceContext> for_range( + static_cast<const DeviceContext&>(context.device_context()), rows_x); + for_range(functor); } else { CosSimFunctor<T, false> functor( in_x->data<T>(), in_y->data<T>(), out_x_norm->data<T>(), out_y_norm->data<T>(), out_z->data<T>(), cols); - ForEachZip(rows_x, functor); + platform::ForRange<DeviceContext> for_range( + static_cast<const DeviceContext&>(context.device_context()), rows_x); + for_range(functor); } } }; @@ -194,6 +191,13 @@ struct CosSimDxFunctor { const size_t cols_; }; +template <typename T> +struct CosSimDyFunctor { + CosSimDyFunctor(const T* x_norm, const T* y_norm, const T* x, const T* y, + const T* z, const T* dz, T* dy, int cols); + inline HOSTDEVICE void operator()(size_t) const; +}; + template <typename DeviceContext, typename T> class CosSimGradKernel : public framework::OpKernel<T> { public: @@ -219,14 +223,20 @@ class CosSimGradKernel : public framework::OpKernel { in_x_norm->data<T>(), in_y_norm->data<T>(), in_x->data<T>(), in_y->data<T>(), in_z->data<T>(), in_grad_z->data<T>(), out_grad_x->mutable_data<T>(context.GetPlace()), cols); - ForEachZip(rows_x, functor); + platform::ForRange<DeviceContext> for_range( + static_cast<const DeviceContext&>(context.device_context()), + rows_x); + for_range(functor); } if (out_grad_y) { CosSimGradFunctor<T> functor( in_y_norm->data<T>(), in_x_norm->data<T>(), in_y->data<T>(), in_x->data<T>(), in_z->data<T>(), in_grad_z->data<T>(), out_grad_y->mutable_data<T>(context.GetPlace()), cols); - ForEachZip(rows_x, functor); + platform::ForRange<DeviceContext> for_range( + static_cast<const DeviceContext&>(context.device_context()), + rows_x); + for_range(functor); } } else { if (out_grad_x) { @@ -234,7 +244,10 @@ class CosSimGradKernel : public framework::OpKernel { in_x_norm->data<T>(), in_y_norm->data<T>(), in_x->data<T>(), in_y->data<T>(), in_z->data<T>(), in_grad_z->data<T>(), out_grad_x->mutable_data<T>(context.GetPlace()), cols); - ForEachZip(rows_x, functor); + platform::ForRange<DeviceContext> for_range( + static_cast<const DeviceContext&>(context.device_context()), + rows_x); + for_range(functor); } if (out_grad_y) { out_grad_y->mutable_data<T>(context.GetPlace()); @@ -246,7 +259,10 @@ class CosSimGradKernel : public framework::OpKernel { in_x_norm->data<T>(), in_y_norm->data<T>(), in_x->data<T>(), in_y->data<T>(), in_z->data<T>(), in_grad_z->data<T>(), out_grad_y->data<T>(), cols); - ForEachZip(rows_x, functor); + platform::ForRange<DeviceContext> for_range( + static_cast<const DeviceContext&>(context.device_context()), + rows_x); + for_range(functor); } } } diff --git a/paddle/operators/crf_decoding_op.cc b/paddle/operators/crf_decoding_op.cc index 1ce189fa6e..024e1d061a 100644 --- a/paddle/operators/crf_decoding_op.cc +++ b/paddle/operators/crf_decoding_op.cc @@ -18,8 +18,7 @@ namespace paddle { namespace operators { class CRFDecodingOpMaker : public framework::OpProtoAndCheckerMaker { public: - CRFDecodingOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker) + CRFDecodingOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("Emission", "(LoDTensor, default: LoDTensor).
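Two things are going on in the cos_sim refactor above: the hand-rolled ForEachZip loop is replaced by platform::ForRange, which runs the same HOSTDEVICE functor serially on CPU and as a per-index kernel launch on GPU, and the new offset == 0 guard ensures only one index writes the shared y_norm_[0] instead of every GPU thread storing it redundantly. A simplified CPU-side sketch of the idea (the real ForRange dispatches on the device context):

#include <cstddef>

// Simplified ForRange: apply the functor once per index. A CUDA
// specialization would launch one thread per index instead.
template <typename Functor>
void ForRangeCPU(std::size_t limit, Functor func) {
  for (std::size_t i = 0; i < limit; ++i) func(i);
}

struct WriteOnce {
  float* shared;
  void operator()(std::size_t i) const {
    if (i == 0) *shared = 1.0f;  // mirror of the offset == 0 guard
  }
};

int main() {
  float y_norm = 0.0f;
  ForRangeCPU(4, WriteOnce{&y_norm});
  return 0;
}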
A LoDTensor with shape " @@ -121,12 +120,18 @@ class CRFDecodingOp : public framework::OperatorWithKernel { } protected: - framework::OpKernelType GetKernelType( + framework::OpKernelType GetActualKernelType( const framework::ExecutionContext& ctx) const override { return framework::OpKernelType( framework::ToDataType(ctx.Input<LoDTensor>("Emission")->type()), ctx.device_context()); } + + framework::OpKernelType GetExpectedKernelType( + const framework::OpKernelType& actual_kernel_type) const override { + return framework::OpKernelType(actual_kernel_type.data_type_, + platform::CPUPlace()); + } }; } // namespace operators } // namespace paddle diff --git a/paddle/operators/crop_op.cc b/paddle/operators/crop_op.cc index 7c2a0ac7a7..310e351443 100644 --- a/paddle/operators/crop_op.cc +++ b/paddle/operators/crop_op.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/crop_op.h" #include @@ -52,7 +52,7 @@ class CropOp : public framework::OperatorWithKernel { class CropOpMaker : public framework::OpProtoAndCheckerMaker { public: - CropOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) + CropOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "The input of crop op. " @@ -88,7 +88,8 @@ There are two ways to set shape: The input should be a k-D tensor (k > 0 and k < 7). As an example: -Given: +Case 1: +Given X = [[0, 1, 2, 0, 0] [0, 3, 4, 0, 0] [0, 0, 0, 0, 0]], and offsets = [0, 1], and Y = [[0, 0] [0, 0]], we get: Out = [[1, 2], [3, 4]]. + +Case 2: +Given + + X = [[0, 1, 2, 5, 0] + [0, 3, 4, 6, 0] + [0, 0, 0, 0, 0]], + +and + + offsets = [0, 1], + +and + + Y = [[0, 0, 0] + [0, 0, 0]], + +we get: + + Out = [[1, 2, 5], + [3, 4, 6]]. )DOC"); } }; diff --git a/paddle/operators/crop_op.cu b/paddle/operators/crop_op.cu index 90fd83ca10..bba5db4c6c 100644 --- a/paddle/operators/crop_op.cu +++ b/paddle/operators/crop_op.cu @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #define EIGEN_USE_GPU #include "paddle/operators/crop_op.h" diff --git a/paddle/operators/crop_op.h b/paddle/operators/crop_op.h index d531a19c78..69d1a92977 100644 --- a/paddle/operators/crop_op.h +++ b/paddle/operators/crop_op.h @@ -1,16 +1,16 @@ -/* Copyright (c) 2016 CropdleCropdle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once diff --git a/paddle/operators/cross_entropy_op.cc b/paddle/operators/cross_entropy_op.cc index 2b06012b69..a9c5c7046f 100644 --- a/paddle/operators/cross_entropy_op.cc +++ b/paddle/operators/cross_entropy_op.cc @@ -51,7 +51,7 @@ class CrossEntropyOp : public framework::OperatorWithKernel { protected: // Explicitly set that the data type of computation kernel of cross_entropy // is determined by its input "X". - framework::OpKernelType GetKernelType( + framework::OpKernelType GetActualKernelType( const framework::ExecutionContext& ctx) const override { return framework::OpKernelType( framework::ToDataType(ctx.Input("X")->type()), @@ -101,7 +101,7 @@ class CrossEntropyGradientOp : public framework::OperatorWithKernel { protected: // Explicitly set that the data type of computation kernel of cross_entropy // is determined by its input "X". 
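The GetKernelType to GetActualKernelType rename here, paired with crf_decoding's new GetExpectedKernelType above, separates two questions: what kernel type the inputs imply, and what kernel type the op actually wants to run (crf_decoding pins itself to CPUPlace regardless of where the data came from). A toy model of that two-step decision (simplified names, not the framework's real types):

#include <cassert>
#include <string>

struct KernelType {
  std::string data_type;
  std::string place;
};

KernelType GetActualKernelType() { return {"float32", "GPU"}; }  // derived from inputs
KernelType GetExpectedKernelType(const KernelType& actual) {
  return {actual.data_type, "CPU"};  // the op overrides the place, keeps the dtype
}

int main() {
  KernelType chosen = GetExpectedKernelType(GetActualKernelType());
  assert(chosen.data_type == "float32" && chosen.place == "CPU");
  return 0;
}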
- framework::OpKernelType GetKernelType( + framework::OpKernelType GetActualKernelType( const framework::ExecutionContext& ctx) const override { return framework::OpKernelType( framework::ToDataType(ctx.Input("X")->type()), @@ -111,8 +111,7 @@ class CrossEntropyGradientOp : public framework::OperatorWithKernel { class CrossEntropyOpMaker : public framework::OpProtoAndCheckerMaker { public: - CrossEntropyOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker) + CrossEntropyOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "(Tensor, default Tensor), a 2-D tensor with shape N x D, " diff --git a/paddle/operators/cross_entropy_op.cu b/paddle/operators/cross_entropy_op.cu index 0546964588..3b04894e6c 100644 --- a/paddle/operators/cross_entropy_op.cu +++ b/paddle/operators/cross_entropy_op.cu @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/cross_entropy_op.h" diff --git a/paddle/operators/decayed_adagrad_op.cc b/paddle/operators/decayed_adagrad_op.cc index fd29c7270b..739a8d881c 100644 --- a/paddle/operators/decayed_adagrad_op.cc +++ b/paddle/operators/decayed_adagrad_op.cc @@ -55,8 +55,7 @@ class DecayedAdagradOp : public framework::OperatorWithKernel { class DecayedAdagradOpMaker : public framework::OpProtoAndCheckerMaker { public: - DecayedAdagradOpMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + DecayedAdagradOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("Param", "(Tensor) Input parameter"); AddInput("Grad", "(Tensor) Input gradient"); diff --git a/paddle/operators/decayed_adagrad_op.cu b/paddle/operators/decayed_adagrad_op.cu index 282b90f275..7bc8161f23 100644 --- a/paddle/operators/decayed_adagrad_op.cu +++ b/paddle/operators/decayed_adagrad_op.cu @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #define EIGEN_USE_GPU #include "paddle/operators/decayed_adagrad_op.h" diff --git a/paddle/operators/detail/recv_impl.cc b/paddle/operators/detail/recv_impl.cc index 89dc504522..b746f9df46 100644 --- a/paddle/operators/detail/recv_impl.cc +++ b/paddle/operators/detail/recv_impl.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "send_recv_impl.h" @@ -20,25 +20,57 @@ namespace detail { Status SendRecvServerImpl::SendVariable(ServerContext *context, const VariableMessage *in_var, - VariableMessage *out_var) { - framework::LoDTensor t; - // TODO(typhoonzero): desirealize in_tensor and run pserver network. + VoidMessage *out_var) { + // TODO(typhoonzero): support different variable types. std::istringstream iss(in_var->serialized()); + framework::LoDTensor t; framework::DeserializeFromStream(iss, &t); - lodtensor_queue_.Push(std::move(t)); - // Block util the sub graph is done. - t = lodtensor_return_queue_.Pop(); + TensorWithName tensor_with_name = + std::make_pair(in_var->varname(), std::move(t)); + + var_recv_queue_.Push(std::move(tensor_with_name)); + return Status::OK; +} + +Status SendRecvServerImpl::GetVariable(ServerContext *context, + const VariableMessage *in_var, + VariableMessage *out_var) { + std::string get_var_name = in_var->varname(); + auto *var = scope_->FindVar(get_var_name); + auto tensor = var->Get(); std::ostringstream oss; - // FIXME(typhoonzero): get context from op. 
- framework::SerializeToStream(oss, t, platform::CPUDeviceContext()); + framework::SerializeToStream(oss, tensor, platform::CPUDeviceContext()); + std::string *varname = out_var->mutable_varname(); - *varname = in_var->varname(); + *varname = get_var_name; std::string *serialized = out_var->mutable_serialized(); *serialized = oss.str(); + return Status::OK; +} +Status SendRecvServerImpl::Wait(ServerContext *context, + const VoidMessage *in_var, + VoidMessage *out_var) { + { + std::unique_lock lock(this->mutex_); + condition_.wait(lock, [=] { return this->done_ == true; }); + } return Status::OK; } +void SendRecvServerImpl::Reset() { + std::lock_guard lock(this->mutex_); + done_ = false; +} + +void SendRecvServerImpl::Done() { + { + std::lock_guard lock(this->mutex_); + done_ = true; + } + condition_.notify_all(); +} + } // namespace detail } // namespace operators } // namespace paddle diff --git a/paddle/operators/detail/safe_ref.h b/paddle/operators/detail/safe_ref.h index b71af17309..ff2a156f3d 100644 --- a/paddle/operators/detail/safe_ref.h +++ b/paddle/operators/detail/safe_ref.h @@ -1,16 +1,16 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once diff --git a/paddle/operators/detail/send_impl.cc b/paddle/operators/detail/send_impl.cc index da1ddf75d2..a812fcf39b 100644 --- a/paddle/operators/detail/send_impl.cc +++ b/paddle/operators/detail/send_impl.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
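The Wait/Reset/Done trio above is a plain condition-variable barrier: the server resets done_ before running the sub-program, trainers block in Wait, and Done flips the flag under the mutex and notifies everyone. Extracted into a standalone sketch:

#include <condition_variable>
#include <mutex>

class Barrier {
 public:
  void Reset() {
    std::lock_guard<std::mutex> lock(mu_);
    done_ = false;
  }
  void Done() {
    {
      std::lock_guard<std::mutex> lock(mu_);
      done_ = true;
    }
    cv_.notify_all();  // wake every waiter, as Done() does above
  }
  void Wait() {
    std::unique_lock<std::mutex> lock(mu_);
    cv_.wait(lock, [this] { return done_; });
  }

 private:
  std::mutex mu_;
  std::condition_variable cv_;
  bool done_ = false;
};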
*/ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "send_recv_impl.h" @@ -19,10 +19,10 @@ namespace operators { namespace detail { bool RPCClient::SendVariable(const framework::Scope& scope, - const std::string& inname, - const std::string& outname) { + const std::string& inname) { ClientContext context; - VariableMessage msg, out_msg; + VariableMessage msg; + VoidMessage out_msg; // FIXME(typhoonzero): pass device context to here. auto ctx = platform::CPUDeviceContext(); auto* var = scope.FindVar(inname); @@ -37,9 +37,26 @@ bool RPCClient::SendVariable(const framework::Scope& scope, msg.set_serialized(oss.str()); Status status = stub_->SendVariable(&context, msg, &out_msg); if (!status.ok()) { + LOG(ERROR) << "gRPC error: " << status.error_message(); return false; } - std::istringstream iss(out_msg.serialized()); + return true; +} + +bool RPCClient::GetVariable(const framework::Scope& scope, + const std::string& outname) { + ClientContext context; + VariableMessage call_msg, ret_msg; + call_msg.set_varname(outname); + auto ctx = platform::CPUDeviceContext(); + Status status = stub_->GetVariable(&context, call_msg, &ret_msg); + if (!status.ok()) { + LOG(ERROR) << "gRPC error: " << status.error_message(); + return false; + } + + std::istringstream iss(ret_msg.serialized()); + framework::LoDTensor ret_tensor; framework::DeserializeFromStream(iss, &ret_tensor); auto* outvar = scope.FindVar(outname); @@ -49,6 +66,12 @@ bool RPCClient::SendVariable(const framework::Scope& scope, return true; } +void RPCClient::Wait() { + ClientContext context; + VoidMessage call_msg, ret_msg; + stub_->Wait(&context, call_msg, &ret_msg); +} + } // namespace detail } // namespace operators } // namespace paddle diff --git a/paddle/operators/detail/send_recv.proto b/paddle/operators/detail/send_recv.proto index 07ff9d2c62..95c8e70898 100644 --- a/paddle/operators/detail/send_recv.proto +++ b/paddle/operators/detail/send_recv.proto @@ -1,16 +1,16 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ syntax = "proto3"; @@ -19,7 +19,12 @@ package sendrecv; service SendRecvService { // For parameter server round-robin like hashing, do not split tensors. // Send and recv only one tensor - rpc SendVariable(VariableMessage) returns (VariableMessage) {} + // TODO(typhoonzero): add streaming API + rpc SendVariable(VariableMessage) returns (VoidMessage) {} + // Argument VariableMessage for GetVariable should only contain varname. + rpc GetVariable(VariableMessage) returns (VariableMessage) {} + // wait for one execution of the program + rpc Wait(VoidMessage) returns (VoidMessage) {} } // VariableMessage is serialized paddle variable message. diff --git a/paddle/operators/detail/send_recv_impl.h b/paddle/operators/detail/send_recv_impl.h index b9a5340a86..47f730f7ae 100644 --- a/paddle/operators/detail/send_recv_impl.h +++ b/paddle/operators/detail/send_recv_impl.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
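The reshaped service splits the old request/response round trip into three one-way steps: push gradients with SendVariable (now returning VoidMessage), block on Wait until the server has run one step of the program, then pull the updated parameters back with GetVariable. From the trainer's side the flow looks roughly like this (stub functions, sketch only):

#include <string>
#include <vector>

// Stubs mirroring the three RPCs in the service definition above.
bool SendVariable(const std::string& /*name*/) { return true; }
void Wait() {}
bool GetVariable(const std::string& /*name*/) { return true; }

void TrainerStep(const std::vector<std::string>& params) {
  for (const auto& p : params) SendVariable(p + "@GRAD");  // 1. push gradients
  Wait();                                                  // 2. server applies one step
  for (const auto& p : params) GetVariable(p);             // 3. fetch updated params
}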
*/ #pragma once @@ -20,10 +20,6 @@ #include "paddle/framework/selected_rows.h" #include "paddle/operators/detail/simple_block_queue.h" -// #include -// #include -// #include -// #include #include "paddle/operators/detail/send_recv.grpc.pb.h" #include "paddle/operators/detail/send_recv.pb.h" @@ -48,24 +44,34 @@ namespace paddle { namespace operators { namespace detail { +typedef std::pair TensorWithName; + class SendRecvServerImpl final : public SendRecvService::Service { public: explicit SendRecvServerImpl() {} Status SendVariable(ServerContext *context, const VariableMessage *in_var, - VariableMessage *out_var) override; + VoidMessage *out_var) override; + Status GetVariable(ServerContext *context, const VariableMessage *in_var, + VariableMessage *out_var) override; + Status Wait(ServerContext *context, const VoidMessage *in_var, + VoidMessage *out_var) override; + void Reset(); + void Done(); + void SetScope(framework::Scope *scope) { scope_ = scope; }; - const framework::LoDTensor Get() { return this->lodtensor_queue_.Pop(); } + const TensorWithName Get() { return this->var_recv_queue_.Pop(); } - void Push(const framework::LoDTensor &tensor) { - this->lodtensor_return_queue_.Push(tensor); - } + void Push(const TensorWithName &msg) { this->var_recv_queue_.Push(msg); } private: - SimpleBlockQueue lodtensor_queue_; - SimpleBlockQueue lodtensor_return_queue_; - SimpleBlockQueue selected_rows_queue_; - SimpleBlockQueue selected_rows_return_queue_; + // received variable from RPC, operators fetch variable from this queue. + SimpleBlockQueue var_recv_queue_; + framework::Scope *scope_; + // condition of the sub program + std::mutex mutex_; + bool done_; + std::condition_variable condition_; }; // RPCClient is a class to send tensors to pserver sub-network @@ -75,8 +81,9 @@ class RPCClient { RPCClient(std::shared_ptr channel) : stub_(SendRecvService::NewStub(channel)) {} - bool SendVariable(const framework::Scope &scope, const std::string &inname, - const std::string &outname); + bool SendVariable(const framework::Scope &scope, const std::string &inname); + bool GetVariable(const framework::Scope &scope, const std::string &outname); + void Wait(); private: std::unique_ptr stub_; diff --git a/paddle/operators/detail/simple_block_queue.h b/paddle/operators/detail/simple_block_queue.h index 4489921757..c7f5ff4b5f 100644 --- a/paddle/operators/detail/simple_block_queue.h +++ b/paddle/operators/detail/simple_block_queue.h @@ -1,16 +1,16 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
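Received tensors now land in a single var_recv_queue_ of (name, tensor) pairs that the server-side op pops; SimpleBlockQueue does the blocking hand-off between the gRPC thread and the executor. A minimal queue in the same spirit:

#include <condition_variable>
#include <deque>
#include <mutex>
#include <utility>

template <typename T>
class BlockQueue {
 public:
  void Push(T v) {
    {
      std::lock_guard<std::mutex> lock(mu_);
      q_.push_back(std::move(v));
    }
    cv_.notify_one();
  }
  T Pop() {  // blocks until an element is available
    std::unique_lock<std::mutex> lock(mu_);
    cv_.wait(lock, [this] { return !q_.empty(); });
    T v = std::move(q_.front());
    q_.pop_front();
    return v;
  }

 private:
  std::mutex mu_;
  std::condition_variable cv_;
  std::deque<T> q_;
};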
*/ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once diff --git a/paddle/operators/detail/strided_memcpy.h b/paddle/operators/detail/strided_memcpy.h index 068c82f399..9ed524d4dc 100644 --- a/paddle/operators/detail/strided_memcpy.h +++ b/paddle/operators/detail/strided_memcpy.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once #include "paddle/framework/ddim.h" @@ -35,7 +35,7 @@ struct StridedMemcpyFunctor { memory::Copy(cpu_place, dst, cpu_place, src, sizeof(T) * dst_dim.head); } else { #ifdef PADDLE_WITH_CUDA - auto& gpu_place = boost::get(place); + auto& gpu_place = boost::get(place); auto& cuda_ctx = reinterpret_cast(dev_ctx); memory::Copy(gpu_place, dst, gpu_place, src, sizeof(T) * dst_dim.head, diff --git a/paddle/operators/dropout_op.cc b/paddle/operators/dropout_op.cc index acd526ae80..35cb18797f 100644 --- a/paddle/operators/dropout_op.cc +++ b/paddle/operators/dropout_op.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ #include "paddle/operators/dropout_op.h" @@ -25,8 +25,6 @@ class DropoutOp : public framework::OperatorWithKernel { void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) must not be null."); - PADDLE_ENFORCE_GE(ctx->Attrs().Get("dropout_prob"), 0); - PADDLE_ENFORCE_LE(ctx->Attrs().Get("dropout_prob"), 1); auto x_dims = ctx->GetInputDim("X"); ctx->SetOutputDim("Out", x_dims); @@ -40,15 +38,18 @@ class DropoutOp : public framework::OperatorWithKernel { template class DropoutOpMaker : public framework::OpProtoAndCheckerMaker { public: - DropoutOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker) + DropoutOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "The input of dropout op."); AddOutput("Out", "The output of dropout op."); AddOutput("Mask", "The random sampled dropout mask.").AsIntermediate(); AddAttr("dropout_prob", "Probability of setting units to zero.") - .SetDefault(.5f); + .SetDefault(.5f) + .AddCustomChecker([](const float& drop_p) { + PADDLE_ENFORCE(drop_p >= 0.0f && drop_p <= 1.0f, + "'dropout_prob' must be between 0.0 and 1.0."); + }); AddAttr("is_test", "True if in test phase.").SetDefault(false); AddAttr("seed", "Dropout random seed.").SetDefault(0); @@ -79,8 +80,6 @@ class DropoutOpGrad : public framework::OperatorWithKernel { PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), "Input(Out@GRAD) must not be null."); - PADDLE_ENFORCE_GE(ctx->Attrs().Get("dropout_prob"), 0); - PADDLE_ENFORCE_LE(ctx->Attrs().Get("dropout_prob"), 1); auto x_dims = ctx->GetInputDim("X"); auto out_dims = ctx->GetInputDim(framework::GradVarName("Out")); PADDLE_ENFORCE_EQ(x_dims, out_dims, diff --git a/paddle/operators/dropout_op.cu b/paddle/operators/dropout_op.cu index 10c670751d..c56930336e 100644 --- a/paddle/operators/dropout_op.cu +++ b/paddle/operators/dropout_op.cu @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
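Moving the dropout_prob range check into AddCustomChecker validates the attribute once, when the op is constructed, instead of re-asserting it on every InferShape call (the PADDLE_ENFORCE_GE/LE pairs deleted above). A toy analogue of an attribute with an attached checker:

#include <functional>
#include <stdexcept>

struct FloatAttr {
  float value = 0.5f;                  // mirrors SetDefault(.5f)
  std::function<void(float)> checker;
  void Set(float v) {
    if (checker) checker(v);           // reject bad values up front
    value = v;
  }
};

int main() {
  FloatAttr dropout_prob;
  dropout_prob.checker = [](float p) {
    if (p < 0.0f || p > 1.0f)
      throw std::invalid_argument("'dropout_prob' must be between 0.0 and 1.0.");
  };
  dropout_prob.Set(0.3f);  // ok; Set(1.5f) would throw
  return 0;
}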
*/ #define EIGEN_USE_GPU #include @@ -30,16 +30,15 @@ struct MaskGenerator { __host__ __device__ MaskGenerator(AttrType dropout_prob, int seed) : dropout_prob(dropout_prob), seed(seed) {} - __host__ __device__ T operator()(const unsigned int n) const { + inline __host__ __device__ T operator()(const unsigned int n) const { thrust::minstd_rand rng; rng.seed(seed); thrust::uniform_real_distribution dist(0, 1); rng.discard(n); if (dist(rng) < dropout_prob) { return static_cast(0); - } else { - return static_cast(1); } + return static_cast(1); } }; @@ -71,7 +70,7 @@ class GPUDropoutKernel : public framework::OpKernel { auto M = EigenMatrix::Reshape(*mask, 1); Y.device(place) = X * M; } else { - Y.device(place) = X * dropout_prob; + Y.device(place) = X * (1.0f - dropout_prob); } } }; diff --git a/paddle/operators/dropout_op.h b/paddle/operators/dropout_op.h index 84ad39f0bb..c90b8d277e 100644 --- a/paddle/operators/dropout_op.h +++ b/paddle/operators/dropout_op.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once #include @@ -57,7 +57,7 @@ class CPUDropoutKernel : public framework::OpKernel { auto Y = EigenMatrix::Reshape(*y, 1); auto& place = *context.template device_context().eigen_device(); - Y.device(place) = X * dropout_prob; + Y.device(place) = X * (1.0f - dropout_prob); } } }; diff --git a/paddle/operators/elementwise_add_op.cc b/paddle/operators/elementwise_add_op.cc index a62eeeeb95..70b7c9f2ec 100644 --- a/paddle/operators/elementwise_add_op.cc +++ b/paddle/operators/elementwise_add_op.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
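The inference-path fix above is the one behavioral change in these dropout hunks: a unit survives training-time dropout with probability 1 - dropout_prob, so scaling test-time activations by dropout_prob (the old code) matched the training expectation only at p = 0.5. A quick numeric check:

#include <cassert>
#include <cmath>

int main() {
  const float x = 2.0f, p = 0.3f;          // activation, dropout probability
  const float expected = (1.0f - p) * x;   // E[output] during training = 1.4
  const float fixed = x * (1.0f - p);      // new code matches the expectation
  const float old_code = x * p;            // old code gives 0.6, biased for p != 0.5
  assert(std::fabs(fixed - expected) < 1e-6f);
  assert(std::fabs(old_code - expected) > 1e-6f);
  return 0;
}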
*/ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/elementwise_add_op.h" #include "paddle/operators/elementwise_op.h" @@ -19,8 +19,7 @@ namespace paddle { namespace operators { class ElementwiseAddOpMaker : public ElementwiseOpMaker { public: - ElementwiseAddOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker) + ElementwiseAddOpMaker(OpProto* proto, OpAttrChecker* op_checker) : ElementwiseOpMaker(proto, op_checker) { SetComment("Add", "$Out = X + Y$"); AddComment(comment_); diff --git a/paddle/operators/elementwise_add_op.cu b/paddle/operators/elementwise_add_op.cu index 78642bb424..641cea323a 100644 --- a/paddle/operators/elementwise_add_op.cu +++ b/paddle/operators/elementwise_add_op.cu @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #define EIGEN_USE_GPU #include "paddle/operators/elementwise_add_op.h" diff --git a/paddle/operators/elementwise_add_op.h b/paddle/operators/elementwise_add_op.h index 069bdaf0ab..59abbb57d1 100644 --- a/paddle/operators/elementwise_add_op.h +++ b/paddle/operators/elementwise_add_op.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. */ #pragma once diff --git a/paddle/operators/elementwise_div_op.cc b/paddle/operators/elementwise_div_op.cc index 1c3e9e70ee..1fa960866f 100644 --- a/paddle/operators/elementwise_div_op.cc +++ b/paddle/operators/elementwise_div_op.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/elementwise_div_op.h" #include "paddle/operators/elementwise_op.h" @@ -19,8 +19,7 @@ namespace paddle { namespace operators { class ElementwiseDivOpMaker : public ElementwiseOpMaker { public: - ElementwiseDivOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker) + ElementwiseDivOpMaker(OpProto* proto, OpAttrChecker* op_checker) : ElementwiseOpMaker(proto, op_checker) { SetComment("Div", "$Out = X / Y$"); AddComment(comment_); diff --git a/paddle/operators/elementwise_div_op.cu b/paddle/operators/elementwise_div_op.cu index 502c528936..a0372123d6 100644 --- a/paddle/operators/elementwise_div_op.cu +++ b/paddle/operators/elementwise_div_op.cu @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ #define EIGEN_USE_GPU #include "paddle/operators/elementwise_div_op.h" diff --git a/paddle/operators/elementwise_div_op.h b/paddle/operators/elementwise_div_op.h index d91313db42..875abd313f 100644 --- a/paddle/operators/elementwise_div_op.h +++ b/paddle/operators/elementwise_div_op.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once diff --git a/paddle/operators/elementwise_mul_op.cc b/paddle/operators/elementwise_mul_op.cc index aadb95cbe3..a6d1173619 100644 --- a/paddle/operators/elementwise_mul_op.cc +++ b/paddle/operators/elementwise_mul_op.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ #include "paddle/operators/elementwise_mul_op.h" #include "paddle/operators/elementwise_op.h" @@ -20,8 +20,7 @@ namespace operators { class ElementwiseMulOpMaker : public ElementwiseOpMaker { public: - ElementwiseMulOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker) + ElementwiseMulOpMaker(OpProto* proto, OpAttrChecker* op_checker) : ElementwiseOpMaker(proto, op_checker) { SetComment("Mul", "$Out = X \\odot\\ Y$"); AddComment(comment_); diff --git a/paddle/operators/elementwise_mul_op.cu b/paddle/operators/elementwise_mul_op.cu index 089451b3e1..f73e8afda9 100644 --- a/paddle/operators/elementwise_mul_op.cu +++ b/paddle/operators/elementwise_mul_op.cu @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #define EIGEN_USE_GPU #include "paddle/operators/elementwise_mul_op.h" diff --git a/paddle/operators/elementwise_mul_op.h b/paddle/operators/elementwise_mul_op.h index 16fa5ec4b3..3ee50207c0 100644 --- a/paddle/operators/elementwise_mul_op.h +++ b/paddle/operators/elementwise_mul_op.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ #pragma once #include "paddle/operators/elementwise_op_function.h" diff --git a/paddle/operators/elementwise_op.h b/paddle/operators/elementwise_op.h index ea533503e4..f308ee05e1 100644 --- a/paddle/operators/elementwise_op.h +++ b/paddle/operators/elementwise_op.h @@ -43,8 +43,7 @@ class ElementwiseOp : public framework::OperatorWithKernel { class ElementwiseOpMaker : public framework::OpProtoAndCheckerMaker { public: - ElementwiseOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker) + ElementwiseOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "(Tensor) The first input tensor of elementwise op"); AddInput("Y", "(Tensor) The second input tensor of elementwise op"); diff --git a/paddle/operators/elementwise_op_function.h b/paddle/operators/elementwise_op_function.h index 7ebfc7df8c..560247cb10 100644 --- a/paddle/operators/elementwise_op_function.h +++ b/paddle/operators/elementwise_op_function.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once #include "paddle/framework/eigen.h" @@ -103,10 +103,12 @@ class MidWiseTransformIterator { MidWiseTransformIterator& operator++() { ++j_; - i_ = j_ / post_; - if (UNLIKELY(i_ == n_)) { + if (UNLIKELY(j_ == post_)) { + ++i_; j_ = 0; - i_ = 0; + if (UNLIKELY(i_ == n_)) { + i_ = 0; + } } return *this; } @@ -125,10 +127,10 @@ class MidWiseTransformIterator { private: const T* ptr_; - int i_; + int64_t i_; int64_t j_; int64_t n_; - int post_; + int64_t post_; }; #ifdef __NVCC__ diff --git a/paddle/operators/elementwise_sub_op.cc b/paddle/operators/elementwise_sub_op.cc index 3e4d19361e..2a8d0845b1 100644 --- a/paddle/operators/elementwise_sub_op.cc +++ b/paddle/operators/elementwise_sub_op.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
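The `MidWiseTransformIterator` hunk above replaces a division (`i_ = j_ / post_`) recomputed on every `++` with a rollover counter: `j_` now counts within the current `post_`-sized block and resets when the block ends, and the index members widen from `int` to `int64_t` so the iterator cannot overflow on very large tensors. A standalone sketch of the fixed walk, with hypothetical names (the real iterator also carries the data pointer and more machinery):

```cpp
#include <cassert>
#include <cstdint>

// Walk the elements of a tensor shaped [pre, n, post] while indexing a
// broadcast operand of shape [n]: every `post_` steps advance i_, wrapping
// at n_ so the next `pre` slice starts over.
struct MidWiseIndex {
  int64_t i_ = 0;     // index into the broadcast dimension, in [0, n_)
  int64_t j_ = 0;     // position inside the current post-block
  int64_t n_, post_;  // int64_t, mirroring the widening in the diff

  MidWiseIndex(int64_t n, int64_t post) : n_(n), post_(post) {}

  MidWiseIndex& operator++() {
    ++j_;
    if (j_ == post_) {  // finished one post-block: move to the next element
      ++i_;
      j_ = 0;
      if (i_ == n_) i_ = 0;  // wrap around for the next `pre` slice
    }
    return *this;
  }
};

int main() {
  // X of shape [2, 3, 4] broadcast against Y of shape [3]:
  // i_ must follow the pattern 0,0,0,0, 1,1,1,1, 2,2,2,2, 0,0,...
  MidWiseIndex it(/*n=*/3, /*post=*/4);
  for (int step = 0; step < 24; ++step, ++it) {
    assert(it.i_ == (step / 4) % 3);
  }
  return 0;
}
```

The counter form does the same walk without a per-element division, and the invariant is easier to see: `j_` never reaches `post_` and `i_` never reaches `n_` after an increment completes.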
+You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/elementwise_sub_op.h" #include "paddle/operators/elementwise_op.h" @@ -19,8 +19,7 @@ namespace paddle { namespace operators { class ElementwiseSubOpMaker : public ElementwiseOpMaker { public: - ElementwiseSubOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker) + ElementwiseSubOpMaker(OpProto* proto, OpAttrChecker* op_checker) : ElementwiseOpMaker(proto, op_checker) { SetComment("Sub", "$Out = X - Y$"); AddComment(comment_); diff --git a/paddle/operators/elementwise_sub_op.cu b/paddle/operators/elementwise_sub_op.cu index 0b2f0f7d4d..7a2516ef6a 100644 --- a/paddle/operators/elementwise_sub_op.cu +++ b/paddle/operators/elementwise_sub_op.cu @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #define EIGEN_USE_GPU #include "paddle/operators/elementwise_sub_op.h" diff --git a/paddle/operators/elementwise_sub_op.h b/paddle/operators/elementwise_sub_op.h index 731a30c5e3..66edf8672d 100644 --- a/paddle/operators/elementwise_sub_op.h +++ b/paddle/operators/elementwise_sub_op.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once #include "paddle/operators/elementwise_op_function.h" diff --git a/paddle/operators/expand_op.cc b/paddle/operators/expand_op.cc index 8b3cddbb94..08fa91ed72 100644 --- a/paddle/operators/expand_op.cc +++ b/paddle/operators/expand_op.cc @@ -55,7 +55,7 @@ class ExpandOp : public framework::OperatorWithKernel { class ExpandOpMaker : public framework::OpProtoAndCheckerMaker { public: - ExpandOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) + ExpandOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "(Tensor, default Tensor) A tensor with rank in [1, 6]." diff --git a/paddle/operators/expand_op.cu b/paddle/operators/expand_op.cu index 99ee584d08..84e8fa567b 100644 --- a/paddle/operators/expand_op.cu +++ b/paddle/operators/expand_op.cu @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #define EIGEN_USE_GPU diff --git a/paddle/operators/expand_op.h b/paddle/operators/expand_op.h index 14ef8b0912..1d9012cd4a 100644 --- a/paddle/operators/expand_op.h +++ b/paddle/operators/expand_op.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - You may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once diff --git a/paddle/operators/feed_op.cc b/paddle/operators/feed_op.cc index ee43c22fb1..cecbb7226a 100644 --- a/paddle/operators/feed_op.cc +++ b/paddle/operators/feed_op.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ #include "paddle/framework/feed_fetch_type.h" #include "paddle/framework/op_registry.h" @@ -25,7 +25,7 @@ class FeedOp : public framework::OperatorBase { const framework::AttributeMap &attrs) : OperatorBase(type, inputs, outputs, attrs) {} void Run(const framework::Scope &scope, - const platform::DeviceContext &dev_ctx) const override { + const platform::Place &place) const override { auto feed_var_name = Input("X"); auto *feed_var = scope.FindVar(feed_var_name); @@ -47,15 +47,19 @@ class FeedOp : public framework::OperatorBase { auto &feed_list = feed_var->Get(); auto &feed_item = feed_list.at(static_cast(col)); auto *out_item = out_var->GetMutable(); - framework::CopyFrom(feed_item, dev_ctx.GetPlace(), dev_ctx, out_item); + + // get device context from pool + platform::DeviceContextPool &pool = platform::DeviceContextPool::Get(); + auto &dev_ctx = *pool.Borrow(place); + + framework::CopyFrom(feed_item, place, dev_ctx, out_item); out_item->set_lod(feed_item.lod()); } }; class FeedOpInfoMaker : public framework::OpProtoAndCheckerMaker { public: - FeedOpInfoMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + FeedOpInfoMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "The input of feed op"); AddOutput("Out", "The output of feed op"); diff --git a/paddle/operators/fetch_op.cc b/paddle/operators/fetch_op.cc index 1ae07194c2..fa20a06540 100644 --- a/paddle/operators/fetch_op.cc +++ b/paddle/operators/fetch_op.cc @@ -1,19 +1,20 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/framework/feed_fetch_type.h" #include "paddle/framework/op_registry.h" +#include "paddle/platform/device_context.h" namespace paddle { namespace operators { @@ -26,7 +27,7 @@ class FetchOp : public framework::OperatorBase { : OperatorBase(type, inputs, outputs, attrs) {} void Run(const framework::Scope &scope, - const platform::DeviceContext &dev_ctx) const override { + const platform::Place &place) const override { auto fetch_var_name = Input("X"); auto *fetch_var = scope.FindVar(fetch_var_name); PADDLE_ENFORCE(fetch_var != nullptr, @@ -51,6 +52,9 @@ class FetchOp : public framework::OperatorBase { // FIXME(yuyang18): Should we assume the fetch operator always generate // CPU outputs? 
+ platform::DeviceContextPool &pool = platform::DeviceContextPool::Get(); + auto &dev_ctx = *pool.Borrow(place); + CopyFrom(src_item, platform::CPUPlace(), dev_ctx, &dst_item); dev_ctx.Wait(); dst_item.set_lod(src_item.lod()); @@ -61,8 +65,7 @@ class FetchOp : public framework::OperatorBase { class FetchOpInfoMaker : public framework::OpProtoAndCheckerMaker { public: - FetchOpInfoMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + FetchOpInfoMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "The input of fetch op"); AddOutput("Out", "The output of fetch op"); diff --git a/paddle/operators/fill_constant_batch_size_like_op.cc b/paddle/operators/fill_constant_batch_size_like_op.cc index 7fb74e2b95..852ecdfe45 100644 --- a/paddle/operators/fill_constant_batch_size_like_op.cc +++ b/paddle/operators/fill_constant_batch_size_like_op.cc @@ -49,10 +49,10 @@ class FillConstantBatchSizeLikeOp : public framework::OperatorWithKernel { } protected: - framework::OpKernelType GetKernelType( + framework::OpKernelType GetActualKernelType( const framework::ExecutionContext &ctx) const override { return framework::OpKernelType( - static_cast<framework::DataType>(ctx.Attr<int>("dtype")), + static_cast<framework::proto::DataType>(ctx.Attr<int>("dtype")), ctx.device_context()); } }; @@ -60,13 +60,12 @@ class FillConstantBatchSizeLikeOp : public framework::OperatorWithKernel { class FillConstantBatchSizeLikeOpMaker : public framework::OpProtoAndCheckerMaker { public: - FillConstantBatchSizeLikeOpMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + FillConstantBatchSizeLikeOpMaker(OpProto *proto, OpAttrChecker *op_checker) : framework::OpProtoAndCheckerMaker(proto, op_checker) { AddAttr<int>("dtype", "(int, default 5 (FP32)) " "Output data type") - .SetDefault(framework::DataType::FP32); + .SetDefault(framework::proto::DataType::FP32); AddInput("Input", "(Tensor) Tensor " "whose dim_idx th dimension is used to specify the batch_size"); diff --git a/paddle/operators/fill_constant_batch_size_like_op.cu.cc b/paddle/operators/fill_constant_batch_size_like_op.cu.cc index 2e0e15f36b..608f4b9162 100644 --- a/paddle/operators/fill_constant_batch_size_like_op.cu.cc +++ b/paddle/operators/fill_constant_batch_size_like_op.cu.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */
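Feed, fetch, fill and increment all make the same move here: `Run(scope, dev_ctx)` becomes `Run(scope, place)`, and each `OperatorBase` subclass looks the matching `DeviceContext` up in the process-wide `DeviceContextPool` (`Get()` then `Borrow(place)`) only when it actually needs one. A simplified sketch of that pool pattern, with hypothetical stand-in types (Paddle's pool also covers CUDA places and is created once at startup):

```cpp
#include <map>
#include <memory>
#include <string>

struct Place {
  std::string name;  // e.g. "CPU", "GPU:0"
};
bool operator<(const Place& a, const Place& b) { return a.name < b.name; }

struct DeviceContext {
  virtual ~DeviceContext() = default;
  virtual void Wait() {}  // no-op on CPU; would sync the stream on GPU
};

class DeviceContextPool {
 public:
  static DeviceContextPool& Get() {
    static DeviceContextPool pool;  // one pool per process
    return pool;
  }
  // Borrow a context owned by the pool; callers never free it.
  DeviceContext* Borrow(const Place& place) {
    auto it = ctxs_.find(place);
    if (it == ctxs_.end())
      it = ctxs_.emplace(place, std::make_unique<DeviceContext>()).first;
    return it->second.get();
  }

 private:
  std::map<Place, std::unique_ptr<DeviceContext>> ctxs_;
};

// An operator's Run now takes only a Place and resolves the context itself:
void RunLikeFeedOp(const Place& place) {
  auto& dev_ctx = *DeviceContextPool::Get().Borrow(place);
  // ... copy tensors using dev_ctx ...
  dev_ctx.Wait();
}

int main() {
  RunLikeFeedOp(Place{"CPU"});
  return 0;
}
```

Centralizing ownership in the pool means a context no longer has to be threaded through every call: operators can be scheduled with just a place, and the pool guarantees one long-lived context per device.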
*/ #include "paddle/operators/fill_constant_batch_size_like_op.h" #include "paddle/framework/op_registry.h" diff --git a/paddle/operators/fill_constant_op.cc b/paddle/operators/fill_constant_op.cc index 3d5f84bc23..fe0706c4a9 100644 --- a/paddle/operators/fill_constant_op.cc +++ b/paddle/operators/fill_constant_op.cc @@ -15,6 +15,7 @@ limitations under the License. */ #include "paddle/framework/data_type.h" #include "paddle/framework/op_registry.h" #include "paddle/operators/math/math_function.h" +#include "paddle/platform/device_context.h" namespace paddle { namespace operators { @@ -33,8 +34,9 @@ class FillConstantOp : public framework::OperatorBase { public: using framework::OperatorBase::OperatorBase; void Run(const framework::Scope &scope, - const platform::DeviceContext &dev_ctx) const override { - auto data_type = static_cast(Attr("dtype")); + const platform::Place &dev_place) const override { + auto data_type = + static_cast(Attr("dtype")); auto value = Attr("value"); auto force_cpu = Attr("force_cpu"); auto &out = @@ -44,21 +46,23 @@ class FillConstantOp : public framework::OperatorBase { auto cpu = platform::CPUPlace(); out.mutable_data(cpu, framework::ToTypeIndex(data_type)); } else { - out.mutable_data(dev_ctx.GetPlace(), framework::ToTypeIndex(data_type)); + out.mutable_data(dev_place, framework::ToTypeIndex(data_type)); } + + platform::DeviceContextPool &pool = platform::DeviceContextPool::Get(); + auto &dev_ctx = *pool.Borrow(dev_place); math::set_constant(dev_ctx, &out, value); } }; class FillConstantOpMaker : public framework::OpProtoAndCheckerMaker { public: - FillConstantOpMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + FillConstantOpMaker(OpProto *proto, OpAttrChecker *op_checker) : framework::OpProtoAndCheckerMaker(proto, op_checker) { AddAttr("dtype", "(int, default 5 (FP32)) " "Output data type") - .SetDefault(framework::DataType::FP32); + .SetDefault(framework::proto::DataType::FP32); AddAttr>("shape", "(vector) The shape of the output"); AddAttr("value", "(float, default 0) The value to be filled") .SetDefault(0.0f); diff --git a/paddle/operators/fill_op.cc b/paddle/operators/fill_op.cc index 382e161c5d..57b4ec6938 100644 --- a/paddle/operators/fill_op.cc +++ b/paddle/operators/fill_op.cc @@ -1,20 +1,21 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ #include "paddle/framework/data_type.h" #include "paddle/framework/op_registry.h" #include "paddle/operators/detail/safe_ref.h" +#include "paddle/platform/device_context.h" namespace paddle { namespace operators { @@ -42,21 +43,20 @@ class FillOp : public framework::OperatorBase { const framework::AttributeMap &attrs) : OperatorBase(type, inputs, outputs, attrs) {} void Run(const framework::Scope &scope, - const platform::DeviceContext &dev_ctx) const override { + const platform::Place &place) const override { auto &out = detail::Ref(detail::Ref(scope.FindVar(Output("Out")), "Cannot find variable %s", Output("Out")) .GetMutable()); out.Resize(framework::make_ddim(Attr>("shape"))); - auto dtype = static_cast(Attr("dtype")); + auto dtype = static_cast(Attr("dtype")); platform::CPUPlace cpu; auto force_cpu = Attr("force_cpu"); - out.mutable_data(force_cpu ? cpu : dev_ctx.GetPlace(), - framework::ToTypeIndex(dtype)); + out.mutable_data(force_cpu ? cpu : place, framework::ToTypeIndex(dtype)); framework::LoDTensor tensor; - if (force_cpu || platform::is_cpu_place(dev_ctx.GetPlace())) { + if (force_cpu || platform::is_cpu_place(place)) { tensor.ShareDataWith(out); } else { // Always make tensor in CPU memory. @@ -67,16 +67,18 @@ class FillOp : public framework::OperatorBase { framework::VisitDataType( dtype, FillOpVisitor(&tensor, Attr>("value"))); - if (!force_cpu && platform::is_gpu_place(dev_ctx.GetPlace())) { + if (!force_cpu && platform::is_gpu_place(place)) { // Copy tensor to out - framework::CopyFrom(tensor, dev_ctx.GetPlace(), dev_ctx, &out); + platform::DeviceContextPool &pool = platform::DeviceContextPool::Get(); + auto &dev_ctx = *pool.Borrow(place); + framework::CopyFrom(tensor, place, dev_ctx, &out); } } }; class FillOpMaker : public framework::OpProtoAndCheckerMaker { public: - FillOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) + FillOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddComment(R"DOC(Fill operator @@ -88,7 +90,7 @@ Fill an tensor with `value` and `shape`. The type of the tensor is specify by "value", "The float values of tensor, which are flatten in row major"); AddAttr>("shape", "The shape of output tensor"); AddAttr("dtype", "The data type of output tensor, Default is float") - .SetDefault(framework::DataType::FP32); + .SetDefault(framework::proto::DataType::FP32); AddAttr("force_cpu", "Whether the output tensor must be at CPU memory or not. 
" "Default is false.") diff --git a/paddle/operators/fill_zeros_like_op.cc b/paddle/operators/fill_zeros_like_op.cc index 720c11f5f1..b4ae1de876 100644 --- a/paddle/operators/fill_zeros_like_op.cc +++ b/paddle/operators/fill_zeros_like_op.cc @@ -24,20 +24,19 @@ class FillZerosLikeOp : public framework::OperatorWithKernel { void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of FillZerosLikeOp should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("Y"), - "Output(Y) of FillZerosLikeOp should not be null."); - ctx->SetOutputDim("Y", ctx->GetInputDim("X")); - ctx->ShareLoD("X", /*->*/ "Y"); + PADDLE_ENFORCE(ctx->HasOutput("Out"), + "Output(Out) of FillZerosLikeOp should not be null."); + ctx->SetOutputDim("Out", ctx->GetInputDim("X")); + ctx->ShareLoD("X", /*->*/ "Out"); } }; class FillZerosLikeOpMaker : public framework::OpProtoAndCheckerMaker { public: - FillZerosLikeOpMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + FillZerosLikeOpMaker(OpProto *proto, OpAttrChecker *op_checker) : framework::OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "The input of fill-zeros-like op."); - AddOutput("Y", "The variable will be filled up with zeros."); + AddOutput("Out", "The variable will be filled up with zeros."); AddComment(R"DOC( FillZerosLike Operator. diff --git a/paddle/operators/fill_zeros_like_op.cu.cc b/paddle/operators/fill_zeros_like_op.cu.cc index 9f412306bb..b7048e8f58 100644 --- a/paddle/operators/fill_zeros_like_op.cu.cc +++ b/paddle/operators/fill_zeros_like_op.cu.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ #include "paddle/operators/fill_zeros_like_op.h" #include "paddle/framework/op_registry.h" diff --git a/paddle/operators/fill_zeros_like_op.h b/paddle/operators/fill_zeros_like_op.h index a6e2941f52..351ecf8b2f 100644 --- a/paddle/operators/fill_zeros_like_op.h +++ b/paddle/operators/fill_zeros_like_op.h @@ -23,7 +23,7 @@ template class FillZerosLikeKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { - auto* out = context.Output("Y"); + auto* out = context.Output("Out"); out->mutable_data(context.GetPlace()); math::SetConstant setter; diff --git a/paddle/operators/ftrl_op.cc b/paddle/operators/ftrl_op.cc index b14913ff21..d00700823d 100644 --- a/paddle/operators/ftrl_op.cc +++ b/paddle/operators/ftrl_op.cc @@ -57,7 +57,7 @@ class FTRLOp : public framework::OperatorWithKernel { class FTRLOpMaker : public framework::OpProtoAndCheckerMaker { public: - FTRLOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) + FTRLOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("Param", "(Tensor, default Tensor) " diff --git a/paddle/operators/gather.cu.h b/paddle/operators/gather.cu.h index c806aa5f05..9840c066f0 100644 --- a/paddle/operators/gather.cu.h +++ b/paddle/operators/gather.cu.h @@ -1,16 +1,16 @@ -/* Copyright (c) 2016 PaddlePaddle Authors All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ #pragma once #include "paddle/framework/tensor.h" diff --git a/paddle/operators/gather_op.cc b/paddle/operators/gather_op.cc index 8f80fb1625..45e9d8df70 100644 --- a/paddle/operators/gather_op.cc +++ b/paddle/operators/gather_op.cc @@ -40,7 +40,7 @@ class GatherOp : public framework::OperatorWithKernel { } protected: - framework::OpKernelType GetKernelType( + framework::OpKernelType GetActualKernelType( const framework::ExecutionContext& ctx) const override { return framework::OpKernelType( framework::ToDataType(ctx.Input("X")->type()), @@ -57,7 +57,7 @@ class GatherGradOp : public framework::OperatorWithKernel { } protected: - framework::OpKernelType GetKernelType( + framework::OpKernelType GetActualKernelType( const framework::ExecutionContext& ctx) const override { return framework::OpKernelType( framework::ToDataType(ctx.Input("X")->type()), @@ -67,7 +67,7 @@ class GatherGradOp : public framework::OperatorWithKernel { class GatherOpMaker : public framework::OpProtoAndCheckerMaker { public: - GatherOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) + GatherOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "The source input of gather op"); AddInput("Index", "The index input of gather op"); diff --git a/paddle/operators/gather_op.cu b/paddle/operators/gather_op.cu index b37f0576e2..eec2415e1d 100644 --- a/paddle/operators/gather_op.cu +++ b/paddle/operators/gather_op.cu @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "gather.cu.h" #include "paddle/framework/eigen.h" diff --git a/paddle/operators/gaussian_random_op.cc b/paddle/operators/gaussian_random_op.cc index 254c83e137..9ed493a7d0 100644 --- a/paddle/operators/gaussian_random_op.cc +++ b/paddle/operators/gaussian_random_op.cc @@ -1,13 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
*/ + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include #include "paddle/framework/op_registry.h" @@ -57,18 +60,17 @@ class GaussianRandomOp : public framework::OperatorWithKernel { } protected: - framework::OpKernelType GetKernelType( + framework::OpKernelType GetActualKernelType( const framework::ExecutionContext& ctx) const override { return framework::OpKernelType( - static_cast<framework::DataType>(ctx.Attr<int>("dtype")), + static_cast<framework::proto::DataType>(ctx.Attr<int>("dtype")), ctx.device_context()); } }; class GaussianRandomOpMaker : public framework::OpProtoAndCheckerMaker { public: - GaussianRandomOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker) + GaussianRandomOpMaker(OpProto* proto, OpAttrChecker* op_checker) : framework::OpProtoAndCheckerMaker(proto, op_checker) { AddOutput("Out", "Output matrix of gaussian random op"); @@ -91,7 +93,7 @@ class GaussianRandomOpMaker : public framework::OpProtoAndCheckerMaker { AddAttr<int>("dtype", "(int, default 5(FP32)) " "Output data type.") - .SetDefault(framework::DataType::FP32); + .SetDefault(framework::proto::DataType::FP32); AddComment(R"DOC( GaussianRandom Operator. diff --git a/paddle/operators/gaussian_random_op.cu b/paddle/operators/gaussian_random_op.cu index ffce6f7138..8a70db17e1 100644 --- a/paddle/operators/gaussian_random_op.cu +++ b/paddle/operators/gaussian_random_op.cu @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ -#include -#include +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include #include #include "paddle/framework/op_registry.h" diff --git a/paddle/operators/gru_op.cc b/paddle/operators/gru_op.cc index 311e7edcf1..76f2adefed 100644 --- a/paddle/operators/gru_op.cc +++ b/paddle/operators/gru_op.cc @@ -1,13 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License.
- You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/gru_op.h" @@ -67,7 +70,7 @@ class GRUOp : public framework::OperatorWithKernel { class GRUOpMaker : public framework::OpProtoAndCheckerMaker { public: - GRUOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) + GRUOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("Input", "(LoDTensor) The first input is a LodTensor, which supports " diff --git a/paddle/operators/gru_op.cu.cc b/paddle/operators/gru_op.cu.cc index 458630ca61..9cb0cc42d5 100644 --- a/paddle/operators/gru_op.cu.cc +++ b/paddle/operators/gru_op.cu.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/gru_op.h" diff --git a/paddle/operators/gru_op.h b/paddle/operators/gru_op.h index 6d02dff578..c6228864d7 100644 --- a/paddle/operators/gru_op.h +++ b/paddle/operators/gru_op.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once diff --git a/paddle/operators/gru_unit_op.cc b/paddle/operators/gru_unit_op.cc index 705de87be5..c354293be7 100644 --- a/paddle/operators/gru_unit_op.cc +++ b/paddle/operators/gru_unit_op.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/gru_unit_op.h" @@ -71,8 +71,7 @@ class GRUUnitOp : public framework::OperatorWithKernel { class GRUUnitOpMaker : public framework::OpProtoAndCheckerMaker { public: - GRUUnitOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker) + GRUUnitOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("Input", "(Tensor) Matrix with shape [batch_size, frame_size * 3] for the " diff --git a/paddle/operators/gru_unit_op.cu b/paddle/operators/gru_unit_op.cu index 7c752db494..95c8c23dad 100644 --- a/paddle/operators/gru_unit_op.cu +++ b/paddle/operators/gru_unit_op.cu @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #define EIGEN_USE_GPU #include "paddle/operators/gru_unit_op.h" diff --git a/paddle/operators/gru_unit_op.h b/paddle/operators/gru_unit_op.h index 8fe60c750d..a77be46718 100644 --- a/paddle/operators/gru_unit_op.h +++ b/paddle/operators/gru_unit_op.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once diff --git a/paddle/operators/hinge_loss_op.cc b/paddle/operators/hinge_loss_op.cc index 373b4d99b4..19d2e9dc56 100644 --- a/paddle/operators/hinge_loss_op.cc +++ b/paddle/operators/hinge_loss_op.cc @@ -46,8 +46,7 @@ class HingeLossOp : public framework::OperatorWithKernel { template class HingeLossOpMaker : public framework::OpProtoAndCheckerMaker { public: - HingeLossOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker) + HingeLossOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("Logits", "The input value (Logits) of Hinge loss op." diff --git a/paddle/operators/hinge_loss_op.cu b/paddle/operators/hinge_loss_op.cu index 31a5bde292..b9cfbc50c4 100644 --- a/paddle/operators/hinge_loss_op.cu +++ b/paddle/operators/hinge_loss_op.cu @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
*/ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #define EIGEN_USE_GPU #include "paddle/operators/hinge_loss_op.h" diff --git a/paddle/operators/huber_loss_op.cc b/paddle/operators/huber_loss_op.cc index 11828d083a..5c92f2c7b2 100644 --- a/paddle/operators/huber_loss_op.cc +++ b/paddle/operators/huber_loss_op.cc @@ -45,8 +45,7 @@ class HuberLossOp : public framework::OperatorWithKernel { template class HuberLossOpMaker : public framework::OpProtoAndCheckerMaker { public: - HuberLossOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker) + HuberLossOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "The input value of huber loss op." diff --git a/paddle/operators/huber_loss_op.cu b/paddle/operators/huber_loss_op.cu index d49a4d9d42..ccc83a16ba 100644 --- a/paddle/operators/huber_loss_op.cu +++ b/paddle/operators/huber_loss_op.cu @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #define EIGEN_USE_GPU #include "paddle/operators/huber_loss_op.h" diff --git a/paddle/operators/increment_op.cc b/paddle/operators/increment_op.cc index 54911267e3..e0b80cc4e7 100644 --- a/paddle/operators/increment_op.cc +++ b/paddle/operators/increment_op.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
*/ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/framework/op_registry.h" @@ -52,7 +52,7 @@ class IncrementOp : public framework::OperatorBase { : OperatorBase(type, inputs, outputs, attrs) {} void Run(const framework::Scope &scope, - const platform::DeviceContext &dev_ctx) const override { + const platform::Place &place) const override { auto &x = scope.FindVar(Input("X"))->Get<framework::LoDTensor>(); auto &out = *scope.FindVar(Output("Out"))->GetMutable<framework::LoDTensor>(); @@ -70,8 +70,7 @@ class IncrementOp : public framework::OperatorBase { class IncrementOpMaker : public framework::OpProtoAndCheckerMaker { public: - IncrementOpMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + IncrementOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "(Tensor) The input tensor of increment operator"); AddOutput("Out", "(Tensor) The output tensor of increment operator."); @@ -94,13 +93,13 @@ class IncrementGradOpMaker : public framework::SingleGradOpDescMaker { public: using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; - std::unique_ptr<framework::OpDescBind> Apply() const override { - auto *grad_op = new framework::OpDescBind(); + std::unique_ptr<framework::OpDesc> Apply() const override { + auto *grad_op = new framework::OpDesc(); grad_op->SetType("increment"); grad_op->SetInput("X", Output("Out")); grad_op->SetOutput("Out", Input("X")); grad_op->SetAttr("step", -boost::get<float>(GetAttr("step"))); - return std::unique_ptr<framework::OpDescBind>(grad_op); + return std::unique_ptr<framework::OpDesc>(grad_op); } }; diff --git a/paddle/operators/is_empty_op.cc b/paddle/operators/is_empty_op.cc index 54fecf44e8..492ae48845 100644 --- a/paddle/operators/is_empty_op.cc +++ b/paddle/operators/is_empty_op.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License.
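The increment gradient above deserves a gloss: instead of a dedicated grad kernel, the maker re-emits the forward op with `step` negated, so running it maps `Out` back to `X`. A minimal assembled view of the post-patch desc maker, with the template arguments restored as they appear in these hunks (illustrative only, not part of the patch):

class IncrementGradOpMaker : public framework::SingleGradOpDescMaker {
 public:
  using framework::SingleGradOpDescMaker::SingleGradOpDescMaker;

  std::unique_ptr<framework::OpDesc> Apply() const override {
    auto *grad_op = new framework::OpDesc();
    grad_op->SetType("increment");          // reuse the forward op itself
    grad_op->SetInput("X", Output("Out"));  // feed it the forward output
    grad_op->SetOutput("Out", Input("X"));  // recover the forward input
    grad_op->SetAttr("step", -boost::get<float>(GetAttr("step")));  // negated step
    return std::unique_ptr<framework::OpDesc>(grad_op);
  }
};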
*/ #include "paddle/framework/op_registry.h" #include "paddle/framework/operator.h" @@ -29,7 +29,7 @@ class IsEmptyOp : public framework::OperatorBase { : OperatorBase(type, inputs, outputs, attrs) {} void Run(const framework::Scope &scope, - const platform::DeviceContext &dev_ctx) const override { + const platform::Place &place) const override { // get input auto *var = scope.FindVar(Input(kInput)); PADDLE_ENFORCE_NOT_NULL(var); @@ -47,8 +47,7 @@ class IsEmptyOp : public framework::OperatorBase { class IsEmptyOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - IsEmptyOpProtoMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + IsEmptyOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput(kInput, "(Tensor) Tensor which is to be checked."); AddOutput(kOutput, "(Tensor) a boolean Tensor that indicate empty or not."); diff --git a/paddle/operators/l1_norm_op.cc b/paddle/operators/l1_norm_op.cc index c0b51202c6..1a5d6e1926 100644 --- a/paddle/operators/l1_norm_op.cc +++ b/paddle/operators/l1_norm_op.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/l1_norm_op.h" @@ -48,7 +48,7 @@ class L1NormGradOp : public framework::OperatorWithKernel { class L1NormOpMaker : public framework::OpProtoAndCheckerMaker { public: - L1NormOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) + L1NormOpMaker(OpProto* proto, OpAttrChecker* op_checker) : framework::OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "(Tensor) The input of l1_norm op."); AddOutput("Out", "(Scalar) The output of l1_norm op."); diff --git a/paddle/operators/l1_norm_op.cu b/paddle/operators/l1_norm_op.cu index fd725f86f6..7ecc774670 100644 --- a/paddle/operators/l1_norm_op.cu +++ b/paddle/operators/l1_norm_op.cu @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #define EIGEN_USE_GPU #include "paddle/operators/l1_norm_op.h" diff --git a/paddle/operators/l1_norm_op.h b/paddle/operators/l1_norm_op.h index ae3878f2b7..086d42705d 100644 --- a/paddle/operators/l1_norm_op.h +++ b/paddle/operators/l1_norm_op.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once #include "paddle/framework/eigen.h" diff --git a/paddle/operators/linear_chain_crf_op.cc b/paddle/operators/linear_chain_crf_op.cc index 896e3657d4..666207ea07 100644 --- a/paddle/operators/linear_chain_crf_op.cc +++ b/paddle/operators/linear_chain_crf_op.cc @@ -19,8 +19,7 @@ namespace operators { class LinearChainCRFOpMaker : public framework::OpProtoAndCheckerMaker { public: - LinearChainCRFOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker) + LinearChainCRFOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("Emission", "(LoDTensor, default LoDTensor) " @@ -184,7 +183,7 @@ class LinearChainCRFOp : public framework::OperatorWithKernel { protected: // Explicitly set that the data type of computation kernel of linear_chain_crf // is determined by its input "Emission". - framework::OpKernelType GetKernelType( + framework::OpKernelType GetActualKernelType( const framework::ExecutionContext& ctx) const override { return framework::OpKernelType( framework::ToDataType(ctx.Input("Emission")->type()), @@ -243,7 +242,7 @@ class LinearChainCRFGradOp : public framework::OperatorWithKernel { protected: // Explicitly set that the data type of output of the linear_chain_crf_grad // operator is determined by its input: gradients of LogLikelihood. 
- framework::OpKernelType GetKernelType( + framework::OpKernelType GetActualKernelType( const framework::ExecutionContext& ctx) const override { return framework::OpKernelType( framework::ToDataType( diff --git a/paddle/operators/linear_chain_crf_op.cu b/paddle/operators/linear_chain_crf_op.cu index 3b105ec341..da612510b4 100644 --- a/paddle/operators/linear_chain_crf_op.cu +++ b/paddle/operators/linear_chain_crf_op.cu @@ -1,10 +1,10 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at -http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/paddle/operators/linear_chain_crf_op.h b/paddle/operators/linear_chain_crf_op.h index 694584e79c..19c6715ec8 100644 --- a/paddle/operators/linear_chain_crf_op.h +++ b/paddle/operators/linear_chain_crf_op.h @@ -219,8 +219,8 @@ class LinearChainCRFOpKernel : public framework::OpKernel { // operators runs on GPU device. auto copyTensor = [](const platform::DeviceContext& ctx, const Tensor& src, Tensor* dst) { - dst->mutable_data(platform::GPUPlace()); - framework::CopyFrom(src, platform::GPUPlace(), ctx, dst); + dst->mutable_data(platform::CUDAPlace()); + framework::CopyFrom(src, platform::CUDAPlace(), ctx, dst); }; copyTensor(ctx, emission_exps_src, emission_exps_dst); copyTensor(ctx, transition_exps_src, transition_exps_dst); @@ -433,8 +433,8 @@ class LinearChainCRFGradOpKernel : public framework::OpKernel { auto copyTensor = [](const platform::DeviceContext& ctx, const Tensor* src, Tensor* dst) { if (src && dst) { - dst->mutable_data(platform::GPUPlace()); - framework::CopyFrom(*src, platform::GPUPlace(), ctx, dst); + dst->mutable_data(platform::CUDAPlace()); + framework::CopyFrom(*src, platform::CUDAPlace(), ctx, dst); } }; copyTensor(ctx, emission_grad_src, emission_grad_dst); diff --git a/paddle/operators/load_op.cc b/paddle/operators/load_op.cc index 4e58b84430..5425375c1f 100644 --- a/paddle/operators/load_op.cc +++ b/paddle/operators/load_op.cc @@ -1,20 +1,20 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
*/ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ +#include <fstream> #include "paddle/framework/op_registry.h" - -#include <fstream> +#include "paddle/platform/device_context.h" namespace paddle { namespace operators { @@ -26,7 +26,7 @@ class LoadOp : public framework::OperatorBase { const framework::AttributeMap &attrs) : OperatorBase(type, inputs, outputs, attrs) {} void Run(const framework::Scope &scope, - const platform::DeviceContext &dev_ctx) const override { + const platform::Place &place) const override { auto filename = Attr<std::string>("file_path"); std::ifstream fin(filename); PADDLE_ENFORCE(static_cast<bool>(fin), "Cannot open file %s for load op", @@ -40,7 +40,9 @@ class LoadOp : public framework::OperatorBase { auto *tensor = out_var->GetMutable<framework::LoDTensor>(); framework::DeserializeFromStream(fin, tensor); - auto place = dev_ctx.GetPlace(); + platform::DeviceContextPool &pool = platform::DeviceContextPool::Get(); + auto &dev_ctx = *pool.Borrow(place); + if (platform::is_gpu_place(place)) { // copy CPU to GPU framework::LoDTensor cpu_tensor; @@ -58,8 +60,7 @@ class LoadOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - LoadOpProtoMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + LoadOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddOutput("Out", "(Tensor) The tensor need to be loaded"); AddAttr<std::string>("file_path", diff --git a/paddle/operators/lod_array_length_op.cc b/paddle/operators/lod_array_length_op.cc index b2f4ec57fa..d2c52745cf 100644 --- a/paddle/operators/lod_array_length_op.cc +++ b/paddle/operators/lod_array_length_op.cc @@ -1,16 +1,16 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
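The load_op hunk above is the template for every OperatorBase::Run change in this patch: Run() now receives a bare platform::Place, and the matching DeviceContext is borrowed from a global pool only when device work is actually needed. A minimal sketch of the pattern, using only the pool calls visible in these hunks:

void Run(const framework::Scope &scope,
         const platform::Place &place) const override {
  // Look up the DeviceContext for `place` instead of taking it as a parameter.
  platform::DeviceContextPool &pool = platform::DeviceContextPool::Get();
  auto &dev_ctx = *pool.Borrow(place);
  // ... device-dependent work, e.g. framework::CopyFrom(src, place, dev_ctx, &dst);
}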
*/ #include "paddle/framework/lod_tensor_array.h" #include "paddle/framework/op_registry.h" @@ -26,7 +26,7 @@ class LoDArrayLengthOp : public framework::OperatorBase { const framework::AttributeMap &attrs) : OperatorBase(type, inputs, outputs, attrs) {} void Run(const framework::Scope &scope, - const platform::DeviceContext &dev_ctx) const override { + const platform::Place &place) const override { auto &x = scope.FindVar(Input("X"))->Get(); auto &out = *scope.FindVar(Output("Out"))->GetMutable(); @@ -38,8 +38,7 @@ class LoDArrayLengthOp : public framework::OperatorBase { class LoDArrayLengthProtoMaker : public framework::OpProtoAndCheckerMaker { public: - LoDArrayLengthProtoMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + LoDArrayLengthProtoMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "(LoDTensorArray) The input tensor array."); AddOutput("Out", "(Tensor) 1x1 CPU Tensor of length, int64_t"); diff --git a/paddle/operators/lod_rank_table_op.cc b/paddle/operators/lod_rank_table_op.cc index f7d4db1947..8711dd62c8 100644 --- a/paddle/operators/lod_rank_table_op.cc +++ b/paddle/operators/lod_rank_table_op.cc @@ -1,16 +1,16 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ #include "paddle/framework/lod_rank_table.h" #include "paddle/framework/op_registry.h" namespace paddle { @@ -24,19 +24,19 @@ class LoDRankTableOp : public framework::OperatorBase { const framework::AttributeMap &attrs) : OperatorBase(type, inputs, outputs, attrs) {} void Run(const framework::Scope &scope, - const platform::DeviceContext &dev_ctx) const override { + const platform::Place &dev_place) const override { auto x = scope.FindVar(Input("X"))->Get(); auto *out = scope.FindVar(Output("Out"))->GetMutable(); VLOG(10) << "Level = " << static_cast(Attr("level")); out->Reset(x.lod(), static_cast(Attr("level"))); + VLOG(10) << Input("X") << "'s lod information is " << *out; } }; class LoDRankTableOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - LoDRankTableOpProtoMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + LoDRankTableOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "(LoDTensor) input lod tensor, must contain lod information."); @@ -63,11 +63,11 @@ class LoDRankTableInferShape : public framework::InferShapeBase { class LoDRankTableInferVarType : public framework::VarTypeInference { public: - void operator()(const framework::OpDescBind &op_desc, - framework::BlockDescBind *block) const override { + void operator()(const framework::OpDesc &op_desc, + framework::BlockDesc *block) const override { for (auto &o : op_desc.Output("Out")) { block->FindRecursiveOrCreateVar(o)->SetType( - framework::VarDesc::LOD_RANK_TABLE); + framework::proto::VarDesc::LOD_RANK_TABLE); } } }; diff --git a/paddle/operators/lod_reset_op.cc b/paddle/operators/lod_reset_op.cc index 32831cb1e2..f3c0badf2a 100644 --- a/paddle/operators/lod_reset_op.cc +++ b/paddle/operators/lod_reset_op.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ #include "paddle/operators/lod_reset_op.h" @@ -38,7 +38,7 @@ class LoDResetOp : public framework::OperatorWithKernel { } protected: - framework::OpKernelType GetKernelType( + framework::OpKernelType GetActualKernelType( const framework::ExecutionContext &ctx) const override { return framework::OpKernelType( framework::ToDataType(ctx.Input("X")->type()), @@ -48,8 +48,7 @@ class LoDResetOp : public framework::OperatorWithKernel { class LoDResetOpMaker : public framework::OpProtoAndCheckerMaker { public: - LoDResetOpMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + LoDResetOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "(LoDTensor) The input tensor of lod_reset operator."); AddInput("TargetLoD", @@ -98,7 +97,7 @@ class LoDResetGradOp : public framework::OperatorWithKernel { } protected: - framework::OpKernelType GetKernelType( + framework::OpKernelType GetActualKernelType( const framework::ExecutionContext &ctx) const override { return framework::OpKernelType( framework::ToDataType(ctx.Input("X")->type()), diff --git a/paddle/operators/lod_reset_op.cu b/paddle/operators/lod_reset_op.cu index f7c2358980..910866ea63 100644 --- a/paddle/operators/lod_reset_op.cu +++ b/paddle/operators/lod_reset_op.cu @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/lod_reset_op.h" diff --git a/paddle/operators/lod_reset_op.h b/paddle/operators/lod_reset_op.h index b86f8b1313..306373fb1f 100644 --- a/paddle/operators/lod_reset_op.h +++ b/paddle/operators/lod_reset_op.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
*/ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once diff --git a/paddle/operators/lod_tensor_to_array_op.cc b/paddle/operators/lod_tensor_to_array_op.cc index b970bf3177..ed99915bb7 100644 --- a/paddle/operators/lod_tensor_to_array_op.cc +++ b/paddle/operators/lod_tensor_to_array_op.cc @@ -1,20 +1,21 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ #include "paddle/framework/lod_rank_table.h" #include "paddle/framework/lod_tensor_array.h" #include "paddle/framework/op_registry.h" #include "paddle/operators/detail/safe_ref.h" +#include "paddle/platform/device_context.h" namespace paddle { namespace operators { @@ -32,7 +33,7 @@ class LoDTensorToArrayOp : public framework::OperatorBase { const framework::AttributeMap &attrs) : OperatorBase(type, inputs, outputs, attrs) {} void Run(const framework::Scope &scope, - const platform::DeviceContext &dev_ctx) const override { + const platform::Place &place) const override { auto &x = detail::Ref(scope.FindVar(Input("X")), "Cannot find input %s", Input("X")) .Get(); @@ -86,6 +87,10 @@ class LoDTensorToArrayOp : public framework::OperatorBase { // out[i][offset: offset+len] = x[each_range.begin: each_range.end] auto slice = out[i].Slice(static_cast(offset), static_cast(offset + len)); + + platform::DeviceContextPool &pool = platform::DeviceContextPool::Get(); + auto &dev_ctx = *pool.Borrow(place); + framework::CopyFrom(x.Slice(static_cast(each_range.begin), static_cast(each_range.end)), x.place(), dev_ctx, &slice); @@ -97,8 +102,7 @@ class LoDTensorToArrayOp : public framework::OperatorBase { class LoDTensorToArrayOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - LoDTensorToArrayOpProtoMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + LoDTensorToArrayOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", ""); AddInput("RankTable", ""); @@ -128,10 +132,10 @@ class LoDTensorToArrayInferShape : public framework::InferShapeBase { class LoDTensorToArrayInferVarType : public framework::VarTypeInference { public: - void operator()(const framework::OpDescBind &op_desc, - framework::BlockDescBind *block) const override { + void operator()(const framework::OpDesc &op_desc, + framework::BlockDesc *block) const override { for (auto &out_var : op_desc.Output("Out")) { - block->Var(out_var)->SetType(framework::VarDesc::LOD_TENSOR_ARRAY); + block->Var(out_var)->SetType(framework::proto::VarDesc::LOD_TENSOR_ARRAY); } } }; @@ -141,14 +145,14 @@ class LoDTensorToArrayGradMaker : public framework::SingleGradOpDescMaker { using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; protected: - std::unique_ptr Apply() const override { - auto *grad_op = new framework::OpDescBind(); + std::unique_ptr Apply() const override { + auto *grad_op = new framework::OpDesc(); grad_op->SetType("array_to_lod_tensor"); grad_op->SetInput("X", OutputGrad("Out")); grad_op->SetInput("RankTable", Input("RankTable")); grad_op->SetOutput("Out", InputGrad("X")); grad_op->SetAttrMap(Attrs()); - return std::unique_ptr(grad_op); + return std::unique_ptr(grad_op); } }; diff --git a/paddle/operators/log_loss_op.cc b/paddle/operators/log_loss_op.cc index 4524229a33..f714945354 100644 --- a/paddle/operators/log_loss_op.cc +++ b/paddle/operators/log_loss_op.cc @@ -46,8 +46,7 @@ class LogLossOp : public framework::OperatorWithKernel { template class LogLossOpMaker : public framework::OpProtoAndCheckerMaker { public: - LogLossOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker) + LogLossOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("Predicted", "The input value (Predicted) of Log loss op." 
diff --git a/paddle/operators/log_loss_op.cu b/paddle/operators/log_loss_op.cu index e87ac7d12a..be283e4700 100644 --- a/paddle/operators/log_loss_op.cu +++ b/paddle/operators/log_loss_op.cu @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #define EIGEN_USE_GPU #include "paddle/operators/log_loss_op.h" diff --git a/paddle/operators/logical_op.cc b/paddle/operators/logical_op.cc index c818d5e9c1..7417192479 100644 --- a/paddle/operators/logical_op.cc +++ b/paddle/operators/logical_op.cc @@ -1,16 +1,16 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ #include "paddle/operators/logical_op.h" #include "paddle/framework/op_registry.h" @@ -20,8 +20,7 @@ namespace operators { template class BinaryLogicalOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - BinaryLogicalOpProtoMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + BinaryLogicalOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { OpComment comment; AddInput("X", @@ -45,8 +44,7 @@ Each element of Out is calculated by %s template class UnaryLogicalOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - UnaryLogicalOpProtoMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + UnaryLogicalOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { OpComment comment; AddInput("X", string::Sprintf("(LoDTensor) Operand of %s operator", @@ -101,9 +99,9 @@ class LogicalOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - framework::OpKernelType GetKernelType( + framework::OpKernelType GetActualKernelType( const framework::ExecutionContext &ctx) const override { - framework::OpKernelType kt = OperatorWithKernel::GetKernelType(ctx); + framework::OpKernelType kt = OperatorWithKernel::GetActualKernelType(ctx); // LogicalOp kernel's device type is decided by input tensor place kt.place_ = ctx.Input("X")->place(); return kt; diff --git a/paddle/operators/logical_op.cu b/paddle/operators/logical_op.cu index 7fef60e0c9..87f2287b8f 100644 --- a/paddle/operators/logical_op.cu +++ b/paddle/operators/logical_op.cu @@ -1,16 +1,16 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/logical_op.h" diff --git a/paddle/operators/logical_op.h b/paddle/operators/logical_op.h index 629388cac8..4138576856 100644 --- a/paddle/operators/logical_op.h +++ b/paddle/operators/logical_op.h @@ -1,16 +1,16 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once #include diff --git a/paddle/operators/lookup_table_op.cc b/paddle/operators/lookup_table_op.cc index 93e812ac5b..6e5cbd6f8c 100644 --- a/paddle/operators/lookup_table_op.cc +++ b/paddle/operators/lookup_table_op.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ #include "paddle/operators/lookup_table_op.h" #include "paddle/framework/var_type_inference.h" @@ -41,7 +41,7 @@ class LookupTableOp : public framework::OperatorWithKernel { } protected: - framework::OpKernelType GetKernelType( + framework::OpKernelType GetActualKernelType( const framework::ExecutionContext& ctx) const override { return framework::OpKernelType( framework::ToDataType(ctx.Input("W")->type()), @@ -51,8 +51,7 @@ class LookupTableOp : public framework::OperatorWithKernel { class LookupTableOpMaker : public framework::OpProtoAndCheckerMaker { public: - LookupTableOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker) + LookupTableOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("W", "An input represents embedding tensors, " @@ -99,7 +98,7 @@ class LookupTableOpGrad : public framework::OperatorWithKernel { } protected: - framework::OpKernelType GetKernelType( + framework::OpKernelType GetActualKernelType( const framework::ExecutionContext& ctx) const override { return framework::OpKernelType( framework::ToDataType(ctx.Input("W")->type()), @@ -109,19 +108,20 @@ class LookupTableOpGrad : public framework::OperatorWithKernel { class LookupTableOpGradVarTypeInference : public framework::VarTypeInference { public: - void operator()(const framework::OpDescBind& op_desc, - framework::BlockDescBind* block) const override { + void operator()(const framework::OpDesc& op_desc, + framework::BlockDesc* block) const override { auto out_var_name = op_desc.Output(framework::GradVarName("W")).front(); auto attr = op_desc.GetAttr("is_sparse"); bool is_sparse = boost::get(attr); if (is_sparse) { VLOG(3) << "lookup_table_grad op " << framework::GradVarName("W") << " is set to SelectedRows"; - block->Var(out_var_name)->SetType(framework::VarDesc::SELECTED_ROWS); + block->Var(out_var_name) + ->SetType(framework::proto::VarDesc::SELECTED_ROWS); } else { VLOG(3) << "lookup_table_grad op " << framework::GradVarName("W") << " is set to LoDTensor"; - block->Var(out_var_name)->SetType(framework::VarDesc::LOD_TENSOR); + block->Var(out_var_name)->SetType(framework::proto::VarDesc::LOD_TENSOR); } } }; diff --git a/paddle/operators/lookup_table_op.cu b/paddle/operators/lookup_table_op.cu index 9431030a53..261a28da69 100644 --- a/paddle/operators/lookup_table_op.cu +++ b/paddle/operators/lookup_table_op.cu @@ -1,13 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/framework/eigen.h" #include "paddle/framework/op_registry.h" @@ -101,7 +104,7 @@ class LookupTableGradCUDAKernel : public framework::OpKernel { // copy GPU memory to CPU pinned memory framework::Vector<int64_t> new_rows; new_rows.resize(ids_dim[0]); - auto gpu_place = boost::get<platform::GPUPlace>(context.GetPlace()); + auto gpu_place = boost::get<platform::CUDAPlace>(context.GetPlace()); memory::Copy(platform::CPUPlace(), new_rows.data(), gpu_place, ids_data, ids_dim[0] * sizeof(int64_t), stream); diff --git a/paddle/operators/lookup_table_op.h b/paddle/operators/lookup_table_op.h index 99b912163b..2fd3335868 100644 --- a/paddle/operators/lookup_table_op.h +++ b/paddle/operators/lookup_table_op.h @@ -1,13 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once diff --git a/paddle/operators/lrn_op.cc b/paddle/operators/lrn_op.cc index b5b7bc940a..95673ba19e 100644 --- a/paddle/operators/lrn_op.cc +++ b/paddle/operators/lrn_op.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
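The lookup_table CUDA hunk above doubles as a compact example of the device-to-host copy idiom around the GPUPlace→CUDAPlace rename; a sketch with the stripped template arguments restored (types inferred from the surrounding ids buffer):

// Copy the selected row ids from the device back to the host so the
// SelectedRows gradient can be assembled (mirrors lookup_table_op.cu).
framework::Vector<int64_t> new_rows;
new_rows.resize(ids_dim[0]);
auto gpu_place = boost::get<platform::CUDAPlace>(context.GetPlace());
memory::Copy(platform::CPUPlace(), new_rows.data(),  // dst: CPU pinned memory
             gpu_place, ids_data,                    // src: ids on the GPU
             ids_dim[0] * sizeof(int64_t), stream);  // async on `stream`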
*/ #include "paddle/operators/lrn_op.h" @@ -140,7 +140,7 @@ class LRNOp : public framework::OperatorWithKernel { template class LRNOpMaker : public framework::OpProtoAndCheckerMaker { public: - LRNOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) + LRNOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "(Tensor) The input of LRN operator. " diff --git a/paddle/operators/lrn_op.cu b/paddle/operators/lrn_op.cu index c6857c2b6d..eb9d66a73d 100644 --- a/paddle/operators/lrn_op.cu +++ b/paddle/operators/lrn_op.cu @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/lrn_op.h" diff --git a/paddle/operators/lrn_op.h b/paddle/operators/lrn_op.h index 44063d3e03..ef3a2883a8 100644 --- a/paddle/operators/lrn_op.h +++ b/paddle/operators/lrn_op.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - You may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ #pragma once diff --git a/paddle/operators/lstm_op.cc b/paddle/operators/lstm_op.cc index 2db7da30db..b8fcec0f29 100644 --- a/paddle/operators/lstm_op.cc +++ b/paddle/operators/lstm_op.cc @@ -92,7 +92,7 @@ class LSTMOp : public framework::OperatorWithKernel { } protected: - framework::OpKernelType GetKernelType( + framework::OpKernelType GetActualKernelType( const framework::ExecutionContext& ctx) const override { return framework::OpKernelType( framework::ToDataType(ctx.Input("Input")->type()), @@ -102,7 +102,7 @@ class LSTMOp : public framework::OperatorWithKernel { class LSTMOpMaker : public framework::OpProtoAndCheckerMaker { public: - LSTMOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) + LSTMOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("Input", "(LoDTensor) the first input is a LodTensor, which support " @@ -260,7 +260,7 @@ class LSTMGradOp : public framework::OperatorWithKernel { } protected: - framework::OpKernelType GetKernelType( + framework::OpKernelType GetActualKernelType( const framework::ExecutionContext& ctx) const override { return framework::OpKernelType( framework::ToDataType(ctx.Input("Input")->type()), diff --git a/paddle/operators/lstm_op.cu.cc b/paddle/operators/lstm_op.cu.cc index 48519bed6f..cfcc1fc92a 100644 --- a/paddle/operators/lstm_op.cu.cc +++ b/paddle/operators/lstm_op.cu.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/lstm_op.h" diff --git a/paddle/operators/lstm_op.h b/paddle/operators/lstm_op.h index 14abd4bf0a..c57ee414dc 100644 --- a/paddle/operators/lstm_op.h +++ b/paddle/operators/lstm_op.h @@ -14,6 +14,7 @@ limitations under the License. 
*/ #pragma once #include "paddle/framework/op_registry.h" +#include "paddle/operators/math/detail/activation_functions.h" #include "paddle/operators/math/lstm_compute.h" #include "paddle/operators/math/math_function.h" #include "paddle/operators/math/sequence2batch.h" @@ -102,9 +103,12 @@ class LSTMKernel : public framework::OpKernel { auto batch_starts = batch_gate->lod()[0]; size_t num_batch = batch_starts.size() - 1; - auto gate_act = ctx.Attr<std::string>("gate_activation"); - auto cell_act = ctx.Attr<std::string>("cell_activation"); - auto cand_act = ctx.Attr<std::string>("candidate_activation"); + auto gate_act = math::detail::GetActivationType( + ctx.Attr<std::string>("gate_activation")); + auto cell_act = math::detail::GetActivationType( + ctx.Attr<std::string>("cell_activation")); + auto cand_act = math::detail::GetActivationType( + ctx.Attr<std::string>("candidate_activation")); for (size_t n = 0; n < num_batch; n++) { int bstart = static_cast<int>(batch_starts[n]); @@ -264,9 +268,12 @@ class LSTMGradKernel : public framework::OpKernel { batch_gate_g.mutable_data<T>(batch_gate->dims(), ctx.GetPlace()); batch_gate_g.set_lod(batch_gate->lod()); - auto gate_act = ctx.Attr<std::string>("gate_activation"); - auto cell_act = ctx.Attr<std::string>("cell_activation"); - auto cand_act = ctx.Attr<std::string>("candidate_activation"); + auto gate_act = math::detail::GetActivationType( + ctx.Attr<std::string>("gate_activation")); + auto cell_act = math::detail::GetActivationType( + ctx.Attr<std::string>("cell_activation")); + auto cand_act = math::detail::GetActivationType( + ctx.Attr<std::string>("candidate_activation")); auto batch_starts = batch_gate->lod()[0]; size_t num_batch = batch_starts.size() - 1; diff --git a/paddle/operators/lstm_unit_op.cc b/paddle/operators/lstm_unit_op.cc index 18b9cdf2a3..c2d2c43982 100644 --- a/paddle/operators/lstm_unit_op.cc +++ b/paddle/operators/lstm_unit_op.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/lstm_unit_op.h" @@ -48,10 +48,12 @@ class LstmUnitOp : public framework::OperatorWithKernel { class LstmUnitOpMaker : public framework::OpProtoAndCheckerMaker { public: - LstmUnitOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker) + LstmUnitOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "FC input before the non-linear activation."); + AddInput("X", + "Lstm unit only applies non-linear activations, please make sure " + "that linear transformation has already been applied to `X`.
" + "Linear tranformation can be applied by adding a `fc` layer"); AddInput( "C_prev", "The cell state tensor of last time-step in the Lstm Unit operator."); diff --git a/paddle/operators/lstm_unit_op.cu b/paddle/operators/lstm_unit_op.cu index 291f2c295e..5ee5ddd280 100644 --- a/paddle/operators/lstm_unit_op.cu +++ b/paddle/operators/lstm_unit_op.cu @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ /* Acknowledgement: the following code is strongly inspired by https://github.com/caffe2/caffe2/blob/master/caffe2/operators/lstm_unit_op_gpu.cu @@ -98,7 +98,7 @@ class LstmUnitOpCUDAKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), - "It must use GPUPlace."); + "It must use CUDAPlace."); auto* x_tensor = ctx.Input("X"); auto* c_prev_tensor = ctx.Input("C_prev"); @@ -129,7 +129,7 @@ class LstmUnitGradOpCUDAKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), - "It must use GPUPlace."); + "It must use CUDAPlace."); auto x_tensor = ctx.Input("X"); auto c_prev_tensor = ctx.Input("C_prev"); diff --git a/paddle/operators/lstm_unit_op.h b/paddle/operators/lstm_unit_op.h index 61705675d9..fa8d141bcb 100644 --- a/paddle/operators/lstm_unit_op.h +++ b/paddle/operators/lstm_unit_op.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
*/ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ /* Acknowledgement: the following code is strongly inspired by https://github.com/caffe2/caffe2/blob/master/caffe2/operators/lstm_unit_op.h diff --git a/paddle/operators/margin_rank_loss_op.cc b/paddle/operators/margin_rank_loss_op.cc index 42e8961c0e..e0df307774 100644 --- a/paddle/operators/margin_rank_loss_op.cc +++ b/paddle/operators/margin_rank_loss_op.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/margin_rank_loss_op.h" @@ -42,8 +42,7 @@ class MarginRankLossOp : public framework::OperatorWithKernel { template class MarginRankLossOpMaker : public framework::OpProtoAndCheckerMaker { public: - MarginRankLossOpMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + MarginRankLossOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X1", "(2-D tensor with shape [batch_size x 1]) The score for " diff --git a/paddle/operators/margin_rank_loss_op.cu b/paddle/operators/margin_rank_loss_op.cu index 1c2afccc5b..798c3ed182 100644 --- a/paddle/operators/margin_rank_loss_op.cu +++ b/paddle/operators/margin_rank_loss_op.cu @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
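Stepping back to the `lstm_unit` doc change a few hunks up: the reworded `X` description encodes a real precondition, since the unit performs only the gate nonlinearities and cell update, with the gate pre-activations expected to come from an earlier linear (`fc`) transform. In scalar form, the work the unit itself does per element looks roughly like this (a sketch of the standard LSTM cell equations; the function names here are ours, not Paddle's):

```cpp
#include <cassert>
#include <cmath>

float Sigmoid(float x) { return 1.0f / (1.0f + std::exp(-x)); }

// i, f, o, g are gate pre-activations that a linear layer must have
// produced beforehand -- exactly what the revised doc string demands of `X`.
float LstmCell(float i, float f, float o, float g, float c_prev,
               float* h_out) {
  float c = Sigmoid(f) * c_prev + Sigmoid(i) * std::tanh(g);
  *h_out = Sigmoid(o) * std::tanh(c);
  return c;
}

int main() {
  float h = 0.0f;
  float c = LstmCell(0.0f, 0.0f, 0.0f, 0.0f, 1.0f, &h);
  // With all pre-activations at 0, every gate is 0.5 and tanh(0) = 0,
  // so the new cell state is 0.5 * c_prev.
  assert(std::fabs(c - 0.5f) < 1e-6f);
}
```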
*/ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/margin_rank_loss_op.h" diff --git a/paddle/operators/margin_rank_loss_op.h b/paddle/operators/margin_rank_loss_op.h index 9c1f96cac1..7438e881e1 100644 --- a/paddle/operators/margin_rank_loss_op.h +++ b/paddle/operators/margin_rank_loss_op.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once diff --git a/paddle/operators/math/cross_entropy.cc b/paddle/operators/math/cross_entropy.cc index 6011a196d4..d9cb016fb4 100644 --- a/paddle/operators/math/cross_entropy.cc +++ b/paddle/operators/math/cross_entropy.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/math/cross_entropy.h" diff --git a/paddle/operators/math/cross_entropy.cu b/paddle/operators/math/cross_entropy.cu index 2132d49c93..16c9e7b28e 100644 --- a/paddle/operators/math/cross_entropy.cu +++ b/paddle/operators/math/cross_entropy.cu @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. 
- Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/math/cross_entropy.h" diff --git a/paddle/operators/math/cross_entropy.h b/paddle/operators/math/cross_entropy.h index 677adb5ada..b3b6d767a8 100644 --- a/paddle/operators/math/cross_entropy.h +++ b/paddle/operators/math/cross_entropy.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once #include "paddle/framework/eigen.h" diff --git a/paddle/operators/math/detail/activation_functions.h b/paddle/operators/math/detail/activation_functions.h index a20c35d1d9..585a012343 100644 --- a/paddle/operators/math/detail/activation_functions.h +++ b/paddle/operators/math/detail/activation_functions.h @@ -14,6 +14,7 @@ limitations under the License. 
*/ #pragma once #include <math.h> +#include "paddle/platform/enforce.h" #include "paddle/platform/hostdevice.h" #ifdef __AVX__ @@ -29,6 +30,26 @@ namespace detail { #define SIGMOID_THRESHOLD_MAX 13.0 #define EXP_MAX_INPUT 40.0 +enum ActivationType { + kSigmoid, + kReLU, + kTanh, + kIdentity, +}; + +inline ActivationType GetActivationType(const std::string &type) { + if (type == "sigmoid") { + return ActivationType::kSigmoid; + } else if (type == "relu") { + return ActivationType::kReLU; + } else if (type == "tanh") { + return ActivationType::kTanh; + } else if (type == "identity" || type == "") { + return ActivationType::kIdentity; + } + PADDLE_THROW("Unsupported activation type: %s.", type); +} + namespace forward { template <typename T> diff --git a/paddle/operators/math/detail/lstm_cpu_kernel.h b/paddle/operators/math/detail/lstm_cpu_kernel.h index a734ad31ee..42888fcdb0 100644 --- a/paddle/operators/math/detail/lstm_cpu_kernel.h +++ b/paddle/operators/math/detail/lstm_cpu_kernel.h @@ -26,10 +26,9 @@ namespace detail { template <class T, class Op> void naive_lstm_forward_one_sequence(Op op, LstmMetaValue<T> value, - int frame_size, - activation_mode_t active_node, - activation_mode_t active_gate, - activation_mode_t active_state) { + int frame_size, ActivationType active_node, + ActivationType active_gate, + ActivationType active_state) { T r_value_in; T r_value_ig; T r_value_fg; @@ -77,9 +76,9 @@ void naive_lstm_forward_one_sequence(Op op, LstmMetaValue<T> value, template <class T, class Op> void naive_lstm_backward_one_sequence(Op op, LstmMetaValue<T> value, LstmMetaGrad<T> grad, int frame_size, - activation_mode_t active_node, - activation_mode_t active_gate, - activation_mode_t active_state) { + ActivationType active_node, + ActivationType active_gate, + ActivationType active_state) { T r_value_in; T r_value_ig; T r_value_fg; @@ -149,10 +148,9 @@ void naive_lstm_backward_one_sequence(Op op, LstmMetaValue<T> value, template <class T, class Op> void avx_lstm_forward_one_sequence(Op op, LstmMetaValue<T> value, - int frame_size, - activation_mode_t active_node, - activation_mode_t active_gate, - activation_mode_t active_state) { + int frame_size, ActivationType active_node, + ActivationType active_gate, + ActivationType active_state) { #ifdef __AVX__ __m256 r_value_in; __m256 r_value_ig; @@ -204,9 +202,9 @@ void avx_lstm_forward_one_sequence(Op op, LstmMetaValue<T> value, template <class T, class Op> void avx_lstm_backward_one_sequence(Op op, LstmMetaValue<T> value, LstmMetaGrad<T> grad, int frame_size, - activation_mode_t active_node, - activation_mode_t active_gate, - activation_mode_t active_state) { + ActivationType active_node, + ActivationType active_gate, + ActivationType active_state) { #ifdef __AVX__ __m256 r_value_in; __m256 r_value_ig; @@ -281,9 +279,8 @@ void avx_lstm_backward_one_sequence(Op op, LstmMetaValue<T> value, template <class T, class Op> void cpu_lstm_forward(Op op, LstmMetaValue<T> value, int frame_size, - activation_mode_t active_node, - activation_mode_t active_gate, - activation_mode_t active_state) { + ActivationType active_node, ActivationType active_gate, + ActivationType active_state) { if (Op::avx && !(frame_size & (8 - 1)) && (std::is_same<T, float>::value)) { avx_lstm_forward_one_sequence<T>(op, value, frame_size, active_node, active_gate, active_state); @@ -295,9 +292,9 @@ void cpu_lstm_forward(Op op, LstmMetaValue<T> value, int frame_size, template <class T, class Op> void cpu_lstm_backward(Op op, LstmMetaValue<T> value, LstmMetaGrad<T> grad, - int frame_size, activation_mode_t active_node, - activation_mode_t active_gate, - activation_mode_t active_state) { + int frame_size, ActivationType active_node, + ActivationType active_gate, + ActivationType 
active_state) { if (Op::avx && !(frame_size & (8 - 1)) && (std::is_same::value)) { avx_lstm_backward_one_sequence(op, value, grad, frame_size, active_node, active_gate, active_state); diff --git a/paddle/operators/math/detail/lstm_gpu_kernel.h b/paddle/operators/math/detail/lstm_gpu_kernel.h index 91bfedea53..e31e657e8b 100644 --- a/paddle/operators/math/detail/lstm_gpu_kernel.h +++ b/paddle/operators/math/detail/lstm_gpu_kernel.h @@ -31,9 +31,9 @@ namespace detail { */ template __global__ void KeLstmForward(Op op, LstmMetaValue value, int frame_size, - int batch_size, activation_mode_t active_node, - activation_mode_t active_gate, - activation_mode_t active_state) { + int batch_size, ActivationType active_node, + ActivationType active_gate, + ActivationType active_state) { const int frame_idx = blockIdx.x * blockDim.x + threadIdx.x; if (frame_idx >= frame_size) return; @@ -91,9 +91,9 @@ __global__ void KeLstmForward(Op op, LstmMetaValue value, int frame_size, template __global__ void KeLstmBackward(Op op, LstmMetaValue value, LstmMetaGrad grad, int frame_size, - int batch_size, activation_mode_t active_node, - activation_mode_t active_gate, - activation_mode_t active_state) { + int batch_size, ActivationType active_node, + ActivationType active_gate, + ActivationType active_state) { const int frame_idx = blockIdx.x * blockDim.x + threadIdx.x; if (frame_idx >= frame_size) return; @@ -185,9 +185,8 @@ __global__ void KeLstmBackward(Op op, LstmMetaValue value, template void gpu_lstm_forward(const platform::DeviceContext& context, Op op, LstmMetaValue value, int frame_size, int batch_size, - activation_mode_t active_node, - activation_mode_t active_gate, - activation_mode_t active_state) { + ActivationType active_node, ActivationType active_gate, + ActivationType active_state) { dim3 threads; dim3 grid; if (batch_size == 1) { @@ -220,9 +219,8 @@ template void gpu_lstm_backward(const platform::DeviceContext& context, Op op, LstmMetaValue value, LstmMetaGrad grad, int frame_size, int batch_size, - activation_mode_t active_node, - activation_mode_t active_gate, - activation_mode_t active_state) { + ActivationType active_node, ActivationType active_gate, + ActivationType active_state) { dim3 threads; dim3 grid; if (batch_size == 1) { diff --git a/paddle/operators/math/detail/lstm_kernel.h b/paddle/operators/math/detail/lstm_kernel.h index 78f9a249a3..fed8f9c4ca 100644 --- a/paddle/operators/math/detail/lstm_kernel.h +++ b/paddle/operators/math/detail/lstm_kernel.h @@ -30,9 +30,9 @@ class lstm { HOSTDEVICE void operator()(T &value_in, T &value_ig, T &value_fg, T &value_og, T &prev_state, T &state, T &state_atv, T &output, T &checkI, T &checkF, T &checkO, - activation_mode_t active_node, - activation_mode_t active_gate, - activation_mode_t active_state) { + ActivationType active_node, + ActivationType active_gate, + ActivationType active_state) { value_in = activation(value_in, active_node); value_ig = activation(value_ig + prev_state * checkI, active_gate); value_fg = activation(value_fg + prev_state * checkF, active_gate); @@ -53,9 +53,9 @@ class lstm { __m256 &prev_state, __m256 &state, __m256 &state_atv, __m256 &output, __m256 &checkI, __m256 &checkF, __m256 &checkO, - activation_mode_t active_node, - activation_mode_t active_gate, - activation_mode_t active_state) { + ActivationType active_node, + ActivationType active_gate, + ActivationType active_state) { value_in = activation(value_in, active_node); value_ig = activation(_mm256_add_ps(value_ig, _mm256_mul_ps(prev_state, checkI)), @@ -87,9 
+87,9 @@ class lstm { T &state_grad, T &state_atv, T &output_grad, T &checkI, T &checkF, T &checkO, T &checkIGrad, T &checkFGrad, T &checkOGrad, - activation_mode_t active_node, - activation_mode_t active_gate, - activation_mode_t active_state) { + ActivationType active_node, + ActivationType active_gate, + ActivationType active_state) { grad_og = activation(output_grad * state_atv, value_og, active_gate); state_grad += activation(output_grad * value_og, state_atv, active_state) + grad_og * checkO; @@ -114,8 +114,8 @@ class lstm { __m256 &prev_state, __m256 &prev_state_grad, __m256 &state, __m256 &state_grad, __m256 &state_atv, __m256 &output_grad, __m256 &checkI, __m256 &checkF, __m256 &checkO, __m256 &checkIGrad, - __m256 &checkFGrad, __m256 &checkOGrad, activation_mode_t active_node, - activation_mode_t active_gate, activation_mode_t active_state) { + __m256 &checkFGrad, __m256 &checkOGrad, ActivationType active_node, + ActivationType active_gate, ActivationType active_state) { grad_og = activation(_mm256_mul_ps(output_grad, state_atv), value_og, active_gate); state_grad = _mm256_add_ps(activation(_mm256_mul_ps(output_grad, value_og), diff --git a/paddle/operators/math/im2col.cc b/paddle/operators/math/im2col.cc index 707ebf0596..c2633b2e16 100644 --- a/paddle/operators/math/im2col.cc +++ b/paddle/operators/math/im2col.cc @@ -61,14 +61,13 @@ class Im2ColFunctor(); T* col_data = col->data(); - for (int c = 0; c < channels_col; ++c) { int w_offset = c % filter_width; int h_offset = (c / filter_width) % filter_height; - int c_im = c / filter_width / filter_height; + int c_im = c / (filter_width * filter_height); for (int h = 0; h < col_height; ++h) { + int im_row_idx = h * stride[0] - padding[0] + h_offset * dilation[0]; for (int w = 0; w < col_width; ++w) { - int im_row_idx = h * stride[0] - padding[0] + h_offset * dilation[0]; int im_col_idx = w * stride[1] - padding[1] + w_offset * dilation[1]; int col_idx = (c * col_height + h) * col_width + w; int im_idx = (im_row_idx + c_im * im_height) * im_width + im_col_idx; @@ -130,16 +129,14 @@ class Col2ImFunctor= 0 && (im_row_idx) < im_height && (im_col_idx) >= 0 && (im_col_idx) < im_width) { - im_row_idx += c_im * im_height; - im_data[im_row_idx * im_width + im_col_idx] += + im_data[(im_row_idx + c_im * im_height) * im_width + im_col_idx] += col_data[(c * col_height + h) * col_width + w]; } } @@ -199,12 +196,13 @@ class Im2ColFunctor= 0 && im_row_offset < im_height && im_col_offset >= 0 && im_col_offset < im_width) { int im_offset = diff --git a/paddle/operators/math/im2col_test.cc b/paddle/operators/math/im2col_test.cc index 256f3bc9bd..26c038e435 100644 --- a/paddle/operators/math/im2col_test.cc +++ b/paddle/operators/math/im2col_test.cc @@ -159,6 +159,7 @@ void testIm2col() { TEST(math, im2col) { testIm2col(); #ifdef PADDLE_WITH_CUDA - testIm2col(); + testIm2col(); #endif } diff --git a/paddle/operators/math/lstm_compute.cc b/paddle/operators/math/lstm_compute.cc index 2c2e8bb82e..d453102ece 100644 --- a/paddle/operators/math/lstm_compute.cc +++ b/paddle/operators/math/lstm_compute.cc @@ -24,12 +24,12 @@ template struct LstmUnitFunctor { static void compute(const platform::CPUDeviceContext& context, LstmMetaValue value, int frame_size, int batch_size, - const std::string& gate_act, const std::string& cell_act, - const std::string& cand_act) { + const detail::ActivationType& gate_act, + const detail::ActivationType& cell_act, + const detail::ActivationType& cand_act) { for (int b = 0; b < batch_size; b++) { 
detail::cpu_lstm_forward(detail::forward::lstm(), value, frame_size, - ActiveType(cand_act), ActiveType(gate_act), - ActiveType(cell_act)); + cand_act, gate_act, cell_act); value.gate_value += frame_size * 4; value.state_value += frame_size; value.state_active_value += frame_size; @@ -46,12 +46,12 @@ struct LstmUnitGradFunctor { static void compute(const platform::CPUDeviceContext& context, LstmMetaValue value, LstmMetaGrad grad, int frame_size, int batch_size, - const std::string& gate_act, const std::string& cell_act, - const std::string& cand_act) { + const detail::ActivationType& gate_act, + const detail::ActivationType& cell_act, + const detail::ActivationType& cand_act) { for (int b = 0; b < batch_size; b++) { detail::cpu_lstm_backward(detail::backward::lstm(), value, grad, - frame_size, ActiveType(cand_act), - ActiveType(gate_act), ActiveType(cell_act)); + frame_size, cand_act, gate_act, cell_act); value.gate_value += frame_size * 4; value.state_value += frame_size; diff --git a/paddle/operators/math/lstm_compute.cu b/paddle/operators/math/lstm_compute.cu index 92b1f4228b..82065d699f 100644 --- a/paddle/operators/math/lstm_compute.cu +++ b/paddle/operators/math/lstm_compute.cu @@ -24,11 +24,12 @@ template struct LstmUnitFunctor { static void compute(const platform::CUDADeviceContext& context, LstmMetaValue value, int frame_size, int batch_size, - const std::string& gate_act, const std::string& cell_act, - const std::string& cand_act) { + const detail::ActivationType& gate_act, + const detail::ActivationType& cell_act, + const detail::ActivationType& cand_act) { detail::gpu_lstm_forward(context, detail::forward::lstm(), value, - frame_size, batch_size, ActiveType(cand_act), - ActiveType(gate_act), ActiveType(cell_act)); + frame_size, batch_size, cand_act, gate_act, + cell_act); } }; @@ -37,11 +38,12 @@ struct LstmUnitGradFunctor { static void compute(const platform::CUDADeviceContext& context, LstmMetaValue value, LstmMetaGrad grad, int frame_size, int batch_size, - const std::string& gate_act, const std::string& cell_act, - const std::string& cand_act) { + const detail::ActivationType& gate_act, + const detail::ActivationType& cell_act, + const detail::ActivationType& cand_act) { detail::gpu_lstm_backward(context, detail::backward::lstm(), value, grad, - frame_size, batch_size, ActiveType(cand_act), - ActiveType(gate_act), ActiveType(cell_act)); + frame_size, batch_size, cand_act, gate_act, + cell_act); } }; diff --git a/paddle/operators/math/lstm_compute.h b/paddle/operators/math/lstm_compute.h index 5f74e27358..954762f922 100644 --- a/paddle/operators/math/lstm_compute.h +++ b/paddle/operators/math/lstm_compute.h @@ -14,6 +14,7 @@ limitations under the License. 
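The common thread through the `activation_functions.h`, `lstm_op.h`, and `lstm_compute.*` hunks above: activation names arrive as string attributes, are resolved to an `ActivationType` enum once per kernel invocation, and the per-batch math only ever sees the enum. A standalone sketch of that pattern, runnable outside the Paddle tree (`PADDLE_THROW` is replaced by a plain exception here):

```cpp
#include <cassert>
#include <stdexcept>
#include <string>

enum class ActivationType { kSigmoid, kReLU, kTanh, kIdentity };

// Same mapping as the GetActivationType helper added above; the empty
// string defaults to identity, and unknown names fail loudly.
ActivationType GetActivationType(const std::string& type) {
  if (type == "sigmoid") return ActivationType::kSigmoid;
  if (type == "relu") return ActivationType::kReLU;
  if (type == "tanh") return ActivationType::kTanh;
  if (type == "identity" || type.empty()) return ActivationType::kIdentity;
  throw std::invalid_argument("unsupported activation type: " + type);
}

int main() {
  // Resolved once, outside the hot per-batch loop ...
  ActivationType gate_act = GetActivationType("sigmoid");
  for (int b = 0; b < 4; ++b) {
    // ... so each step branches on a cheap enum, not a string compare.
    assert(gate_act == ActivationType::kSigmoid);
  }
  assert(GetActivationType("") == ActivationType::kIdentity);
}
```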
*/ #pragma once +#include "paddle/operators/math/detail/activation_functions.h" #include "paddle/platform/device_context.h" #include "paddle/platform/enforce.h" @@ -72,8 +73,9 @@ class LstmUnitFunctor { public: static void compute(const DeviceContext &context, LstmMetaValue<T> value, int frame_size, int batch_size, - const std::string &gate_act, const std::string &cell_act, - const std::string &cand_act); + const detail::ActivationType &gate_act, + const detail::ActivationType &cell_act, + const detail::ActivationType &cand_act); }; template <typename DeviceContext, typename T> @@ -81,8 +83,9 @@ class LstmUnitGradFunctor { public: static void compute(const DeviceContext &context, LstmMetaValue<T> value, LstmMetaGrad<T> grad, int frame_size, int batch_size, - const std::string &gate_act, const std::string &cell_act, - const std::string &cand_act); + const detail::ActivationType &gate_act, + const detail::ActivationType &cell_act, + const detail::ActivationType &cand_act); }; } // namespace math diff --git a/paddle/operators/math/math_function.cu b/paddle/operators/math/math_function.cu index 1b560a7e2d..927838a094 100644 --- a/paddle/operators/math/math_function.cu +++ b/paddle/operators/math/math_function.cu @@ -105,7 +105,7 @@ void matmul( PADDLE_ENFORCE(platform::is_gpu_place(matrix_a.place()) && platform::is_gpu_place(matrix_b.place()) && platform::is_gpu_place(matrix_out->place()), - "Matrix must all be in GPUPlace"); + "Matrix must all be in CUDAPlace"); int M = dim_out[0]; int N = dim_out[1]; @@ -134,7 +134,7 @@ void matmul( PADDLE_ENFORCE(platform::is_gpu_place(matrix_a.place()) && platform::is_gpu_place(matrix_b.place()) && platform::is_gpu_place(matrix_out->place()), - "Matrix must all be in GPUPlace"); + "Matrix must all be in CUDAPlace"); int M = dim_out[0]; int N = dim_out[1]; @@ -266,7 +266,7 @@ struct TensorSetConstantGPU { }; template <> -void set_constant_with_place<platform::GPUPlace>( +void set_constant_with_place<platform::CUDAPlace>( const platform::DeviceContext& context, framework::Tensor* tensor, float value) { framework::VisitDataType(framework::ToDataType(tensor->type()), diff --git a/paddle/operators/math/math_function_impl.h b/paddle/operators/math/math_function_impl.h index 3e6d833865..ddd798dace 100644 --- a/paddle/operators/math/math_function_impl.h +++ b/paddle/operators/math/math_function_impl.h @@ -67,18 +67,45 @@ void RowwiseAdd<DeviceContext, T>::operator()(const DeviceContext& context, template <typename DeviceContext, typename T> void ColwiseSum<DeviceContext, T>::operator()(const DeviceContext& context, const framework::Tensor& input, - framework::Tensor* vector) { + framework::Tensor* out) { auto in_dims = input.dims(); auto size = input.numel() / in_dims[0]; - PADDLE_ENFORCE_EQ(vector->numel(), size); + PADDLE_ENFORCE_EQ(out->numel(), size); - auto vec = framework::EigenMatrix<T>::From(*vector); auto in = framework::EigenMatrix<T>::From(input); - Eigen::array<int, 2> shape({{1, static_cast<int>(size)}}); - vec.reshape(shape).device(*context.eigen_device()) = - in.sum(Eigen::array<int, 1>({{0}})).reshape(shape); + auto vec = framework::EigenVector<T>::Flatten(*out); + + vec.device(*context.eigen_device()) = in.sum(Eigen::array<int, 1>({{0}})); } +// Specialize for CPU, since Eigen implements a general reduce. However, +// colwise-sum can be easily implemented. 
A general reduce has a huge overhead on +// CPU. +template <typename T> +class ColwiseSum<platform::CPUDeviceContext, T> { + public: + void operator()(const platform::CPUDeviceContext& context, + const framework::Tensor& input, framework::Tensor* out) { + auto& in_dims = input.dims(); + auto height = in_dims[0]; + auto size = in_dims[1]; + PADDLE_ENFORCE_EQ(out->numel(), size); + + T* out_buf = out->mutable_data<T>(out->place()); + const T* in_buf = input.data<T>(); + + for (size_t i = 0; i < static_cast<size_t>(height); ++i) { + for (size_t j = 0; j < static_cast<size_t>(size); ++j) { + if (i == 0) { + out_buf[j] = in_buf[i * size + j]; + } else { + out_buf[j] += in_buf[i * size + j]; + } + } + } + } +}; + } // namespace math } // namespace operators } // namespace paddle diff --git a/paddle/operators/math/math_function_test.cu b/paddle/operators/math/math_function_test.cu index 32e96d9487..4325a79664 100644 --- a/paddle/operators/math/math_function_test.cu +++ b/paddle/operators/math/math_function_test.cu @@ -13,7 +13,7 @@ TEST(math_function, notrans_mul_trans) { float arr[6] = {0, 1, 2, 3, 4, 5}; memcpy(input1_ptr, arr, 6 * sizeof(float)); - auto* gpu_place = new paddle::platform::GPUPlace(0); + auto* gpu_place = new paddle::platform::CUDAPlace(0); paddle::platform::CUDADeviceContext context(*gpu_place); paddle::framework::CopyFrom(input1, *gpu_place, context, &input1_gpu); @@ -47,7 +47,7 @@ TEST(math_function, trans_mul_notrans) { float arr[6] = {0, 1, 2, 3, 4, 5}; memcpy(input1_ptr, arr, 6 * sizeof(float)); - auto* gpu_place = new paddle::platform::GPUPlace(0); + auto* gpu_place = new paddle::platform::CUDAPlace(0); paddle::platform::CUDADeviceContext context(*gpu_place); paddle::framework::CopyFrom(input1, *gpu_place, context, &input1_gpu); @@ -96,7 +96,7 @@ TEST(math_function, gemm_notrans_cublas) { float arr3[8] = {0, 1, 2, 3, 4, 5, 6, 7}; memcpy(input3_ptr, arr3, 8 * sizeof(float)); - auto* gpu_place = new paddle::platform::GPUPlace(0); + auto* gpu_place = new paddle::platform::CUDAPlace(0); paddle::platform::CUDADeviceContext context(*gpu_place); paddle::framework::CopyFrom(input1, *gpu_place, context, &input1_gpu); @@ -151,7 +151,7 @@ TEST(math_function, gemm_trans_cublas) { float arr3[8] = {0, 1, 2, 3, 4, 5, 6, 7}; memcpy(input3_ptr, arr3, 8 * sizeof(float)); - auto* gpu_place = new paddle::platform::GPUPlace(0); + auto* gpu_place = new paddle::platform::CUDAPlace(0); paddle::platform::CUDADeviceContext context(*gpu_place); paddle::framework::CopyFrom(input1, *gpu_place, context, &input1_gpu); @@ -189,7 +189,7 @@ void GemvTest(int m, int n, bool trans) { T* data_b = vec_b.mutable_data<T>({trans ? m : n}, *cpu_place); T* data_c = vec_c.mutable_data<T>({trans ?
n : m}, *cpu_place); - auto* gpu_place = new paddle::platform::GPUPlace(0); + auto* gpu_place = new paddle::platform::CUDAPlace(0); paddle::framework::Tensor g_mat_a; paddle::framework::Tensor g_vec_b; paddle::framework::Tensor g_vec_c; diff --git a/paddle/operators/math/selected_rows_functor.cu b/paddle/operators/math/selected_rows_functor.cu index c44577e00a..9fddd97a36 100644 --- a/paddle/operators/math/selected_rows_functor.cu +++ b/paddle/operators/math/selected_rows_functor.cu @@ -58,15 +58,15 @@ struct SelectedRowsAdd { PADDLE_ENFORCE(platform::is_gpu_place(out_place)); memory::Copy( - boost::get(out_place), out_data, - boost::get(in1_place), in1_data, + boost::get(out_place), out_data, + boost::get(in1_place), in1_data, in1_value.numel() * sizeof(T), reinterpret_cast(context).stream()); auto* in2_data = in2_value.data(); - memory::Copy(boost::get(out_place), + memory::Copy(boost::get(out_place), out_data + in1_value.numel(), - boost::get(in2_place), in2_data, + boost::get(in2_place), in2_data, in2_value.numel() * sizeof(T), context.stream()); } }; @@ -160,9 +160,9 @@ struct SelectedRowsAddTo { auto* in1_data = in1_value.data(); auto* in2_data = in2_value->data(); - memory::Copy(boost::get(in2_place), + memory::Copy(boost::get(in2_place), in2_data + input2_offset, - boost::get(in1_place), in1_data, + boost::get(in1_place), in1_data, in1_value.numel() * sizeof(T), context.stream()); } }; diff --git a/paddle/operators/math/selected_rows_functor_test.cu b/paddle/operators/math/selected_rows_functor_test.cu index 777caf5635..0a2e36f68a 100644 --- a/paddle/operators/math/selected_rows_functor_test.cu +++ b/paddle/operators/math/selected_rows_functor_test.cu @@ -21,7 +21,7 @@ TEST(selected_rows_functor, gpu_add) { using namespace paddle::platform; using namespace paddle::operators::math; - GPUPlace gpu_place(0); + CUDAPlace gpu_place(0); CPUPlace cpu_place; CUDADeviceContext ctx(gpu_place); SetConstant functor; @@ -119,7 +119,7 @@ TEST(selected_rows_functor, gpu_add_to) { using namespace paddle::platform; using namespace paddle::operators::math; - GPUPlace gpu_place(0); + CUDAPlace gpu_place(0); CPUPlace cpu_place; CUDADeviceContext ctx(gpu_place); SetConstant functor; diff --git a/paddle/operators/math/vol2col_test.cc b/paddle/operators/math/vol2col_test.cc index f46db3c567..3794f0e52d 100644 --- a/paddle/operators/math/vol2col_test.cc +++ b/paddle/operators/math/vol2col_test.cc @@ -122,6 +122,6 @@ TEST(math, vol2col) { testVol2col(); #ifdef PADDLE_WITH_CUDA testVol2col(); + paddle::platform::CUDAPlace>(); #endif // PADDLE_WITH_CUDA } diff --git a/paddle/operators/matmul_op.cc b/paddle/operators/matmul_op.cc index ee0bc0c370..fd65d894d5 100644 --- a/paddle/operators/matmul_op.cc +++ b/paddle/operators/matmul_op.cc @@ -130,7 +130,7 @@ class MatMulOp : public framework::OperatorWithKernel { class MatMulOpMaker : public framework::OpProtoAndCheckerMaker { public: - MatMulOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) + MatMulOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "The first input of MatMul op"); AddInput("Y", "The second input of MatMul op"); diff --git a/paddle/operators/matmul_op.cu.cc b/paddle/operators/matmul_op.cu.cc index 6a3772c004..d28d12164e 100644 --- a/paddle/operators/matmul_op.cu.cc +++ b/paddle/operators/matmul_op.cu.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. 
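The CPU specialization of `ColwiseSum` a few hunks above replaces Eigen's generic reduction with a single linear pass. The same loop, lifted out as a standalone function over raw buffers (the names are ours, for illustration), shows how little work a column-wise sum actually needs:

```cpp
#include <cassert>
#include <vector>

// Row-major height x width input; out must hold width elements.
template <typename T>
void ColwiseSumNaive(const T* in, int height, int width, T* out) {
  for (int i = 0; i < height; ++i) {
    for (int j = 0; j < width; ++j) {
      out[j] = (i == 0) ? in[i * width + j] : out[j] + in[i * width + j];
    }
  }
}

int main() {
  std::vector<float> in = {1, 2, 3, 4, 5, 6};  // a 2 x 3 matrix
  std::vector<float> out(3);
  ColwiseSumNaive(in.data(), 2, 3, out.data());
  assert(out[0] == 5 && out[1] == 7 && out[2] == 9);  // column sums
}
```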
- Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/matmul_op.h" diff --git a/paddle/operators/matmul_op.h b/paddle/operators/matmul_op.h index de9da487b3..78adc64f76 100644 --- a/paddle/operators/matmul_op.h +++ b/paddle/operators/matmul_op.h @@ -1,16 +1,16 @@ -/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - You may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once diff --git a/paddle/operators/max_sequence_len_op.cc b/paddle/operators/max_sequence_len_op.cc index 798022c9dd..019150e491 100644 --- a/paddle/operators/max_sequence_len_op.cc +++ b/paddle/operators/max_sequence_len_op.cc @@ -1,16 +1,16 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/framework/lod_rank_table.h" #include "paddle/framework/op_registry.h" @@ -28,7 +28,7 @@ class MaxSeqenceLenOp : public framework::OperatorBase { : OperatorBase(type, inputs, outputs, attrs) {} void Run(const framework::Scope &scope, - const platform::DeviceContext &dev_ctx) const override { + const platform::Place &dev_place) const override { auto &rank_table = scope.FindVar(Input("RankTable"))->Get(); auto *out = @@ -40,8 +40,7 @@ class MaxSeqenceLenOp : public framework::OperatorBase { class MaxSeqenceLenOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - MaxSeqenceLenOpProtoMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + MaxSeqenceLenOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("RankTable", "The lod_rank_table."); AddOutput("Out", "The max sequence length."); diff --git a/paddle/operators/maxout_op.cc b/paddle/operators/maxout_op.cc index 011616e615..3ee3226941 100644 --- a/paddle/operators/maxout_op.cc +++ b/paddle/operators/maxout_op.cc @@ -20,7 +20,7 @@ using framework::Tensor; class MaxOutOpMaker : public framework::OpProtoAndCheckerMaker { public: - MaxOutOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) + MaxOutOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput( "X", diff --git a/paddle/operators/maxout_op.cu.cc b/paddle/operators/maxout_op.cu.cc index 2904f0ff96..c4a2d676d3 100644 --- a/paddle/operators/maxout_op.cu.cc +++ b/paddle/operators/maxout_op.cu.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ #include "paddle/operators/maxout_op.h" diff --git a/paddle/operators/mean_op.cc b/paddle/operators/mean_op.cc index 8932d700c2..411f4d14ef 100644 --- a/paddle/operators/mean_op.cc +++ b/paddle/operators/mean_op.cc @@ -32,7 +32,7 @@ class MeanOp : public framework::OperatorWithKernel { class MeanOpMaker : public framework::OpProtoAndCheckerMaker { public: - MeanOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) + MeanOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "The input of mean op"); AddOutput("Out", "The output of mean op"); @@ -60,13 +60,13 @@ class MeanGradMaker : public framework::SingleGradOpDescMaker { using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; protected: - std::unique_ptr Apply() const override { - auto* grad_op = new framework::OpDescBind(); + std::unique_ptr Apply() const override { + auto* grad_op = new framework::OpDesc(); grad_op->SetType("mean_grad"); grad_op->SetInput("X", Input("X")); grad_op->SetInput(framework::GradVarName("Out"), OutputGrad("Out")); grad_op->SetOutput(framework::GradVarName("X"), InputGrad("X")); - return std::unique_ptr(grad_op); + return std::unique_ptr(grad_op); } }; diff --git a/paddle/operators/mean_op.cu b/paddle/operators/mean_op.cu index 93062bf540..212d448113 100644 --- a/paddle/operators/mean_op.cu +++ b/paddle/operators/mean_op.cu @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
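For the `MeanGradMaker` hunk above, which wires `mean_grad` up to `Out`'s gradient, the identity being relied on is simple: with out = sum(x) / N, every input element receives the same gradient 1/N, so the backward pass just broadcasts d_out / N across the input's shape. A numeric sanity check of that arithmetic:

```cpp
#include <cassert>
#include <vector>

int main() {
  const std::vector<float> x = {1.0f, 2.0f, 3.0f, 6.0f};  // mean = 3
  const float d_out = 1.0f;
  // d(mean)/d(x_i) = 1/N for every element, independent of x_i's value.
  const float d_xi = d_out / static_cast<float>(x.size());
  assert(d_xi == 0.25f);  // each of the 4 elements gets the same 1/N share
}
```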
*/ #define EIGEN_USE_GPU diff --git a/paddle/operators/merge_lod_tensor_op.cc b/paddle/operators/merge_lod_tensor_op.cc index adc688dbd5..2287f34791 100644 --- a/paddle/operators/merge_lod_tensor_op.cc +++ b/paddle/operators/merge_lod_tensor_op.cc @@ -28,7 +28,11 @@ class MergeLoDTensorOp : public framework::OperatorBase { const framework::AttributeMap &attrs) : OperatorBase(type, inputs, outputs, attrs) {} void Run(const framework::Scope &scope, - const platform::DeviceContext &dev_ctx) const override { + const platform::Place &dev_place) const override { + // get device context from pool + platform::DeviceContextPool &pool = platform::DeviceContextPool::Get(); + auto &dev_ctx = *pool.Borrow(dev_place); + auto &x = scope.FindVar(Input("X"))->Get(); auto &mask = scope.FindVar(Input("Mask"))->Get(); auto &in_true = scope.FindVar(Input("InTrue"))->Get(); @@ -114,8 +118,7 @@ class MergeLoDTensorOp : public framework::OperatorBase { class MergeLoDTensorOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - MergeLoDTensorOpProtoMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + MergeLoDTensorOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "The input LoDTensor, contains complete lod information to " @@ -162,15 +165,15 @@ class MergeLoDTensorGradMaker : public framework::SingleGradOpDescMaker { using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; protected: - std::unique_ptr Apply() const override { - auto *grad_op = new framework::OpDescBind(); + std::unique_ptr Apply() const override { + auto *grad_op = new framework::OpDesc(); grad_op->SetType("split_lod_tensor"); grad_op->SetInput("X", OutputGrad("Out")); grad_op->SetInput("Mask", Input("Mask")); grad_op->SetOutput("OutTrue", InputGrad("InTrue")); grad_op->SetOutput("OutFalse", InputGrad("InFalse")); grad_op->SetAttrMap(Attrs()); - return std::unique_ptr(grad_op); + return std::unique_ptr(grad_op); } }; diff --git a/paddle/operators/minus_op.cc b/paddle/operators/minus_op.cc index 27f0c8de20..3d7742dd4b 100644 --- a/paddle/operators/minus_op.cc +++ b/paddle/operators/minus_op.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
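The `merge_lod_tensor` hunk above shows the new context-plumbing idiom: `Run` now receives a `platform::Place` and borrows the matching `DeviceContext` from a process-wide pool, instead of being handed a context directly. A toy model of that lookup (every type below is a simplified mock; the real pool presumably manages CUDA streams and friends):

```cpp
#include <cassert>
#include <map>

struct Place {
  int device_id;
  bool operator<(const Place& o) const { return device_id < o.device_id; }
};

struct DeviceContext {
  Place place;
};

// Process-wide registry, mirroring the DeviceContextPool::Get()/Borrow()
// calls in the diff above.
class DeviceContextPool {
 public:
  static DeviceContextPool& Get() {
    static DeviceContextPool pool;
    return pool;
  }
  DeviceContext* Borrow(const Place& place) {
    auto it = ctxs_.find(place);
    if (it == ctxs_.end()) {
      it = ctxs_.emplace(place, DeviceContext{place}).first;
    }
    return &it->second;
  }

 private:
  std::map<Place, DeviceContext> ctxs_;
};

int main() {
  Place gpu0{0};
  DeviceContext& dev_ctx = *DeviceContextPool::Get().Borrow(gpu0);
  assert(dev_ctx.place.device_id == 0);
}
```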
*/ #include "paddle/operators/minus_op.h" #include "paddle/operators/net_op.h" @@ -46,7 +46,7 @@ class MinusOp : public framework::OperatorWithKernel { class MinusOpMaker : public framework::OpProtoAndCheckerMaker { public: - MinusOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) + MinusOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "The left tensor of minus operator."); AddInput("Y", "The right tensor of minus operator."); @@ -70,12 +70,11 @@ class MinusGradMaker : public framework::GradOpDescMakerBase { public: using framework::GradOpDescMakerBase::GradOpDescMakerBase; - std::vector> operator()() - const override { - std::vector> ops; + std::vector> operator()() const override { + std::vector> ops; auto x_g = InputGrad("X"); if (!x_g.empty()) { - auto *x_g_op = new framework::OpDescBind(); + auto *x_g_op = new framework::OpDesc(); x_g_op->SetType("scale"); x_g_op->SetInput("X", OutputGrad("Out")); x_g_op->SetOutput("Out", x_g); @@ -85,7 +84,7 @@ class MinusGradMaker : public framework::GradOpDescMakerBase { auto y_g = InputGrad("Y"); if (!y_g.empty()) { - auto *y_g_op = new framework::OpDescBind(); + auto *y_g_op = new framework::OpDesc(); y_g_op->SetType("scale"); y_g_op->SetInput("X", OutputGrad("Out")); y_g_op->SetOutput("Out", y_g); diff --git a/paddle/operators/minus_op.cu b/paddle/operators/minus_op.cu index 3b202ea92e..80cd9f7c16 100644 --- a/paddle/operators/minus_op.cu +++ b/paddle/operators/minus_op.cu @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/minus_op.h" diff --git a/paddle/operators/minus_op.h b/paddle/operators/minus_op.h index 78e1e1be6d..20760b8cd5 100644 --- a/paddle/operators/minus_op.h +++ b/paddle/operators/minus_op.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
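The `MinusGradMaker` change above keeps the op's trick intact: minus never needs a bespoke backward kernel, because with Out = X - Y the gradients are dX = dOut and dY = -dOut, each expressible as a plain `scale` op (the scale-factor attributes are elided from this excerpt). The arithmetic being relied on:

```cpp
#include <cassert>

int main() {
  const float x = 5.0f, y = 2.0f, d_out = 0.25f;
  const float out = x - y;          // forward: Out = X - Y
  const float d_x = 1.0f * d_out;   // scale op with factor +1
  const float d_y = -1.0f * d_out;  // scale op with factor -1
  assert(out == 3.0f && d_x == 0.25f && d_y == -0.25f);
}
```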
+You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once #include "paddle/framework/eigen.h" diff --git a/paddle/operators/modified_huber_loss_op.cc b/paddle/operators/modified_huber_loss_op.cc index f0a42491bf..f5d69071a8 100644 --- a/paddle/operators/modified_huber_loss_op.cc +++ b/paddle/operators/modified_huber_loss_op.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/modified_huber_loss_op.h" @@ -39,8 +39,7 @@ class ModifiedHuberLossOp : public framework::OperatorWithKernel { class ModifiedHuberLossOpMaker : public framework::OpProtoAndCheckerMaker { public: - ModifiedHuberLossOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker) + ModifiedHuberLossOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "The input tensor of modified huber loss op. " diff --git a/paddle/operators/modified_huber_loss_op.cu b/paddle/operators/modified_huber_loss_op.cu index 40a8447da4..3d2a5562e8 100644 --- a/paddle/operators/modified_huber_loss_op.cu +++ b/paddle/operators/modified_huber_loss_op.cu @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
*/ -#include -#include +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include #include #include "paddle/framework/op_registry.h" diff --git a/paddle/operators/modified_huber_loss_op.h b/paddle/operators/modified_huber_loss_op.h index 157ae0682e..6ce86feee5 100644 --- a/paddle/operators/modified_huber_loss_op.h +++ b/paddle/operators/modified_huber_loss_op.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once diff --git a/paddle/operators/momentum_op.cc b/paddle/operators/momentum_op.cc index 2ab48fedec..15b8b80776 100644 --- a/paddle/operators/momentum_op.cc +++ b/paddle/operators/momentum_op.cc @@ -54,8 +54,7 @@ class MomentumOp : public framework::OperatorWithKernel { class MomentumOpMaker : public framework::OpProtoAndCheckerMaker { public: - MomentumOpMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + MomentumOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("Param", "(Tensor, default Tensor) " diff --git a/paddle/operators/momentum_op.cu b/paddle/operators/momentum_op.cu index 00f1253465..2b9314162e 100644 --- a/paddle/operators/momentum_op.cu +++ b/paddle/operators/momentum_op.cu @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/framework/op_registry.h" diff --git a/paddle/operators/mul_op.cc b/paddle/operators/mul_op.cc index bc4a5fdf0b..c923e988a5 100644 --- a/paddle/operators/mul_op.cc +++ b/paddle/operators/mul_op.cc @@ -71,41 +71,52 @@ class MulOpShapeInference : public framework::InferShapeBase { class MulOpMaker : public framework::OpProtoAndCheckerMaker { public: - MulOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) + MulOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "The first input of mul op"); - AddInput("Y", "The second input of mul op"); - AddOutput("Out", "The output of mul op"); + AddInput("X", "(Tensor), The first input tensor of mul op."); + AddInput("Y", "(Tensor), The second input tensor of mul op."); + AddOutput("Out", "(Tensor), The output tensor of mul op."); AddAttr( "x_num_col_dims", - "(int, default 1) " - R"DOC(mul_op can take tensors with more than two dimensions as input `X`, - in that case, tensors will be reshaped to a matrix. The matrix's first - dimension(column length) will be the product of tensor's last - `num_col_dims` dimensions, and the matrix's second dimension(row length) - will be the product of tensor's first `rank - num_col_dims` dimensions. + R"DOC((int, default 1), The mul_op can take tensors with more than two + dimensions as its inputs. If the input $X$ is a tensor with more + than two dimensions, $X$ will be flattened into a two-dimensional + matrix first. The flattening rule is: the first `num_col_dims` + dimensions will be flattened to form the first dimension of the final + matrix (the height of the matrix), and the rest `rank(X) - num_col_dims` + dimensions are flattened to form the second dimension of the final + matrix (the width of the matrix). As a result, the height of the + flattened matrix is equal to the product of $X$'s first + `x_num_col_dims` dimensions' sizes, and the width of the flattened + matrix is equal to the product of $X$'s last `rank(X) - num_col_dims` + dimensions' sizes. For example, suppose $X$ is a 5-dimensional + tensor with the shape [2, 3, 4, 5, 6], and `x_num_col_dims` = 3. + Thus, the flattened matrix will have a shape [2 x 3 x 4, 5 x 6] = + [24, 30]. )DOC") .SetDefault(1) .EqualGreaterThan(1); AddAttr( "y_num_col_dims", - "(int, default 1) " - R"DOC(mul_op can take tensors with more than two dimensions as input `Y`, - in that case, tensors will be reshaped to a matrix. Just like input `X`. + R"DOC((int, default 1), The mul_op can take tensors with more than two + dimensions as its inputs. If the input $Y$ is a tensor with more + than two dimensions, $Y$ will be flattened into a two-dimensional + matrix first. The attribute `y_num_col_dims` determines how $Y$ is + flattened. See comments of `x_num_col_dims` for more details. )DOC") .SetDefault(1) .EqualGreaterThan(1); AddComment(R"DOC( -Mul Operator. +Mul Operator. -This operator is used to perform matrix multiplication for input X and Y. +This operator is used to perform matrix multiplication for input $X$ and $Y$.
The equation is: - $$Out = X * Y$$ +$$Out = X * Y$$ -Both the input `X` and `Y` can carry the LoD (Level of Details) information, -or not. But the output only shares the LoD information with input `X`. +Both the input $X$ and $Y$ can carry the LoD (Level of Details) information, +or not. But the output only shares the LoD information with input $X$. )DOC"); } diff --git a/paddle/operators/mul_op.cu.cc b/paddle/operators/mul_op.cu.cc index 6095de58d0..43de9a7194 100644 --- a/paddle/operators/mul_op.cu.cc +++ b/paddle/operators/mul_op.cu.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/mul_op.h" diff --git a/paddle/operators/mul_op.h b/paddle/operators/mul_op.h index 1b467dca83..1fb0569b49 100644 --- a/paddle/operators/mul_op.h +++ b/paddle/operators/mul_op.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - You may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once diff --git a/paddle/operators/multiplex_op.cc b/paddle/operators/multiplex_op.cc index b1ee8051c4..11e047b5d5 100644 --- a/paddle/operators/multiplex_op.cc +++ b/paddle/operators/multiplex_op.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
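The flattening rule documented for `x_num_col_dims` above is easy to sanity-check in isolation. Below is a minimal standalone sketch, not code from this patch; the helper name `FlattenTo2DShape` is invented for illustration.

```c++
#include <cassert>
#include <cstdint>
#include <functional>
#include <numeric>
#include <utility>
#include <vector>

// Computes the 2-D shape produced by mul_op's flattening rule:
// height = product of the first num_col_dims dimensions,
// width  = product of the remaining rank - num_col_dims dimensions.
std::pair<int64_t, int64_t> FlattenTo2DShape(const std::vector<int64_t>& dims,
                                             int num_col_dims) {
  int64_t height = std::accumulate(dims.begin(), dims.begin() + num_col_dims,
                                   int64_t{1}, std::multiplies<int64_t>());
  int64_t width = std::accumulate(dims.begin() + num_col_dims, dims.end(),
                                  int64_t{1}, std::multiplies<int64_t>());
  return {height, width};
}

int main() {
  // The example from the operator comment: shape [2, 3, 4, 5, 6] with
  // x_num_col_dims = 3 flattens to [2 * 3 * 4, 5 * 6] = [24, 30].
  auto shape = FlattenTo2DShape({2, 3, 4, 5, 6}, 3);
  assert(shape.first == 24 && shape.second == 30);
  return 0;
}
```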
- You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/multiplex_op.h" @@ -51,7 +51,7 @@ class MultiplexOp : public framework::OperatorWithKernel { } protected: - framework::OpKernelType GetKernelType( + framework::OpKernelType GetActualKernelType( const framework::ExecutionContext& ctx) const override { return framework::OpKernelType( framework::ToDataType(ctx.MultiInput("X")[0]->type()), @@ -61,8 +61,7 @@ class MultiplexOp : public framework::OperatorWithKernel { class MultiplexOpMaker : public framework::OpProtoAndCheckerMaker { public: - MultiplexOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker) + MultiplexOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("Ids", "The index tensor of multiplex operator."); AddInput("X", "The candidate tensors of multiplex operator.") @@ -103,7 +102,7 @@ class MultiplexGradOp : public framework::OperatorWithKernel { } protected: - framework::OpKernelType GetKernelType( + framework::OpKernelType GetActualKernelType( const framework::ExecutionContext& ctx) const override { return framework::OpKernelType( framework::ToDataType(ctx.MultiInput("X")[0]->type()), diff --git a/paddle/operators/multiplex_op.cu b/paddle/operators/multiplex_op.cu index 47986e9ff8..f49ee71f10 100644 --- a/paddle/operators/multiplex_op.cu +++ b/paddle/operators/multiplex_op.cu @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ #include "paddle/framework/op_registry.h" #include "paddle/operators/multiplex_op.h" @@ -36,7 +36,7 @@ class MultiplexGPUKernel : public framework::OpKernel { CopyFrom(*ids, platform::CPUPlace(), ctx.device_context(), &index_t_cpu); auto* index = index_t_cpu.data(); auto stream = ctx.cuda_device_context().stream(); - platform::GPUPlace place = boost::get(ctx.GetPlace()); + platform::CUDAPlace place = boost::get(ctx.GetPlace()); for (auto i = 0; i < rows; i++) { int32_t k = index[i]; PADDLE_ENFORCE_GE(k, 0, "index must be nonnegative."); @@ -73,7 +73,7 @@ class MultiplexGradGPUKernel : public framework::OpKernel { auto* index = index_t_cpu.data(); auto stream = ctx.cuda_device_context().stream(); - platform::GPUPlace place = boost::get(ctx.GetPlace()); + platform::CUDAPlace place = boost::get(ctx.GetPlace()); for (auto i = 0; i < rows; i++) { size_t k = static_cast(index[i]); if (d_ins[k]) { diff --git a/paddle/operators/multiplex_op.h b/paddle/operators/multiplex_op.h index 3443151161..ef66be5556 100644 --- a/paddle/operators/multiplex_op.h +++ b/paddle/operators/multiplex_op.h @@ -1,17 +1,16 @@ - /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once diff --git a/paddle/operators/nccl/nccl_gpu_common.cc b/paddle/operators/nccl/nccl_gpu_common.cc index 6be735e4c7..1602a3d9b5 100644 --- a/paddle/operators/nccl/nccl_gpu_common.cc +++ b/paddle/operators/nccl/nccl_gpu_common.cc @@ -1,13 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/nccl/nccl_gpu_common.h" #include "paddle/platform/gpu_info.h" diff --git a/paddle/operators/nccl/nccl_gpu_common.h b/paddle/operators/nccl/nccl_gpu_common.h index 48e322f993..5173996f20 100644 --- a/paddle/operators/nccl/nccl_gpu_common.h +++ b/paddle/operators/nccl/nccl_gpu_common.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once diff --git a/paddle/operators/nccl_op.cc b/paddle/operators/nccl_op.cc index 22a37ff1bb..9d51153b06 100644 --- a/paddle/operators/nccl_op.cc +++ b/paddle/operators/nccl_op.cc @@ -1,13 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ #include "paddle/framework/op_registry.h" #include "paddle/operators/nccl/nccl_gpu_common.h" @@ -24,7 +27,7 @@ class NCCLInitOp : public framework::OperatorBase { : OperatorBase(type, inputs, outputs, attrs) {} void Run(const framework::Scope &scope, - const platform::DeviceContext &dev_ctx) const override { + const platform::Place &place) const override { const auto &name = Output("Communicator"); PADDLE_ENFORCE_NOT_NULL(scope.FindVar(name), "Can not find variable '%s' in the scope.", name); @@ -43,8 +46,7 @@ class NCCLInitOp : public framework::OperatorBase { class NCCLInitOpMaker : public framework::OpProtoAndCheckerMaker { public: - NCCLInitOpMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + NCCLInitOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddOutput("Communicator", "Create Communicator for communicating between gpus"); @@ -52,7 +54,7 @@ class NCCLInitOpMaker : public framework::OpProtoAndCheckerMaker { AddAttr("dtype", "(int, default 5 (FP32)) " "Output data type") - .SetDefault(framework::DataType::FP32); + .SetDefault(framework::proto::DataType::FP32); AddComment(R"DOC( NCCLInit Operator. @@ -141,8 +143,7 @@ class NCCLBcastOp : public framework::OperatorWithKernel { // AllreduceOp class NCCLAllReduceOpMaker : public framework::OpProtoAndCheckerMaker { public: - NCCLAllReduceOpMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + NCCLAllReduceOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "The input of AllReduce op"); AddInput("Communicator", "Communicator for communicating between gpus"); @@ -163,8 +164,7 @@ AllReduce the input tensors. // ReduceOp class NCCLReduceOpMaker : public framework::OpProtoAndCheckerMaker { public: - NCCLReduceOpMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + NCCLReduceOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "The input of Reduce op"); AddInput("Communicator", "Communicator for communicating between gpus"); @@ -190,8 +190,7 @@ Reduce the tensors. 
// BcastOp class NCCLBcastOpMaker : public framework::OpProtoAndCheckerMaker { public: - NCCLBcastOpMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + NCCLBcastOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "The input of BcastSend op"); AddInput("Communicator", "Communicator for communicating between gpus"); diff --git a/paddle/operators/nccl_op.cu.cc b/paddle/operators/nccl_op.cu.cc index 6ca6db7253..1b986a1365 100644 --- a/paddle/operators/nccl_op.cu.cc +++ b/paddle/operators/nccl_op.cu.cc @@ -67,7 +67,7 @@ class NCCLAllReduceKernel : public framework::OpKernel { auto stream = ctx.cuda_device_context().stream(); // device id - int gpu_id = boost::get(ctx.GetPlace()).GetDeviceId(); + int gpu_id = boost::get(ctx.GetPlace()).GetDeviceId(); int idx = comm->GetCommId(gpu_id); for (size_t i = 0; i < ins.size(); ++i) { @@ -120,7 +120,7 @@ class NCCLReduceKernel : public framework::OpKernel { ctx.device_context()) .stream(); // device id - int gpu_id = boost::get(ctx.GetPlace()).GetDeviceId(); + int gpu_id = boost::get(ctx.GetPlace()).GetDeviceId(); int idx = comm->GetCommId(gpu_id); auto ins_names = ctx.Inputs("X"); @@ -164,7 +164,7 @@ class NCCLBcastKernel : public framework::OpKernel { ctx.device_context()) .stream(); // device id - int gpu_id = boost::get(ctx.GetPlace()).GetDeviceId(); + int gpu_id = boost::get(ctx.GetPlace()).GetDeviceId(); int idx = comm->GetCommId(gpu_id); if (idx == root) { diff --git a/paddle/operators/nccl_op_test.cu.cc b/paddle/operators/nccl_op_test.cu.cc index d747cc0cf5..34a6e1a58d 100644 --- a/paddle/operators/nccl_op_test.cu.cc +++ b/paddle/operators/nccl_op_test.cu.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ #include #include @@ -22,6 +22,7 @@ #include #include "paddle/framework/block_desc.h" +#include "paddle/framework/init.h" #include "paddle/framework/op_desc.h" #include "paddle/framework/op_registry.h" #include "paddle/framework/program_desc.h" @@ -49,9 +50,9 @@ const f::DDim kDims = {100, 100}; class NCCLTester : public ::testing::Test { public: virtual void SetUp() override { - cpu_ctx = new p::CPUDeviceContext(p::CPUPlace()); + paddle::platform::CPUPlace cpu_place; for (size_t i = 0; i < gpu_list.size(); ++i) { - p::GPUPlace place(i); + p::CUDAPlace place(i); dev_ctxs.emplace_back(new p::CUDADeviceContext(place)); } @@ -65,7 +66,8 @@ class NCCLTester : public ::testing::Test { } void NCCLInitOp() { - std::unique_ptr op1(new f::OpDescBind); + paddle::platform::CPUPlace cpu_place; + std::unique_ptr op1(new f::OpDesc); op1->SetType("ncclInit"); op1->SetOutput("Communicator", {"comm"}); @@ -76,17 +78,16 @@ class NCCLTester : public ::testing::Test { auto op = f::OpRegistry::CreateOp(*op1); VLOG(1) << "invoke NCCLInitOp."; - op->Run(g_scope, *cpu_ctx); + op->Run(g_scope, cpu_place); VLOG(1) << "NCCLInitOp finished."; } template - void PerThreadProgram(int gpu_id, const f::OpDescBind &op_desc, - f::Scope *scope) { + void PerThreadProgram(int gpu_id, const f::OpDesc &op_desc, f::Scope *scope) { std::unique_lock lk(mu); - const f::OpDescBind *op1 = &op_desc; + const f::OpDesc *op1 = &op_desc; - p::GPUPlace place(gpu_id); + p::CUDAPlace place(gpu_id); auto &ctx = dev_ctxs.at(gpu_id); auto *send_tensor = scope->Var("st")->GetMutable(); @@ -112,40 +113,39 @@ class NCCLTester : public ::testing::Test { VLOG(1) << "Device : " << gpu_id << " invoke " << op_desc.Type(); VLOG(1) << " send_tensor : " << send_tensor->numel() << " recv_tensor : " << recv_tensor->numel(); - op->Run(*scope, *ctx); + op->Run(*scope, place); VLOG(1) << "Device : " << gpu_id << " finished " << op_desc.Type(); } public: std::vector dev_ctxs; - p::DeviceContext *cpu_ctx; f::Scope g_scope; std::mutex mu; }; // ncclInitOp with desc TEST(NCCL, ncclInitOp) { - std::unique_ptr op_desc(new f::OpDescBind); + std::unique_ptr op_desc(new f::OpDesc); op_desc->SetType("ncclInit"); op_desc->SetOutput("Communicator", {"x1"}); op_desc->SetAttr("gpus", {gpu_list}); f::Scope g_scope; - std::unique_ptr ctx(new p::CPUDeviceContext(p::CPUPlace())); + paddle::platform::CPUPlace cpu_place; auto *var = g_scope.Var("x1"); var->GetMutable(); auto op = f::OpRegistry::CreateOp(*op_desc); VLOG(1) << "invoke NCCLInitOp."; - op->Run(g_scope, *ctx.get()); + op->Run(g_scope, cpu_place); VLOG(1) << "NCCLInitOp finished."; } // ncclAllReduceOp with desc TEST_F(NCCLTester, ncclAllReduceOp) { - std::unique_ptr op2(new f::OpDescBind); + std::unique_ptr op2(new f::OpDesc); op2->SetType("ncclAllReduce"); op2->SetInput("X", {"st"}); op2->SetInput("Communicator", {"comm"}); @@ -171,7 +171,7 @@ TEST_F(NCCLTester, ncclAllReduceOp) { for (size_t i = 0; i < dev_scopes.size(); ++i) { p::CPUPlace cpu_place; - p::GPUPlace gpu_place(gpu_list[i]); + p::CUDAPlace gpu_place(gpu_list[i]); auto &recv_tensor = dev_scopes[i]->FindVar("rt")->Get(); auto *rt = recv_tensor.data(); @@ -180,7 +180,7 @@ TEST_F(NCCLTester, ncclAllReduceOp) { auto *ct = result_tensor->mutable_data(cpu_place); paddle::memory::Copy( - cpu_place, ct, p::GPUPlace(gpu_list[i]), rt, + cpu_place, ct, p::CUDAPlace(gpu_list[i]), rt, recv_tensor.numel() * sizeof(float), static_cast(dev_ctxs[i])->stream()); @@ -192,7 +192,7 @@ TEST_F(NCCLTester, ncclAllReduceOp) { // ncclReduceOp with desc TEST_F(NCCLTester, 
ncclReduceOp) { - std::unique_ptr op2(new f::OpDescBind); + std::unique_ptr op2(new f::OpDesc); const int kRoot = 0; op2->SetType("ncclReduce"); op2->SetInput("X", {"st"}); @@ -219,7 +219,7 @@ TEST_F(NCCLTester, ncclReduceOp) { float result = std::accumulate(gpu_list.begin(), gpu_list.end(), 0); p::CPUPlace cpu_place; - p::GPUPlace gpu_place(gpu_list[kRoot]); + p::CUDAPlace gpu_place(gpu_list[kRoot]); auto &recv_tensor = dev_scopes[kRoot]->FindVar("rt")->Get(); auto *rt = recv_tensor.data(); @@ -229,7 +229,7 @@ TEST_F(NCCLTester, ncclReduceOp) { auto *ct = result_tensor->mutable_data(cpu_place); paddle::memory::Copy( - cpu_place, ct, p::GPUPlace(gpu_list[kRoot]), rt, + cpu_place, ct, p::CUDAPlace(gpu_list[kRoot]), rt, recv_tensor.numel() * sizeof(float), static_cast(dev_ctxs[kRoot])->stream()); @@ -240,7 +240,7 @@ TEST_F(NCCLTester, ncclReduceOp) { // ncclBcastOp with desc TEST_F(NCCLTester, ncclBcastOp) { - std::unique_ptr op2(new f::OpDescBind); + std::unique_ptr op2(new f::OpDesc); const int kRoot = 5; op2->SetType("ncclBcast"); op2->SetInput("X", {"st"}); @@ -268,7 +268,7 @@ TEST_F(NCCLTester, ncclBcastOp) { float result = kRoot; p::CPUPlace cpu_place; - p::GPUPlace gpu_place(gpu_list[idx]); + p::CUDAPlace gpu_place(gpu_list[idx]); auto &recv_tensor = dev_scopes[idx]->FindVar("rt")->Get(); auto *rt = recv_tensor.data(); @@ -277,7 +277,7 @@ TEST_F(NCCLTester, ncclBcastOp) { auto *ct = result_tensor->mutable_data(cpu_place); paddle::memory::Copy( - cpu_place, ct, p::GPUPlace(gpu_list[idx]), rt, + cpu_place, ct, p::CUDAPlace(gpu_list[idx]), rt, recv_tensor.numel() * sizeof(float), static_cast(dev_ctxs[idx])->stream()); @@ -295,9 +295,18 @@ int main(int argc, char **argv) { return 0; } - for (int i = 0; i < dev_count; ++i) { + std::vector places; + + places.emplace_back(paddle::platform::CPUPlace()); + int count = paddle::platform::GetCUDADeviceCount(); + for (int i = 0; i < count; ++i) { + places.emplace_back(paddle::platform::CUDAPlace(i)); gpu_list.emplace_back(i); } + + VLOG(0) << " DeviceCount " << count; + paddle::platform::DeviceContextPool::Create(places); + testing::InitGoogleTest(&argc, argv); // device context should be release before scope. 
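The test's `main` above shows the initialization pattern that goes with the `Run(scope, place)` signature used throughout this patch: enumerate all `Place`s once, create a process-wide `DeviceContextPool`, and let each operator borrow the context matching the place it was handed. A toy, self-contained sketch of that pool idea follows; every type here is a simplified stand-in, and the real `platform::DeviceContextPool` differs in detail.

```c++
#include <map>
#include <memory>
#include <stdexcept>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

// Simplified stand-ins for platform::Place and platform::DeviceContext.
struct Place {
  std::string type;  // e.g. "CPU" or "CUDA"
  int device_id = 0;
  bool operator<(const Place& o) const {
    return std::tie(type, device_id) < std::tie(o.type, o.device_id);
  }
};
struct DeviceContext {
  explicit DeviceContext(Place p) : place(std::move(p)) {}
  Place place;  // a real context would own streams, handles, etc.
};

class DeviceContextPool {
 public:
  // Built once at startup from the full list of places.
  static void Create(const std::vector<Place>& places) {
    instance_.reset(new DeviceContextPool(places));
  }
  static DeviceContextPool& Get() {
    if (!instance_) throw std::runtime_error("pool not created");
    return *instance_;
  }
  // Operators call this from Run(scope, place) to recover their context.
  DeviceContext* Borrow(const Place& place) {
    auto it = contexts_.find(place);
    if (it == contexts_.end()) throw std::runtime_error("unknown place");
    return it->second.get();
  }

 private:
  explicit DeviceContextPool(const std::vector<Place>& places) {
    for (const auto& p : places)
      contexts_.emplace(p, std::make_unique<DeviceContext>(p));
  }
  std::map<Place, std::unique_ptr<DeviceContext>> contexts_;
  static std::unique_ptr<DeviceContextPool> instance_;
};
std::unique_ptr<DeviceContextPool> DeviceContextPool::instance_;

int main() {
  DeviceContextPool::Create({Place{"CPU", 0}, Place{"CUDA", 0}});
  DeviceContext* ctx = DeviceContextPool::Get().Borrow(Place{"CUDA", 0});
  return ctx->place.device_id;  // 0
}
```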
diff --git a/paddle/operators/nce_op.cc b/paddle/operators/nce_op.cc index 5ad1610fde..d39ca87d53 100644 --- a/paddle/operators/nce_op.cc +++ b/paddle/operators/nce_op.cc @@ -63,7 +63,7 @@ class NCEOp : public framework::OperatorWithKernel { } protected: - framework::OpKernelType GetKernelType( + framework::OpKernelType GetActualKernelType( const framework::ExecutionContext& ctx) const override { return framework::OpKernelType( framework::ToDataType(ctx.Input("Input")->type()), @@ -73,7 +73,7 @@ class NCEOp : public framework::OperatorWithKernel { class NCEOpMaker : public framework::OpProtoAndCheckerMaker { public: - NCEOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) + NCEOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("Input", "(Tensor) A tensor of shape [batch_size, dim]."); AddInput( @@ -166,7 +166,7 @@ class NCEOpGrad : public framework::OperatorWithKernel { } protected: - framework::OpKernelType GetKernelType( + framework::OpKernelType GetActualKernelType( const framework::ExecutionContext& ctx) const override { return framework::OpKernelType( framework::ToDataType(ctx.Input("Input")->type()), diff --git a/paddle/operators/nce_op.h b/paddle/operators/nce_op.h index 6636dad060..e6b496f789 100644 --- a/paddle/operators/nce_op.h +++ b/paddle/operators/nce_op.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once diff --git a/paddle/operators/net_op.h b/paddle/operators/net_op.h index 8935751f15..85d0153b32 100644 --- a/paddle/operators/net_op.h +++ b/paddle/operators/net_op.h @@ -65,9 +65,9 @@ class NetOp : public framework::OperatorBase { * will be used. 
*/ void Run(const framework::Scope& scope, - const platform::DeviceContext& dev_ctx) const override { + const platform::Place& place) const override { for (auto& op : ops_) { - op->Run(scope, dev_ctx); + op->Run(scope, place); } } diff --git a/paddle/operators/net_op_test.cc b/paddle/operators/net_op_test.cc index 22fba9568d..dfd86546e8 100644 --- a/paddle/operators/net_op_test.cc +++ b/paddle/operators/net_op_test.cc @@ -13,8 +13,7 @@ class TestOp : public framework::OperatorBase { public: using framework::OperatorBase::OperatorBase; DEFINE_OP_CLONE_METHOD(TestOp); - void Run(const Scope& scope, - const platform::DeviceContext& dev_ctx) const override { + void Run(const Scope& scope, const platform::Place& place) const override { ++run_cnt; } }; diff --git a/paddle/operators/batch_norm_op.md b/paddle/operators/op_documentation/batch_norm_op.md similarity index 100% rename from paddle/operators/batch_norm_op.md rename to paddle/operators/op_documentation/batch_norm_op.md diff --git a/paddle/operators/name_convention.md b/paddle/operators/op_documentation/name_convention.md similarity index 96% rename from paddle/operators/name_convention.md rename to paddle/operators/op_documentation/name_convention.md index b5cb176e00..a02b356f05 100644 --- a/paddle/operators/name_convention.md +++ b/paddle/operators/op_documentation/name_convention.md @@ -35,8 +35,8 @@ Here we give some examples to show how these rules will be used. ```c++ class AccumulateOpMaker : public framework::OpProtoAndCheckerMaker { public: - AccumulateOpMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + AccumulateOpMaker(OpProto *proto, + OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "(Tensor) The input tensor that has to be accumulated to the output tensor. If the output size is not the same as input size, diff --git a/paddle/operators/net_op_design.md b/paddle/operators/op_documentation/net_op_design.md similarity index 100% rename from paddle/operators/net_op_design.md rename to paddle/operators/op_documentation/net_op_design.md diff --git a/paddle/operators/op_documentation/op_markdown_format.md b/paddle/operators/op_documentation/op_markdown_format.md new file mode 100644 index 0000000000..0ee804d592 --- /dev/null +++ b/paddle/operators/op_documentation/op_markdown_format.md @@ -0,0 +1,64 @@ +# Standard Markdown Format for Operators +The following should be the standard format for documentation for all the operators that will get rendered in the `html`: + +``` +Operator Name (In PaddlePaddle) + +Operator Name (Standard) + +Operator description. + +LaTeX equation of how the operator performs an update. + +The signature of the operator. +``` + +Each section mentioned above has been covered in further detail in the rest of the document. + +# PaddlePaddle Operator Name +This should be in all lowercase letters; in case of multiple words, we separate them with an underscore. For example: +`array to lod tensor` should be written as `array_to_lod_tensor`. + +This naming convention should be standard across all PaddlePaddle operators. + +# Standard Operator Name +This is the standard name of the operator as used in the community. The general standard is usually: +- Standard abbreviations like `SGD` are written in all capital letters. +- Operator names that combine multiple words into a single word use `camelCase` (capitalize word boundaries inside the word). +- Keep numbers inside a word as is, with no boundary delimiters.
+- Follow the name of the operator with the keyword: `Activation Operator.` + +# Operator description +This section should contain the description of what the operator does, including the operation performed, the literature in which it was first introduced, and other important details. The relevant paper/article, including the hyperlink, should be cited in this section. + +# LaTeX equation +This section should contain an overall equation of the update or operation that the operator performs. The variables used in the equation should follow the naming convention of operators as described [here](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/name_convention.md). Two words in the same variable name should be separated by an underscore (`_`). + +# The signature +This section describes the signature of the operator: a list of Inputs and Outputs, each of which has a small description of what the variable represents and the type of the variable. The variable names follow the `CamelCase` naming convention. The proposed format for this is: +`Section : +VariableName : (VariableType) VariableDescription +... +... +` + + +The following example for an `sgd` operator covers the above-mentioned sections as they would ideally look in the `html`: + +``` +sgd + +SGD operator + +This operator implements one step of the stochastic gradient descent algorithm. + +param_out = param - learning_rate * grad + +Inputs: +Param : (Tensor) Input parameter +LearningRate : (Tensor) Learning rate of SGD +Grad : (Tensor) Input gradient + +Outputs: +ParamOut : (Tensor) Output parameter +``` diff --git a/paddle/operators/rnn_design.md b/paddle/operators/op_documentation/rnn_design.md similarity index 100% rename from paddle/operators/rnn_design.md rename to paddle/operators/op_documentation/rnn_design.md diff --git a/paddle/operators/pad_op.cc b/paddle/operators/pad_op.cc index 936dde22c3..90c53bd177 100644 --- a/paddle/operators/pad_op.cc +++ b/paddle/operators/pad_op.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/pad_op.h" @@ -48,7 +48,7 @@ class PadOp : public framework::OperatorWithKernel { class PadOpMaker : public framework::OpProtoAndCheckerMaker { public: - PadOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) + PadOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "The input of pad op.
" @@ -116,14 +116,14 @@ class PadOpGradMaker : public framework::SingleGradOpDescMaker { using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; protected: - std::unique_ptr Apply() const override { - auto* bind = new framework::OpDescBind(); + std::unique_ptr Apply() const override { + auto* bind = new framework::OpDesc(); bind->SetInput("X", Input("X")); bind->SetInput(framework::GradVarName("Out"), OutputGrad("Out")); bind->SetOutput(framework::GradVarName("X"), InputGrad("X")); bind->SetAttrMap(Attrs()); bind->SetType("pad_grad"); - return std::unique_ptr(bind); + return std::unique_ptr(bind); } }; diff --git a/paddle/operators/pad_op.cu b/paddle/operators/pad_op.cu index c309fb625c..433b5f1112 100644 --- a/paddle/operators/pad_op.cu +++ b/paddle/operators/pad_op.cu @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #define EIGEN_USE_GPU #include "paddle/operators/pad_op.h" diff --git a/paddle/operators/pad_op.h b/paddle/operators/pad_op.h index 1b95942af3..fdf91a5776 100644 --- a/paddle/operators/pad_op.h +++ b/paddle/operators/pad_op.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ #pragma once diff --git a/paddle/operators/pool_cudnn_op.cu.cc b/paddle/operators/pool_cudnn_op.cu.cc index fc2b37bd0f..2d0001ba11 100644 --- a/paddle/operators/pool_cudnn_op.cu.cc +++ b/paddle/operators/pool_cudnn_op.cu.cc @@ -29,7 +29,7 @@ class PoolCudnnOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &ctx) const override { PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), - "It must use GPUPlace."); + "It must use CUDAPlace."); const Tensor *input = ctx.Input("X"); Tensor *output = ctx.Output("Out"); @@ -90,7 +90,7 @@ class PoolCudnnGradOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &ctx) const override { PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), - "It must use GPUPlace."); + "It must use CUDAPlace."); const Tensor *input = ctx.Input("X"); const Tensor *output = ctx.Input("Out"); diff --git a/paddle/operators/pool_op.cc b/paddle/operators/pool_op.cc index 45fa20280c..50057eb648 100644 --- a/paddle/operators/pool_op.cc +++ b/paddle/operators/pool_op.cc @@ -67,8 +67,7 @@ void PoolOpGrad::InferShape(framework::InferShapeContext *ctx) const { ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X")); } -Pool2dOpMaker::Pool2dOpMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) +Pool2dOpMaker::Pool2dOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput( "X", @@ -136,8 +135,7 @@ Example: )DOC"); } -Pool3dOpMaker::Pool3dOpMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) +Pool3dOpMaker::Pool3dOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "(Tensor) The input tensor of pooling operator. 
" diff --git a/paddle/operators/pool_op.h b/paddle/operators/pool_op.h index ab85d587a3..3860e295f4 100644 --- a/paddle/operators/pool_op.h +++ b/paddle/operators/pool_op.h @@ -40,14 +40,12 @@ class PoolOpGrad : public framework::OperatorWithKernel { class Pool2dOpMaker : public framework::OpProtoAndCheckerMaker { public: - Pool2dOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker); + Pool2dOpMaker(OpProto* proto, OpAttrChecker* op_checker); }; class Pool3dOpMaker : public framework::OpProtoAndCheckerMaker { public: - Pool3dOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker); + Pool3dOpMaker(OpProto* proto, OpAttrChecker* op_checker); }; template diff --git a/paddle/operators/pool_with_index_op.cc b/paddle/operators/pool_with_index_op.cc index 1a2383f8b8..76c5123527 100644 --- a/paddle/operators/pool_with_index_op.cc +++ b/paddle/operators/pool_with_index_op.cc @@ -69,7 +69,7 @@ class MaxPoolWithIndexOp : public framework::OperatorWithKernel { } protected: - framework::OpKernelType GetKernelType( + framework::OpKernelType GetActualKernelType( const framework::ExecutionContext &ctx) const override { return framework::OpKernelType( framework::ToDataType(ctx.Input("X")->type()), @@ -90,7 +90,7 @@ class MaxPoolWithIndexOpGrad : public framework::OperatorWithKernel { } protected: - framework::OpKernelType GetKernelType( + framework::OpKernelType GetActualKernelType( const framework::ExecutionContext &ctx) const override { return framework::OpKernelType( framework::ToDataType(ctx.Input("X")->type()), @@ -100,8 +100,7 @@ class MaxPoolWithIndexOpGrad : public framework::OperatorWithKernel { class MaxPool2dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker { public: - MaxPool2dWithIndexOpMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + MaxPool2dWithIndexOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput( "X", @@ -178,8 +177,7 @@ Example: class MaxPool3dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker { public: - MaxPool3dWithIndexOpMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + MaxPool3dWithIndexOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "(Tensor) The input tensor of pooling operator. 
" diff --git a/paddle/operators/positive_negative_pair_op.cc b/paddle/operators/positive_negative_pair_op.cc index 4ba40a62ec..a6b23c995b 100644 --- a/paddle/operators/positive_negative_pair_op.cc +++ b/paddle/operators/positive_negative_pair_op.cc @@ -85,7 +85,7 @@ class PositiveNegativePairOp : public framework::OperatorWithKernel { } protected: - framework::OpKernelType GetKernelType( + framework::OpKernelType GetActualKernelType( const framework::ExecutionContext &ctx) const override { return framework::OpKernelType( framework::ToDataType(ctx.Input("Score")->type()), @@ -95,8 +95,7 @@ class PositiveNegativePairOp : public framework::OperatorWithKernel { class PositiveNegativePairOpMaker : public framework::OpProtoAndCheckerMaker { public: - PositiveNegativePairOpMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + PositiveNegativePairOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("Score", "(Tensor, float) Model Score on an item (with " @@ -155,13 +154,14 @@ class PositiveNegativePairOpMaker : public framework::OpProtoAndCheckerMaker { "Noting that reducing on the first dim will make the LoD info lost.") .SetDefault(0); AddComment(R"DOC( - PositiveNegativePairOp can be used to evaluate Learning To Rank(LTR) - model performance. - Within some context, e.g. the "query", a LTR model generates scores - for a list of items, which gives a partial order of the items. - PositiveNegativePairOp takes a list of reference rank order - (Input("Label")) and the model generated scores (Input(Score)) as - inputs and counts the pairs that ranked correctly and incorrectly. +PositiveNegativePairOp can be used to evaluate Learning To Rank(LTR) model's +performance. + +Within some context, e.g. the "query", a LTR model generates scores for a list +of items, which gives a partial order of the items. PositiveNegativePairOp +takes a list of reference rank order (Input("Label")) and the model generated +scores (Input(Score)) as inputs and counts the pairs that ranked correctly +and incorrectly. )DOC"); } }; diff --git a/paddle/operators/precision_recall_op.cc b/paddle/operators/precision_recall_op.cc index 1ace4f2a59..c5753147ef 100644 --- a/paddle/operators/precision_recall_op.cc +++ b/paddle/operators/precision_recall_op.cc @@ -80,7 +80,7 @@ class PrecisionRecallOp : public framework::OperatorWithKernel { } protected: - framework::OpKernelType GetKernelType( + framework::OpKernelType GetActualKernelType( const framework::ExecutionContext &ctx) const override { return framework::OpKernelType( framework::ToDataType(ctx.Input("MaxProbs")->type()), @@ -90,8 +90,7 @@ class PrecisionRecallOp : public framework::OperatorWithKernel { class PrecisionRecallOpMaker : public framework::OpProtoAndCheckerMaker { public: - PrecisionRecallOpMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + PrecisionRecallOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("MaxProbs", "(Tensor, default Tensor) A 2-D tensor with shape N x 1, " diff --git a/paddle/operators/prelu_op.cc b/paddle/operators/prelu_op.cc index 317a2a4015..ddc21a6570 100644 --- a/paddle/operators/prelu_op.cc +++ b/paddle/operators/prelu_op.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/prelu_op.h" #include "paddle/operators/net_op.h" @@ -38,7 +38,7 @@ class PReluOp : public framework::OperatorWithKernel { class PReluOpMaker : public framework::OpProtoAndCheckerMaker { public: - PReluOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) + PReluOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "The input tensor of prelu operator."); AddInput("Alpha", "The alpha weight of prelu operator."); diff --git a/paddle/operators/prelu_op.cu b/paddle/operators/prelu_op.cu index 12033dee0e..1718bb5cd6 100644 --- a/paddle/operators/prelu_op.cu +++ b/paddle/operators/prelu_op.cu @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ #include "paddle/operators/prelu_op.h" diff --git a/paddle/operators/proximal_adagrad_op.cc b/paddle/operators/proximal_adagrad_op.cc index cc350f6d26..b92f46b5bd 100644 --- a/paddle/operators/proximal_adagrad_op.cc +++ b/paddle/operators/proximal_adagrad_op.cc @@ -59,8 +59,7 @@ class ProximalAdagradOp : public framework::OperatorWithKernel { class ProximalAdagradOpMaker : public framework::OpProtoAndCheckerMaker { public: - ProximalAdagradOpMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + ProximalAdagradOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("Param", "(Tensor, default Tensor) " diff --git a/paddle/operators/proximal_gd_op.cc b/paddle/operators/proximal_gd_op.cc index 0b26beb3ac..2d3bbdaf32 100644 --- a/paddle/operators/proximal_gd_op.cc +++ b/paddle/operators/proximal_gd_op.cc @@ -47,8 +47,7 @@ class ProximalGDOp : public framework::OperatorWithKernel { class ProximalGDOpMaker : public framework::OpProtoAndCheckerMaker { public: - ProximalGDOpMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + ProximalGDOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("Param", "(Tensor, default Tensor) " diff --git a/paddle/operators/rank_loss_op.cc b/paddle/operators/rank_loss_op.cc index b80b175792..f2164a0f80 100644 --- a/paddle/operators/rank_loss_op.cc +++ b/paddle/operators/rank_loss_op.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/rank_loss_op.h" @@ -45,8 +45,7 @@ class RankLossOp : public framework::OperatorWithKernel { class RankLossOpMaker : public framework::OpProtoAndCheckerMaker { public: - RankLossOpMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + RankLossOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("Label", "(2-D Tensor with shape [batch_size x 1]) " diff --git a/paddle/operators/rank_loss_op.cu b/paddle/operators/rank_loss_op.cu index 5aee66443d..294b227383 100644 --- a/paddle/operators/rank_loss_op.cu +++ b/paddle/operators/rank_loss_op.cu @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/rank_loss_op.h" diff --git a/paddle/operators/rank_loss_op.h b/paddle/operators/rank_loss_op.h index ea24b61fd9..bd0c49ca6e 100644 --- a/paddle/operators/rank_loss_op.h +++ b/paddle/operators/rank_loss_op.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once diff --git a/paddle/operators/recurrent_op.cc b/paddle/operators/recurrent_op.cc index 29f9163643..71769e67c7 100644 --- a/paddle/operators/recurrent_op.cc +++ b/paddle/operators/recurrent_op.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
*/ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include #include "paddle/framework/executor.h" @@ -25,7 +25,7 @@ constexpr char kOutputs[] = "outputs"; constexpr char kStepScopes[] = "step_scopes"; constexpr char kExStates[] = "ex_states"; constexpr char kStates[] = "states"; -constexpr char kStepBlock[] = "step_block"; +constexpr char kStepBlock[] = "sub_block"; constexpr char kReverse[] = "reverse"; constexpr char kIsTrain[] = "is_train"; #define GRAD_SUFFIX "@GRAD" @@ -227,14 +227,15 @@ class RecurrentOp : public RecurrentBase { : RecurrentBase(type, inputs, outputs, attrs) {} void Run(const framework::Scope &scope, - const platform::DeviceContext &dev_ctx) const override { + const platform::Place &place) const override { auto seq_len = static_cast(this->GetSequenceLength(scope)); VLOG(3) << "Static RNN input sequence length = " << seq_len; StepScopes scopes = CreateStepScopes(scope, seq_len); auto reverse = Attr(kReverse); - framework::Executor executor(dev_ctx); - auto *block = Attr(kStepBlock); + framework::Executor executor(place); + auto *block = Attr(kStepBlock); + auto *program = block->Program(); for (size_t i = 0; i < seq_len; ++i) { @@ -270,6 +271,10 @@ class RecurrentOp : public RecurrentBase { executor.Run(*program, &cur_scope, block->ID(), false /*create_local_scope*/); + // get device context from pool + platform::DeviceContextPool &pool = platform::DeviceContextPool::Get(); + auto &dev_ctx = *pool.Borrow(place); + // Copy inside::output -> outside::output // outside::output[seq_offset: seq_offset + 1] = inside::output this->LinkTensorWithCallback( @@ -278,14 +283,13 @@ class RecurrentOp : public RecurrentBase { framework::LoDTensor *dst_tensor) { if (i == 0) { // create output tensor at begin dst_tensor->Resize(PrependDims(seq_len, src_tensor.dims())); - dst_tensor->mutable_data(dev_ctx.GetPlace(), src_tensor.type()); + dst_tensor->mutable_data(place, src_tensor.type()); } auto dst_out = dst_tensor->Slice(seq_offset, seq_offset + 1); // Explicit copy output since the local RNN scope can be destroyed // early. - framework::CopyFrom(src_tensor, dev_ctx.GetPlace(), dev_ctx, - &dst_out); + framework::CopyFrom(src_tensor, place, dev_ctx, &dst_out); }); scopes.Next(); @@ -311,15 +315,20 @@ class RecurrentGradOp : public RecurrentBase { : RecurrentBase(type, inputs, outputs, attrs) {} void Run(const framework::Scope &scope, - const platform::DeviceContext &dev_ctx) const override { + const platform::Place &place) const override { auto seq_len = static_cast(GetSequenceLength(scope)); StepScopes scopes = CreateStepScopes(scope, seq_len); auto reverse = Attr(kReverse); - framework::Executor executor(dev_ctx); - auto *block = Attr(kStepBlock); + framework::Executor executor(place); + auto *block = Attr(kStepBlock); + auto *program = block->Program(); + // get device context from pool + platform::DeviceContextPool &pool = platform::DeviceContextPool::Get(); + auto &dev_ctx = *pool.Borrow(place); + for (size_t step_id = 0; step_id < seq_len; ++step_id) { size_t seq_offset = reverse ? 
step_id : seq_len - step_id - 1; VLOG(3) << "Recurrent backward operate at the time step " << seq_offset; @@ -366,8 +375,7 @@ class RecurrentGradOp : public RecurrentBase { auto *cur_grad_var = cur_scope.Var(cur_grad); auto cur_grad_tensor = cur_grad_var->GetMutable(); - framework::CopyFrom(ex_tensor, dev_ctx.GetPlace(), dev_ctx, - cur_grad_tensor); + framework::CopyFrom(ex_tensor, place, dev_ctx, cur_grad_tensor); } } @@ -410,7 +418,7 @@ class RecurrentGradOp : public RecurrentBase { auto zero_op = framework::OpRegistry::CreateOp( "fill_constant", framework::VariableNameMap{}, {{"Out", {pg_names[param_id]}}}, attrs); - zero_op->Run(scope, dev_ctx); + zero_op->Run(scope, place); } auto new_inside_name = cur_scope.Rename(inside_grad_name); @@ -419,7 +427,7 @@ class RecurrentGradOp : public RecurrentBase { auto sum_op = framework::OpRegistry::CreateOp( "sum", {{"X", {pg_names[param_id], new_inside_name}}}, {{"Out", {pg_names[param_id]}}}, framework::AttributeMap{}); - sum_op->Run(cur_scope, dev_ctx); + sum_op->Run(cur_scope, place); cur_scope.Rename(new_inside_name, inside_grad_name); } @@ -437,11 +445,11 @@ class RecurrentGradOp : public RecurrentBase { } if (step_id == 0) { // alloc memory outside->Resize(PrependDims(seq_len, inside.dims())); - outside->mutable_data(dev_ctx.GetPlace(), inside.type()); + outside->mutable_data(place, inside.type()); } auto dst = outside->Slice(seq_offset, seq_offset + 1); - framework::CopyFrom(inside, dev_ctx.GetPlace(), dev_ctx, &dst); + framework::CopyFrom(inside, place, dev_ctx, &dst); }); VLOG(5) << "Link outside gradient finished "; @@ -453,8 +461,8 @@ class RecurrentGradOp : public RecurrentBase { [&](const framework::LoDTensor &inside, framework::LoDTensor *outside) { outside->Resize(inside.dims()); - outside->mutable_data(dev_ctx.GetPlace(), inside.type()); - framework::CopyFrom(inside, dev_ctx.GetPlace(), dev_ctx, outside); + outside->mutable_data(place, inside.type()); + framework::CopyFrom(inside, place, dev_ctx, outside); }); VLOG(5) << "Link initialize state gradient finished "; } @@ -483,7 +491,7 @@ class RecurrentGradOp : public RecurrentBase { std::unordered_set LocalVarNames( const framework::Scope &scope) const { - return this->List2Set(scope.GetAllNames(false)); + return this->List2Set(scope.LocalVarNames()); } static std::vector GradVarLists( const std::vector &var_names) { @@ -497,8 +505,7 @@ class RecurrentGradOp : public RecurrentBase { class RecurrentOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - RecurrentOpProtoMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + RecurrentOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput(kInputs, "rnn inputs").AsDuplicable(); AddInput(kInitialStates, "rnn initial states").AsDuplicable(); @@ -523,8 +530,7 @@ The ex-state means the state value in the ex-timestep or the previous time step string::Sprintf( "The state variable names. [%s, %s, %s] must be the same order", kExStates, kStates, kInitStateGrads)); - AddAttr(kStepBlock, - "The step block inside RNN"); + AddAttr(kStepBlock, "The step block inside RNN"); AddAttr(kReverse, R"DOC(Calculate RNN reversely or not. 
By default reverse=False @@ -566,13 +572,13 @@ class RecurrentGradOpDescMaker : public framework::SingleGradOpDescMaker { using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; protected: - virtual std::unique_ptr Apply() const { - auto *grad = new framework::OpDescBind(); + virtual std::unique_ptr Apply() const { + auto *grad = new framework::OpDesc(); grad->SetType("recurrent_grad"); for (auto &input_param : this->InputNames()) { grad->SetInput(input_param, this->Input(input_param)); grad->SetOutput(framework::GradVarName(input_param), - this->InputGrad(input_param)); + this->InputGrad(input_param, false)); } for (auto &output_param : this->OutputNames()) { @@ -589,7 +595,7 @@ class RecurrentGradOpDescMaker : public framework::SingleGradOpDescMaker { grad->SetAttrMap(this->Attrs()); grad->SetBlockAttr(kStepBlock, *grad_block_[0]); - return std::unique_ptr(grad); + return std::unique_ptr(grad); } }; diff --git a/paddle/operators/recv_op.cc b/paddle/operators/recv_op.cc index eed482c1b4..322f8571cf 100644 --- a/paddle/operators/recv_op.cc +++ b/paddle/operators/recv_op.cc @@ -1,16 +1,16 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
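The recurrent_op.cc hunks above are the clearest instance of this patch's larger migration: `OperatorBase::Run` now receives a `platform::Place` instead of a `platform::DeviceContext`, and a per-place context is borrowed from a process-wide pool only at the points that actually copy tensors. A minimal standalone sketch of that pool pattern, with stand-in types rather than the real `platform::` classes:

```cpp
#include <map>
#include <memory>
#include <string>

// Stand-ins for platform::Place and platform::DeviceContext.
struct Place {
  std::string name;
  bool operator<(const Place &other) const { return name < other.name; }
};
struct DeviceContext {
  explicit DeviceContext(const Place &p) : place(p) {}
  Place place;
};

// Sketch of the pool: one lazily created context per place, process-wide,
// matching the DeviceContextPool::Get() / pool.Borrow(place) calls above.
class DeviceContextPool {
 public:
  static DeviceContextPool &Get() {
    static DeviceContextPool pool;  // singleton
    return pool;
  }
  DeviceContext *Borrow(const Place &place) {
    auto &ctx = contexts_[place];
    if (!ctx) ctx.reset(new DeviceContext(place));
    return ctx.get();
  }

 private:
  std::map<Place, std::unique_ptr<DeviceContext>> contexts_;
};

int main() {
  Place cpu{"cpu"};
  // Operators now carry only the Place and borrow a context on demand:
  auto &dev_ctx = *DeviceContextPool::Get().Borrow(cpu);
  return dev_ctx.place.name == "cpu" ? 0 : 1;
}
```

The payoff is visible in the hunks: operators that never touch device memory no longer need a context at all, and those that do (the `CopyFrom` calls) fetch one just in time.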
*/ #include #include @@ -24,9 +24,12 @@ #include "paddle/framework/framework.pb.h" #include "paddle/framework/lod_tensor.h" #include "paddle/framework/op_registry.h" +#include "paddle/framework/proto_desc.h" #include "paddle/operators/detail/send_recv_impl.h" #include "paddle/operators/detail/simple_block_queue.h" +#define LISTEN_TERMINATE_MESSAGE "TERMINATE@RECV" + namespace paddle { namespace operators { @@ -38,7 +41,7 @@ void RunServer(Server **rpc_server, builder.RegisterService(service.get()); std::unique_ptr server(builder.BuildAndStart()); *rpc_server = server.get(); - LOG(INFO) << "Server listening on " << server_address << std::endl; + LOG(INFO) << "Server listening on " << server_address; server->Wait(); } @@ -56,34 +59,94 @@ class RecvOp : public framework::OperatorBase { } } - virtual ~RecvOp() { + void Stop() override { + detail::TensorWithName term_msg; + term_msg.first = LISTEN_TERMINATE_MESSAGE; + rpc_service_->Push(term_msg); rpc_server_->Shutdown(); server_thread_->join(); } + std::string GetGradVarNameForTrainer(const std::string &varname) const { + if (grads_counter_.find(varname) == grads_counter_.end()) { + grads_counter_[varname] = 0; + } + char ret[256]; + snprintf(ret, sizeof(ret), "%s.trainer_%d", varname.c_str(), + grads_counter_[varname]++); + return std::string(ret); + } + void Run(const framework::Scope &scope, - const platform::DeviceContext &dev_ctx) const override { - // blocking get one var from client. - const framework::LoDTensor &t = rpc_service_->Get(); + const platform::Place &dev_place) const override { + // FIXME(typhoonzero): no new scopes for every run. framework::Scope &recv_scope = scope.NewScope(); - // set graph input var - auto *var = recv_scope.Var(Input("RX")); - auto *tensor = var->GetMutable(); - // FIXME(typhoonzero): do not copy - framework::CopyFrom(t, dev_ctx.GetPlace(), dev_ctx, tensor); - - std::string program_str = Attr("OptimizeProgram"); - framework::ProgramDesc program_desc; - program_desc.ParseFromString(program_str); - framework::ProgramDescBind program(program_desc); - framework::Executor executor(dev_ctx); - // Run sub graph to get optimized tensor - executor.Run(program, &recv_scope, 0, /*global_block*/ - false /*create_local_scope*/); - - auto *out_var = recv_scope.FindVar("Out"); - // push back - rpc_service_->Push(out_var->Get()); + rpc_service_->SetScope(&recv_scope); + auto param_list = Attr>("ParamList"); + auto grad_list = Attr>("GradList"); + auto trainer_count = Attr("Trainers"); + size_t param_count = param_list.size(); + rpc_service_->Reset(); + // TODO(typhoonzero): change this to a while_op for every cluster-batch. + bool exit_flag = false; + while (!exit_flag) { + // Get from multiple trainers, we don't care about order in which + // the gradient arrives, just add suffix 0~n then average the gradient. + for (size_t i = 0; i < param_count * trainer_count; ++i) { + // blocking get one var from client. 
+ const detail::TensorWithName &v = rpc_service_->Get(); + auto grad_var_name = v.first; + if (grad_var_name == LISTEN_TERMINATE_MESSAGE) { + exit_flag = true; + break; + } + auto it = std::find(grad_list.begin(), grad_list.end(), grad_var_name); + std::string param_var_name; + if (it != grad_list.end()) { + param_var_name = param_list[it - grad_list.begin()]; + } else { + LOG(ERROR) << "gradient has no paired parameter!"; + } + VLOG(3) << "received grad: " << grad_var_name + << " updating param: " << param_var_name; + auto *merged_grad = recv_scope.FindVar(grad_var_name); + if (merged_grad == nullptr) { + // create output of merged var. + auto merged_var = recv_scope.Var(grad_var_name); + merged_var->GetMutable<framework::LoDTensor>(); + } + + if (trainer_count > 1) { + grad_var_name = this->GetGradVarNameForTrainer(grad_var_name); + } + + auto *var = recv_scope.Var(grad_var_name); + auto *tensor = var->GetMutable<framework::LoDTensor>(); + // FIXME(typhoonzero): do not copy + platform::DeviceContextPool &pool = platform::DeviceContextPool::Get(); + auto &dev_ctx = *pool.Borrow(dev_place); + framework::CopyFrom(v.second, dev_place, dev_ctx, tensor); + } + if (exit_flag) { + break; + } + rpc_service_->Reset(); + + std::string program_str = Attr<std::string>("OptimizeProgram"); + framework::proto::ProgramDesc program_desc; + program_desc.ParseFromString(program_str); + framework::ProgramDesc program(program_desc); + framework::Executor executor(dev_place); + // Run sub graph to get optimized tensor + try { + executor.Run(program, &recv_scope, 0, /*global_block*/ + false /*create_local_scope*/, false /*create_vars*/); + } catch (std::exception &e) { + LOG(ERROR) << "run sub program error " << e.what(); + } + rpc_service_->Done(); + grads_counter_.clear(); + } // while (!exit_flag) } protected: @@ -93,13 +156,14 @@ class RecvOp : public framework::OperatorBase { // grpc send/recv service implement to register. std::shared_ptr rpc_service_; std::shared_ptr server_thread_; + mutable std::unordered_map<std::string, int> grads_counter_; }; class RecvOpMaker : public framework::OpProtoAndCheckerMaker { public: - RecvOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) + RecvOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("RX", "(Tensor) Input tensor to be saved"); + AddInput("RX", "(Tensor) Input tensor to be optimized").AsDuplicable(); AddComment(R"DOC( Recv operator This operator will recv tensor from send_op .AddCustomChecker([](const std::string &ip) { return !ip.empty(); }); AddAttr("OptimizeProgram", "type string", "Serialized ProgramDesc string for recv to run."); + AddAttr<std::vector<std::string>>( + "ParamList", "type list of string", + "grad->param name mapping to find which param to optimize.") + .SetDefault({}); + AddAttr<std::vector<std::string>>( + "GradList", "type list of string", + "grad->param name mapping to find which param to optimize.") + .SetDefault({}); + AddAttr<int>("Trainers", "type int", + "Number of trainers in the current cluster job") + .SetDefault(1); } }; diff --git a/paddle/operators/reduce_op.cc b/paddle/operators/reduce_op.cc index b754637bf2..a3ff4a6ca0 100644 --- a/paddle/operators/reduce_op.cc +++ b/paddle/operators/reduce_op.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License.
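Stepping back to the recv_op.cc loop above: gradients arriving from several trainers must not overwrite one another before they are averaged, so each arrival is renamed with a per-trainer counter suffix. A standalone sketch of that naming scheme, mirroring the snprintf logic in the hunk:

```cpp
#include <cstdio>
#include <iostream>
#include <string>
#include <unordered_map>

// Mirrors GetGradVarNameForTrainer above: the k-th arrival of a given
// gradient becomes "<name>.trainer_<k>", so copies from different trainers
// land in distinct scope variables until they are averaged.
std::string GradVarNameForTrainer(
    std::unordered_map<std::string, int> &counters, const std::string &name) {
  char buf[256];
  std::snprintf(buf, sizeof(buf), "%s.trainer_%d", name.c_str(),
                counters[name]++);
  return std::string(buf);
}

int main() {
  std::unordered_map<std::string, int> counters;
  std::cout << GradVarNameForTrainer(counters, "w@GRAD") << "\n";  // w@GRAD.trainer_0
  std::cout << GradVarNameForTrainer(counters, "w@GRAD") << "\n";  // w@GRAD.trainer_1
  return 0;
}
```

This also explains the `mutable` counter map in the member list: `Run` is `const`, but the counters must advance across arrivals within one cluster batch and are cleared after each optimize pass.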
- You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/reduce_op.h" #include "paddle/operators/net_op.h" @@ -37,18 +37,23 @@ class ReduceOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_LT( dim, x_rank, "The dim should be in the range [-rank(input), rank(input))."); - bool keep_dim = ctx->Attrs().Get<bool>("keep_dim"); - auto dims_vector = vectorize(x_dims); - if (keep_dim || x_rank == 1) { - dims_vector[dim] = 1; + bool reduce_all = ctx->Attrs().Get<bool>("reduce_all"); + if (reduce_all) { + ctx->SetOutputDim("Out", {1}); } else { - dims_vector.erase(dims_vector.begin() + dim); - } - auto out_dims = framework::make_ddim(dims_vector); - ctx->SetOutputDim("Out", out_dims); - if (dim != 0) { - // Only pass LoD when not reducing on the first dim. - ctx->ShareLoD("X", /*->*/ "Out"); + bool keep_dim = ctx->Attrs().Get<bool>("keep_dim"); + auto dims_vector = vectorize(x_dims); + if (keep_dim || x_rank == 1) { + dims_vector[dim] = 1; + } else { + dims_vector.erase(dims_vector.begin() + dim); + } + auto out_dims = framework::make_ddim(dims_vector); + ctx->SetOutputDim("Out", out_dims); + if (dim != 0) { + // Only pass LoD when not reducing on the first dim. + ctx->ShareLoD("X", /*->*/ "Out"); + } } } }; @@ -78,7 +83,7 @@ class ReduceGradOp : public framework::OperatorWithKernel { class ReduceOpMaker : public framework::OpProtoAndCheckerMaker { public: - ReduceOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) + ReduceOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "(Tensor) The input tensor. Tensors with rank at most 6 are " @@ -95,11 +100,16 @@ class ReduceOpMaker : public framework::OpProtoAndCheckerMaker { "(bool, default false) " "If true, retain the reduced dimension with length 1.") .SetDefault(false); + AddAttr<bool>("reduce_all", + "(bool, default false) " + "If true, output a scalar reduced along all dimensions.") + .SetDefault(false); comment_ = R"DOC( {ReduceOp} Operator. This operator computes the {reduce} of input tensor along the given dimension. The result tensor has 1 fewer dimension than the input unless keep_dim is true. +If reduce_all is true, the input is reduced along all dimensions and the output is a scalar. )DOC"; AddComment(comment_); @@ -125,8 +135,7 @@ The result tensor has 1 fewer dimension than the input unless keep_dim is true.
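The reduce_op.cc hunk above adds a `reduce_all` switch that short-circuits shape inference to a single-element output; otherwise the old `dim`/`keep_dim` logic applies unchanged. A standalone sketch of the resulting decision tree, using plain vectors instead of Paddle's DDim:

```cpp
#include <cassert>
#include <vector>

// Sketch of the InferShape logic above: reduce_all wins over dim/keep_dim.
std::vector<long> ReduceOutDims(const std::vector<long> &x_dims, int dim,
                                bool keep_dim, bool reduce_all) {
  if (reduce_all) return {1};      // scalar result, whatever the input rank
  std::vector<long> out = x_dims;
  if (keep_dim || x_dims.size() == 1) {
    out[dim] = 1;                  // keep the reduced axis with length 1
  } else {
    out.erase(out.begin() + dim);  // drop the reduced axis
  }
  return out;
}

int main() {
  assert((ReduceOutDims({2, 3, 4}, 1, false, false) == std::vector<long>{2, 4}));
  assert((ReduceOutDims({2, 3, 4}, 1, true, false) == std::vector<long>{2, 1, 4}));
  assert((ReduceOutDims({2, 3, 4}, 1, false, true) == std::vector<long>{1}));
  return 0;
}
```

The kernel side (reduce_op.h below) makes the same split: the `reduce_all` path flattens the input to 1-D and reduces along dimension 0, avoiding the rank-templated dispatch entirely.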
class ReduceSumOpMaker : public ReduceOpMaker { public: - ReduceSumOpMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + ReduceSumOpMaker(OpProto *proto, OpAttrChecker *op_checker) : ReduceOpMaker(proto, op_checker) { SetComment("ReduceSum", "sum"); AddComment(comment_); @@ -135,8 +144,7 @@ class ReduceSumOpMaker : public ReduceOpMaker { class ReduceMeanOpMaker : public ReduceOpMaker { public: - ReduceMeanOpMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + ReduceMeanOpMaker(OpProto *proto, OpAttrChecker *op_checker) : ReduceOpMaker(proto, op_checker) { SetComment("ReduceMean", "mean"); AddComment(comment_); @@ -145,8 +153,7 @@ class ReduceMeanOpMaker : public ReduceOpMaker { class ReduceMaxOpMaker : public ReduceOpMaker { public: - ReduceMaxOpMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + ReduceMaxOpMaker(OpProto *proto, OpAttrChecker *op_checker) : ReduceOpMaker(proto, op_checker) { SetComment("ReduceMax", "max"); AddComment(comment_); @@ -155,8 +162,7 @@ class ReduceMaxOpMaker : public ReduceOpMaker { class ReduceMinOpMaker : public ReduceOpMaker { public: - ReduceMinOpMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + ReduceMinOpMaker(OpProto *proto, OpAttrChecker *op_checker) : ReduceOpMaker(proto, op_checker) { SetComment("ReduceMin", "min"); AddComment(comment_); diff --git a/paddle/operators/reduce_op.cu b/paddle/operators/reduce_op.cu index a10ace5253..1dd948ed8a 100644 --- a/paddle/operators/reduce_op.cu +++ b/paddle/operators/reduce_op.cu @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #define EIGEN_USE_GPU #include "paddle/operators/reduce_op.h" diff --git a/paddle/operators/reduce_op.h b/paddle/operators/reduce_op.h index 47ce910f28..da5f397776 100644 --- a/paddle/operators/reduce_op.h +++ b/paddle/operators/reduce_op.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once @@ -26,10 +26,12 @@ using DDim = framework::DDim; template using EigenTensor = framework::EigenTensor; - template using EigenScalar = framework::EigenScalar; +template +using EigenVector = framework::EigenVector; struct SumFunctor { template @@ -95,26 +97,41 @@ template class ReduceKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { - int rank = context.Input("X")->dims().size(); - switch (rank) { - case 1: - ReduceCompute<1>(context); - break; - case 2: - ReduceCompute<2>(context); - break; - case 3: - ReduceCompute<3>(context); - break; - case 4: - ReduceCompute<4>(context); - break; - case 5: - ReduceCompute<5>(context); - break; - case 6: - ReduceCompute<6>(context); - break; + bool reduce_all = context.Attr("reduce_all"); + if (reduce_all) { + // Flatten and reduce 1-D tensor + auto* input = context.Input("X"); + auto* output = context.Output("Out"); + output->mutable_data(context.GetPlace()); + auto x = EigenVector::Flatten(*input); + auto out = EigenScalar::From(*output); + auto& place = + *context.template device_context().eigen_device(); + auto reduce_dim = Eigen::array({{0}}); + Functor functor; + functor(place, x, out, reduce_dim); + } else { + int rank = context.Input("X")->dims().size(); + switch (rank) { + case 1: + ReduceCompute<1>(context); + break; + case 2: + ReduceCompute<2>(context); + break; + case 3: + ReduceCompute<3>(context); + break; + case 4: + ReduceCompute<4>(context); + break; + case 5: + ReduceCompute<5>(context); + break; + case 6: + ReduceCompute<6>(context); + break; + } } } @@ -157,26 +174,46 @@ template class ReduceGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { - int rank = context.Input("X")->dims().size(); - switch (rank) { - case 1: - ReduceGradCompute<1>(context); - break; - case 2: - ReduceGradCompute<2>(context); - break; - case 3: - ReduceGradCompute<3>(context); - break; - case 4: - ReduceGradCompute<4>(context); - break; - case 5: - ReduceGradCompute<5>(context); - break; - case 6: - ReduceGradCompute<6>(context); - break; + bool reduce_all = context.Attr("reduce_all"); + if (reduce_all) { + auto* input0 = context.Input("X"); + auto* input1 = context.Input("Out"); + auto* input2 = context.Input(framework::GradVarName("Out")); + auto* output = context.Output(framework::GradVarName("X")); + output->mutable_data(context.GetPlace()); + auto x = EigenVector::Flatten(*input0); + auto x_reduce = EigenVector::From(*input1); + auto x_reduce_grad = EigenVector::From(*input2); + auto x_grad = EigenVector::Flatten(*output); + auto& place = + *context.template device_context().eigen_device(); + auto broadcast_dim = + 
Eigen::array<int, 1>({{static_cast<int>(input0->numel())}}); + Functor functor; + functor(place, x, x_reduce, x_grad, x_reduce_grad, broadcast_dim, + broadcast_dim[0]); + } else { + int rank = context.Input<Tensor>("X")->dims().size(); + switch (rank) { + case 1: + ReduceGradCompute<1>(context); + break; + case 2: + ReduceGradCompute<2>(context); + break; + case 3: + ReduceGradCompute<3>(context); + break; + case 4: + ReduceGradCompute<4>(context); + break; + case 5: + ReduceGradCompute<5>(context); + break; + case 6: + ReduceGradCompute<6>(context); + break; + } } } diff --git a/paddle/operators/reorder_lod_tensor_by_rank_op.cc b/paddle/operators/reorder_lod_tensor_by_rank_op.cc new file mode 100644 index 0000000000..1063388e25 --- /dev/null +++ b/paddle/operators/reorder_lod_tensor_by_rank_op.cc @@ -0,0 +1,235 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/framework/lod_rank_table.h" +#include "paddle/framework/op_registry.h" +#include "paddle/operators/detail/safe_ref.h" +#include "paddle/platform/device_context.h" + +namespace paddle { +namespace operators { + +class ReorderLoDTensorByRankTableOpProtoMaker + : public framework::OpProtoAndCheckerMaker { + public: + ReorderLoDTensorByRankTableOpProtoMaker(OpProto *proto, + OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "(LoDTensor) the input lod tensor that needs to be reordered."); + AddInput("RankTable", + "(LoDRankTable) the rank table that the input should follow"); + AddOutput("Out", "(LoDTensor) the reordered lod tensor"); + AddComment(R"DOC(ReorderLoDTensorByRankTable + +Reorder the input X by the rank of `RankTable`. If `RankTable` is ordered by +index [3, 0, 2, 1], the sequences of the input X will be reordered so that the +third sequence of X becomes the first sequence of the output. + +NOTE: The RankTable does not need to be calculated from X. + +For example: +The X = [Seq0, Seq1, Seq2, Seq3]. The indices of RankTable are [3, 0, 2, 1]. + +The Out = [Seq3, Seq0, Seq2, Seq1] with correct LoD information.
+)DOC"); + } +}; + +class ReorderLoDTensorByRankTableBase : public framework::OperatorBase { + public: + ReorderLoDTensorByRankTableBase(const std::string &type, + const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : OperatorBase(type, inputs, outputs, attrs) {} + void Run(const framework::Scope &scope, + const platform::Place &place) const override { + auto &x = + detail::Ref(scope.FindVar(Input("X")), + "Cannot find input lod tensor variable %s", Input("X")) + .Get(); + auto &rank_table = detail::Ref(scope.FindVar(Input("RankTable")), + "Cannot find input rank table variable %s", + Input("RankTable")) + .Get(); + auto &out = + *detail::Ref(scope.FindVar(Output("Out")), + "Cannot find output lod tensor variable %s", Output("Out")) + .GetMutable(); + + out.Resize(x.dims()); + out.mutable_data(x.place(), x.type()); + this->process(place, x, rank_table, &out); + } + + protected: + virtual void process(const platform::Place &place, + const framework::LoDTensor &x, + const framework::LoDRankTable &rank_table, + framework::LoDTensor *out) const = 0; + + struct AbsoluteRankTableItem { + size_t offset; // the absolute/accumulated offset. + size_t length; // the length + framework::LoD lod; + }; + + std::vector GetAbsoluteOffsetAndLengthByLoDRankTable( + const framework::LoDTensor &x) const { + std::vector absolute_table; + size_t level = 0; + size_t size = x.lod()[level].size(); + + for (size_t i = 0; i < size - 1; ++i) { + auto lod_offset = + framework::GetSubLoDAndAbsoluteOffset(x.lod(), i, i + 1, level); + + auto &offset = lod_offset.second; + + absolute_table.emplace_back(); + absolute_table.back().length = offset.second - offset.first; + absolute_table.back().offset = offset.first; + absolute_table.back().lod = lod_offset.first; + } + return absolute_table; + } + + size_t CopyTensorAndLod(const platform::Place &place, + const AbsoluteRankTableItem &item, + const framework::LoDTensor &x, + framework::LoDTensor *out, size_t out_offset) const { + auto &out_lod = *out->mutable_lod(); + auto len = item.length; + auto x_offset = item.offset; + + if (out_lod.empty()) { + for (size_t i = 0; i < item.lod.size(); ++i) { + out_lod.push_back(std::vector({0})); + } + } + + for (size_t i = 0; i < out_lod.size(); ++i) { + auto &out_v = out_lod[i]; + auto &new_lod_v = item.lod[i]; + + for (auto &detail : new_lod_v) { + out_v.push_back(out_v.back() + detail); + } + } + + auto x_sliced = x.Slice(x_offset, x_offset + len); + auto out_sliced = out->Slice(out_offset, out_offset + len); + + platform::DeviceContextPool &pool = platform::DeviceContextPool::Get(); + auto &dev_ctx = *pool.Borrow(place); + framework::CopyFrom(x_sliced, out_sliced.place(), dev_ctx, &out_sliced); + out_offset += len; + return out_offset; + } +}; + +class ReorderLoDTensorByRankTableOp : public ReorderLoDTensorByRankTableBase { + public: + ReorderLoDTensorByRankTableOp(const std::string &type, + const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : ReorderLoDTensorByRankTableBase(type, inputs, outputs, attrs) {} + + protected: + void process(const platform::Place &place, const framework::LoDTensor &x, + const framework::LoDRankTable &rank_table, + framework::LoDTensor *out) const override { + auto absolute_table = GetAbsoluteOffsetAndLengthByLoDRankTable(x); + size_t out_offset = 0; + out->mutable_lod()->clear(); + for (auto &item : rank_table.items()) { + PADDLE_ENFORCE_LT(item.index, 
absolute_table.size()); + out_offset = CopyTensorAndLod(place, absolute_table[item.index], x, out, + out_offset); + } + } +}; + +class IdentityInferShape : public framework::InferShapeBase { + public: + void operator()(framework::InferShapeContext *context) const override { + context->SetOutputDim("Out", context->GetInputDim("X")); + } +}; + +class ReorderLodTensorByRankGradOpMaker + : public framework::SingleGradOpDescMaker { + public: + using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; + + protected: + std::unique_ptr Apply() const override { + auto *grad_op = new framework::OpDesc(); + grad_op->SetType("reorder_lod_tensor_by_rank_grad"); + grad_op->SetInput("X", OutputGrad("Out")); + grad_op->SetOutput("Out", InputGrad("X")); + grad_op->SetInput("RankTable", Input("RankTable")); + return std::unique_ptr(grad_op); + } +}; + +class ReorderLoDTensorByRankGradOp : public ReorderLoDTensorByRankTableBase { + public: + ReorderLoDTensorByRankGradOp(const std::string &type, + const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : ReorderLoDTensorByRankTableBase(type, inputs, outputs, attrs) {} + + protected: + void process(const platform::Place &place, const framework::LoDTensor &x, + const framework::LoDRankTable &rank_table, + framework::LoDTensor *out) const override { + auto absolute_table = GetAbsoluteOffsetAndLengthByLoDRankTable(x); + + // offsets = enumerate([item.index for item in rank_table.items()]) + std::vector> offsets; + offsets.reserve(rank_table.items().size()); + for (size_t i = 0; i < rank_table.items().size(); ++i) { + offsets.push_back({i, rank_table.items()[i].index}); + } + + // offsets.sort(key=lambda x: x[1]) + std::sort( + offsets.begin(), offsets.end(), + [](const std::pair &a, + const std::pair &b) { return a.second < b.second; }); + + // Copy TensorAndLod + size_t out_offset = 0; + for (auto &offset : offsets) { + out_offset = this->CopyTensorAndLod(place, absolute_table[offset.first], + x, out, out_offset); + } + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; + +REGISTER_OPERATOR(reorder_lod_tensor_by_rank, + ops::ReorderLoDTensorByRankTableOp, + ops::ReorderLodTensorByRankGradOpMaker, + ops::ReorderLoDTensorByRankTableOpProtoMaker, + ops::IdentityInferShape); +REGISTER_OPERATOR(reorder_lod_tensor_by_rank_grad, + ops::ReorderLoDTensorByRankGradOp, ops::IdentityInferShape); diff --git a/paddle/operators/reshape_op.cc b/paddle/operators/reshape_op.cc index 39bf2118d6..58e8fd6124 100644 --- a/paddle/operators/reshape_op.cc +++ b/paddle/operators/reshape_op.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
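A note on the gradient op just registered: `ReorderLoDTensorByRankGradOp` undoes the forward reordering without building an explicit inverse table. It pairs each output slot with its rank-table index, sorts by the index, and copies in that order, which is exactly applying the inverse permutation. A standalone sketch of the index logic:

```cpp
#include <algorithm>
#include <cassert>
#include <utility>
#include <vector>

// Sketch of the process() method above: given the forward permutation
// (the rank-table indices), sorting (slot, index) pairs by index and then
// reading the slots in that order yields the inverse permutation.
std::vector<int> InversePermutation(const std::vector<size_t> &perm) {
  std::vector<std::pair<size_t, size_t>> offsets;  // (output slot, source index)
  for (size_t i = 0; i < perm.size(); ++i) offsets.push_back({i, perm[i]});
  std::sort(offsets.begin(), offsets.end(),
            [](const std::pair<size_t, size_t> &a,
               const std::pair<size_t, size_t> &b) { return a.second < b.second; });
  std::vector<int> order;
  for (auto &o : offsets) order.push_back(static_cast<int>(o.first));
  return order;
}

int main() {
  // Forward order [3, 0, 2, 1] sends Seq3 to slot 0, Seq0 to slot 1, etc.
  // Its inverse [1, 3, 2, 0] restores the original sequence order.
  assert((InversePermutation({3, 0, 2, 1}) == std::vector<int>{1, 3, 2, 0}));
  return 0;
}
```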
*/ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/reshape_op.h" @@ -34,21 +34,33 @@ class ReshapeOp : public framework::OperatorWithKernel { auto shape = ctx->Attrs().Get<std::vector<int>>("shape"); PADDLE_ENFORCE(shape.size() > 0, "Attr(shape) shouldn't be empty."); auto x_dims = ctx->GetInputDim("X"); - // TODO(qiao) change batch_size - for (size_t i = 1; i < shape.size(); ++i) { - PADDLE_ENFORCE(shape[i] > 0, - "Each dimension of Attr(shape) " - "must be positive except the first one."); - } - if (shape[0] < 0) { - shape[0] = x_dims[0]; + + std::vector<size_t> neg_dims_idx; + // set some dimension to -1 if it is unknown + const int unknown_size = -1; + for (size_t i = 0; i < shape.size(); ++i) { + PADDLE_ENFORCE(shape[i] > 0 || shape[i] == unknown_size, + "Each dimension of Attr(shape) must be positive or %d.", + unknown_size); + if (shape[i] == unknown_size) { + neg_dims_idx.push_back(i); + PADDLE_ENFORCE(neg_dims_idx.size() <= 1, + "Only one dimension of Attr(shape) can be unknown."); + } } - // capacity check + int64_t capacity = std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<int>()); int64_t in_size = framework::product(x_dims); - PADDLE_ENFORCE_EQ(capacity, in_size, - "The size of Input(X) mismatches with Attr(shape)."); + if (neg_dims_idx.size() == 1) { + // dim infer + shape[neg_dims_idx[0]] = in_size / (-capacity); + // recalculate capacity + capacity = shape[neg_dims_idx[0]] * (-capacity); + } + // capacity check + PADDLE_ENFORCE(capacity == in_size, + "The size of Input(X) mismatches with Attr(shape)."); // resize output std::vector<int64_t> shape_int64(shape.size(), 0); std::transform(shape.begin(), shape.end(), shape_int64.begin(), @@ -65,8 +77,7 @@ class ReshapeOp : public framework::OperatorWithKernel { class ReshapeOpMaker : public framework::OpProtoAndCheckerMaker { public: - ReshapeOpMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + ReshapeOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "The input tensor of reshape operator."); AddOutput("Out", "The output tensor of reshape operator."); @@ -84,10 +95,13 @@ Given a 2-D tensor X with 2 rows and 2 columns [[1, 2], [3, 4]] and target shape = [1, 4], the reshape operator will transform -the tensor X into a 1-D tensor: +the tensor X into a 2-D tensor: - [1, 2, 3, 4] + [[1, 2, 3, 4]] +One dimension in the target shape can be set to -1, representing that its +size is unknown. In this case, the real dimension will be inferred from +the original shape of Input(X) and the other dimensions in the target shape. )DOC"); } }; diff --git a/paddle/operators/reshape_op.cu b/paddle/operators/reshape_op.cu index b7329238c0..f487e43b99 100644 --- a/paddle/operators/reshape_op.cu +++ b/paddle/operators/reshape_op.cu @@ -1,22 +1,22 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License.
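The reshape_op.cc hunk above lets one target dimension be -1. The arithmetic is compact: the unknown slot contributes a factor of -1, so the running product `capacity` comes out negative, and the missing extent is `in_size / (-capacity)`. A standalone sketch with a worked example:

```cpp
#include <cassert>
#include <functional>
#include <numeric>
#include <vector>

// Sketch of the ReshapeOp shape inference above: at most one -1 entry,
// whose extent is recovered from the element count of the input.
std::vector<int> InferReshape(std::vector<int> shape, long in_size) {
  int neg_idx = -1;
  for (size_t i = 0; i < shape.size(); ++i) {
    if (shape[i] == -1) {
      assert(neg_idx == -1 && "only one dimension may be unknown");
      neg_idx = static_cast<int>(i);
    }
  }
  long capacity =
      std::accumulate(shape.begin(), shape.end(), 1L, std::multiplies<long>());
  if (neg_idx >= 0) {
    // The -1 entry made capacity negative, so -capacity is the product of
    // all the known extents.
    shape[neg_idx] = static_cast<int>(in_size / (-capacity));
    capacity = shape[neg_idx] * (-capacity);
  }
  assert(capacity == in_size && "shape must cover all input elements");
  return shape;
}

int main() {
  // A 2 x 8 input (16 elements) reshaped with target [-1, 4] becomes [4, 4].
  assert((InferReshape({-1, 4}, 16) == std::vector<int>{4, 4}));
  return 0;
}
```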
+You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/reshape_op.h" REGISTER_OP_CUDA_KERNEL( reshape, - paddle::operators::ReshapeKernel); + paddle::operators::ReshapeKernel); REGISTER_OP_CUDA_KERNEL( reshape_grad, - paddle::operators::ReshapeGradKernel); + paddle::operators::ReshapeGradKernel); diff --git a/paddle/operators/reshape_op.h b/paddle/operators/reshape_op.h index 92d8cbbb56..a4eb34a0ad 100644 --- a/paddle/operators/reshape_op.h +++ b/paddle/operators/reshape_op.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once diff --git a/paddle/operators/rmsprop_op.cc b/paddle/operators/rmsprop_op.cc index fc3f9b8988..f7c250bf91 100644 --- a/paddle/operators/rmsprop_op.cc +++ b/paddle/operators/rmsprop_op.cc @@ -63,8 +63,7 @@ class RmspropOp : public framework::OperatorWithKernel { class RmspropOpMaker : public framework::OpProtoAndCheckerMaker { public: - RmspropOpMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + RmspropOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("Param", "(Tensor, default Tensor) " diff --git a/paddle/operators/rmsprop_op.cu b/paddle/operators/rmsprop_op.cu index 2a9fd6e104..0295dc262f 100644 --- a/paddle/operators/rmsprop_op.cu +++ b/paddle/operators/rmsprop_op.cu @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #define EIGEN_USE_GPU #include "paddle/operators/rmsprop_op.h" diff --git a/paddle/operators/rnn_memory_helper_op.cc b/paddle/operators/rnn_memory_helper_op.cc index 3a035f0b9a..eb55ed6a05 100644 --- a/paddle/operators/rnn_memory_helper_op.cc +++ b/paddle/operators/rnn_memory_helper_op.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ #include "paddle/framework/op_registry.h" #include "paddle/framework/operator.h" @@ -25,7 +25,7 @@ class RNNMemoryHelperOp : public framework::OperatorBase { const framework::AttributeMap &attrs) : OperatorBase(type, inputs, outputs, attrs) {} void Run(const framework::Scope &scope, - const platform::DeviceContext &dev_ctx) const override { + const platform::Place &dev_place) const override { auto mem_var_name = Input("X"); auto *mem_var = scope.FindVar(mem_var_name); PADDLE_ENFORCE(mem_var != nullptr, @@ -57,15 +57,14 @@ class RNNMemoryHelperOpShapeInference : public framework::InferShapeBase { class RNNMemoryHelperOpInfoMaker : public framework::OpProtoAndCheckerMaker { public: - RNNMemoryHelperOpInfoMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + RNNMemoryHelperOpInfoMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", ""); AddOutput("Out", ""); AddAttr("dtype", "(int, default 5 (FP32)) " "Output data type") - .SetDefault(framework::DataType::FP32); + .SetDefault(framework::proto::DataType::FP32); AddComment(""); } }; @@ -78,7 +77,7 @@ class RNNMemoryHelperGradOp : public framework::OperatorBase { const framework::AttributeMap &attrs) : OperatorBase(type, inputs, outputs, attrs) {} void Run(const framework::Scope &scope, - const platform::DeviceContext &dev_ctx) const override { + const platform::Place &dev_place) const override { auto out_grad_var_name = Input(framework::GradVarName("Out")); auto *out_grad_var = scope.FindVar(out_grad_var_name); @@ -101,7 +100,7 @@ class RNNMemoryHelperGradOp : public framework::OperatorBase { auto zero_op = framework::OpRegistry::CreateOp( "fill_constant", {}, {{"Out", {in_grad_var_name}}}, attrs); - zero_op->Run(scope, dev_ctx); + zero_op->Run(scope, dev_place); } else { auto &out_grad_tensor = out_grad_var->Get(); auto *in_grad_tensor = in_grad_var->GetMutable(); @@ -114,8 +113,7 @@ class RNNMemoryHelperGradOp : public framework::OperatorBase { class RNNMemoryHelperGradOpInfoMaker : public framework::OpProtoAndCheckerMaker { public: - RNNMemoryHelperGradOpInfoMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + RNNMemoryHelperGradOpInfoMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput(framework::GradVarName("Out"), ""); AddInput("X", ""); @@ -124,7 +122,7 @@ class RNNMemoryHelperGradOpInfoMaker AddAttr("dtype", "(int, default 5 (FP32)) " "Output data type") - .SetDefault(framework::DataType::FP32); + .SetDefault(framework::proto::DataType::FP32); AddComment(""); } }; diff --git a/paddle/operators/roi_pool_op.cc b/paddle/operators/roi_pool_op.cc index 75fcea8401..ef1804d976 100644 --- a/paddle/operators/roi_pool_op.cc +++ b/paddle/operators/roi_pool_op.cc @@ -68,7 +68,7 @@ class ROIPoolOp : public framework::OperatorWithKernel { } protected: - framework::OpKernelType GetKernelType( + framework::OpKernelType GetActualKernelType( const framework::ExecutionContext& ctx) const override { return framework::OpKernelType( framework::ToDataType(ctx.Input("X")->type()), @@ -89,7 +89,7 @@ class ROIPoolGradOp : public framework::OperatorWithKernel { } protected: - framework::OpKernelType GetKernelType( + framework::OpKernelType GetActualKernelType( const framework::ExecutionContext& ctx) const override { return framework::OpKernelType( framework::ToDataType(ctx.Input("X")->type()), @@ -99,8 +99,7 @@ class ROIPoolGradOp : public framework::OperatorWithKernel { class ROIPoolOpMaker : public 
framework::OpProtoAndCheckerMaker { public: - ROIPoolOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker) + ROIPoolOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "(Tensor), " diff --git a/paddle/operators/row_conv_op.cc b/paddle/operators/row_conv_op.cc index 5203a5079c..68f4e35315 100644 --- a/paddle/operators/row_conv_op.cc +++ b/paddle/operators/row_conv_op.cc @@ -1,16 +1,16 @@ -/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/row_conv_op.h" #include "paddle/framework/eigen.h" @@ -76,8 +76,7 @@ class RowConvGradOp : public framework::OperatorWithKernel { class RowConvOpMaker : public framework::OpProtoAndCheckerMaker { public: - RowConvOpMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + RowConvOpMaker(OpProto *proto, OpAttrChecker *op_checker) : framework::OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "(LoDTensor), the input(X) is a LodTensor, which supports " diff --git a/paddle/operators/row_conv_op.cu b/paddle/operators/row_conv_op.cu index 56a98ff299..41f2c5b9de 100644 --- a/paddle/operators/row_conv_op.cu +++ b/paddle/operators/row_conv_op.cu @@ -1,16 +1,16 @@ -/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
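The roi_pool_op.cc hunks above, like the scatter_op.cc ones later in this patch, rename `GetKernelType` to `GetActualKernelType`; presumably the framework now distinguishes the kernel type an operator actually computes from an expected type used during kernel selection, though that refactor sits outside this diff. The bodies are unchanged: the kernel key is derived from the runtime dtype of one designated input. A standalone sketch of that dtype-keyed dispatch, with illustrative types rather than the real framework classes:

```cpp
#include <string>
#include <typeindex>
#include <typeinfo>

// Stand-ins: a kernel key built from a runtime dtype plus a place.
struct OpKernelType {
  std::type_index dtype;
  std::string place;
};

struct Tensor {
  std::type_index type() const { return typeid(float); }  // pretend runtime dtype
};

// Mirrors the pattern above: the *actual* kernel type is keyed off the
// dtype of one designated input ("X" or "Ref" in the hunks), not a fixed
// compile-time choice.
OpKernelType GetActualKernelType(const Tensor &x) {
  return OpKernelType{x.type(), "CPUPlace"};
}

int main() {
  Tensor x;
  return GetActualKernelType(x).dtype == typeid(float) ? 0 : 1;
}
```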
+See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/math/math_function.h" #include "paddle/operators/row_conv_op.h" diff --git a/paddle/operators/row_conv_op.h b/paddle/operators/row_conv_op.h index 80912ad8f7..10d435ab08 100644 --- a/paddle/operators/row_conv_op.h +++ b/paddle/operators/row_conv_op.h @@ -1,16 +1,16 @@ -/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once #include "paddle/framework/op_registry.h" diff --git a/paddle/operators/save_load_op_test.cc b/paddle/operators/save_load_op_test.cc index a57466a48d..40103d864f 100644 --- a/paddle/operators/save_load_op_test.cc +++ b/paddle/operators/save_load_op_test.cc @@ -1,16 +1,16 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ #include "gtest/gtest.h" #include "paddle/framework/op_registry.h" @@ -21,7 +21,7 @@ USE_NO_KERNEL_OP(load); TEST(SaveLoadOp, CPU) { paddle::framework::Scope scope; paddle::platform::CPUPlace place; - paddle::platform::CPUDeviceContext ctx(place); + auto var = scope.Var("test_var"); auto tensor = var->GetMutable(); tensor->Resize({10, 10}); @@ -42,13 +42,13 @@ TEST(SaveLoadOp, CPU) { auto save_op = paddle::framework::OpRegistry::CreateOp( "save", {{"X", {"test_var"}}}, {}, attrs); - save_op->Run(scope, ctx); + save_op->Run(scope, place); auto load_var = scope.Var("out_var"); auto target = load_var->GetMutable(); auto load_op = paddle::framework::OpRegistry::CreateOp( "load", {}, {{"Out", {"out_var"}}}, attrs); - load_op->Run(scope, ctx); + load_op->Run(scope, place); int* actual = target->data(); for (int64_t i = 0; i < tensor->numel(); ++i) { EXPECT_EQ(expect[i], actual[i]); diff --git a/paddle/operators/save_op.cc b/paddle/operators/save_op.cc index d4921cb80c..d045a8b5b8 100644 --- a/paddle/operators/save_op.cc +++ b/paddle/operators/save_op.cc @@ -1,16 +1,16 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ #include #include @@ -21,6 +21,7 @@ #include "paddle/framework/framework.pb.h" #include "paddle/framework/lod_tensor.h" #include "paddle/framework/op_registry.h" +#include "paddle/platform/device_context.h" namespace paddle { namespace operators { @@ -62,7 +63,7 @@ class SaveOp : public framework::OperatorBase { const framework::AttributeMap &attrs) : OperatorBase(type, inputs, outputs, attrs) {} void Run(const framework::Scope &scope, - const platform::DeviceContext &dev_ctx) const override { + const platform::Place &place) const override { auto filename = Attr("file_path"); auto overwrite = Attr("overwrite"); @@ -88,14 +89,18 @@ class SaveOp : public framework::OperatorBase { "SaveOp only support LoDTensor, %s has wrong type", iname); auto &tensor = var->Get(); + + // get device context from pool + platform::DeviceContextPool &pool = platform::DeviceContextPool::Get(); + auto &dev_ctx = *pool.Borrow(place); + framework::SerializeToStream(fout, tensor, dev_ctx); } }; class SaveOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - SaveOpProtoMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + SaveOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "(Tensor ) Input tensor to be saved"); AddComment(R"DOC( diff --git a/paddle/operators/scale_op.cc b/paddle/operators/scale_op.cc index d848be823e..f634ebe9a2 100644 --- a/paddle/operators/scale_op.cc +++ b/paddle/operators/scale_op.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ #include "paddle/operators/scale_op.h" #include "paddle/operators/net_op.h" @@ -38,7 +38,7 @@ class ScaleOp : public framework::OperatorWithKernel { template class ScaleOpMaker : public framework::OpProtoAndCheckerMaker { public: - ScaleOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) + ScaleOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "(Tensor) Input tensor of scale operator."); AddOutput("Out", "(Tensor) Output tensor of scale operator."); @@ -58,13 +58,13 @@ class ScaleGradMaker : public framework::SingleGradOpDescMaker { public: using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; - std::unique_ptr Apply() const override { - auto *grad_op = new framework::OpDescBind(); + std::unique_ptr Apply() const override { + auto *grad_op = new framework::OpDesc(); grad_op->SetType("scale"); grad_op->SetInput("X", OutputGrad("Out")); grad_op->SetOutput("Out", InputGrad("X")); grad_op->SetAttr("scale", GetAttr("scale")); - return std::unique_ptr(grad_op); + return std::unique_ptr(grad_op); } }; diff --git a/paddle/operators/scale_op.cu b/paddle/operators/scale_op.cu index 0c7980430f..7202c0de70 100644 --- a/paddle/operators/scale_op.cu +++ b/paddle/operators/scale_op.cu @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/scale_op.h" diff --git a/paddle/operators/scale_op.h b/paddle/operators/scale_op.h index 02a8c97a83..395268c2ee 100644 --- a/paddle/operators/scale_op.h +++ b/paddle/operators/scale_op.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
*/ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once diff --git a/paddle/operators/scatter.cu.h b/paddle/operators/scatter.cu.h index d95436be4f..55555300fc 100644 --- a/paddle/operators/scatter.cu.h +++ b/paddle/operators/scatter.cu.h @@ -1,16 +1,16 @@ -/* Copyright (c) 2016 PaddlePaddle Authors All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once #include "paddle/framework/tensor.h" diff --git a/paddle/operators/scatter_op.cc b/paddle/operators/scatter_op.cc index 573bbcd187..806dccc6ca 100644 --- a/paddle/operators/scatter_op.cc +++ b/paddle/operators/scatter_op.cc @@ -49,7 +49,7 @@ class ScatterOp : public framework::OperatorWithKernel { } protected: - framework::OpKernelType GetKernelType( + framework::OpKernelType GetActualKernelType( const framework::ExecutionContext& ctx) const override { return framework::OpKernelType( framework::ToDataType(ctx.Input("Ref")->type()), @@ -68,7 +68,7 @@ class ScatterGradOp : public framework::OperatorWithKernel { } protected: - framework::OpKernelType GetKernelType( + framework::OpKernelType GetActualKernelType( const framework::ExecutionContext& ctx) const override { return framework::OpKernelType( framework::ToDataType(ctx.Input("Ref")->type()), @@ -78,8 +78,7 @@ class ScatterGradOp : public framework::OperatorWithKernel { class ScatterOpMaker : public framework::OpProtoAndCheckerMaker { public: - ScatterOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker) + ScatterOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("Ref", "The source input of scatter op"); AddInput("Index", diff --git a/paddle/operators/scatter_op.cu b/paddle/operators/scatter_op.cu index 6b43a1389f..0c198d2258 100644 --- a/paddle/operators/scatter_op.cu +++ b/paddle/operators/scatter_op.cu @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "gather.cu.h" #include "paddle/operators/gather_op.h" diff --git a/paddle/operators/send_op.cc b/paddle/operators/send_op.cc index a3059847f2..6e82938683 100644 --- a/paddle/operators/send_op.cc +++ b/paddle/operators/send_op.cc @@ -1,16 +1,16 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include @@ -34,45 +34,62 @@ class SendOp : public framework::OperatorBase { const framework::AttributeMap &attrs) : OperatorBase(type, inputs, outputs, attrs) { // init client when the operator is created at runtime. - if (!client_) { - std::string endpoint = Attr<std::string>("endpoint"); - client_.reset(new detail::RPCClient( - grpc::CreateChannel(endpoint, grpc::InsecureChannelCredentials()))); - // TODO(typhoonzero): how to call InitVariables + std::vector<std::string> endpoints = + Attr<std::vector<std::string>>("endpoints"); + for (auto ep : endpoints) { + client_map_[ep].reset(new detail::RPCClient( + grpc::CreateChannel(ep, grpc::InsecureChannelCredentials()))); } } + void Run(const framework::Scope &scope, - const platform::DeviceContext &dev_ctx) const override { - auto iname = Input("X"); - auto oname = Output("Out"); - // TODO(typhoonzero): currently it's non-blocking, - // should block until server responds.
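// A minimal sketch of the call pattern the new Run() below implements, using
// only the RPCClient methods visible in this diff (SendVariable, GetVariable,
// Wait); a reading aid for the hunk, not part of the patch. One client is kept
// per parameter-server endpoint, so a single send op can ship several
// variables to different servers:
//
//   std::vector<std::string> epmap = Attr<std::vector<std::string>>("epmap");
//   for (size_t i = 0; i < ins.size(); ++i)
//     client_map_[epmap[i]]->SendVariable(scope, ins[i]);  // enqueue sends
//   client_map_[epmap[0]]->Wait();                         // block until RPCs finish
//   for (size_t i = 0; i < outs.size(); ++i)
//     client_map_[epmap[i]]->GetVariable(scope, outs[i]);  // fetch updated vars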
- bool ret = client_->SendVariable(scope, iname, oname); - if (!ret) { - LOG(ERROR) << "send variable error"; + const platform::Place &dev_place) const override { + auto ins = Inputs("X"); + auto outs = Outputs("Out"); + std::vector<std::string> epmap = Attr<std::vector<std::string>>("epmap"); + // TODO(typhoonzero): use async calls to send multiple variables asynchronously. + for (size_t i = 0; i < ins.size(); ++i) { + bool ret = client_map_[epmap[i]]->SendVariable(scope, ins[i]); + if (!ret) { + LOG(ERROR) << "send variable error: " << ins[i]; + } + } + // TODO(typhoonzero): support async optimization + client_map_[epmap[0]]->Wait(); + for (size_t i = 0; i < outs.size(); ++i) { + bool ret = client_map_[epmap[i]]->GetVariable(scope, outs[i]); + if (!ret) { + LOG(ERROR) << "GetVariable error: " << outs[i]; + } } } protected: - std::shared_ptr<detail::RPCClient> client_{nullptr}; + mutable std::unordered_map<std::string, std::shared_ptr<detail::RPCClient>> + client_map_; }; class SendOpMaker : public framework::OpProtoAndCheckerMaker { public: - SendOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) + SendOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "(Tensor) Input tensor to be saved"); - AddOutput("Out", "(Tensor) Output fetched from server"); + AddInput("X", "(Tensor) Input tensor to be sent").AsDuplicable(); + AddOutput("Out", "(Tensor) Output tensor to get from server") + .AsDuplicable(); AddComment(R"DOC( Recv operator This operator will recv tensor from send_op )DOC"); - AddAttr<std::string>("endpoint", - "(string, default 127.0.0.1:6164)" - "IP address to listen on.") - .SetDefault("127.0.0.1:6164") - .AddCustomChecker([](const std::string &ip) { return !ip.empty(); }); + AddAttr<std::vector<std::string>>("endpoints", + "(string vector, default 127.0.0.1:6164)" + "Server endpoints to send variables to.") + .SetDefault({}); + AddAttr<std::vector<std::string>>("epmap", + "(string vector, default 127.0.0.1:6164)" + "Server endpoints in the order of input " + "variables for mapping") + .SetDefault({}); } }; diff --git a/paddle/operators/send_recv_op_test.cc b/paddle/operators/send_recv_op_test.cc index 3e2e2051af..108e2dec6b 100644 --- a/paddle/operators/send_recv_op_test.cc +++ b/paddle/operators/send_recv_op_test.cc @@ -1,27 +1,26 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ - -// TODO(typhoonzero): add python bindings for this test as -// a RemoteOptimizer. +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
*/ #include +#include #include #include "gtest/gtest.h" #include "paddle/framework/op_registry.h" #include "paddle/framework/operator.h" #include "paddle/framework/program_desc.h" +#include "paddle/string/printf.h" USE_NO_KERNEL_OP(send); USE_NO_KERNEL_OP(recv); @@ -33,30 +32,33 @@ std::unique_ptr<paddle::framework::OperatorBase> recv_op; void InitTensorsInScope(paddle::framework::Scope &scope, paddle::platform::CPUPlace &place) { paddle::platform::CPUDeviceContext ctx(place); - auto var = scope.Var("X"); - auto tensor = var->GetMutable<paddle::framework::LoDTensor>(); - tensor->Resize({10, 10}); - float *expect = tensor->mutable_data<float>(place); - for (int64_t i = 0; i < tensor->numel(); ++i) { - expect[i] = static_cast<float>(i); + for (int i = 0; i < 2; ++i) { + auto var_name = paddle::string::Sprintf("x%d", i); + auto var = scope.Var(var_name); + auto tensor = var->GetMutable<paddle::framework::LoDTensor>(); + tensor->Resize({10, 10}); + float *expect = tensor->mutable_data<float>(place); + for (int64_t i = 0; i < tensor->numel(); ++i) { + expect[i] = static_cast<float>(i); + } } auto out_var = scope.Var("Out"); auto out_tensor = out_var->GetMutable<paddle::framework::LoDTensor>(); out_tensor->Resize({10, 10}); - tensor->mutable_data<float>(place); // allocate + out_tensor->mutable_data<float>(place); // allocate } void AddOp(const std::string &type, const paddle::framework::VariableNameMap &inputs, const paddle::framework::VariableNameMap &outputs, paddle::framework::AttributeMap attrs, - paddle::framework::BlockDescBind *block) { + paddle::framework::BlockDesc *block) { // insert output for (auto kv : outputs) { for (auto v : kv.second) { auto var = block->Var(v); - var->SetDataType(paddle::framework::DataType::FP32); + var->SetDataType(paddle::framework::proto::DataType::FP32); } } @@ -78,21 +80,22 @@ void StartServerNet() { InitTensorsInScope(scope, place); // sub program run in recv_op, for simple test we use sum - paddle::framework::ProgramDescBind program; - paddle::framework::BlockDescBind *block = program.MutableBlock(0); + paddle::framework::ProgramDesc program; + paddle::framework::BlockDesc *block = program.MutableBlock(0); // X for server side tensors, RX for received tensors, must be of same shape.
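// How the updated test below checks the round trip: InitTensorsInScope fills
// x0 and x1 with identical values (expect[i] = i), the server-side block runs
// sum(x0, x1) into x0, and the client sends x1 and then fetches x0, so every
// fetched element should be exactly twice the value that was sent:
//
//   x0[i] = x0[i] + x1[i] = i + i = 2 * i   // hence EXPECT_EQ(expected[i] * 2, actual[i])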
- AddOp("sum", {{"X", {"X", "RX"}}}, {{"Out", {"Out"}}}, {}, block); + AddOp("sum", {{"X", {"x0", "x1"}}}, {{"Out", {"x0"}}}, {}, block); paddle::framework::AttributeMap attrs; attrs.insert({"endpoint", std::string("127.0.0.1:6174")}); + attrs.insert({"ParamList", std::vector({"x0"})}); + attrs.insert({"GradList", std::vector({"x1"})}); std::string program_proto; PADDLE_ENFORCE(program.Proto()->SerializeToString(&program_proto)); attrs.insert({"OptimizeProgram", program_proto}); - recv_op = paddle::framework::OpRegistry::CreateOp("recv", {{"RX", {"RX"}}}, - {{"Out", {"Out"}}}, attrs); - paddle::platform::CPUDeviceContext ctx(place); - recv_op->Run(scope, ctx); + recv_op = paddle::framework::OpRegistry::CreateOp("recv", {{"RX", {"x1"}}}, + {}, attrs); + recv_op->Run(scope, place); } TEST(SendRecvOp, CPU) { @@ -104,25 +107,25 @@ TEST(SendRecvOp, CPU) { InitTensorsInScope(scope, place); paddle::framework::AttributeMap attrs; - attrs.insert({"endpoint", std::string("127.0.0.1:6174")}); - + attrs.insert({"endpoints", std::vector({"127.0.0.1:6174"})}); + attrs.insert({"epmap", std::vector({"127.0.0.1:6174"})}); auto send_op = paddle::framework::OpRegistry::CreateOp( - "send", {{"X", {"X"}}}, {{"Out", {"Out"}}}, attrs); - paddle::platform::CPUDeviceContext ctx(place); - send_op->Run(scope, ctx); + "send", {{"X", {"x1"}}}, {{"Out", {"x0"}}}, attrs); + send_op->Run(scope, place); - auto in_var = scope.Var("X"); + auto in_var = scope.Var("x1"); auto tensor = in_var->GetMutable(); float *expected = tensor->data(); - - auto out_var = scope.Var("Out"); + auto out_var = scope.Var("x0"); auto target = out_var->GetMutable(); - // send fail cause output is none. + // x1 * 2 == x0 EXPECT_NE(target->memory_size(), size_t(0)); float *actual = target->data(); for (int64_t i = 0; i < target->numel(); ++i) { EXPECT_EQ(expected[i] * 2, actual[i]); } - recv_op.reset(); // dtor can shutdown and join server thread. + + recv_op->Stop(); server_thread.join(); + // recv_op.reset(); } diff --git a/paddle/operators/seq_expand_op.cu b/paddle/operators/seq_expand_op.cu deleted file mode 100644 index 8e67ce9ccb..0000000000 --- a/paddle/operators/seq_expand_op.cu +++ /dev/null @@ -1,24 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
*/ - -#define EIGEN_USE_GPU -#include "paddle/operators/seq_expand_op.h" - -namespace ops = paddle::operators; -REGISTER_OP_CUDA_KERNEL( - seq_expand, - ops::SeqExpandKernel); -REGISTER_OP_CUDA_KERNEL( - seq_expand_grad, - ops::SeqExpandGradKernel); diff --git a/paddle/operators/sequence_concat_op.cc b/paddle/operators/sequence_concat_op.cc index 9c7e5456e8..2f0aad2003 100644 --- a/paddle/operators/sequence_concat_op.cc +++ b/paddle/operators/sequence_concat_op.cc @@ -43,8 +43,7 @@ class SequenceConcatOp : public framework::OperatorWithKernel { class SequenceConcatOpMaker : public framework::OpProtoAndCheckerMaker { public: - SequenceConcatOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker) + SequenceConcatOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "(LodTensorArray) Input is a vector of LoDTensor, " @@ -68,12 +67,12 @@ class SequenceConcatOpMaker : public framework::OpProtoAndCheckerMaker { "The level should be less than the level number of inputs.") .SetDefault(0); AddComment(R"DOC( -The sequence_concat operator concatenates multiple LoDTensors. -It only supports sequence (LoD Tensor with level number is 1) +The sequence_concat operator concatenates multiple LoDTensors. +It only supports sequence (LoD Tensor with level number is 1) or a nested sequence (LoD tensor with level number is 2) as its input. - Case1: If the axis is other than 0(here, axis is 1 and level is 1), - each input should have the same LoD information and the LoD + each input should have the same LoD information and the LoD information of the output keeps the same as the input. LoD(x0) = {{0,2,4}, {0,1,2,3,4}}; Dims(x0) = (4,3,4) @@ -81,7 +80,7 @@ or a nested sequence (LoD tensor with level number is 2) as its input. LoD(Out) = {{0,2,4}, {0,1,2,3,4}}; Dims(Out) = (4,7,4) - Case2: - If the axis is 0(here, leve is 0), the inputs are concatenated along + If the axis is 0(here, leve is 0), the inputs are concatenated along time steps, the LoD information of the output need to re-compute. The LoD information of level-1 should be same. @@ -125,8 +124,9 @@ class SequenceConcatGradOp : public framework::OperatorWithKernel { } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP(sequence_concat, ops::SequenceConcatOp, ops::SequenceConcatOpMaker, - sequence_concat_grad, ops::SequenceConcatGradOp); +REGISTER_OP_EX(sequence_concat, ops::SequenceConcatOp, + ops::SequenceConcatOpMaker, sequence_concat_grad, + ops::SequenceConcatGradOp, false); REGISTER_OP_CPU_KERNEL( sequence_concat, ops::SequenceConcatOpKernel); diff --git a/paddle/operators/sequence_conv_op.cc b/paddle/operators/sequence_conv_op.cc index f5c4f1c133..c5b7c81bd7 100644 --- a/paddle/operators/sequence_conv_op.cc +++ b/paddle/operators/sequence_conv_op.cc @@ -100,8 +100,7 @@ class SequenceConvGradOp : public framework::OperatorWithKernel { class SequenceConvOpMaker : public framework::OpProtoAndCheckerMaker { public: - SequenceConvOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker) + SequenceConvOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput( "X", diff --git a/paddle/operators/sequence_conv_op.cu.cc b/paddle/operators/sequence_conv_op.cu.cc index eacba79ace..0b8f2c6955 100644 --- a/paddle/operators/sequence_conv_op.cu.cc +++ b/paddle/operators/sequence_conv_op.cu.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. 
- Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/sequence_conv_op.h" diff --git a/paddle/operators/seq_expand_op.cc b/paddle/operators/sequence_expand_op.cc similarity index 72% rename from paddle/operators/seq_expand_op.cc rename to paddle/operators/sequence_expand_op.cc index ede9754697..b40ec617e4 100644 --- a/paddle/operators/seq_expand_op.cc +++ b/paddle/operators/sequence_expand_op.cc @@ -1,25 +1,25 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ -#include "paddle/operators/seq_expand_op.h" +#include "paddle/operators/sequence_expand_op.h" namespace paddle { namespace operators { using framework::Tensor; -class SeqExpandOp : public framework::OperatorWithKernel { +class SequenceExpandOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; @@ -35,25 +35,24 @@ class SeqExpandOp : public framework::OperatorWithKernel { } }; -class SeqExpandOpMaker : public framework::OpProtoAndCheckerMaker { +class SequenceExpandOpMaker : public framework::OpProtoAndCheckerMaker { public: - SeqExpandOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker) + SequenceExpandOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "(Tensor or LoDTensor) The input(X) of this operator can be a " "LoDTensor or a base Tensor."); AddInput("Y", - "(LoDTensor)The reference input(Y) of seq_expand op." + "(LoDTensor)The reference input(Y) of sequence_expand op." "It must be a LoDTensor with k-level(k>0)." "The input(X) will be expanded according to LOD of input(Y)." "The element numbers of last level in input(Y) " "must be equal to dims[0] of input(X)."); AddOutput("Out", - "(LodTensor)The output of seq_expand op." + "(LodTensor)The output of sequence_expand op." "The lod of output will be as same as input(Y)'s lod."); AddComment(R"DOC( -Seq Expand Operator. +Sequence Expand Operator. This operator expands input(X) according to LOD of input(Y). Following are cases to better explain how this works: @@ -124,7 +123,7 @@ then we get 2-level LoDTensor } }; -class SeqExpandOpGrad : public framework::OperatorWithKernel { +class SequenceExpandOpGrad : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; @@ -146,11 +145,11 @@ class SeqExpandOpGrad : public framework::OperatorWithKernel { } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP(seq_expand, ops::SeqExpandOp, ops::SeqExpandOpMaker, - seq_expand_grad, ops::SeqExpandOpGrad); +REGISTER_OP(sequence_expand, ops::SequenceExpandOp, ops::SequenceExpandOpMaker, + sequence_expand_grad, ops::SequenceExpandOpGrad); REGISTER_OP_CPU_KERNEL( - seq_expand, - ops::SeqExpandKernel); + sequence_expand, + ops::SequenceExpandKernel); REGISTER_OP_CPU_KERNEL( - seq_expand_grad, - ops::SeqExpandGradKernel); + sequence_expand_grad, + ops::SequenceExpandGradKernel); diff --git a/paddle/operators/sequence_expand_op.cu b/paddle/operators/sequence_expand_op.cu new file mode 100644 index 0000000000..0b9638b2ce --- /dev/null +++ b/paddle/operators/sequence_expand_op.cu @@ -0,0 +1,24 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#define EIGEN_USE_GPU +#include "paddle/operators/sequence_expand_op.h" + +namespace ops = paddle::operators; +REGISTER_OP_CUDA_KERNEL( + sequence_expand, + ops::SequenceExpandKernel); +REGISTER_OP_CUDA_KERNEL( + sequence_expand_grad, + ops::SequenceExpandGradKernel); diff --git a/paddle/operators/seq_expand_op.h b/paddle/operators/sequence_expand_op.h similarity index 83% rename from paddle/operators/seq_expand_op.h rename to paddle/operators/sequence_expand_op.h index fbee0db454..2ba628e9c3 100644 --- a/paddle/operators/seq_expand_op.h +++ b/paddle/operators/sequence_expand_op.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once @@ -24,7 +24,7 @@ namespace operators { using LoDTensor = framework::LoDTensor; template -class SeqExpandKernel : public framework::OpKernel { +class SequenceExpandKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto* x = context.Input("X"); @@ -71,7 +71,7 @@ class SeqExpandKernel : public framework::OpKernel { * * */ template -class SeqExpandGradKernel : public framework::OpKernel { +class SequenceExpandGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto* d_out = context.Input(framework::GradVarName("Out")); diff --git a/paddle/operators/sequence_pool_op.cc b/paddle/operators/sequence_pool_op.cc index 3526e45a1b..aea98744d8 100644 --- a/paddle/operators/sequence_pool_op.cc +++ b/paddle/operators/sequence_pool_op.cc @@ -37,8 +37,7 @@ class SequencePoolOp : public framework::OperatorWithKernel { class SequencePoolOpMaker : public framework::OpProtoAndCheckerMaker { public: - SequencePoolOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker) + SequencePoolOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "(LoDTensor) The variable-length input of SequencePoolOp"); AddOutput("Out", @@ -50,7 +49,7 @@ class SequencePoolOpMaker : public framework::OpProtoAndCheckerMaker { .AsIntermediate(); AddAttr( "pooltype", - "(int, default AVERAGE) the pooling pooltype of SequencePoolOp.") + "(string, default 'AVERAGE') the pooling pooltype of SequencePoolOp.") .SetDefault("AVERAGE") .InEnum({"AVERAGE", "SUM", "SQRT", "LAST", "FIRST", "MAX"}); AddComment(R"DOC( @@ -108,7 +107,7 @@ class SequencePoolGradOp : public framework::OperatorWithKernel { } protected: 
- framework::OpKernelType GetKernelType( + framework::OpKernelType GetActualKernelType( const framework::ExecutionContext& ctx) const override { return framework::OpKernelType( framework::ToDataType(ctx.Input("X")->type()), diff --git a/paddle/operators/sequence_pool_op.cu b/paddle/operators/sequence_pool_op.cu index fcd6508435..265f695935 100644 --- a/paddle/operators/sequence_pool_op.cu +++ b/paddle/operators/sequence_pool_op.cu @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #define EIGEN_USE_GPU diff --git a/paddle/operators/sequence_slice_op.cc b/paddle/operators/sequence_slice_op.cc index 481db8f9e5..98bd885490 100644 --- a/paddle/operators/sequence_slice_op.cc +++ b/paddle/operators/sequence_slice_op.cc @@ -48,7 +48,7 @@ class SequenceSliceOp : public framework::OperatorWithKernel { } protected: - framework::OpKernelType GetKernelType( + framework::OpKernelType GetActualKernelType( const framework::ExecutionContext& ctx) const override { return framework::OpKernelType( framework::ToDataType(ctx.Input("X")->type()), @@ -69,7 +69,7 @@ class SequenceSliceGradOp : public framework::OperatorWithKernel { } protected: - framework::OpKernelType GetKernelType( + framework::OpKernelType GetActualKernelType( const framework::ExecutionContext& ctx) const override { return framework::OpKernelType( framework::ToDataType(ctx.Input("X")->type()), @@ -79,8 +79,7 @@ class SequenceSliceGradOp : public framework::OperatorWithKernel { class SequenceSliceOpMaker : public framework::OpProtoAndCheckerMaker { public: - SequenceSliceOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker) + SequenceSliceOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "(LoDTensor), " diff --git a/paddle/operators/sequence_softmax_op.cc b/paddle/operators/sequence_softmax_op.cc index 37d5452e6b..b74766f012 100644 --- a/paddle/operators/sequence_softmax_op.cc +++ b/paddle/operators/sequence_softmax_op.cc @@ -33,8 +33,7 @@ class SequenceSoftmaxOp : public framework::OperatorWithKernel { class SequenceSoftmaxOpMaker : public framework::OpProtoAndCheckerMaker { public: - SequenceSoftmaxOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker) + SequenceSoftmaxOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "(LoDTensor) 1-D or 2-D input LoDTensor with the 2-nd dimension " @@ -51,10 +50,14 @@ 
input Tensor can be either [N, 1] or [N], where N is the sum of the length of all sequences. The algorithm works as follows: + for i-th sequence in a mini-batch: - $$Out(X[lod[i]:lod[i+1]], :) = - \frac{\exp(X[lod[i]:lod[i+1], :])} - {\sum(\exp(X[lod[i]:lod[i+1], :]))}$$ + +$$ +Out(X[lod[i]:lod[i+1]], :) = \ +\frac{\exp(X[lod[i]:lod[i+1], :])} \ +{\sum(\exp(X[lod[i]:lod[i+1], :]))} +$$ For example, for a mini-batch of 3 sequences with variable-length, each containing 2, 3, 2 time-steps, the lod of which is [0, 2, 5, 7], diff --git a/paddle/operators/sgd_op.cc b/paddle/operators/sgd_op.cc index 121bf60b27..a11c9624ce 100644 --- a/paddle/operators/sgd_op.cc +++ b/paddle/operators/sgd_op.cc @@ -43,7 +43,7 @@ class SGDOp : public framework::OperatorWithKernel { class SGDOpMaker : public framework::OpProtoAndCheckerMaker { public: - SGDOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) + SGDOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("Param", "(Tensor) Input parameter"); AddInput("LearningRate", "(Tensor) Learning rate of SGD"); @@ -61,43 +61,9 @@ $$param\_out = param - learning\_rate * grad$$ } }; -template -struct SparseSGDFunctor { - void operator()(const platform::CPUDeviceContext& context, - const framework::SelectedRows& input, - const framework::Tensor& learning_rate, - framework::Tensor* output) { - auto in_height = input.height(); - auto out_dims = output->dims(); - PADDLE_ENFORCE_EQ(in_height, out_dims[0]); - - auto& in_value = input.value(); - auto& in_rows = input.rows(); - - int64_t in_row_numel = in_value.numel() / in_rows.size(); - PADDLE_ENFORCE_EQ(in_row_numel, output->numel() / in_height); - - auto* in_data = in_value.data(); - auto* out_data = output->data(); - auto* lr = learning_rate.data(); - - for (size_t i = 0; i < in_rows.size(); i++) { - for (int64_t j = 0; j < in_row_numel; j++) { - out_data[in_rows[i] * in_row_numel + j] -= - lr[0] * in_data[i * in_row_numel + j]; - } - } - } -}; - -template struct SparseSGDFunctor; -template struct SparseSGDFunctor; - } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_WITHOUT_GRADIENT(sgd, ops::SGDOp, ops::SGDOpMaker); -REGISTER_OP_CPU_KERNEL( - sgd, ops::SGDOpKernel, - ops::SGDOpKernel); +REGISTER_OP_CPU_KERNEL(sgd, ops::SGDOpKernel, ops::SGDOpKernel); diff --git a/paddle/operators/sgd_op.cu b/paddle/operators/sgd_op.cu index a3c0db7e50..42f8f8b2f0 100644 --- a/paddle/operators/sgd_op.cu +++ b/paddle/operators/sgd_op.cu @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
*/ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #define EIGEN_USE_GPU #include "paddle/operators/sgd_op.h" @@ -20,6 +20,19 @@ namespace paddle { namespace operators { namespace { + +template <typename T> +__global__ void SGDKernel(const T* g, const T* p, const T* learning_rate, + const int num, T* p_out) { + T lr = learning_rate[0]; + int grid_size = blockDim.x * gridDim.x; + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num; i += grid_size) { + T g_data = g[i]; + T p_data = p[i]; + p_out[i] = p_data - lr * g_data; + } +} + template <typename T, int block_size> __global__ void SparseSGDFunctorKernel(const T* selected_rows, const int64_t* rows, @@ -41,40 +54,65 @@ __global__ void SparseSGDFunctorKernel(const T* selected_rows, } // namespace template <typename T> -struct SparseSGDFunctor<platform::CUDADeviceContext, T> { - void operator()(const platform::CUDADeviceContext& context, - const framework::SelectedRows& input, - const framework::Tensor& learning_rate, - framework::Tensor* output) { - auto in_height = input.height(); - auto out_dims = output->dims(); - PADDLE_ENFORCE_EQ(in_height, out_dims[0]); - - auto& in_value = input.value(); - auto& in_rows = input.rows(); - - int64_t in_row_numel = in_value.numel() / in_rows.size(); - PADDLE_ENFORCE_EQ(in_row_numel, output->numel() / in_height); - - auto* in_data = in_value.data<T>(); - auto* out_data = output->data<T>(); - - const int block_size = 256; - dim3 threads(block_size, 1); - dim3 grid(1, in_rows.size()); - SparseSGDFunctorKernel<T, 256><<<grid, threads, 0, context.stream()>>>( - in_data, in_rows.data(), learning_rate.data<T>(), out_data, - in_row_numel); +class SGDOpCUDAKernel : public framework::OpKernel<T> { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto* param = ctx.Input<framework::Tensor>("Param"); + auto* param_out = ctx.Output<framework::Tensor>("ParamOut"); + auto* learning_rate = ctx.Input<framework::Tensor>("LearningRate"); + + auto* grad_var = ctx.InputVar("Grad"); + // Actually, all tensors are LoDTensor except SelectedRows. + if (grad_var->IsType<framework::LoDTensor>()) { + param_out->mutable_data<T>(ctx.GetPlace()); + auto* grad = ctx.Input<framework::Tensor>("Grad"); + auto* grad_data = grad->data<T>(); + auto* param_data = param->data<T>(); + auto* param_out_data = param_out->data<T>(); + + int block = 512; + int grid = (param->numel() + block - 1) / block; + + SGDKernel<T><<<grid, block, 0, ctx.cuda_device_context().stream()>>>( + grad_data, param_data, learning_rate->data<T>(), param->numel(), + param_out_data); + + } else if (grad_var->IsType<framework::SelectedRows>()) { + // TODO(qijun): In Sparse SGD operator, in-place update is enforced. + // This manual optimization brings difficulty to track data dependency. + // It's better to find a more elegant solution.
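// Reference semantics of the SelectedRows branch that follows: only the rows
// present in the sparse gradient are updated, in place. The CUDA kernel below
// parallelizes this per-row loop (CPU form with the same names as this hunk,
// shown only as a reading aid):
//
//   for (size_t i = 0; i < in_rows.size(); ++i)       // each sparse row
//     for (int64_t j = 0; j < in_row_numel; ++j)      // each element in row
//       out_data[in_rows[i] * in_row_numel + j] -=
//           lr[0] * in_data[i * in_row_numel + j];    // in-place SGD step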
+ PADDLE_ENFORCE_EQ(param, param_out); + auto* grad = ctx.Input<framework::SelectedRows>("Grad"); + + auto in_height = grad->height(); + auto out_dims = param_out->dims(); + PADDLE_ENFORCE_EQ(in_height, out_dims[0]); + + auto& in_value = grad->value(); + auto& in_rows = grad->rows(); + + int64_t in_row_numel = in_value.numel() / in_rows.size(); + PADDLE_ENFORCE_EQ(in_row_numel, param_out->numel() / in_height); + + auto* in_data = in_value.data<T>(); + auto* out_data = param_out->data<T>(); + + const int block_size = 256; + dim3 threads(block_size, 1); + dim3 grid(1, in_rows.size()); + SparseSGDFunctorKernel< + T, 256><<<grid, threads, 0, ctx.cuda_device_context().stream()>>>( + in_data, in_rows.data(), learning_rate->data<T>(), out_data, + in_row_numel); + + } else { + PADDLE_THROW("Unsupported Variable Type of Grad"); + } } }; - -template struct SparseSGDFunctor<platform::CUDADeviceContext, float>; -template struct SparseSGDFunctor<platform::CUDADeviceContext, double>; - } // namespace operators } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP_CUDA_KERNEL( - sgd, ops::SGDOpKernel<paddle::platform::CUDADeviceContext, float>, - ops::SGDOpKernel<paddle::platform::CUDADeviceContext, double>); +REGISTER_OP_CUDA_KERNEL(sgd, ops::SGDOpCUDAKernel<float>, + ops::SGDOpCUDAKernel<double>); diff --git a/paddle/operators/sgd_op.h b/paddle/operators/sgd_op.h index c920025a91..a6c544591e 100644 --- a/paddle/operators/sgd_op.h +++ b/paddle/operators/sgd_op.h @@ -20,15 +20,7 @@ limitations under the License. */ namespace paddle { namespace operators { -template <typename DeviceContext, typename T> -struct SparseSGDFunctor { - void operator()(const DeviceContext& context, - const framework::SelectedRows& input, - const framework::Tensor& learning_rate, - framework::Tensor* output); -}; - -template <typename DeviceContext, typename T> +template <typename T> class SGDOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { @@ -45,21 +37,36 @@ class SGDOpKernel : public framework::OpKernel<T> { auto p = framework::EigenVector<T>::Flatten(*param); auto g = framework::EigenVector<T>::Flatten(*grad); auto o = framework::EigenVector<T>::Flatten(*param_out); - auto lr = framework::EigenVector<T>::Flatten(*learning_rate); - auto& place = - *ctx.template device_context<DeviceContext>().eigen_device(); + auto* lr = learning_rate->data<T>(); - Eigen::DSizes<int, 1> grad_dsize(grad->numel()); - o.device(place) = p - lr.broadcast(grad_dsize) * g; + o = p - lr[0] * g; } else if (grad_var->IsType<framework::SelectedRows>()) { // TODO(qijun): In Sparse SGD operator, in-place update is enforced. // This manual optimization brings difficulty to track data dependency. // It's better to find a more elegant solution. PADDLE_ENFORCE_EQ(param, param_out); auto* grad = ctx.Input<framework::SelectedRows>("Grad"); - SparseSGDFunctor<DeviceContext, T> functor; - functor(ctx.template device_context<DeviceContext>(), *grad, - *learning_rate, param_out); + + auto in_height = grad->height(); + auto out_dims = param_out->dims(); + PADDLE_ENFORCE_EQ(in_height, out_dims[0]); + + auto& in_value = grad->value(); + auto& in_rows = grad->rows(); + + int64_t in_row_numel = in_value.numel() / in_rows.size(); + PADDLE_ENFORCE_EQ(in_row_numel, param_out->numel() / in_height); + + auto* in_data = in_value.data<T>(); + auto* out_data = param_out->data<T>(); + auto* lr = learning_rate->data<T>(); + + for (size_t i = 0; i < in_rows.size(); i++) { + for (int64_t j = 0; j < in_row_numel; j++) { + out_data[in_rows[i] * in_row_numel + j] -= + lr[0] * in_data[i * in_row_numel + j]; + } + } } else { PADDLE_THROW("Unsupported Variable Type of Grad"); } diff --git a/paddle/operators/shrink_rnn_memory_op.cc b/paddle/operators/shrink_rnn_memory_op.cc index c380e60686..e8a4773547 100644 --- a/paddle/operators/shrink_rnn_memory_op.cc +++ b/paddle/operators/shrink_rnn_memory_op.cc @@ -1,16 +1,16 @@ -/* Copyright (c) 2016 PaddlePaddle Authors.
All Rights Reserved. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/framework/lod_rank_table.h" #include "paddle/operators/array_operator.h" #include "paddle/operators/math/math_function.h" @@ -27,11 +27,11 @@ class ShrinkRNNMemoryOp : public ArrayOp { : ArrayOp(type, inputs, outputs, attrs) {} void Run(const framework::Scope &scope, - const platform::DeviceContext &dev_ctx) const override { + const platform::Place &place) const override { auto *x_var = scope.FindVar(Input("X")); PADDLE_ENFORCE(x_var != nullptr, "Input X must be set"); auto &x_tensor = x_var->Get(); - size_t offset = this->GetOffset(scope, dev_ctx); + size_t offset = this->GetOffset(scope, place); auto *rank_table_var = scope.FindVar(Input("RankTable")); PADDLE_ENFORCE(rank_table_var != nullptr, "RankTable must be set"); auto &rank_table = rank_table_var->Get(); @@ -54,8 +54,7 @@ class ShrinkRNNMemoryOp : public ArrayOp { class ShrinkRNNMemoryOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - ShrinkRNNMemoryOpProtoMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + ShrinkRNNMemoryOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "(LoDTensor) The RNN step memory to be shrinked."); AddInput("RankTable", "(LoDRankTable) The lod_rank_table of dynamic RNN."); @@ -94,7 +93,7 @@ class ShrinkRNNMemoryGradOp : public ArrayOp { : ArrayOp(type, inputs, outputs, attrs) {} void Run(const framework::Scope &scope, - const platform::DeviceContext &dev_ctx) const override { + const platform::Place &place) const override { auto *dout_var = scope.FindVar(Input(framework::GradVarName("Out"))); auto *dx_var = scope.FindVar(Output(framework::GradVarName("X"))); PADDLE_ENFORCE(dx_var != nullptr, "Input Gradient should not be nullptr"); @@ -106,6 +105,10 @@ class ShrinkRNNMemoryGradOp : public ArrayOp { dx_tensor.Resize(x_tensor.dims()); dx_tensor.mutable_data(x_tensor.place(), x_tensor.type()); + // get device context from pool + platform::DeviceContextPool &pool = platform::DeviceContextPool::Get(); + auto &dev_ctx = *pool.Borrow(place); + if (dout_var == nullptr) { // dx_tensor fill zero math::set_constant(dev_ctx, &dx_tensor, 0.0f); } else { @@ -137,14 +140,14 @@ class ShrinkRNNGradOpMaker : public framework::SingleGradOpDescMaker { using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; protected: - std::unique_ptr Apply() const override { - auto 
*op = new framework::OpDescBind(); + std::unique_ptr Apply() const override { + auto *op = new framework::OpDesc(); op->SetType("shrink_rnn_memory_grad"); op->SetInput("X", Input("X")); op->SetInput(framework::GradVarName("Out"), OutputGrad("Out")); op->SetOutput(framework::GradVarName("X"), InputGrad("X")); op->SetAttrMap(Attrs()); - return std::unique_ptr(op); + return std::unique_ptr(op); } }; diff --git a/paddle/operators/sigmoid_cross_entropy_with_logits_op.cc b/paddle/operators/sigmoid_cross_entropy_with_logits_op.cc index b8a1bf122a..c526a88a12 100644 --- a/paddle/operators/sigmoid_cross_entropy_with_logits_op.cc +++ b/paddle/operators/sigmoid_cross_entropy_with_logits_op.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/sigmoid_cross_entropy_with_logits_op.h" @@ -86,8 +86,8 @@ class SigmoidCrossEntropyWithLogitsGradOp class SigmoidCrossEntropyWithLogitsOpMaker : public framework::OpProtoAndCheckerMaker { public: - SigmoidCrossEntropyWithLogitsOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker) + SigmoidCrossEntropyWithLogitsOpMaker(OpProto* proto, + OpAttrChecker* op_checker) : framework::OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "(Tensor, default Tensor), a 2-D tensor with shape N x D, " diff --git a/paddle/operators/sigmoid_cross_entropy_with_logits_op.cu b/paddle/operators/sigmoid_cross_entropy_with_logits_op.cu index 1b569c93ed..3f393265f4 100644 --- a/paddle/operators/sigmoid_cross_entropy_with_logits_op.cu +++ b/paddle/operators/sigmoid_cross_entropy_with_logits_op.cu @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
*/ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #define EIGEN_USE_GPU #include "paddle/operators/sigmoid_cross_entropy_with_logits_op.h" diff --git a/paddle/operators/sigmoid_cross_entropy_with_logits_op.h b/paddle/operators/sigmoid_cross_entropy_with_logits_op.h index 8fe7c5ba82..b78bcc436e 100644 --- a/paddle/operators/sigmoid_cross_entropy_with_logits_op.h +++ b/paddle/operators/sigmoid_cross_entropy_with_logits_op.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once #include "paddle/framework/eigen.h" diff --git a/paddle/operators/sign_op.cc b/paddle/operators/sign_op.cc index d5a7ccb77e..f63eaa4464 100644 --- a/paddle/operators/sign_op.cc +++ b/paddle/operators/sign_op.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ #include "paddle/operators/sign_op.h" @@ -34,7 +34,7 @@ class SignOp : public framework::OperatorWithKernel { template class SignOpMaker : public framework::OpProtoAndCheckerMaker { public: - SignOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) + SignOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "(Tensor) Input tensor of sign operator."); AddOutput("Out", "(Tensor) Output tensor of sign operator."); @@ -50,13 +50,13 @@ class SignGradMaker : public framework::SingleGradOpDescMaker { public: using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; - std::unique_ptr Apply() const override { - auto *grad_op = new framework::OpDescBind(); + std::unique_ptr Apply() const override { + auto *grad_op = new framework::OpDesc(); grad_op->SetType("scale"); grad_op->SetInput("X", OutputGrad("Out")); grad_op->SetOutput("Out", InputGrad("X")); grad_op->SetAttr("scale", 0.0f); - return std::unique_ptr(grad_op); + return std::unique_ptr(grad_op); } }; diff --git a/paddle/operators/sign_op.cu b/paddle/operators/sign_op.cu index 9bc1c65d21..f224880cff 100644 --- a/paddle/operators/sign_op.cu +++ b/paddle/operators/sign_op.cu @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/sign_op.h" diff --git a/paddle/operators/sign_op.h b/paddle/operators/sign_op.h index 2e476ed665..9fe49ae1a2 100644 --- a/paddle/operators/sign_op.h +++ b/paddle/operators/sign_op.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
*/ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once diff --git a/paddle/operators/smooth_l1_loss_op.cc b/paddle/operators/smooth_l1_loss_op.cc index 56e8d9058f..dcb18d729d 100644 --- a/paddle/operators/smooth_l1_loss_op.cc +++ b/paddle/operators/smooth_l1_loss_op.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/smooth_l1_loss_op.h" @@ -47,8 +47,7 @@ class SmoothL1LossOp : public framework::OperatorWithKernel { template class SmoothL1LossOpMaker : public framework::OpProtoAndCheckerMaker { public: - SmoothL1LossOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker) + SmoothL1LossOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "(Tensor, default Tensor) A tensor with rank at least 2. " diff --git a/paddle/operators/smooth_l1_loss_op.cu b/paddle/operators/smooth_l1_loss_op.cu index 8e94ebac64..213429bc37 100644 --- a/paddle/operators/smooth_l1_loss_op.cu +++ b/paddle/operators/smooth_l1_loss_op.cu @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ #define EIGEN_USE_GPU diff --git a/paddle/operators/smooth_l1_loss_op.h b/paddle/operators/smooth_l1_loss_op.h index 1a70c9c63c..3facfae116 100644 --- a/paddle/operators/smooth_l1_loss_op.h +++ b/paddle/operators/smooth_l1_loss_op.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once #include "paddle/framework/eigen.h" diff --git a/paddle/operators/softmax_op.cc b/paddle/operators/softmax_op.cc index 0988c83d43..e7306bc5f1 100644 --- a/paddle/operators/softmax_op.cc +++ b/paddle/operators/softmax_op.cc @@ -24,25 +24,24 @@ class SoftmaxOp : public framework::OperatorWithKernel { void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of SoftmaxOp should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("Y"), - "Output(Y) of SoftmaxOp should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("Out"), + "Output(Out) of SoftmaxOp should not be null."); auto x_dims = ctx->GetInputDim("X"); PADDLE_ENFORCE(x_dims.size() == 2UL, "The input of softmax op must be a matrix."); - ctx->SetOutputDim("Y", x_dims); + ctx->SetOutputDim("Out", x_dims); } }; class SoftmaxOpMaker : public framework::OpProtoAndCheckerMaker { public: - SoftmaxOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker) + SoftmaxOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "The input tensor of softmax. " "2-D with shape [batch_size, input_feature_dimensions]."); - AddOutput("Y", "The normalized values with the same shape as X."); + AddOutput("Out", "The normalized values with the same shape as X."); AddComment(R"DOC( Softmax Operator. @@ -60,7 +59,7 @@ exponential values of all the other dimensions is the output of the softmax operator. 
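+For instance (illustrative numbers, not taken from any test in this patch): an input row $[0, \ln 2, \ln 3]$ has exponentials $[1, 2, 3]$, which sum to $6$, so the corresponding output row is $[1/6, 2/6, 3/6]$.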
For each row $i$ and each column $j$ in Input(X), we have: - $$Y[i, j] = \frac{\exp(X[i, j])}{\sum_j(exp(X[i, j])}$$ + $$Out[i, j] = \frac{\exp(X[i, j])}{\sum_j \exp(X[i, j])}$$ )DOC"); } @@ -71,12 +70,12 @@ class SoftmaxOpGrad : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) should be not null."); - PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Y")), - "Input(Y@GRAD) should be not null."); - PADDLE_ENFORCE_EQ(ctx->GetInputDim("Y"), - ctx->GetInputDim(framework::GradVarName("Y")), - "Input(Y) and its gradients should have a same shape."); + PADDLE_ENFORCE(ctx->HasInput("Out"), "Input(Out) should not be null."); + PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), + "Input(Out@GRAD) should not be null."); + PADDLE_ENFORCE_EQ(ctx->GetInputDim("Out"), + ctx->GetInputDim(framework::GradVarName("Out")), + "Input(Out) and its gradients should have the same shape."); ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X")); } diff --git a/paddle/operators/softmax_op.cu.cc b/paddle/operators/softmax_op.cu.cc index 7b9882cbcf..e7da40f3e8 100644 --- a/paddle/operators/softmax_op.cu.cc +++ b/paddle/operators/softmax_op.cu.cc @@ -1,16 +1,16 @@ -/* Copyright (c) 2016 PaddlePaddle Authors All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/softmax_op.h" diff --git a/paddle/operators/softmax_op.h b/paddle/operators/softmax_op.h index 0f8998b99e..63e379a3b3 100644 --- a/paddle/operators/softmax_op.h +++ b/paddle/operators/softmax_op.h @@ -26,13 +26,13 @@ class SoftmaxKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto* X = context.Input("X"); - auto* Y = context.Output("Y"); + auto* Out = context.Output("Out"); // allocate memory on device.
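+ // Sketch of what the shared SoftmaxFunctor computes for every row i + // (a restatement of the formula documented in softmax_op.cc, not new behavior): + // Out[i][j] = exp(X[i][j]) / sum_j exp(X[i][j])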
- Y->mutable_data(context.GetPlace()); + Out->mutable_data(context.GetPlace()); math::SoftmaxFunctor()( - context.template device_context(), X, Y); + context.template device_context(), X, Out); } }; @@ -40,15 +40,15 @@ template class SoftmaxGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { - auto* Y = context.Input("Y"); - auto* dY = context.Input(framework::GradVarName("Y")); + auto* Out = context.Input("Out"); + auto* dOut = context.Input(framework::GradVarName("Out")); auto* dX = context.Output(framework::GradVarName("X")); // allocate memory on device. dX->mutable_data(context.GetPlace()); math::SoftmaxGradFunctor()( - context.template device_context(), Y, dY, dX); + context.template device_context(), Out, dOut, dX); } }; diff --git a/paddle/operators/softmax_with_cross_entropy_op.cc b/paddle/operators/softmax_with_cross_entropy_op.cc index 0c30228863..41e65b701e 100644 --- a/paddle/operators/softmax_with_cross_entropy_op.cc +++ b/paddle/operators/softmax_with_cross_entropy_op.cc @@ -1,10 +1,10 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at -http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -20,8 +20,7 @@ namespace operators { class SoftmaxWithCrossEntropyOpMaker : public framework::OpProtoAndCheckerMaker { public: - SoftmaxWithCrossEntropyOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker) + SoftmaxWithCrossEntropyOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("Logits", "(Tensor, default: Tensor), The unscaled log probabilities " @@ -119,7 +118,7 @@ class SoftmaxWithCrossEntropyOp : public framework::OperatorWithKernel { } protected: - framework::OpKernelType GetKernelType( + framework::OpKernelType GetActualKernelType( const framework::ExecutionContext& ctx) const override { return framework::OpKernelType( framework::ToDataType(ctx.Input("Logits")->type()), @@ -160,7 +159,7 @@ class SoftmaxWithCrossEntropyOpGrad : public framework::OperatorWithKernel { } protected: - framework::OpKernelType GetKernelType( + framework::OpKernelType GetActualKernelType( const framework::ExecutionContext& ctx) const override { return framework::OpKernelType( framework::ToDataType( @@ -174,8 +173,8 @@ class SoftmaxGradMaker : public framework::SingleGradOpDescMaker { using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; protected: - std::unique_ptr Apply() const override { - auto* grad_op = new framework::OpDescBind(); + std::unique_ptr Apply() const override { + auto* grad_op = new framework::OpDesc(); grad_op->SetType("softmax_with_cross_entropy_grad"); grad_op->SetInput("Label", Input("Label")); grad_op->SetInput("Softmax", Output("Softmax")); @@ -184,7 +183,7 @@ class SoftmaxGradMaker : public framework::SingleGradOpDescMaker { grad_op->SetInput(framework::GradVarName("Loss"), OutputGrad("Loss")); grad_op->SetOutput(framework::GradVarName("Logits"), InputGrad("Logits")); grad_op->SetAttrMap(Attrs()); - 
return std::unique_ptr(grad_op); + return std::unique_ptr(grad_op); } }; diff --git a/paddle/operators/softmax_with_cross_entropy_op.cu b/paddle/operators/softmax_with_cross_entropy_op.cu index 6100c63f9a..61583c6161 100644 --- a/paddle/operators/softmax_with_cross_entropy_op.cu +++ b/paddle/operators/softmax_with_cross_entropy_op.cu @@ -1,10 +1,10 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at -http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/paddle/operators/softmax_with_cross_entropy_op.h b/paddle/operators/softmax_with_cross_entropy_op.h index 9c3431605b..6bde0f37e0 100644 --- a/paddle/operators/softmax_with_cross_entropy_op.h +++ b/paddle/operators/softmax_with_cross_entropy_op.h @@ -1,10 +1,10 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at -http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/paddle/operators/split_lod_tensor_op.cc b/paddle/operators/split_lod_tensor_op.cc index f164a47711..89826ca6ee 100644 --- a/paddle/operators/split_lod_tensor_op.cc +++ b/paddle/operators/split_lod_tensor_op.cc @@ -14,6 +14,7 @@ limitations under the License. 
*/ #include "paddle/framework/op_registry.h" #include "paddle/memory/memcpy.h" +#include "paddle/platform/device_context.h" namespace paddle { namespace operators { @@ -33,7 +34,7 @@ class SplitLoDTensorOp : public framework::OperatorBase { const framework::AttributeMap &attrs) : OperatorBase(type, inputs, outputs, attrs) {} void Run(const framework::Scope &scope, - const platform::DeviceContext &dev_ctx) const override { + const platform::Place &dev_place) const override { auto &x = scope.FindVar(Input("X"))->Get(); auto &mask = scope.FindVar(Input("Mask"))->Get(); auto *out_true = @@ -44,6 +45,9 @@ class SplitLoDTensorOp : public framework::OperatorBase { auto &x_lod = x.lod(); auto &mask_dim = mask.dims(); + platform::DeviceContextPool &pool = platform::DeviceContextPool::Get(); + auto &dev_ctx = *pool.Borrow(dev_place); + std::unique_ptr cpu_mask{new framework::LoDTensor()}; if (platform::is_cpu_place(mask.place())) { cpu_mask->ShareDataWith(mask); @@ -118,8 +122,7 @@ class SplitLoDTensorOp : public framework::OperatorBase { class SplitLoDTensorOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - SplitLoDTensorOpProtoMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + SplitLoDTensorOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "The input LoDTensor"); AddInput("Mask", "A bool column vector which mask the input"); @@ -164,8 +167,8 @@ class SplitLoDTensorArrayGradMaker : public framework::SingleGradOpDescMaker { using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; protected: - std::unique_ptr Apply() const override { - auto *grad_op = new framework::OpDescBind(); + std::unique_ptr Apply() const override { + auto *grad_op = new framework::OpDesc(); grad_op->SetType("merge_lod_tensor"); grad_op->SetInput("InTrue", OutputGrad("OutTrue")); grad_op->SetInput("InFalse", OutputGrad("OutFalse")); @@ -173,7 +176,7 @@ class SplitLoDTensorArrayGradMaker : public framework::SingleGradOpDescMaker { grad_op->SetInput("X", Input("X")); grad_op->SetOutput("Out", InputGrad("X")); grad_op->SetAttrMap(Attrs()); - return std::unique_ptr(grad_op); + return std::unique_ptr(grad_op); } }; diff --git a/paddle/operators/split_op.cc b/paddle/operators/split_op.cc index 275b25e96a..4dfae043cb 100644 --- a/paddle/operators/split_op.cc +++ b/paddle/operators/split_op.cc @@ -65,7 +65,7 @@ class SplitOp : public framework::OperatorWithKernel { class SplitOpMaker : public framework::OpProtoAndCheckerMaker { public: - SplitOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) + SplitOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "(Tensor) Input tensor of the split operator."); AddOutput("Out", "(Tensor) Output tensors of the split operator.") @@ -108,13 +108,13 @@ class SplitGradMaker : public framework::SingleGradOpDescMaker { using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; protected: - std::unique_ptr Apply() const override { - auto op = new framework::OpDescBind(); + std::unique_ptr Apply() const override { + auto op = new framework::OpDesc(); op->SetType("concat"); op->SetInput("X", OutputGrad("Out")); op->SetOutput("Out", InputGrad("X")); op->SetAttrMap(Attrs()); - return std::unique_ptr(op); + return std::unique_ptr(op); } }; diff --git a/paddle/operators/spp_op.cc b/paddle/operators/spp_op.cc new file mode 100644 index 0000000000..c0aa87b0f0 --- /dev/null +++ b/paddle/operators/spp_op.cc @@ -0,0 +1,99 
@@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/spp_op.h" +namespace paddle { +namespace operators { + +class SppOpMaker : public framework::OpProtoAndCheckerMaker { + public: + SppOpMaker(OpProto* proto, OpAttrChecker* op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput( + "X", + "(Tensor) The input tensor of spp operator. " + "The format of input tensor is NCHW. Where N is batch size, C is the " + "number of channels, H and W are the height and width of the feature."); + AddOutput("Out", + "(Tensor) The output tensor of spp operator, " + "with shape N * M, where M = ((4^pyramid_height - 1) / 3) * C."); + AddAttr("pyramid_height", "(int), number of pooling levels in the spatial pyramid"); + AddAttr( + "pooling_type", + "(string), pooling type, can be \"max\" for max-pooling " + "and \"avg\" for average-pooling.") + .InEnum({"max", "avg"}); + AddComment(R"DOC( + With spatial pyramid pooling, the input image can + be of any size. This not only allows arbitrary aspect + ratios, but also allows arbitrary scales. We can resize + the input image to any scale (e.g., min(w, h)=180, 224, + ...) and apply the same deep network. When the + input image is at different scales, the network (with + the same filter sizes) will extract features at different + scales. The scales play important roles in traditional + methods.
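+ + As a worked example (hypothetical sizes, not part of this patch): with + pyramid_height = 3 the input is pooled over 1x1, 2x2 and 4x4 grids, + i.e. 4^0 + 4^1 + 4^2 = 21 bins per channel, so an input of shape + (N, C, H, W) yields an output of shape (N, 21 * C) for any H and W.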
+ Input shape: $(N, C_{in}, H_{in}, W_{in})$ + Output shape: $(H_{out}, W_{out})$ + Where + $$ + H_{out} = N \\ + W_{out} = \frac{4^{pyramid\_height} - 1}{4 - 1} \times C_{in} + $$ + Paper: https://arxiv.org/pdf/1406.4729v4.pdf + )DOC"); + } +}; + +class SppOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + void InferShape(framework::InferShapeContext* ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("X"), + "Input(X) of SppOp " + "should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("Out"), + "Output(Out) of SppOp should not be null."); + auto in_x_dims = ctx->GetInputDim("X"); + int pyramid_height = ctx->Attrs().Get("pyramid_height"); + PADDLE_ENFORCE(in_x_dims.size() == 4, + "Input of spp op must be 4-dimensional."); + int outlen = ((std::pow(4, pyramid_height) - 1) / (4 - 1)) * in_x_dims[1]; + std::vector output_shape({in_x_dims[0], outlen}); + ctx->SetOutputDim("Out", framework::make_ddim(output_shape)); + } +}; + +class SppOpGrad : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + void InferShape(framework::InferShapeContext* ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) must not be null."); + PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")), + "Output(X@GRAD) should not be null."); + ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X")); + } +}; +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP(spp, ops::SppOp, ops::SppOpMaker, spp_grad, ops::SppOpGrad); +REGISTER_OP_CPU_KERNEL( + spp, ops::SppKernel, + ops::SppKernel); +REGISTER_OP_CPU_KERNEL( + spp_grad, ops::SppGradKernel, + ops::SppGradKernel); diff --git a/paddle/operators/spp_op.cu.cc b/paddle/operators/spp_op.cu.cc new file mode 100644 index 0000000000..761e4d6c4a --- /dev/null +++ b/paddle/operators/spp_op.cu.cc @@ -0,0 +1,23 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/spp_op.h" + +namespace ops = paddle::operators; +REGISTER_OP_CUDA_KERNEL( + spp, ops::SppKernel, + ops::SppKernel); +REGISTER_OP_CUDA_KERNEL( + spp_grad, ops::SppGradKernel, + ops::SppGradKernel); diff --git a/paddle/operators/spp_op.h b/paddle/operators/spp_op.h new file mode 100644 index 0000000000..f35b305d02 --- /dev/null +++ b/paddle/operators/spp_op.h @@ -0,0 +1,161 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once +#include "paddle/framework/op_registry.h" +#include "paddle/operators/math/math_function.h" +#include "paddle/operators/math/pooling.h" +#include "paddle/operators/strided_memcpy.h" + +namespace paddle { +namespace operators { +template +class SppKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + const framework::Tensor* in_x = context.Input("X"); + auto* out = context.Output("Out"); + int pyramid_height = context.template Attr("pyramid_height"); + std::string pooling_type = + context.template Attr("pooling_type"); + out->mutable_data(context.GetPlace()); + auto out_stride = framework::stride(out->dims()); + int input_h = in_x->dims()[2]; + int input_w = in_x->dims()[3]; + size_t output_offset = 0; + for (int p = 0; p < pyramid_height; ++p) { + int bins = std::pow(2, p); + int kernel_size_h = std::ceil(input_h / static_cast(bins)); + int kernel_size_w = std::ceil(input_w / static_cast(bins)); + int padding_h = (kernel_size_h * bins - input_h + 1) / 2; + int padding_w = (kernel_size_w * bins - input_w + 1) / 2; + std::vector kernel_size({kernel_size_h, kernel_size_w}); + std::vector strides({kernel_size_h, kernel_size_w}); + std::vector paddings({padding_h, padding_w}); + // pooling output shape + framework::Tensor out_level; + std::vector output_shape_vec( + {in_x->dims()[0], in_x->dims()[1], bins, bins}); + framework::DDim output_shape(framework::make_ddim(output_shape_vec)); + out_level.mutable_data(output_shape, context.GetPlace()); + // pooling + if (pooling_type == "max") { + math::Pool2dFunctor, T> pool_forward; + math::MaxPool max_process; + pool_forward(context.template device_context(), *in_x, + kernel_size, strides, paddings, max_process, &out_level); + } else if (pooling_type == "avg") { + math::Pool2dFunctor, T> pool_forward; + math::AvgPool avg_process; + pool_forward(context.template device_context(), *in_x, + kernel_size, strides, paddings, avg_process, &out_level); + } + // flatten pooling output shape + int output_flatten_w = in_x->dims()[1] * bins * bins; + std::vector output_flatten_shape_vec( + {in_x->dims()[0], output_flatten_w}); + framework::DDim output_flatten_shape( + framework::make_ddim(output_flatten_shape_vec)); + out_level.Resize(output_flatten_shape); + // concat + auto out_level_stride = framework::stride(out_level.dims()); + StridedMemcpy(context.template device_context(), + out_level.data(), out_level_stride, out_level.dims(), + out_stride, out->data() + output_offset); + output_offset += out_level.dims()[1] * out_level_stride[1]; + } + } +}; +template +class SppGradKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + const framework::Tensor* in_x = context.Input("X"); + const framework::Tensor* out = context.Input("Out"); + const framework::Tensor* out_grad = + context.Input(framework::GradVarName("Out")); + framework::Tensor* in_x_grad = + context.Output(framework::GradVarName("X")); + int pyramid_height = context.template Attr("pyramid_height"); + std::string pooling_type = + context.template Attr("pooling_type"); + auto& device_ctx = context.template device_context(); + math::SetConstant zero; + in_x_grad->mutable_data(context.GetPlace()); + zero(device_ctx, in_x_grad, static_cast(0)); + auto out_stride = framework::stride(out->dims()); + int input_h = in_x->dims()[2]; + int input_w = 
in_x->dims()[3]; + size_t out_offset = 0; + for (int p = 0; p < pyramid_height; ++p) { + int bins = std::pow(2, p); + int kernel_size_h = std::ceil(input_h / static_cast(bins)); + int kernel_size_w = std::ceil(input_w / static_cast(bins)); + int padding_h = (kernel_size_h * bins - input_h + 1) / 2; + int padding_w = (kernel_size_w * bins - input_w + 1) / 2; + std::vector kernel_size({kernel_size_h, kernel_size_w}); + std::vector strides({kernel_size_h, kernel_size_w}); + std::vector paddings({padding_h, padding_w}); + // split out and outgrad ... to flatten + framework::Tensor out_level; + framework::Tensor outgrad_level; + int out_flatten_w = in_x->dims()[1] * bins * bins; + std::vector out_flatten_shape_vec( + {in_x->dims()[0], out_flatten_w}); + framework::DDim out_flatten_shape( + framework::make_ddim(out_flatten_shape_vec)); + out_level.mutable_data(out_flatten_shape, context.GetPlace()); + outgrad_level.mutable_data(out_flatten_shape, context.GetPlace()); + auto flatten_stride = framework::stride(out_level.dims()); + // memcpy + StridedMemcpy(context.template device_context(), + out->data() + out_offset, out_stride, + out_level.dims(), flatten_stride, out_level.data()); + + StridedMemcpy(context.template device_context(), + out_grad->data() + out_offset, out_stride, + outgrad_level.dims(), flatten_stride, + outgrad_level.data()); + out_offset += out_level.dims()[1] * out_stride[1]; + // flatten backward to nchw + + std::vector out_shape_vec({in_x->dims()[0], in_x->dims()[1]}); + out_shape_vec.push_back( + (input_h - kernel_size_h + 2 * padding_h) / kernel_size_h + 1); + out_shape_vec.push_back( + (input_w - kernel_size_w + 2 * padding_w) / kernel_size_w + 1); + framework::DDim out_shape(framework::make_ddim(out_shape_vec)); + out_level.ShareDataWith(out_level); + out_level.Resize(out_shape); + outgrad_level.ShareDataWith(outgrad_level); + outgrad_level.Resize(out_shape); + // pooling backward + if (pooling_type == "max") { + math::MaxPool2dGradFunctor pool2d_backward; + pool2d_backward(context.template device_context(), *in_x, + *&out_level, *&outgrad_level, kernel_size, strides, + paddings, in_x_grad); + } else if (pooling_type == "avg") { + math::Pool2dGradFunctor, T> + pool_backward; + math::AvgPoolGrad avg_process; + pool_backward(context.template device_context(), *in_x, + *&out_level, *&outgrad_level, kernel_size, strides, + paddings, avg_process, in_x_grad); + } + } + } +}; +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/squared_l2_distance_op.cc b/paddle/operators/squared_l2_distance_op.cc index 50bc6da196..9e097176f3 100644 --- a/paddle/operators/squared_l2_distance_op.cc +++ b/paddle/operators/squared_l2_distance_op.cc @@ -56,8 +56,7 @@ class SquaredL2DistanceOp : public framework::OperatorWithKernel { class SquaredL2DistanceOpMaker : public framework::OpProtoAndCheckerMaker { public: - SquaredL2DistanceOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker) + SquaredL2DistanceOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "(Tensor) Input of SquaredL2DistanceOp."); AddInput("Y", "(Tensor) Target of SquaredL2DistanceOp."); diff --git a/paddle/operators/squared_l2_distance_op.cu b/paddle/operators/squared_l2_distance_op.cu index ecc82ed1e4..f2648dde5e 100644 --- a/paddle/operators/squared_l2_distance_op.cu +++ b/paddle/operators/squared_l2_distance_op.cu @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. 
- Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #define EIGEN_USE_GPU diff --git a/paddle/operators/squared_l2_norm_op.cc b/paddle/operators/squared_l2_norm_op.cc index 3cff61a02f..6626bf0375 100644 --- a/paddle/operators/squared_l2_norm_op.cc +++ b/paddle/operators/squared_l2_norm_op.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/squared_l2_norm_op.h" @@ -48,8 +48,7 @@ class SquaredL2NormGradOp : public framework::OperatorWithKernel { class SquaredL2NormOpMaker : public framework::OpProtoAndCheckerMaker { public: - SquaredL2NormOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker) + SquaredL2NormOpMaker(OpProto* proto, OpAttrChecker* op_checker) : framework::OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "(Tensor) The input of squared_l2_norm op."); AddOutput("Out", "(Scalar) The output of squared_l2_norm op."); diff --git a/paddle/operators/squared_l2_norm_op.cu b/paddle/operators/squared_l2_norm_op.cu index 2d6567d090..b222113a8c 100644 --- a/paddle/operators/squared_l2_norm_op.cu +++ b/paddle/operators/squared_l2_norm_op.cu @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #define EIGEN_USE_GPU #include "paddle/operators/squared_l2_norm_op.h" diff --git a/paddle/operators/squared_l2_norm_op.h b/paddle/operators/squared_l2_norm_op.h index 0ced7e7d70..1ce26c775e 100644 --- a/paddle/operators/squared_l2_norm_op.h +++ b/paddle/operators/squared_l2_norm_op.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once #include "paddle/framework/eigen.h" diff --git a/paddle/operators/strided_memcpy.h b/paddle/operators/strided_memcpy.h index c9dd805184..735cabcd97 100644 --- a/paddle/operators/strided_memcpy.h +++ b/paddle/operators/strided_memcpy.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
*/ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once #include "paddle/operators/detail/strided_memcpy.h" diff --git a/paddle/operators/strided_memcpy_test.cc b/paddle/operators/strided_memcpy_test.cc index 68f064eaee..06d8118855 100644 --- a/paddle/operators/strided_memcpy_test.cc +++ b/paddle/operators/strided_memcpy_test.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/strided_memcpy.h" #include "gtest/gtest.h" @@ -82,11 +82,13 @@ TEST(StridedMemcpy, GPUCrop) { }; // clang-format on - platform::GPUPlace gpu0(0); + platform::CUDAPlace gpu0(0); platform::CPUPlace cpu; + platform::CUDADeviceContext ctx(gpu0); + int* gpu_src = reinterpret_cast(memory::Alloc(gpu0, sizeof(src))); - memory::Copy(gpu0, gpu_src, cpu, src, sizeof(src)); + memory::Copy(gpu0, gpu_src, cpu, src, sizeof(src), ctx.stream()); framework::DDim src_stride({5, 1}); @@ -96,7 +98,6 @@ TEST(StridedMemcpy, GPUCrop) { framework::DDim dst_dim({2, 2}); framework::DDim dst_stride({2, 1}); - platform::CUDADeviceContext ctx(gpu0); StridedMemcpy(ctx, gpu_src + 1, src_stride, dst_dim, dst_stride, gpu_dst); @@ -120,11 +121,12 @@ TEST(StridedMemcpy, GPUConcat) { }; // clang-format on - platform::GPUPlace gpu0(0); + platform::CUDAPlace gpu0(0); platform::CPUPlace cpu; + platform::CUDADeviceContext ctx(gpu0); int* gpu_src = reinterpret_cast(memory::Alloc(gpu0, sizeof(src))); - memory::Copy(gpu0, gpu_src, cpu, src, sizeof(src)); + memory::Copy(gpu0, gpu_src, cpu, src, sizeof(src), ctx.stream()); int dst[8]; int* gpu_dst = reinterpret_cast(memory::Alloc(gpu0, sizeof(dst))); @@ -132,7 +134,6 @@ TEST(StridedMemcpy, GPUConcat) { framework::DDim src_stride({2, 1}); framework::DDim dst_dim({2, 2}); framework::DDim dst_stride({4, 1}); - platform::CUDADeviceContext ctx(gpu0); StridedMemcpy(ctx, gpu_src, src_stride, dst_dim, dst_stride, gpu_dst); StridedMemcpy(ctx, gpu_src, src_stride, dst_dim, dst_stride, diff --git a/paddle/operators/sum_op.cc b/paddle/operators/sum_op.cc index cd52672f78..b86e826642 100644 --- a/paddle/operators/sum_op.cc +++ b/paddle/operators/sum_op.cc @@ -29,7 +29,7 @@ class SumOp : public framework::OperatorWithKernel { "Output(Out) of SumOp 
should not be null."); if (ctx->IsRuntime() && ctx->GetOutputsVarType("Out")[0] == - framework::VarDesc::LOD_TENSOR_ARRAY) { + framework::proto::VarDesc::LOD_TENSOR_ARRAY) { return; // skip runtime infershape when is tensor array; } @@ -53,7 +53,7 @@ class SumOp : public framework::OperatorWithKernel { } protected: - framework::OpKernelType GetKernelType( + framework::OpKernelType GetActualKernelType( const framework::ExecutionContext& ctx) const override { auto x_vars = ctx.MultiInputVar("X"); if (x_vars[0]->IsType()) { @@ -72,8 +72,8 @@ class SumOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_NE(dtype, -1, "Sum operator should have at least one tensor"); - return framework::OpKernelType(static_cast(dtype), - ctx.device_context()); + return framework::OpKernelType( + static_cast(dtype), ctx.device_context()); } else if (x_vars[0]->IsType()) { return framework::OpKernelType( framework::ToDataType( @@ -98,7 +98,7 @@ class SumOp : public framework::OperatorWithKernel { class SumOpMaker : public framework::OpProtoAndCheckerMaker { public: - SumOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) + SumOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "(vector) The input tensors of sum operator.") .AsDuplicable(); @@ -106,8 +106,8 @@ class SumOpMaker : public framework::OpProtoAndCheckerMaker { AddComment(R"DOC( Sum operator. -This operators sums the input tensors. All the inputs can carry the -LoD (Level of Details) information. However, the output only shares +This operator sums the input tensors. All the inputs can carry the +LoD (Level of Details) information. However, the output only shares the LoD information with the first input. )DOC"); } @@ -115,10 +115,10 @@ the LoD information with the first input.
class SumOpVarTypeInference : public framework::VarTypeInference { public: - void operator()(const framework::OpDescBind& op_desc, - framework::BlockDescBind* block) const override { + void operator()(const framework::OpDesc& op_desc, + framework::BlockDesc* block) const override { auto& inputs = op_desc.Input("X"); - auto var_type = framework::VarDesc::SELECTED_ROWS; + auto var_type = framework::proto::VarDesc::SELECTED_ROWS; for (auto& name : op_desc.Input("X")) { VLOG(10) << name << " " @@ -128,12 +128,12 @@ class SumOpVarTypeInference : public framework::VarTypeInference { bool any_input_is_lod_tensor = std::any_of( inputs.begin(), inputs.end(), [block](const std::string& name) { return block->FindRecursiveOrCreateVar(name)->GetType() == - framework::VarDesc::LOD_TENSOR; + framework::proto::VarDesc::LOD_TENSOR; }); auto is_tensor_array = [block](const std::string& name) { return detail::Ref(block->FindRecursiveOrCreateVar(name)).GetType() == - framework::VarDesc::LOD_TENSOR_ARRAY; + framework::proto::VarDesc::LOD_TENSOR_ARRAY; }; bool any_input_is_tensor_array = @@ -152,9 +152,9 @@ class SumOpVarTypeInference : public framework::VarTypeInference { PADDLE_ENFORCE(all_inputs_are_tensor_array, "Not all inputs are tensor array:\n%s", os.str()); } - var_type = framework::VarDesc::LOD_TENSOR_ARRAY; + var_type = framework::proto::VarDesc::LOD_TENSOR_ARRAY; } else if (any_input_is_lod_tensor) { - var_type = framework::VarDesc::LOD_TENSOR; + var_type = framework::proto::VarDesc::LOD_TENSOR; } auto out_var_name = op_desc.Output("Out").front(); @@ -169,20 +169,19 @@ class SumGradMaker : public framework::GradOpDescMakerBase { public: using framework::GradOpDescMakerBase::GradOpDescMakerBase; - std::vector> operator()() - const override { - auto x_grads = InputGrad("X"); - std::vector> grad_ops; + std::vector> operator()() const override { + auto x_grads = InputGrad("X", false); + std::vector> grad_ops; grad_ops.reserve(x_grads.size()); auto og = OutputGrad("Out"); std::transform(x_grads.begin(), x_grads.end(), std::back_inserter(grad_ops), [&og](const std::string& x_grad) { - auto* grad_op = new framework::OpDescBind(); + auto* grad_op = new framework::OpDesc(); grad_op->SetType("scale"); grad_op->SetInput("X", og); grad_op->SetOutput("Out", {x_grad}); grad_op->SetAttr("scale", 1.0f); - return std::unique_ptr(grad_op); + return std::unique_ptr(grad_op); }); return grad_ops; } diff --git a/paddle/operators/tensor_array_read_write_op.cc b/paddle/operators/tensor_array_read_write_op.cc index 2835b84f75..9529aab573 100644 --- a/paddle/operators/tensor_array_read_write_op.cc +++ b/paddle/operators/tensor_array_read_write_op.cc @@ -1,16 +1,16 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/array_operator.h" #include "paddle/operators/detail/safe_ref.h" namespace paddle { @@ -25,11 +25,11 @@ class WriteToArrayOp : public ArrayOp { : ArrayOp(type, inputs, outputs, attrs) {} void Run(const framework::Scope &scope, - const platform::DeviceContext &dev_ctx) const override { + const platform::Place &place) const override { auto *x = scope.FindVar(Input("X")); if (x == nullptr) return; auto &x_tensor = x->Get(); - size_t offset = GetOffset(scope, dev_ctx); + size_t offset = GetOffset(scope, place); auto *out = scope.FindVar(Output("Out"))->GetMutable(); if (offset >= out->size()) { @@ -39,7 +39,11 @@ class WriteToArrayOp : public ArrayOp { } if (x_tensor.memory_size() > 0) { auto *out_tensor = &out->at(offset); - CopyFrom(x_tensor, dev_ctx.GetPlace(), dev_ctx, out_tensor); + + platform::DeviceContextPool &pool = platform::DeviceContextPool::Get(); + auto &dev_ctx = *pool.Borrow(place); + + CopyFrom(x_tensor, place, dev_ctx, out_tensor); out_tensor->set_lod(x_tensor.lod()); } else { VLOG(10) << "WARNING: The input tensor 'x_tensor' holds no memory, so " @@ -51,8 +55,7 @@ class WriteToArrayOp : public ArrayOp { class WriteToArrayOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - WriteToArrayOpProtoMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + WriteToArrayOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "(LoDTensor) the tensor will be written to tensor array"); AddInput( @@ -97,14 +100,14 @@ class WriteToArrayInferShape : public framework::InferShapeBase { class WriteToArrayInferVarType : public framework::VarTypeInference { public: - void operator()(const framework::OpDescBind &op_desc, - framework::BlockDescBind *block) const override { + void operator()(const framework::OpDesc &op_desc, + framework::BlockDesc *block) const override { auto x_name = op_desc.Input("X")[0]; auto out_name = op_desc.Output("Out")[0]; VLOG(10) << "Set Variable " << out_name << " as LOD_TENSOR_ARRAY"; auto &out = detail::Ref(block->FindRecursiveOrCreateVar(out_name), "Cannot found %s", out_name); - out.SetType(framework::VarDesc::LOD_TENSOR_ARRAY); + out.SetType(framework::proto::VarDesc::LOD_TENSOR_ARRAY); auto *x = block->FindVarRecursive(x_name); if (x != nullptr) { out.SetDataType(x->GetDataType()); @@ -120,17 +123,18 @@ class ReadFromArrayOp : public ArrayOp { const framework::AttributeMap &attrs) : ArrayOp(type, inputs, outputs, attrs) {} void Run(const framework::Scope &scope, - const platform::DeviceContext &dev_ctx) const override { + const platform::Place &place) const override { auto *x = scope.FindVar(Input("X")); PADDLE_ENFORCE(x != nullptr, "X must be set"); auto &x_array = x->Get(); auto *out = scope.FindVar(Output("Out")); PADDLE_ENFORCE(out != nullptr, "Out must be set"); auto *out_tensor = out->GetMutable(); - size_t offset = GetOffset(scope, dev_ctx); + size_t offset = GetOffset(scope, place); if (offset < x_array.size()) { - framework::CopyFrom(x_array[offset], dev_ctx.GetPlace(), dev_ctx, - out_tensor); + platform::DeviceContextPool 
&pool = platform::DeviceContextPool::Get(); + auto &dev_ctx = *pool.Borrow(place); + framework::CopyFrom(x_array[offset], place, dev_ctx, out_tensor); out_tensor->set_lod(x_array[offset].lod()); } else { VLOG(10) << "offset " << offset << " >= " << x_array.size(); @@ -140,8 +144,7 @@ class ReadFromArrayOp : public ArrayOp { class ReadFromArrayProtoMaker : public framework::OpProtoAndCheckerMaker { public: - ReadFromArrayProtoMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + ReadFromArrayProtoMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "(TensorArray) the array will be read from."); AddInput("I", @@ -177,14 +180,14 @@ class WriteToArrayGradMaker : public framework::SingleGradOpDescMaker { using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; protected: - std::unique_ptr Apply() const override { - auto *grad_op = new framework::OpDescBind(); + std::unique_ptr Apply() const override { + auto *grad_op = new framework::OpDesc(); grad_op->SetType("read_from_array"); grad_op->SetInput("I", Input("I")); grad_op->SetInput("X", OutputGrad("Out")); grad_op->SetOutput("Out", InputGrad("X")); grad_op->SetAttrMap(Attrs()); - return std::unique_ptr(grad_op); + return std::unique_ptr(grad_op); } }; @@ -193,14 +196,14 @@ class ReadFromArrayGradMaker : public framework::SingleGradOpDescMaker { using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; protected: - std::unique_ptr Apply() const override { - auto *grad_op = new framework::OpDescBind(); + std::unique_ptr Apply() const override { + auto *grad_op = new framework::OpDesc(); grad_op->SetType("write_to_array"); grad_op->SetInput("I", Input("I")); grad_op->SetInput("X", OutputGrad("Out")); grad_op->SetOutput("Out", InputGrad("X")); grad_op->SetAttrMap(Attrs()); - return std::unique_ptr(grad_op); + return std::unique_ptr(grad_op); } }; diff --git a/paddle/operators/top_k_op.cc b/paddle/operators/top_k_op.cc index 16ae925eb5..bb72210bb6 100644 --- a/paddle/operators/top_k_op.cc +++ b/paddle/operators/top_k_op.cc @@ -46,7 +46,7 @@ class TopkOp : public framework::OperatorWithKernel { class TopkOpMaker : public framework::OpProtoAndCheckerMaker { public: - TopkOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) + TopkOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "(Tensor) The input of Topk op"); AddOutput("Out", "(Tensor) The output tensor of Topk op"); diff --git a/paddle/operators/top_k_op.cu b/paddle/operators/top_k_op.cu index 453bd07267..f7bf58e721 100644 --- a/paddle/operators/top_k_op.cu +++ b/paddle/operators/top_k_op.cu @@ -1,16 +1,16 @@ -/* Copyright (c) 2016 PaddlePaddle Authors All Rights Reserve. +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/framework/op_registry.h" #include "paddle/platform/assert.h" @@ -283,7 +283,7 @@ class TopkOpCUDAKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), - "It must use GPUPlace."); + "It must use CUDAPlace."); auto* input = ctx.Input("X"); auto* output = ctx.Output("Out"); auto* indices = ctx.Output("Indices"); diff --git a/paddle/operators/transpose_op.cc b/paddle/operators/transpose_op.cc index de5ff561ad..11615d806a 100644 --- a/paddle/operators/transpose_op.cc +++ b/paddle/operators/transpose_op.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/transpose_op.h" @@ -55,8 +55,7 @@ class TransposeOp : public framework::OperatorWithKernel { class TransposeOpMaker : public framework::OpProtoAndCheckerMaker { public: - TransposeOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker) + TransposeOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput( "X", @@ -71,18 +70,31 @@ class TransposeOpMaker : public framework::OpProtoAndCheckerMaker { Transpose Operator. The input tensor will be permuted according to the axis values given. -The op functions similar to how numpy.transpose works in python. +The op is similar to how numpy.transpose works in Python. + For example: - >> input = numpy.arange(6).reshape((2,3)) - >> input - array([[0, 1, 2], - [3, 4, 5]]) - >> axis = [1, 0] - >> output = input.transpose(axis) - >> output - array([[0, 3], - [1, 4], - [2, 5]]) + + ..
code-block:: text + + input = numpy.arange(6).reshape((2,3)) + + the input is: + + array([[0, 1, 2], + [3, 4, 5]]) + + given axis is: + + [1, 0] + + output = input.transpose(axis) + + then the output is: + + array([[0, 3], + [1, 4], + [2, 5]]) + So, given an input tensor of shape (N, C, H, W) and an axis of {0, 2, 3, 1}, the output tensor's shape will be (N, H, W, C) diff --git a/paddle/operators/transpose_op.cu.cc b/paddle/operators/transpose_op.cu.cc index 7d23f1493e..281c4468cc 100644 --- a/paddle/operators/transpose_op.cu.cc +++ b/paddle/operators/transpose_op.cu.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/transpose_op.h" diff --git a/paddle/operators/transpose_op.h b/paddle/operators/transpose_op.h index d995271a6b..b9686a2db3 100644 --- a/paddle/operators/transpose_op.h +++ b/paddle/operators/transpose_op.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once
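For readers who want the shape rule from the transpose DOC block above in executable form, here is a small self-contained sketch (not Paddle code): the i-th output dimension is the axis[i]-th input dimension.

```cpp
#include <iostream>
#include <vector>

// Shape rule used by the transpose operator: out_dims[i] = in_dims[axis[i]].
std::vector<int> TransposedShape(const std::vector<int>& in_dims,
                                 const std::vector<int>& axis) {
  std::vector<int> out_dims(in_dims.size());
  for (size_t i = 0; i < axis.size(); ++i) out_dims[i] = in_dims[axis[i]];
  return out_dims;
}

int main() {
  // (N, C, H, W) with axis {0, 2, 3, 1} -> (N, H, W, C), as in the comment.
  auto out = TransposedShape({8, 3, 32, 64}, {0, 2, 3, 1});
  for (int d : out) std::cout << d << ' ';  // prints: 8 32 64 3
}
```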
diff --git a/paddle/operators/uniform_random_op.cc b/paddle/operators/uniform_random_op.cc index 2a49ee471f..4d5dd86cb8 100644 --- a/paddle/operators/uniform_random_op.cc +++ b/paddle/operators/uniform_random_op.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ -#include <random> -#include <type_traits> +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/framework/op_registry.h" #include "paddle/framework/operator.h" @@ -63,18 +63,17 @@ class UniformRandomOp : public framework::OperatorWithKernel { } protected: - framework::OpKernelType GetKernelType( + framework::OpKernelType GetActualKernelType( const framework::ExecutionContext& ctx) const override { return framework::OpKernelType( - static_cast<framework::DataType>(ctx.Attr<int>("dtype")), + static_cast<framework::proto::DataType>(ctx.Attr<int>("dtype")), ctx.GetPlace()); } }; class UniformRandomOpMaker : public framework::OpProtoAndCheckerMaker { public: - UniformRandomOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker) + UniformRandomOpMaker(OpProto* proto, OpAttrChecker* op_checker) : framework::OpProtoAndCheckerMaker(proto, op_checker) { AddOutput("Out", "(Tensor) The output tensor of uniform random op"); AddComment(R"DOC( @@ -100,7 +99,7 @@ uniform distribution. "0 means use a seed generated by the system.") .SetDefault(0); AddAttr<int>("dtype", "(int, default 5(FP32)) Output tensor data type") - .SetDefault(framework::DataType::FP32); + .SetDefault(framework::proto::DataType::FP32); } }; } // namespace operators
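As a rough illustration of what uniform_random computes on CPU, independent of the framework plumbing above: fill a buffer from U(min, max), treating a seed attribute of 0 as "pick a nondeterministic seed". This is a standalone sketch, not the operator's actual kernel:

```cpp
#include <iostream>
#include <random>
#include <vector>

// Standalone sketch of a CPU uniform-random fill; seed == 0 means "use a
// system-generated seed", mirroring the attribute comment in the hunk above.
std::vector<float> UniformFill(size_t n, float min, float max, unsigned seed) {
  if (seed == 0) seed = std::random_device{}();
  std::mt19937 engine(seed);
  std::uniform_real_distribution<float> dist(min, max);
  std::vector<float> out(n);
  for (auto& v : out) v = dist(engine);
  return out;
}

int main() {
  auto v = UniformFill(4, -1.0f, 1.0f, 42);  // fixed seed -> reproducible
  for (float x : v) std::cout << x << ' ';
}
```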
diff --git a/paddle/operators/uniform_random_op.cu b/paddle/operators/uniform_random_op.cu index cfe9d293cf..719d0872a7 100644 --- a/paddle/operators/uniform_random_op.cu +++ b/paddle/operators/uniform_random_op.cu @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ -#include <thrust/random.h> -#include <thrust/transform.h> +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include <thrust/random.h> #include <thrust/transform.h> #include "paddle/framework/op_registry.h" diff --git a/paddle/operators/unpool_op.cc b/paddle/operators/unpool_op.cc index 49df2a530c..aeed9679b2 100644 --- a/paddle/operators/unpool_op.cc +++ b/paddle/operators/unpool_op.cc @@ -18,8 +18,7 @@ namespace operators { class Unpool2dOpMaker : public framework::OpProtoAndCheckerMaker { public: - Unpool2dOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker) + Unpool2dOpMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput( "X", @@ -54,16 +53,14 @@ class Unpool2dOpMaker : public framework::OpProtoAndCheckerMaker { "(string), unpooling type, can be \"max\" for max-unpooling ") .InEnum({"max"}); AddComment(R"DOC( - "Input shape: $(N, C_{in}, H_{in}, W_{in})$ - Output shape: $(N, C_{out}, H_{out}, W_{out})$ - Where - $$ - H_{out} = (H_{in}−1) * strides[0] − 2 * paddings[0] + ksize[0] \\ - W_{out} = (W_{in}−1) * strides[1] − 2 * paddings[1] + ksize[1] - $$ - Paper: http://www.matthewzeiler.com/wp-content/uploads/2017 - /07/iccv2011.pdf - )DOC"); +Input shape is: $(N, C_{in}, H_{in}, W_{in})$, Output shape is: +$(N, C_{out}, H_{out}, W_{out})$, where +$$ +H_{out} = (H_{in}−1) * strides[0] − 2 * paddings[0] + ksize[0] \\ +W_{out} = (W_{in}−1) * strides[1] − 2 * paddings[1] + ksize[1] +$$ +Paper: http://www.matthewzeiler.com/wp-content/uploads/2017/07/iccv2011.pdf +)DOC"); } }; @@ -74,7 +71,7 @@ int OutputSize(int input_size, int ksize, int padding, int stride) { class UnpoolOp : public framework::OperatorWithKernel { protected: - framework::OpKernelType GetKernelType( + framework::OpKernelType GetActualKernelType( const framework::ExecutionContext& ctx) const override { return framework::OpKernelType( framework::ToDataType(ctx.Input<framework::Tensor>("X")->type()), @@ -113,7 +110,7 @@ class UnpoolOp : public framework::OperatorWithKernel { class UnpoolOpGrad : public framework::OperatorWithKernel { protected: - framework::OpKernelType GetKernelType( + framework::OpKernelType GetActualKernelType( const framework::ExecutionContext& ctx) const override { return framework::OpKernelType( framework::ToDataType(ctx.Input<framework::Tensor>("X")->type()),
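The unpool DOC formula above is easy to sanity-check numerically; the sketch below mirrors the OutputSize helper from the hunk (standalone, with ASCII minus signs):

```cpp
#include <iostream>

// Same arithmetic as the unpool OutputSize helper and the DOC formula:
// H_out = (H_in - 1) * stride - 2 * padding + ksize
int UnpoolOutputSize(int input_size, int ksize, int padding, int stride) {
  return (input_size - 1) * stride - 2 * padding + ksize;
}

int main() {
  // e.g. H_in = 16, ksize = 2, padding = 0, stride = 2 -> H_out = 32,
  // exactly undoing a 2x2 stride-2 max pool.
  std::cout << UnpoolOutputSize(16, 2, 0, 2) << '\n';
}
```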
*/ #include #include "paddle/framework/executor.h" @@ -25,7 +25,7 @@ namespace operators { using StepScopeVar = std::vector; using LoDTensor = framework::LoDTensor; -constexpr char kStepBlock[] = "step_block"; +constexpr char kStepBlock[] = "sub_block"; constexpr char kCondition[] = "Condition"; constexpr char kStepScopes[] = "StepScopes"; constexpr char kParameters[] = "X"; @@ -40,13 +40,14 @@ class WhileOp : public framework::OperatorBase { : framework::OperatorBase(type, inputs, outputs, attrs) {} void Run(const framework::Scope &scope, - const platform::DeviceContext &dev_ctx) const override { + const platform::Place &dev_place) const override { PADDLE_ENFORCE_NOT_NULL(scope.FindVar(Input(kCondition))); auto &cond = scope.FindVar(Input(kCondition))->Get(); PADDLE_ENFORCE_EQ(cond.dims(), paddle::framework::make_ddim({1})); - framework::Executor executor(dev_ctx); - auto *block = Attr(kStepBlock); + framework::Executor executor(dev_place); + auto *block = Attr(kStepBlock); + auto *program = block->Program(); auto step_scopes = @@ -64,7 +65,7 @@ class WhileOp : public framework::OperatorBase { class WhileOpMaker : public framework::OpProtoAndCheckerMaker { public: - WhileOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) + WhileOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput(kParameters, "A set of variables, which are required by operators inside the " @@ -82,8 +83,8 @@ class WhileOpMaker : public framework::OpProtoAndCheckerMaker { "(StepScopeVar) A vector of local scope, which size equals the " "step number of While Op. The i'th scope storages temporary " "variables generated in the i'th step."); - AddAttr(kStepBlock, - "The step block inside WhileOp"); + AddAttr(kStepBlock, + "The step block inside WhileOp"); AddComment(R"DOC( )DOC"); } @@ -97,9 +98,9 @@ class WhileGradOp : public framework::OperatorBase { : framework::OperatorBase(type, inputs, outputs, attrs) {} void Run(const framework::Scope &scope, - const platform::DeviceContext &dev_ctx) const override { - framework::Executor executor(dev_ctx); - auto *block = Attr(kStepBlock); + const platform::Place &dev_place) const override { + framework::Executor executor(dev_place); + auto *block = Attr(kStepBlock); auto *program = block->Program(); auto *step_scopes = @@ -189,7 +190,7 @@ class WhileGradOp : public framework::OperatorBase { auto zero_op = framework::OpRegistry::CreateOp( "fill_constant", framework::VariableNameMap{}, {{"Out", {pg_names[param_id]}}}, attrs); - zero_op->Run(scope, dev_ctx); + zero_op->Run(scope, dev_place); } } @@ -197,7 +198,7 @@ class WhileGradOp : public framework::OperatorBase { auto sum_op = framework::OpRegistry::CreateOp( "sum", {{"X", {pg_names[param_id], new_inside_name}}}, {{"Out", {pg_names[param_id]}}}, framework::AttributeMap{}); - sum_op->Run(cur_scope, dev_ctx); + sum_op->Run(cur_scope, dev_place); cur_scope.Rename(new_inside_name, inside_grad_name); } } @@ -209,8 +210,8 @@ class WhileGradOpDescMaker : public framework::SingleGradOpDescMaker { using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; protected: - std::unique_ptr Apply() const override { - auto *grad = new framework::OpDescBind(); + std::unique_ptr Apply() const override { + auto *grad = new framework::OpDesc(); grad->SetType("while_grad"); grad->SetInput(kParameters, Input(kParameters)); @@ -279,14 +280,14 @@ class WhileGradOpDescMaker : public framework::SingleGradOpDescMaker { // while operator could be renamed. 
grad->SetAttr("original_output_grad", extra_inputs_list); - return std::unique_ptr(grad); + return std::unique_ptr(grad); } }; class WhileGradOpVarTypeInference : public framework::VarTypeInference { public: - void operator()(const framework::OpDescBind &op_desc, - framework::BlockDescBind *block) const override { + void operator()(const framework::OpDesc &op_desc, + framework::BlockDesc *block) const override { auto p_names = op_desc.Input(kParameters); auto pg_names = op_desc.Output(framework::GradVarName(kParameters)); @@ -321,10 +322,10 @@ class WhileGradOpShapeInference : public framework::InferShapeBase { continue; } auto dims = ctx->GetInputsElementDim(kParameters, i); - if (var_types[i] == framework::VarDesc::LOD_TENSOR) { + if (var_types[i] == framework::proto::VarDesc::LOD_TENSOR) { names_to_set.push_back(pg_names[i]); dims_to_set.push_back(dims); - } else if (var_types[i] == framework::VarDesc::LOD_TENSOR_ARRAY) { + } else if (var_types[i] == framework::proto::VarDesc::LOD_TENSOR_ARRAY) { // not sure how to set the dim of LOD_TENSOR_ARRAY names_to_set.push_back(pg_names[i]); dims_to_set.push_back(dims); diff --git a/paddle/optimizer/adadelta_optimizer.cc b/paddle/optimizer/adadelta_optimizer.cc index 5cc7c47d44..8ca048257e 100644 --- a/paddle/optimizer/adadelta_optimizer.cc +++ b/paddle/optimizer/adadelta_optimizer.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "adadelta_optimizer.h" #include diff --git a/paddle/optimizer/adadelta_optimizer.h b/paddle/optimizer/adadelta_optimizer.h index 6aab1ad553..48f1ae1750 100644 --- a/paddle/optimizer/adadelta_optimizer.h +++ b/paddle/optimizer/adadelta_optimizer.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once diff --git a/paddle/optimizer/adagrad_optimizer.cc b/paddle/optimizer/adagrad_optimizer.cc index c981996bab..c6d39a366a 100644 --- a/paddle/optimizer/adagrad_optimizer.cc +++ b/paddle/optimizer/adagrad_optimizer.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include diff --git a/paddle/optimizer/adagrad_optimizer.h b/paddle/optimizer/adagrad_optimizer.h index 447b7c7547..b0cff061f5 100644 --- a/paddle/optimizer/adagrad_optimizer.h +++ b/paddle/optimizer/adagrad_optimizer.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once diff --git a/paddle/optimizer/adam_optimizer.cc b/paddle/optimizer/adam_optimizer.cc index 6dc2d74970..8a384b59c4 100644 --- a/paddle/optimizer/adam_optimizer.cc +++ b/paddle/optimizer/adam_optimizer.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. 
- Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "adam_optimizer.h" #include diff --git a/paddle/optimizer/adam_optimizer.h b/paddle/optimizer/adam_optimizer.h index 37ab53afc3..7df40064df 100644 --- a/paddle/optimizer/adam_optimizer.h +++ b/paddle/optimizer/adam_optimizer.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once diff --git a/paddle/optimizer/optimizer.cc b/paddle/optimizer/optimizer.cc index faa2376452..3af4448436 100644 --- a/paddle/optimizer/optimizer.cc +++ b/paddle/optimizer/optimizer.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
*/ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "optimizer.h" #include diff --git a/paddle/optimizer/optimizer.h b/paddle/optimizer/optimizer.h index e6fa12a4d2..516e612167 100644 --- a/paddle/optimizer/optimizer.h +++ b/paddle/optimizer/optimizer.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once diff --git a/paddle/optimizer/parameter_optimizer.cc b/paddle/optimizer/parameter_optimizer.cc index da92c2d01c..1603e5fdc8 100644 --- a/paddle/optimizer/parameter_optimizer.cc +++ b/paddle/optimizer/parameter_optimizer.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include #include "adadelta_optimizer.h" diff --git a/paddle/optimizer/parameter_optimizer.h b/paddle/optimizer/parameter_optimizer.h index 99d0416e75..1f501c49e1 100644 --- a/paddle/optimizer/parameter_optimizer.h +++ b/paddle/optimizer/parameter_optimizer.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once diff --git a/paddle/optimizer/sgd_optimizer.cc b/paddle/optimizer/sgd_optimizer.cc index c150144ac2..ee80f543fc 100644 --- a/paddle/optimizer/sgd_optimizer.cc +++ b/paddle/optimizer/sgd_optimizer.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "sgd_optimizer.h" #include "serialization.h" diff --git a/paddle/optimizer/sgd_optimizer.h b/paddle/optimizer/sgd_optimizer.h index 0b1da0aa27..16a4df9973 100644 --- a/paddle/optimizer/sgd_optimizer.h +++ b/paddle/optimizer/sgd_optimizer.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
*/ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once diff --git a/paddle/platform/CMakeLists.txt b/paddle/platform/CMakeLists.txt index 88df28a966..f0a0ea70a0 100644 --- a/paddle/platform/CMakeLists.txt +++ b/paddle/platform/CMakeLists.txt @@ -25,7 +25,7 @@ ENDIF() # avoiding cycle dependencies cc_library(device_context SRCS device_context.cc DEPS memory buddy_allocator system_allocator memory_block meta_data meta_cache place eigen3 ${GPU_CTX_DEPS}) -nv_test(device_context_test SRCS device_context_test.cc DEPS device_context gpu_info) +nv_test(device_context_test SRCS device_context_test.cu DEPS device_context gpu_info) nv_test(cudnn_helper_test SRCS cudnn_helper_test.cc DEPS dynload_cuda) nv_test(transform_test SRCS transform_test.cu DEPS paddle_memory place device_context) diff --git a/paddle/platform/call_once.h b/paddle/platform/call_once.h index d9f49527dc..00337a7f05 100644 --- a/paddle/platform/call_once.h +++ b/paddle/platform/call_once.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once diff --git a/paddle/platform/cuda_profiler.h b/paddle/platform/cuda_profiler.h index b6311cb23d..67d5f626d4 100644 --- a/paddle/platform/cuda_profiler.h +++ b/paddle/platform/cuda_profiler.h @@ -22,23 +22,7 @@ namespace paddle { namespace platform { void CudaProfilerInit(std::string output_file, std::string output_mode, - std::vector<std::string> config_flags) { - std::array<char, 128> buf; - std::string tmpl = "/tmp/cuda_profile_config.XXXXXX"; - PADDLE_ENFORCE_LT(tmpl.size(), buf.size()); - memcpy(buf.data(), tmpl.data(), tmpl.size()); - auto result = mktemp(buf.data()); - PADDLE_ENFORCE(strlen(result) != 0); - std::string config_file = result; - - { - std::ofstream ofs(config_file, std::ios::out | std::ios::trunc); - PADDLE_ENFORCE(ofs.is_open(), "ofstream: ", ofs.rdstate()); - for (const auto& line : config_flags) { - ofs << line << std::endl; - } - } - + std::string config_file) { PADDLE_ENFORCE(output_mode == "kvp" || output_mode == "csv"); cudaOutputMode_t mode = output_mode == "csv" ? cudaCSV : cudaKeyValuePair; PADDLE_ENFORCE(
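After this change the caller prepares the profiler config file itself instead of CudaProfilerInit writing config_flags to a mktemp'd path. A hedged sketch of what a caller might now do; the flag names are examples from the legacy CUDA command-line profiler and may differ by toolkit version:

```cpp
#include <fstream>
#include <string>
#include <vector>

// Prepare a profiler config file up front, then hand its path to
// CudaProfilerInit as the new config_file argument. The flag names below
// are illustrative only.
int main() {
  std::vector<std::string> flags = {"gpustarttimestamp", "gpuendtimestamp"};
  std::ofstream ofs("cuda_profile.cfg", std::ios::out | std::ios::trunc);
  for (const auto& line : flags) ofs << line << '\n';
  // ...then: CudaProfilerInit("profile.out", "kvp", "cuda_profile.cfg");
}
```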
diff --git a/paddle/platform/device_context.cc b/paddle/platform/device_context.cc index 2c7f964216..e450ef32a4 100644 --- a/paddle/platform/device_context.cc +++ b/paddle/platform/device_context.cc @@ -15,11 +15,64 @@ limitations under the License. */ namespace paddle { namespace platform { +DeviceContextPool* DeviceContextPool::pool = nullptr; + +const platform::DeviceContext* DeviceContextPool::Borrow( + const platform::Place& place) { + auto it = device_contexts_.find(place); + if (it == device_contexts_.end()) { + PADDLE_THROW( + "'Place' is not supported, Please re-compile with WITH_GPU " + "option"); + } + return it->second; +} + +std::vector<const platform::DeviceContext*> DeviceContextPool::Borrow( + const std::vector<platform::Place>& places) { + PADDLE_ENFORCE_GT(places.size(), 0); + PADDLE_ENFORCE_LE(places.size(), device_contexts_.size()); + std::vector<const platform::DeviceContext*> borrowed_contexts; + for (auto& place : places) { + auto it = device_contexts_.find(place); + if (it != device_contexts_.end()) { + borrowed_contexts.emplace_back(it->second); + } else { + PADDLE_THROW( + "'Place' is not supported, Please re-compile with WITH_GPU " + "option"); + } + } + return borrowed_contexts; +} + +DeviceContextPool::DeviceContextPool( + const std::vector<platform::Place>& places) { + PADDLE_ENFORCE_GT(places.size(), 0); + for (size_t i = 0; i < places.size(); i++) { + if (platform::is_cpu_place(places[i])) { + device_contexts_.emplace(places[i], + new platform::CPUDeviceContext( + boost::get<platform::CPUPlace>(places[i]))); + } else if (platform::is_gpu_place(places[i])) { +#ifdef PADDLE_WITH_CUDA + device_contexts_.emplace(places[i], + new platform::CUDADeviceContext( + boost::get<platform::CUDAPlace>(places[i]))); +#else + PADDLE_THROW( + "'CUDAPlace' is not supported, Please re-compile with WITH_GPU " + "option"); +#endif + } + } +} + CPUDeviceContext::CPUDeviceContext() { eigen_device_.reset(new Eigen::DefaultDevice()); } -CPUDeviceContext::CPUDeviceContext(CPUPlace place) { +CPUDeviceContext::CPUDeviceContext(CPUPlace place) : place_(place) { eigen_device_.reset(new Eigen::DefaultDevice()); } @@ -27,7 +80,7 @@ Eigen::DefaultDevice* CPUDeviceContext::eigen_device() const { return eigen_device_.get(); } -Place CPUDeviceContext::GetPlace() const { return CPUPlace(); } +Place CPUDeviceContext::GetPlace() const { return place_; } #ifdef PADDLE_WITH_CUDA @@ -38,7 +91,7 @@ class EigenCudaStreamDevice : public Eigen::StreamInterface { } ~EigenCudaStreamDevice() override {} - void Reinitialize(const cudaStream_t* cuda_stream, GPUPlace place) { + void Reinitialize(const cudaStream_t* cuda_stream, CUDAPlace place) { stream_ = cuda_stream; place_ = place; device_prop_ = &Eigen::m_deviceProperties[place.device]; @@ -77,14 +130,14 @@ class EigenCudaStreamDevice : public Eigen::StreamInterface { } private: - GPUPlace place_; + CUDAPlace place_; const cudaStream_t* stream_; // not owned; const cudaDeviceProp* device_prop_; // not owned; mutable void* scratch_; mutable unsigned int* semaphore_; }; -CUDADeviceContext::CUDADeviceContext(GPUPlace place) : place_(place) { +CUDADeviceContext::CUDADeviceContext(CUDAPlace place) : place_(place) { SetDeviceId(place_.device); PADDLE_ENFORCE(cudaStreamCreate(&stream_)); eigen_stream_.reset(new EigenCudaStreamDevice()); @@ -125,6 +178,20 @@ cudnnHandle_t CUDADeviceContext::cudnn_handle() const { return cudnn_handle_; } cudaStream_t CUDADeviceContext::stream() const { return stream_; } +CUDNNDeviceContext::CUDNNDeviceContext(CUDAPlace place) + : CUDADeviceContext(place) { + PADDLE_ENFORCE(dynload::cudnnCreate(&cudnn_handle_)); + PADDLE_ENFORCE(dynload::cudnnSetStream(cudnn_handle_, stream())); +} + +CUDNNDeviceContext::~CUDNNDeviceContext() { + SetDeviceId(boost::get<CUDAPlace>(GetPlace()).device); + Wait(); + PADDLE_ENFORCE(dynload::cudnnDestroy(cudnn_handle_)); +} + +cudnnHandle_t CUDNNDeviceContext::cudnn_handle() const { return cudnn_handle_; } + #endif } // namespace platform } // namespace paddle
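Typical use of the pool introduced above, mirroring the test code later in this diff: Create() once with every place at startup, then Borrow() the context for a given place. A sketch that assumes compiling and linking against the paddle platform library:

```cpp
#include <vector>
#include "paddle/platform/device_context.h"

// Usage sketch for DeviceContextPool (assumes the paddle platform library):
// Create() is called once at startup; any later code can Get() the singleton
// and Borrow() the per-place context instead of constructing its own.
int main() {
  using namespace paddle::platform;
  std::vector<Place> places;
  places.emplace_back(CPUPlace());
  DeviceContextPool::Create(places);  // init once
  DeviceContextPool& pool = DeviceContextPool::Get();
  const DeviceContext* ctx = pool.Borrow(CPUPlace());
  return ctx != nullptr ? 0 : 1;
}
```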
diff --git a/paddle/platform/device_context.h b/paddle/platform/device_context.h index 596d9d0bba..8ba12e1657 100644 --- a/paddle/platform/device_context.h +++ b/paddle/platform/device_context.h @@ -11,8 +11,8 @@ limitations under the License. */ #pragma once -#include "paddle/platform/enforce.h" -#include "paddle/platform/place.h" +#include <memory> +#include <unordered_map> #ifdef PADDLE_WITH_CUDA #include "paddle/platform/dynload/cublas.h" @@ -20,10 +20,13 @@ limitations under the License. */ #include "paddle/platform/gpu_info.h" #define EIGEN_USE_GPU #endif -#include <memory> + +#include "paddle/platform/enforce.h" #include "paddle/platform/place.h" #include "unsupported/Eigen/CXX11/Tensor" +#include "glog/logging.h" + namespace paddle { namespace platform { @@ -45,6 +48,7 @@ class CPUDeviceContext : public DeviceContext { Place GetPlace() const override; private: + CPUPlace place_; std::unique_ptr<Eigen::DefaultDevice> eigen_device_; }; @@ -54,7 +58,7 @@ class EigenCudaStreamDevice; class CUDADeviceContext : public DeviceContext { public: - explicit CUDADeviceContext(GPUPlace place); + explicit CUDADeviceContext(CUDAPlace place); virtual ~CUDADeviceContext(); /*! \brief Wait for all operations completion in the stream. */ @@ -76,7 +80,7 @@ class CUDADeviceContext : public DeviceContext { cudaStream_t stream() const; private: - GPUPlace place_; + CUDAPlace place_; std::unique_ptr<Eigen::GpuDevice> eigen_device_; std::unique_ptr<EigenCudaStreamDevice> eigen_stream_; @@ -86,7 +90,65 @@ class CUDADeviceContext : public DeviceContext { cublasHandle_t cublas_handle_; }; +class CUDNNDeviceContext : public CUDADeviceContext { + public: + explicit CUDNNDeviceContext(CUDAPlace place); + virtual ~CUDNNDeviceContext(); + + /*! \brief Return cudnn handle in the device context. */ + cudnnHandle_t cudnn_handle() const; + + private: + cudnnHandle_t cudnn_handle_; +}; + #endif +/*! \brief device context pool singleton */ +class DeviceContextPool { + public: + explicit DeviceContextPool(const std::vector<platform::Place>& places); + + static DeviceContextPool& Get() { + PADDLE_ENFORCE_NOT_NULL(pool, "Need to Create DeviceContextPool first!"); + return *pool; + } + + /*! \brief Create should only called by Init function */ + static DeviceContextPool& Create(const std::vector<platform::Place>& places) { + if (pool == nullptr) { + pool = new DeviceContextPool(places); + } + return *pool; + } + + /*! \brief Return handle of single device context. */ + const platform::DeviceContext* Borrow(const platform::Place& place); + + /*! \brief Return handle of multi-device context.
*/ + std::vector<const platform::DeviceContext*> Borrow( + const std::vector<platform::Place>& places); + + ~DeviceContextPool() {} + + private: + static DeviceContextPool* pool; + constexpr static int LEFT_SHIFT = 8; + struct Hash { + std::hash<int> hash_; + size_t operator()(const platform::Place& place) const { + int pre_hash = place.which() + (1 << LEFT_SHIFT); + if (platform::is_gpu_place(place)) { + pre_hash += boost::get<platform::CUDAPlace>(place).GetDeviceId(); + } + return hash_(pre_hash); + } + }; + std::unordered_map<const platform::Place, const platform::DeviceContext*, Hash> + device_contexts_; + DISABLE_COPY_AND_ASSIGN(DeviceContextPool); +}; + } // namespace platform } // namespace paddle
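The Hash functor above folds the variant index (which place type) and, for GPUs, the device id into a single int before hashing, so CPUPlace and CUDAPlace(n) land in different buckets. A standalone model of that scheme:

```cpp
#include <functional>
#include <iostream>

// Standalone model of the pool's Hash: combine the variant index ("which"
// place type) with the device id, then hash the resulting int. The shift
// keeps the type bits clear of the device-id bits.
size_t HashPlace(int which, int device_id, int left_shift = 8) {
  int pre_hash = which + (1 << left_shift);
  pre_hash += device_id;  // 0 for CPUPlace
  return std::hash<int>()(pre_hash);
}

int main() {
  // CPUPlace (which = 0) and CUDAPlace(0) (which = 1) hash differently.
  std::cout << (HashPlace(0, 0) != HashPlace(1, 0)) << '\n';  // 1
}
```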
*/ + +#include "gtest/gtest.h" +#include "paddle/platform/device_context.h" + +#include "glog/logging.h" + +TEST(Device, Init) { + using paddle::platform::DeviceContext; + using paddle::platform::CUDADeviceContext; + using paddle::platform::CUDAPlace; + + int count = paddle::platform::GetCUDADeviceCount(); + for (int i = 0; i < count; i++) { + CUDADeviceContext* device_context = new CUDADeviceContext(CUDAPlace(i)); + Eigen::GpuDevice* gpu_device = device_context->eigen_device(); + ASSERT_NE(nullptr, gpu_device); + delete device_context; + } +} + +TEST(Device, CUDADeviceContext) { + using paddle::platform::CUDADeviceContext; + using paddle::platform::CUDAPlace; + + int count = paddle::platform::GetCUDADeviceCount(); + for (int i = 0; i < count; i++) { + CUDADeviceContext* device_context = new CUDADeviceContext(CUDAPlace(i)); + Eigen::GpuDevice* gpu_device = device_context->eigen_device(); + ASSERT_NE(nullptr, gpu_device); + cudnnHandle_t cudnn_handle = device_context->cudnn_handle(); + ASSERT_NE(nullptr, cudnn_handle); + cublasHandle_t cublas_handle = device_context->cublas_handle(); + ASSERT_NE(nullptr, cublas_handle); + ASSERT_NE(nullptr, device_context->stream()); + delete device_context; + } +} + +TEST(Device, CUDNNDeviceContext) { + using paddle::platform::CUDNNDeviceContext; + using paddle::platform::CUDAPlace; + if (paddle::platform::dynload::HasCUDNN()) { + int count = paddle::platform::GetCUDADeviceCount(); + for (int i = 0; i < count; ++i) { + CUDNNDeviceContext* device_context = new CUDNNDeviceContext(CUDAPlace(i)); + cudnnHandle_t cudnn_handle = device_context->cudnn_handle(); + ASSERT_NE(nullptr, cudnn_handle); + ASSERT_NE(nullptr, device_context->stream()); + delete device_context; + } + } +} + +TEST(Device, DeviceContextPool) { + using paddle::platform::DeviceContextPool; + using paddle::platform::CUDADeviceContext; + using paddle::platform::Place; + using paddle::platform::CPUPlace; + using paddle::platform::CUDAPlace; + + DeviceContextPool& pool = DeviceContextPool::Get(); + auto cpu_dev_ctx1 = pool.Borrow(CPUPlace()); + auto cpu_dev_ctx2 = pool.Borrow(CPUPlace()); + EXPECT_TRUE(cpu_dev_ctx2 == cpu_dev_ctx1); + + std::vector gpu_places; + int count = paddle::platform::GetCUDADeviceCount(); + for (int i = 0; i < count; ++i) { + gpu_places.emplace_back(CUDAPlace(i)); + } + auto dev_ctxs = pool.Borrow(gpu_places); + for (size_t i = 0; i < dev_ctxs.size(); ++i) { + auto* dev_ctx = static_cast(dev_ctxs[i]); + + // check same as CUDAPlace(i) + CUDAPlace place = boost::get(dev_ctx->GetPlace()); + EXPECT_EQ(place.GetDeviceId(), static_cast(i)); + } +} + +int main(int argc, char** argv) { + int dev_count = paddle::platform::GetCUDADeviceCount(); + if (dev_count <= 1) { + LOG(WARNING) << "Cannot test multi-gpu DeviceContextPool, because the CUDA " + "device count is " + << dev_count; + return 0; + } + + std::vector places; + + places.emplace_back(paddle::platform::CPUPlace()); + int count = paddle::platform::GetCUDADeviceCount(); + for (int i = 0; i < count; ++i) { + places.emplace_back(paddle::platform::CUDAPlace(i)); + } + + VLOG(0) << " DeviceCount " << count; + paddle::platform::DeviceContextPool::Create(places); + + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/paddle/platform/dynload/nccl.cc b/paddle/platform/dynload/nccl.cc index 8f92b8d94d..4cec829a8a 100644 --- a/paddle/platform/dynload/nccl.cc +++ b/paddle/platform/dynload/nccl.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. 
- Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/platform/dynload/nccl.h" @@ -25,6 +25,11 @@ void *nccl_dso_handle; NCCL_RAND_ROUTINE_EACH(DEFINE_WRAP); +void LoadNCCLDSO() { + platform::call_once(nccl_dso_flag, + [] { GetNCCLDsoHandle(&nccl_dso_handle); }); +} + } // namespace dynload } // namespace platform } // namespace paddle diff --git a/paddle/platform/dynload/nccl.h b/paddle/platform/dynload/nccl.h index 981b2ab258..6c776afc97 100644 --- a/paddle/platform/dynload/nccl.h +++ b/paddle/platform/dynload/nccl.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once @@ -28,18 +28,18 @@ extern std::once_flag nccl_dso_flag; extern void* nccl_dso_handle; #ifdef PADDLE_USE_DSO -#define DECLARE_DYNAMIC_LOAD_NCCL_WRAP(__name) \ - struct DynLoad__##__name { \ - template <typename... Args> \ - auto operator()(Args... args) -> decltype(__name(args...)) { \ - using nccl_func = decltype(__name(args...)) (*)(Args...); \ - platform::call_once(nccl_dso_flag, \ - paddle::platform::dynload::GetNCCLDsoHandle, \ - &nccl_dso_handle); \ - void* p_##__name = dlsym(nccl_dso_handle, #__name); \ - return reinterpret_cast<nccl_func>(p_##__name)(args...); \ - } \ - }; \ +extern void LoadNCCLDSO(); + +#define DECLARE_DYNAMIC_LOAD_NCCL_WRAP(__name) \ + struct DynLoad__##__name { \ + template <typename... Args> \ + auto operator()(Args...
args) -> decltype(__name(args...)) { \ + using nccl_func = decltype(__name(args...)) (*)(Args...); \ + paddle::platform::dynload::LoadNCCLDSO(); \ + void* p_##__name = dlsym(nccl_dso_handle, #__name); \ + return reinterpret_cast<nccl_func>(p_##__name)(args...); \ + } \ + }; \ extern DynLoad__##__name __name #else #define DECLARE_DYNAMIC_LOAD_NCCL_WRAP(__name) \ @@ -63,6 +63,8 @@ extern void* nccl_dso_handle; __macro(ncclAllReduce); \ __macro(ncclBcast); \ __macro(ncclAllGather); \ + __macro(ncclGroupStart); \ + __macro(ncclGroupEnd); \ __macro(ncclReduce); \ __macro(ncclGetErrorString); diff --git a/paddle/platform/enforce.h b/paddle/platform/enforce.h index 5abd4d4a34..d1c7be0790 100644 --- a/paddle/platform/enforce.h +++ b/paddle/platform/enforce.h @@ -22,6 +22,7 @@ limitations under the License. */ #include #include +#include "paddle/platform/macros.h" #include "paddle/string/printf.h" #include "paddle/string/to_string.h" diff --git a/paddle/platform/for_range.h b/paddle/platform/for_range.h new file mode 100644 index 0000000000..5427aa2823 --- /dev/null +++ b/paddle/platform/for_range.h @@ -0,0 +1,85 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once +#include "paddle/platform/device_context.h" + +namespace paddle { +namespace platform { + +template <typename DeviceContext> +struct ForRange { + ForRange(const DeviceContext& dev_ctx, size_t limit); + + template <typename Function> + void operator()(Function func) const; +}; + +template <> +struct ForRange<CPUDeviceContext> { + ForRange(const CPUDeviceContext& dev_ctx, size_t limit) : limit_(limit) {} + + template <typename Function> + void operator()(Function func) const { + for (size_t i = 0; i < limit_; ++i) { + func(i); + } + } + + size_t limit_; +}; + +#ifdef __NVCC__ +template <typename Function> +__global__ static void ForRangeElemwiseOpGridIsOne(Function func) { + size_t idx = static_cast<size_t>(threadIdx.x); + func(idx); +} + +template <typename Function> +__global__ static void ForRangeElemwiseOp(Function func, int limit) { + size_t idx = static_cast<size_t>(blockIdx.x * blockDim.x + threadIdx.x); + if (idx < limit) { + func(idx); + } +} + +template <> +struct ForRange<CUDADeviceContext> { + ForRange(const CUDADeviceContext& dev_ctx, size_t limit) + : dev_ctx_(dev_ctx), limit_(static_cast<int>(limit)) {} + + template <typename Function> + inline void operator()(Function func) const { + constexpr size_t num_threads = 1024; + int block_size = limit_ <= num_threads ? limit_ : num_threads; + int grid_size = (limit_ + num_threads - 1) / num_threads; + + if (grid_size == 1) { + ForRangeElemwiseOpGridIsOne<<<1, block_size, 0, dev_ctx_.stream()>>>( + func); + } else { + ForRangeElemwiseOp<<<grid_size, block_size, 0, dev_ctx_.stream()>>>( + func, limit_); + } + } + + const CUDADeviceContext& dev_ctx_; + int limit_; +}; + +#endif + +} // namespace platform +} // namespace paddle
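ForRange's contract is "call the functor once per index", with the CUDA specialization launching the same functor as a kernel. A CPU-only standalone mirror of the idiom, using an axpy-style functor:

```cpp
#include <cstddef>
#include <iostream>
#include <vector>

// Minimal CPU-only mirror of the ForRange idiom above: one functor call per
// index. The identical functor could be launched as a CUDA kernel by the
// ForRange<CUDADeviceContext> specialization.
struct AxpyFunctor {
  const float* x;
  float* y;
  float a;
  void operator()(size_t i) const { y[i] += a * x[i]; }
};

int main() {
  std::vector<float> x = {1, 2, 3}, y = {0, 0, 0};
  AxpyFunctor f{x.data(), y.data(), 2.0f};
  for (size_t i = 0; i < x.size(); ++i) f(i);  // what ForRange<CPU...> does
  std::cout << y[0] << ' ' << y[1] << ' ' << y[2] << '\n';  // 2 4 6
}
```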
diff --git a/paddle/platform/gpu_info.cc b/paddle/platform/gpu_info.cc index 4fa2eaed31..7037551d75 100644 --- a/paddle/platform/gpu_info.cc +++ b/paddle/platform/gpu_info.cc @@ -73,19 +73,20 @@ size_t GpuMaxChunkSize() { size_t available = 0; GpuMemoryUsage(available, total); - - // Reserving the rest memory for page tables, etc. - size_t reserving = 0.05 * total; - + VLOG(10) << "GPU Usage " << available / 1024 / 1024 << "M/" + << total / 1024 / 1024 << "M"; + size_t reserving = static_cast<size_t>(0.05 * total); // If available less than minimum chunk size, no usable memory exists. available = - std::max(std::max(available, GpuMinChunkSize()) - GpuMinChunkSize(), - reserving) - - reserving; + std::min(std::max(available, GpuMinChunkSize()) - GpuMinChunkSize(), + total - reserving); + + // Reserving the rest memory for page tables, etc. - size_t allocating = FLAGS_fraction_of_gpu_memory_to_use * total; + size_t allocating = static_cast<size_t>(FLAGS_fraction_of_gpu_memory_to_use * + (total - reserving)); - PADDLE_ENFORCE_LT(allocating, available); + PADDLE_ENFORCE_LE(allocating, available); return allocating; } @@ -96,17 +97,6 @@ void GpuMemcpyAsync(void *dst, const void *src, size_t count, "cudaMemcpyAsync failed in paddle::platform::GpuMemcpyAsync"); } -void GpuMemcpySync(void *dst, const void *src, size_t count, - enum cudaMemcpyKind kind) { - PADDLE_ENFORCE(cudaMemcpy(dst, src, count, kind), - "cudaMemcpy failed in paddle::platform::GpuMemcpySync"); - // note: cudaMemcpy may actually be asynchronous with respect to the caller, - // block on stream 0 to make sure the copy has completed - PADDLE_ENFORCE( - cudaStreamSynchronize(0), - "cudaStreamSynchronize failed in paddle::platform::GpuMemcpySync"); -} - void GpuMemcpyPeer(void *dst, int dst_device, const void *src, int src_device, size_t count, cudaStream_t stream) { PADDLE_ENFORCE( diff --git a/paddle/platform/gpu_info.h b/paddle/platform/gpu_info.h index db961f3838..d05131fa41 100644 --- a/paddle/platform/gpu_info.h +++ b/paddle/platform/gpu_info.h @@ -52,10 +52,6 @@ size_t GpuMaxChunkSize(); void GpuMemcpyAsync(void *dst, const void *src, size_t count, enum cudaMemcpyKind kind, cudaStream_t stream); -//! Copy memory from address src to dst synchronously. -void GpuMemcpySync(void *dst, const void *src, size_t count, - enum cudaMemcpyKind kind); - //! Copy memory from one device to another device. void GpuMemcpyPeer(void *dst, int dst_device, const void *src, int src_device, size_t count, cudaStream_t stream);
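A worked example of the new GpuMaxChunkSize arithmetic: 5% of total memory is reserved for page tables and the like, then the allocation is a fraction of the remainder (the 0.92 here is only an example flag value):

```cpp
#include <cstddef>
#include <iostream>

// Worked example of the GpuMaxChunkSize arithmetic in the hunk above.
int main() {
  const size_t MB = 1 << 20;
  size_t total = 12288 * MB;                    // e.g. a 12 GB card
  double fraction_of_gpu_memory_to_use = 0.92;  // example flag value
  size_t reserving = static_cast<size_t>(0.05 * total);
  size_t allocating =
      static_cast<size_t>(fraction_of_gpu_memory_to_use * (total - reserving));
  std::cout << "reserving " << reserving / MB << " MB, allocating "
            << allocating / MB << " MB\n";  // ~614 MB and ~10740 MB
}
```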
+
+#include <thrust/device_vector.h>
+#include <memory>
+#include <vector>

 #include "glog/logging.h"
 #include "gtest/gtest.h"
+
+#include "paddle/framework/init.h"
 #include "paddle/platform/device_context.h"
 #include "paddle/platform/dynload/nccl.h"
 #include "paddle/platform/enforce.h"
 #include "paddle/platform/gpu_info.h"

-#include <thrust/device_vector.h>
-#include <memory>
-#include <vector>
-
 static int dev_count = 0;

 namespace paddle {
@@ -32,6 +34,7 @@ TEST(NCCL, init) {
   std::vector<ncclComm_t> comms;
   comms.resize(dev_count);
   PADDLE_ENFORCE(dynload::ncclCommInitAll(comms.data(), dev_count, nullptr));
+
   for (int i = 0; i < dev_count; ++i) {
     dynload::ncclCommDestroy(comms[i]);
   }
@@ -47,7 +50,7 @@ struct PerThreadData {
   T* RecvBuff() { return thrust::raw_pointer_cast(recv_buff.data()); }

-  PerThreadData(int gpu_id, size_t size) : dev_ctx(GPUPlace(gpu_id)) {
+  PerThreadData(int gpu_id, size_t size) : dev_ctx(CUDAPlace(gpu_id)) {
     send_buff.resize(size);
     for (size_t i = 0; i < size; ++i) {
       send_buff[i] = static_cast<T>(i);
@@ -62,7 +65,7 @@ TEST(NCCL, all_reduce) {
   std::vector<ncclComm_t> comms;
   comms.resize(dev_count);
   VLOG(1) << "Initializing ncclComm";
-  PADDLE_ENFORCE(dynload::ncclCommInitAll(comms.data(), dev_count, nullptr));
+  dynload::ncclCommInitAll(comms.data(), dev_count, nullptr);
   VLOG(1) << "ncclComm initialized";
   VLOG(1) << "Creating thread data";
   std::vector<std::unique_ptr<PerThreadData<double>>> data;
@@ -131,6 +134,18 @@ int main(int argc, char** argv) {
         << dev_count;
     return 0;
   }
+
+  std::vector<paddle::platform::Place> places;
+
+  places.emplace_back(paddle::platform::CPUPlace());
+  int count = paddle::platform::GetCUDADeviceCount();
+  for (int i = 0; i < count; ++i) {
+    places.emplace_back(paddle::platform::CUDAPlace(i));
+  }
+
+  VLOG(0) << " DeviceCount " << count;
+  paddle::platform::DeviceContextPool::Create(places);
+
   testing::InitGoogleTest(&argc, argv);
   return RUN_ALL_TESTS();
 }
diff --git a/paddle/platform/place.cc b/paddle/platform/place.cc
index 856e54df89..249527e3e1 100644
--- a/paddle/platform/place.cc
+++ b/paddle/platform/place.cc
@@ -1,16 +1,16 @@
 /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at

-   http://www.apache.org/licenses/LICENSE-2.0
+    http://www.apache.org/licenses/LICENSE-2.0

-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License. */
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
 #include "paddle/platform/place.h"

@@ -23,7 +23,9 @@ class PlacePrinter : public boost::static_visitor<> {
  public:
   explicit PlacePrinter(std::ostream &os) : os_(os) {}
   void operator()(const CPUPlace &) { os_ << "CPUPlace"; }
-  void operator()(const GPUPlace &p) { os_ << "GPUPlace(" << p.device << ")"; }
+  void operator()(const CUDAPlace &p) {
+    os_ << "CUDAPlace(" << p.device << ")";
+  }

  private:
   std::ostream &os_;
@@ -36,16 +38,15 @@ static Place the_default_place;
 void set_place(const Place &place) { the_default_place = place; }
 const Place &get_place() { return the_default_place; }

-const GPUPlace default_gpu() { return GPUPlace(0); }
+const CUDAPlace default_gpu() { return CUDAPlace(0); }
 const CPUPlace default_cpu() { return CPUPlace(); }

 bool is_gpu_place(const Place &p) {
-  return boost::apply_visitor(IsGPUPlace(), p);
-}
-bool is_cpu_place(const Place &p) {
-  return !boost::apply_visitor(IsGPUPlace(), p);
+  return boost::apply_visitor(IsCUDAPlace(), p);
 }

+bool is_cpu_place(const Place &p) { return !is_gpu_place(p); }
+
 bool places_are_same_class(const Place &p1, const Place &p2) {
   return p1.which() == p2.which();
 }
diff --git a/paddle/platform/place.h b/paddle/platform/place.h
index 5370360a7d..d25eaa689f 100644
--- a/paddle/platform/place.h
+++ b/paddle/platform/place.h
@@ -31,39 +31,31 @@ struct CPUPlace {
   inline bool operator!=(const CPUPlace &) const { return false; }
 };

-struct GPUPlace {
-  GPUPlace() : GPUPlace(0) {}
-  explicit GPUPlace(int d) : device(d) {}
+struct CUDAPlace {
+  CUDAPlace() : CUDAPlace(0) {}
+  explicit CUDAPlace(int d) : device(d) {}

   inline int GetDeviceId() const { return device; }
   // needed for variant equality comparison
-  inline bool operator==(const GPUPlace &o) const { return device == o.device; }
-  inline bool operator!=(const GPUPlace &o) const { return !(*this == o); }
+  inline bool operator==(const CUDAPlace &o) const {
+    return device == o.device;
+  }
+  inline bool operator!=(const CUDAPlace &o) const { return !(*this == o); }

   int device;
 };

-struct IsGPUPlace : public boost::static_visitor<bool> {
+struct IsCUDAPlace : public boost::static_visitor<bool> {
   bool operator()(const CPUPlace &) const { return false; }
-  bool operator()(const GPUPlace &gpu) const { return true; }
+  bool operator()(const CUDAPlace &gpu) const { return true; }
 };

-// Define the max number of Place in bit length. i.e., the max number of places
-// should be less equal than 2^(NUM_PLACE_TYPE_LIMIT_IN_BIT)
-#define NUM_PLACE_TYPE_LIMIT_IN_BIT 4
-
-typedef boost::variant<GPUPlace, CPUPlace> Place;
-
-// static check number of place types is less equal than
-// 2^(NUM_PLACE_TYPE_LIMIT_IN_BIT)
-BOOST_MPL_ASSERT((boost::mpl::less_equal<
-                  Place::types::size,
-                  boost::mpl::long_<1 << NUM_PLACE_TYPE_LIMIT_IN_BIT>>));
+typedef boost::variant<CUDAPlace, CPUPlace> Place;

 void set_place(const Place &);
 const Place &get_place();

-const GPUPlace default_gpu();
+const CUDAPlace default_gpu();
 const CPUPlace default_cpu();

 bool is_gpu_place(const Place &);
diff --git a/paddle/platform/place_test.cc b/paddle/platform/place_test.cc
index 33e2e5a439..4f1eba01df 100644
--- a/paddle/platform/place_test.cc
+++ b/paddle/platform/place_test.cc
@@ -4,7 +4,7 @@

 TEST(Place, Equality) {
   paddle::platform::CPUPlace cpu;
-  paddle::platform::GPUPlace g0(0), g1(1), gg0(0);
+  paddle::platform::CUDAPlace g0(0), g1(1), gg0(0);

   EXPECT_EQ(cpu, cpu);
   EXPECT_EQ(g0, g0);
@@ -22,6 +22,7 @@ TEST(Place, Default) {
   EXPECT_TRUE(paddle::platform::is_gpu_place(paddle::platform::default_gpu()));
   EXPECT_TRUE(paddle::platform::is_cpu_place(paddle::platform::default_cpu()));

+  EXPECT_FALSE(paddle::platform::is_cpu_place(paddle::platform::get_place()));
   paddle::platform::set_place(paddle::platform::CPUPlace());
   EXPECT_TRUE(paddle::platform::is_cpu_place(paddle::platform::get_place()));
 }
@@ -29,8 +30,8 @@ TEST(Place, Default) {
 TEST(Place, Print) {
   {
     std::stringstream ss;
-    ss << paddle::platform::GPUPlace(1);
-    EXPECT_EQ("GPUPlace(1)", ss.str());
+    ss << paddle::platform::CUDAPlace(1);
+    EXPECT_EQ("CUDAPlace(1)", ss.str());
   }
   {
     std::stringstream ss;
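[Editor's note] A quick sketch of the renamed place API above (illustrative only; assumes the post-patch place.h):

    #include "paddle/platform/place.h"

    void PlaceDemo() {
      paddle::platform::Place p = paddle::platform::CUDAPlace(0);
      if (paddle::platform::is_gpu_place(p)) {
        // boost::get unwraps the variant; it throws boost::bad_get on mismatch.
        auto gpu = boost::get<paddle::platform::CUDAPlace>(p);
        int device_id = gpu.GetDeviceId();  // 0
        (void)device_id;
      }
    }

Equality and printing go through the variant visitors (IsCUDAPlace, PlacePrinter) defined above, which is presumably why the NUM_PLACE_TYPE_LIMIT_IN_BIT bit-packing assertion could be dropped.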
diff --git a/paddle/platform/transform.h b/paddle/platform/transform.h
index 148ebaed3d..a88902b164 100644
--- a/paddle/platform/transform.h
+++ b/paddle/platform/transform.h
@@ -1,16 +1,16 @@
 /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at

-   http://www.apache.org/licenses/LICENSE-2.0
+    http://www.apache.org/licenses/LICENSE-2.0

-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License. */
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */

 #pragma once

diff --git a/paddle/platform/transform_test.cu b/paddle/platform/transform_test.cu
index d36eac8379..af9204a0a7 100644
--- a/paddle/platform/transform_test.cu
+++ b/paddle/platform/transform_test.cu
@@ -1,16 +1,16 @@
 /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at

-   http://www.apache.org/licenses/LICENSE-2.0
+    http://www.apache.org/licenses/LICENSE-2.0

-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License. */
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */

 #include <gtest/gtest.h>
 #include "paddle/memory/memcpy.h"
@@ -49,15 +49,15 @@ TEST(Transform, CPUUnary) {
 TEST(Transform, GPUUnary) {
   using namespace paddle::platform;
   using namespace paddle::memory;
-  GPUPlace gpu0(0);
+  CUDAPlace gpu0(0);
   CUDADeviceContext ctx(gpu0);
   float cpu_buf[4] = {0.1, 0.2, 0.3, 0.4};
   float* gpu_buf = static_cast<float*>(Alloc(gpu0, sizeof(float) * 4));
-  Copy(gpu0, gpu_buf, CPUPlace(), cpu_buf, sizeof(cpu_buf));
+  Copy(gpu0, gpu_buf, CPUPlace(), cpu_buf, sizeof(cpu_buf), ctx.stream());
   Transform<CUDADeviceContext> trans;
   trans(ctx, gpu_buf, gpu_buf + 4, gpu_buf, Scale<float>(10));
   ctx.Wait();
-  Copy(CPUPlace(), cpu_buf, gpu0, gpu_buf, sizeof(cpu_buf));
+  Copy(CPUPlace(), cpu_buf, gpu0, gpu_buf, sizeof(cpu_buf), ctx.stream());
   Free(gpu0, gpu_buf);
   for (int i = 0; i < 4; ++i) {
     ASSERT_NEAR(cpu_buf[i], static_cast<float>(i + 1), 1e-5);
@@ -80,14 +80,14 @@ TEST(Transform, GPUBinary) {
   using namespace paddle::platform;
   using namespace paddle::memory;
   int buf[4] = {1, 2, 3, 4};
-  GPUPlace gpu0(0);
+  CUDAPlace gpu0(0);
   CUDADeviceContext ctx(gpu0);
   int* gpu_buf = static_cast<int*>(Alloc(gpu0, sizeof(buf)));
-  Copy(gpu0, gpu_buf, CPUPlace(), buf, sizeof(buf));
+  Copy(gpu0, gpu_buf, CPUPlace(), buf, sizeof(buf), ctx.stream());
   Transform<CUDADeviceContext> trans;
   trans(ctx, gpu_buf, gpu_buf + 4, gpu_buf, gpu_buf, Multiply<int>());
   ctx.Wait();
-  Copy(CPUPlace(), buf, gpu0, gpu_buf, sizeof(buf));
+  Copy(CPUPlace(), buf, gpu0, gpu_buf, sizeof(buf), ctx.stream());
   Free(gpu0, gpu_buf);
   for (int i = 0; i < 4; ++i) {
     ASSERT_EQ((i + 1) * (i + 1), buf[i]);
diff --git a/paddle/platform/variant.h b/paddle/platform/variant.h
index 619897ca19..ea6ef8fddf 100644
--- a/paddle/platform/variant.h
+++ b/paddle/platform/variant.h
@@ -1,19 +1,32 @@
 /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at

-   http://www.apache.org/licenses/LICENSE-2.0
+    http://www.apache.org/licenses/LICENSE-2.0

-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
*/ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once +#ifdef __CUDACC__ +#ifdef __CUDACC_VER_MAJOR__ +// CUDA 9 define `__CUDACC_VER__` as a warning message, manually define +// __CUDACC_VER__ instead. +#undef __CUDACC_VER__ + +#define __CUDACC_VER__ \ + (__CUDACC_VER_MAJOR__ * 10000 + __CUDACC_VER_MINOR__ * 100 + \ + __CUDACC_VER_BUILD__) +#endif + +#endif + #include #ifdef PADDLE_WITH_CUDA diff --git a/paddle/pybind/CMakeLists.txt b/paddle/pybind/CMakeLists.txt index fd55f410d3..6afed7eec7 100644 --- a/paddle/pybind/CMakeLists.txt +++ b/paddle/pybind/CMakeLists.txt @@ -1,7 +1,7 @@ if(WITH_PYTHON) cc_library(paddle_pybind SHARED - SRCS pybind.cc exception.cc protobuf.cc - DEPS pybind python backward proto_desc paddle_memory executor prune + SRCS pybind.cc exception.cc protobuf.cc const_value.cc + DEPS pybind python backward proto_desc paddle_memory executor prune init ${GLOB_OP_LIB}) endif(WITH_PYTHON) diff --git a/paddle/pybind/const_value.cc b/paddle/pybind/const_value.cc new file mode 100644 index 0000000000..761635aa5e --- /dev/null +++ b/paddle/pybind/const_value.cc @@ -0,0 +1,34 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "const_value.h" +#include "paddle/framework/operator.h" + +namespace paddle { +namespace pybind { + +void BindConstValue(pybind11::module& m) { + m.def("kEmptyVarName", [] { return framework::kEmptyVarName; }); + m.def("kTempVarName", [] { return framework::kTempVarName; }); + m.def("kGradVarSuffix", [] { return framework::kGradVarSuffix; }); + m.def("kZeroVarSuffix", [] { return framework::kZeroVarSuffix; }); + + // for kernel_hint key + m.def("kUseCPU", [] { return framework::kUseCPU; }); + m.def("kUseCUDNN", [] { return framework::kUseCUDNN; }); + m.def("kUseMKLDNN", [] { return framework::kUseMKLDNN; }); +} + +} // namespace pybind +} // namespace paddle diff --git a/paddle/pybind/const_value.h b/paddle/pybind/const_value.h new file mode 100644 index 0000000000..3d57c972a9 --- /dev/null +++ b/paddle/pybind/const_value.h @@ -0,0 +1,26 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once +#include +#include "paddle/platform/enforce.h" +#include "pybind11/pybind11.h" + +namespace py = pybind11; + +namespace paddle { +namespace pybind { +extern void BindConstValue(pybind11::module& m); +} // namespace pybind +} // namespace paddle diff --git a/paddle/pybind/exception.cc b/paddle/pybind/exception.cc index ff79b12ee4..e29ac3ebab 100644 --- a/paddle/pybind/exception.cc +++ b/paddle/pybind/exception.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/pybind/exception.h" diff --git a/paddle/pybind/exception.h b/paddle/pybind/exception.h index 70beac1460..436ddd5707 100644 --- a/paddle/pybind/exception.h +++ b/paddle/pybind/exception.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ #pragma once #include diff --git a/paddle/pybind/print_operators_doc.cc b/paddle/pybind/print_operators_doc.cc index 24f2a9383f..f4f281229e 100644 --- a/paddle/pybind/print_operators_doc.cc +++ b/paddle/pybind/print_operators_doc.cc @@ -31,31 +31,32 @@ std::string Escape(const std::string& s) { return r; } -std::string AttrType(paddle::framework::AttrType at) { +std::string AttrType(paddle::framework::proto::AttrType at) { switch (at) { - case paddle::framework::INT: + case paddle::framework::proto::INT: return "int"; - case paddle::framework::FLOAT: + case paddle::framework::proto::FLOAT: return "float"; - case paddle::framework::STRING: + case paddle::framework::proto::STRING: return "string"; - case paddle::framework::BOOLEAN: + case paddle::framework::proto::BOOLEAN: return "bool"; - case paddle::framework::INTS: + case paddle::framework::proto::INTS: return "int array"; - case paddle::framework::FLOATS: + case paddle::framework::proto::FLOATS: return "float array"; - case paddle::framework::STRINGS: + case paddle::framework::proto::STRINGS: return "string array"; - case paddle::framework::BOOLEANS: + case paddle::framework::proto::BOOLEANS: return "bool array"; - case paddle::framework::BLOCK: + case paddle::framework::proto::BLOCK: return "block id"; } return "UNKNOWN"; // not possible } -void PrintVar(const paddle::framework::OpProto::Var& v, std::stringstream& ss) { +void PrintVar(const paddle::framework::proto::OpProto::Var& v, + std::stringstream& ss) { ss << " { " << "\n" << " \"name\" : \"" << Escape(v.name()) << "\",\n" @@ -65,7 +66,7 @@ void PrintVar(const paddle::framework::OpProto::Var& v, std::stringstream& ss) { << " },"; } -void PrintAttr(const paddle::framework::OpProto::Attr& a, +void PrintAttr(const paddle::framework::proto::OpProto::Attr& a, std::stringstream& ss) { ss << " { " << "\n" @@ -81,7 +82,7 @@ void PrintOpProto(const std::string& type, std::stringstream& ss) { std::cerr << "Processing " << type << "\n"; - const paddle::framework::OpProto* p = opinfo.proto_; + const paddle::framework::proto::OpProto* p = opinfo.proto_; if (p == nullptr) { return; // It is possible that an operator doesn't have OpProto. 
 }
diff --git a/paddle/pybind/protobuf.cc b/paddle/pybind/protobuf.cc
index 6c8f06cccb..f105370f22 100644
--- a/paddle/pybind/protobuf.cc
+++ b/paddle/pybind/protobuf.cc
@@ -108,21 +108,21 @@ static py::bytes SerializeMessage(T &self) {

 // Bind Methods
 void BindProgramDesc(py::module &m) {
-  py::class_<ProgramDescBind>(m, "ProgramDesc", "")
+  py::class_<ProgramDesc>(m, "ProgramDesc", "")
       .def(py::init<>())
       .def("__init__",
-           [](ProgramDescBind &self, const ProgramDescBind &other) {
-             new (&self) ProgramDescBind(other);
+           [](ProgramDesc &self, const ProgramDesc &other) {
+             new (&self) ProgramDesc(other);
            })
       .def("__init__",
-           [](ProgramDescBind &self, const py::bytes &binary_str) {
+           [](ProgramDesc &self, const py::bytes &binary_str) {
              std::string str(binary_str);
-             new (&self) ProgramDescBind(str);
+             new (&self) ProgramDesc(str);
            })
-      .def("append_block", &ProgramDescBind::AppendBlock,
+      .def("append_block", &ProgramDesc::AppendBlock,
            py::return_value_policy::reference)
       .def("append_backward",
-           [](ProgramDescBind &program_desc, const VarDescBind &target,
+           [](ProgramDesc &program_desc, const VarDesc &target,
               const std::unordered_set<std::string> &no_grad_vars) {
              ParamGradInfoMap param_grad_map =
                  AppendBackward(program_desc, target, no_grad_vars);
@@ -138,13 +138,13 @@ void BindProgramDesc(py::module &m) {
             }
             return retv;
           })
-      .def("block", &ProgramDescBind::MutableBlock,
+      .def("block", &ProgramDesc::MutableBlock,
            py::return_value_policy::reference)
-      .def("num_blocks", &ProgramDescBind::Size)
-      .def("serialize_to_string", SerializeMessage<ProgramDescBind>)
+      .def("num_blocks", &ProgramDesc::Size)
+      .def("serialize_to_string", SerializeMessage<ProgramDesc>)
       .def("parse_from_string",
-           [](ProgramDescBind &program_desc, const std::string &data) {
-             ProgramDesc *desc = program_desc.Proto();
+           [](ProgramDesc &program_desc, const std::string &data) {
+             proto::ProgramDesc *desc = program_desc.Proto();
              PADDLE_ENFORCE(desc->ParseFromString(data),
                             "Fail to parse ProgramDesc from string. This could "
                             "be a bug of Paddle.");
@@ -152,109 +152,115 @@ void BindProgramDesc(py::module &m) {
 }

 void BindBlockDesc(py::module &m) {
-  py::class_<BlockDescBind>(m, "BlockDesc", "")
-      .def_property_readonly("id", &BlockDescBind::ID)
-      .def_property_readonly("parent", &BlockDescBind::Parent)
-      .def("append_op", &BlockDescBind::AppendOp,
+  py::class_<BlockDesc>(m, "BlockDesc", "")
+      .def_property_readonly("id", &BlockDesc::ID)
+      .def_property_readonly("parent", &BlockDesc::Parent)
+      .def("append_op", &BlockDesc::AppendOp,
            py::return_value_policy::reference)
-      .def("prepend_op", &BlockDescBind::PrependOp,
+      .def("prepend_op", &BlockDesc::PrependOp,
            py::return_value_policy::reference)
+      .def("remove_op", &BlockDesc::RemoveOp)
       .def("var",
-           [](BlockDescBind &self, py::bytes byte_name) {
+           [](BlockDesc &self, py::bytes byte_name) {
              std::string name = byte_name;
              return self.Var(name);
            },
           py::return_value_policy::reference)
       .def("has_var",
-           [](BlockDescBind &self, py::bytes byte_name) {
+           [](BlockDesc &self, py::bytes byte_name) {
              std::string name = byte_name;
              return self.HasVar(name);
            })
       .def("find_var",
-           [](BlockDescBind &self, py::bytes byte_name) {
+           [](BlockDesc &self, py::bytes byte_name) {
              std::string name = byte_name;
              return self.FindVar(name);
            },
           py::return_value_policy::reference)
-      .def("all_vars", &BlockDescBind::AllVars,
-           py::return_value_policy::reference)
-      .def("op_size", &BlockDescBind::OpSize)
-      .def("op", &BlockDescBind::Op, py::return_value_policy::reference)
-      .def("serialize_to_string", SerializeMessage<BlockDescBind>);
+      .def("all_vars", &BlockDesc::AllVars, py::return_value_policy::reference)
+      .def("op_size", &BlockDesc::OpSize)
+      .def("op", &BlockDesc::Op, py::return_value_policy::reference)
+      .def("serialize_to_string", SerializeMessage<BlockDesc>);
 }

 void BindVarDsec(py::module &m) {
-  py::enum_<DataType>(m, "DataType", "")
-      .value("BOOL", DataType::BOOL)
-      .value("INT16", DataType::INT16)
-      .value("INT32", DataType::INT32)
-      .value("INT64", DataType::INT64)
-      .value("FP16", DataType::FP16)
-      .value("FP32", DataType::FP32)
-      .value("FP64", DataType::FP64);
+  py::enum_<proto::DataType>(m, "DataType", "")
+      .value("BOOL", proto::DataType::BOOL)
+      .value("INT16", proto::DataType::INT16)
+      .value("INT32", proto::DataType::INT32)
+      .value("INT64", proto::DataType::INT64)
+      .value("FP16", proto::DataType::FP16)
+      .value("FP32", proto::DataType::FP32)
+      .value("FP64", proto::DataType::FP64);

-  py::class_<VarDescBind> var_desc(m, "VarDesc", "");
+  py::class_<VarDesc> var_desc(m, "VarDesc", "");
   var_desc
       .def("name",
-           [](const VarDescBind &self) {
+           [](const VarDesc &self) {
              py::bytes name = self.Name();
              return name;
            },
           py::return_value_policy::reference)
-      .def("set_shape", &VarDescBind::SetShape)
-      .def("set_dtype", &VarDescBind::SetDataType)
-      .def("shape", &VarDescBind::Shape, py::return_value_policy::reference)
-      .def("dtype", &VarDescBind::GetDataType)
-      .def("lod_level", &VarDescBind::GetLodLevel)
-      .def("set_lod_level", &VarDescBind::SetLoDLevel)
-      .def("type", &VarDescBind::GetType)
-      .def("set_type", &VarDescBind::SetType)
-      .def("serialize_to_string", SerializeMessage<VarDescBind>)
-      .def("persistable", &VarDescBind::Persistable)
-      .def("set_persistable", &VarDescBind::SetPersistable);
+      .def("set_shape", &VarDesc::SetShape)
+      .def("set_dtype", &VarDesc::SetDataType)
+      .def("shape", &VarDesc::Shape, py::return_value_policy::reference)
+      .def("dtype", &VarDesc::GetDataType)
+      .def("lod_level", &VarDesc::GetLodLevel)
+      .def("set_lod_level", &VarDesc::SetLoDLevel)
+      .def("type", &VarDesc::GetType)
+      .def("set_type", &VarDesc::SetType)
+      .def("serialize_to_string", SerializeMessage<VarDesc>)
+      .def("persistable", &VarDesc::Persistable)
+      .def("set_persistable", &VarDesc::SetPersistable);

-  py::enum_<VarDesc::VarType>(var_desc, "VarType", "")
-      .value("LOD_TENSOR", VarDesc::LOD_TENSOR)
-      .value("SELECTED_ROWS", VarDesc::SELECTED_ROWS)
-      .value("FEED_MINIBATCH", VarDesc::FEED_MINIBATCH)
-      .value("FETCH_LIST", VarDesc::FETCH_LIST)
-      .value("STEP_SCOPES", VarDesc::STEP_SCOPES)
-      .value("LOD_RANK_TABLE", VarDesc::LOD_RANK_TABLE)
-      .value("LOD_TENSOR_ARRAY", VarDesc::LOD_TENSOR_ARRAY);
+  py::enum_<proto::VarDesc::VarType>(var_desc, "VarType", "")
+      .value("LOD_TENSOR", proto::VarDesc::LOD_TENSOR)
+      .value("SELECTED_ROWS", proto::VarDesc::SELECTED_ROWS)
+      .value("FEED_MINIBATCH", proto::VarDesc::FEED_MINIBATCH)
+      .value("FETCH_LIST", proto::VarDesc::FETCH_LIST)
+      .value("STEP_SCOPES", proto::VarDesc::STEP_SCOPES)
+      .value("LOD_RANK_TABLE", proto::VarDesc::LOD_RANK_TABLE)
+      .value("LOD_TENSOR_ARRAY", proto::VarDesc::LOD_TENSOR_ARRAY);
 }

 void BindOpDesc(py::module &m) {
-  py::enum_<AttrType>(m, "AttrType", "")
-      .value("INT", AttrType::INT)
-      .value("INTS", AttrType::INTS)
-      .value("FLOAT", AttrType::FLOAT)
-      .value("FLOATS", AttrType::FLOATS)
-      .value("STRING", AttrType::STRING)
-      .value("STRINGS", AttrType::STRINGS)
-      .value("BOOL", AttrType::BOOLEAN)
-      .value("BOOLS", AttrType::BOOLEANS)
-      .value("BLOCK", AttrType::BLOCK);
+  py::enum_<proto::AttrType>(m, "AttrType", "")
+      .value("INT", proto::AttrType::INT)
+      .value("INTS", proto::AttrType::INTS)
+      .value("FLOAT", proto::AttrType::FLOAT)
+      .value("FLOATS", proto::AttrType::FLOATS)
+      .value("STRING", proto::AttrType::STRING)
+      .value("STRINGS", proto::AttrType::STRINGS)
+      .value("BOOL", proto::AttrType::BOOLEAN)
+      .value("BOOLS", proto::AttrType::BOOLEANS)
+      .value("BLOCK", proto::AttrType::BLOCK);

-  py::class_<OpDescBind> op_desc(m, "OpDesc", "");
-  op_desc.def("type", &OpDescBind::Type)
-      .def("set_type", &OpDescBind::SetType)
-      .def("input", &OpDescBind::Input)
-      .def("input_names", &OpDescBind::InputNames)
-      .def("set_input", &OpDescBind::SetInput)
-      .def("output", &OpDescBind::Output)
-      .def("output_names", &OpDescBind::OutputNames)
-      .def("set_output", &OpDescBind::SetOutput)
-      .def("has_attr", &OpDescBind::HasAttr)
-      .def("attr_type", &OpDescBind::GetAttrType)
-      .def("attr_names", &OpDescBind::AttrNames)
-      .def("set_attr", &OpDescBind::SetAttr)
-      .def("attr", &OpDescBind::GetAttr)
-      .def("set_block_attr", &OpDescBind::SetBlockAttr)
-      .def("block_attr", &OpDescBind::GetBlockAttr)
-      .def("check_attrs", &OpDescBind::CheckAttrs)
-      .def("infer_shape", &OpDescBind::InferShape)
-      .def("infer_var_type", &OpDescBind::InferVarType)
-      .def("serialize_to_string", SerializeMessage<OpDescBind>);
+  py::class_<OpDesc> op_desc(m, "OpDesc", "");
+  op_desc.def("type", &OpDesc::Type)
+      .def("set_type", &OpDesc::SetType)
+      .def("input", &OpDesc::Input)
+      .def("input_names", &OpDesc::InputNames)
+      .def("set_input", &OpDesc::SetInput)
+      .def("output", &OpDesc::Output)
+      .def("output_names", &OpDesc::OutputNames)
+      .def("set_output", &OpDesc::SetOutput)
+      .def("has_attr", &OpDesc::HasAttr)
+      .def("attr_type", &OpDesc::GetAttrType)
+      .def("attr_names", &OpDesc::AttrNames)
+      .def("set_attr", &OpDesc::SetAttr)
+      .def("attr", &OpDesc::GetAttr)
+      .def("set_block_attr", &OpDesc::SetBlockAttr)
+      .def("set_serialized_attr",
+           [](OpDesc &self, const std::string &name,
+              const py::bytes &serialized) {
+             std::string ser(serialized);
+             self.SetAttr(name, ser);
+           })
+      .def("block_attr", &OpDesc::GetBlockAttr)
+      .def("check_attrs", &OpDesc::CheckAttrs)
+      .def("infer_shape", &OpDesc::InferShape)
+      .def("infer_var_type", &OpDesc::InferVarType)
+      .def("serialize_to_string", SerializeMessage<OpDesc>);
 }

 }  // namespace pybind
diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc
index c16d3e0cbe..07e38476e6 100644
--- a/paddle/pybind/pybind.cc
+++ b/paddle/pybind/pybind.cc
@@ -16,11 +16,11 @@ limitations under the License. */
 #include <mutex>  // for call_once
 #include

-#include "gflags/gflags.h"
 #include "paddle/framework/backward.h"
 #include "paddle/framework/executor.h"
 #include "paddle/framework/feed_fetch_method.h"
 #include "paddle/framework/framework.pb.h"
+#include "paddle/framework/init.h"
 #include "paddle/framework/lod_rank_table.h"
 #include "paddle/framework/lod_tensor.h"
 #include "paddle/framework/lod_tensor_array.h"
@@ -30,6 +30,7 @@ limitations under the License. */
 #include "paddle/operators/net_op.h"
 #include "paddle/platform/enforce.h"
 #include "paddle/platform/place.h"
+#include "paddle/pybind/const_value.h"
 #include "paddle/pybind/exception.h"
 #include "paddle/pybind/pybind.h"
 #include "paddle/pybind/tensor_py.h"
@@ -51,24 +52,6 @@ static size_t UniqueIntegerGenerator(const std::string &prefix) {
   return generators[prefix].fetch_add(1);
 }

-std::once_flag gflags_init_flag;
-
-// TODO(qijun) move init gflags to init.cc
-void InitGflags(std::vector<std::string> &argv) {
-  std::call_once(gflags_init_flag, [&]() {
-    int argc = argv.size();
-    char **arr = new char *[argv.size()];
-    std::string line;
-    for (size_t i = 0; i < argv.size(); i++) {
-      arr[i] = &argv[i][0];
-      line += argv[i];
-      line += ' ';
-    }
-    google::ParseCommandLineFlags(&argc, &arr, true);
-    VLOG(1) << "Init commandline: " << line;
-  });
-}
-
 bool IsCompileGPU() {
 #ifndef PADDLE_WITH_CUDA
   return false;
@@ -95,8 +78,12 @@ PYBIND11_PLUGIN(core) {
            [](Tensor &self, const std::vector<int64_t> &dim) {
              self.Resize(make_ddim(dim));
            })
+      .def("set_layout",
+           [](Tensor &self, const std::string &layout) {
+             self.set_layout(StringToDataLayout(layout));
+           })
       .def("alloc_float",
-           [](Tensor &self, paddle::platform::GPUPlace &place) {
+           [](Tensor &self, paddle::platform::CUDAPlace &place) {
              self.mutable_data<float>(place);
            })
       .def("alloc_float",
            [](Tensor &self, paddle::platform::CPUPlace &place) {
              self.mutable_data<float>(place);
            })
       .def("alloc_int",
-           [](Tensor &self, paddle::platform::GPUPlace &place) {
+           [](Tensor &self, paddle::platform::CUDAPlace &place) {
              self.mutable_data<int>(place);
            })
       .def("set", PyCPUTensorSetFromArray<float>)
@@ -282,20 +269,37 @@ All parameter, weight, gradient are variables in Paddle.
             }
             return ret_values;
           });
-  m.def("prune", [](const ProgramDescBind &origin,
+  m.def("get_grad_op_descs",
+        [](const OpDesc &op_desc,
+           const std::unordered_set<std::string> &no_grad_set,
+           std::unordered_map<std::string, std::string> &grad_to_var,
+           const std::vector<BlockDesc *> &grad_sub_block) {
+          std::vector<std::unique_ptr<OpDesc>> grad_op_descs =
+              framework::OpInfoMap::Instance()
+                  .Get(op_desc.Type())
+                  .GradOpMaker()(op_desc, no_grad_set, &grad_to_var,
+                                 grad_sub_block);
+          std::vector<OpDesc *> grad_op_desc_ptrs(grad_op_descs.size());
+          std::transform(
+              grad_op_descs.begin(), grad_op_descs.end(),
+              grad_op_desc_ptrs.begin(),
+              [](std::unique_ptr<OpDesc> &p) { return p.release(); });
+          return grad_op_desc_ptrs;
+        });
+  m.def("prune", [](const ProgramDesc &origin,
                     const std::vector<std::array<size_t, 2>> &targets) {
-    ProgramDescBind prog_with_targets(origin);
+    ProgramDesc prog_with_targets(origin);
     for (const auto &t : targets) {
       prog_with_targets.MutableBlock(t[0])->Op(t[1])->MarkAsTarget();
     }
-    ProgramDesc pruned_desc;
+    proto::ProgramDesc pruned_desc;
     Prune(*prog_with_targets.Proto(), &pruned_desc);
-    return new ProgramDescBind(pruned_desc);
+    return new ProgramDesc(pruned_desc);
   });
-  m.def("inference_optimize", [](ProgramDescBind &origin) {
-    ProgramDesc pruned_desc;
+  m.def("inference_optimize", [](ProgramDesc &origin) {
+    proto::ProgramDesc pruned_desc;
     InferenceOptimize(*(origin.Proto()), &pruned_desc);
-    return new ProgramDescBind(pruned_desc);
+    return new ProgramDesc(pruned_desc);
   });
   m.def_submodule(
       "var_names",
@@ -310,10 +314,10 @@ All parameter, weight, gradient are variables in Paddle.
                      return new paddle::platform::CPUDeviceContext();
                    })
       .def_static("create",
-                  [](paddle::platform::GPUPlace& place)
+                  [](paddle::platform::CUDAPlace& place)
                       -> paddle::platform::DeviceContext* {
 #ifndef PADDLE_WITH_CUDA
-                    PADDLE_THROW("GPUPlace is not supported in CPU device.");
+                    PADDLE_THROW("CUDAPlace is not supported in CPU device.");
 #else
                     return new paddle::platform::CUDADeviceContext(place);
 #endif
@@ -323,9 +327,9 @@ All parameter, weight, gradient are variables in Paddle.
 #ifdef PADDLE_WITH_CUDA
   py::class_<platform::Communicator>(m, "Communicator").def(py::init<>());
 #endif
-  py::class_<platform::GPUPlace>(m, "GPUPlace")
+  py::class_<platform::CUDAPlace>(m, "CUDAPlace")
       .def(py::init<int>())
-      .def("__str__", string::to_string<const platform::GPUPlace &>);
+      .def("__str__", string::to_string<const platform::CUDAPlace &>);

   py::class_<paddle::platform::CPUPlace>(m, "CPUPlace")
       .def(py::init<>())
@@ -338,14 +342,14 @@ All parameter, weight, gradient are variables in Paddle.
             self = cpu_place;
           })
       .def("set_place",
-           [](platform::Place &self, const platform::GPUPlace &gpu_place) {
+           [](platform::Place &self, const platform::CUDAPlace &gpu_place) {
             self = gpu_place;
           });

   py::class_<OperatorBase>(m, "Operator")
       .def_static("create",
                   [](py::bytes protobin) {
-                    OpDesc desc;
+                    proto::OpDesc desc;
                     PADDLE_ENFORCE(desc.ParsePartialFromString(protobin),
                                    "Cannot parse user input to OpDesc");
                     PADDLE_ENFORCE(desc.IsInitialized(),
@@ -360,10 +364,10 @@ All parameter, weight, gradient are variables in Paddle.
           })
       .def("run",
            [](OperatorBase &self, const Scope &scope,
-               const platform::DeviceContext &dev_ctx) {
-             self.Run(scope, dev_ctx);
-             dev_ctx.Wait();
-           })
+               const platform::CPUPlace &place) { self.Run(scope, place); })
+      .def("run",
+           [](OperatorBase &self, const Scope &scope,
+              const platform::CUDAPlace &place) { self.Run(scope, place); })
       .def("type",
            [](const OperatorBase &op) -> std::string { return op.Type(); })
       .def("outputs",
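[Editor's note] With the two run overloads above, Python hands an operator a Place rather than a DeviceContext, and the operator resolves its own context internally. A hedged C++ equivalent, assuming a pre-built proto::OpDesc named desc:

    // Sketch only: CreateOp and the Run(scope, place) signature are taken
    // from the bindings in this patch; `desc` is assumed to exist.
    paddle::framework::Scope scope;
    auto op = paddle::framework::OpRegistry::CreateOp(desc);
    op->Run(scope, paddle::platform::CUDAPlace(0));  // or platform::CPUPlace()

Note the explicit dev_ctx.Wait() is gone from the binding; synchronization is now the callee's concern.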
@@ -398,7 +402,7 @@ All parameter, weight, gradient are variables in Paddle.
   py::class_<operators::CondOp>(m, "CondOp")
       .def_static("create",
                   [](py::bytes protobin) -> operators::CondOp * {
-                    OpDesc desc;
+                    proto::OpDesc desc;
                     PADDLE_ENFORCE(desc.ParsePartialFromString(protobin),
                                    "Cannot parse user input to OpDesc");
                     PADDLE_ENFORCE(desc.IsInitialized(),
@@ -417,11 +421,12 @@ All parameter, weight, gradient are variables in Paddle.
           });

   py::class_<framework::Executor>(m, "Executor")
-      .def(py::init<const std::vector<platform::Place> &>())
+      .def(py::init<const platform::Place &>())
       .def("run", &Executor::Run);

   m.def("unique_integer", UniqueIntegerGenerator);
-  m.def("init_gflags", InitGflags);
+  m.def("init_gflags", framework::InitGflags);
+  m.def("init_devices", &framework::InitDevices);

   m.def("is_compile_gpu", IsCompileGPU);
   m.def("set_feed_variable", framework::SetFeedVariable);
@@ -431,6 +436,7 @@ All parameter, weight, gradient are variables in Paddle.
   BindBlockDesc(m);
   BindVarDsec(m);
   BindOpDesc(m);
+  BindConstValue(m);

   py::class_<LoDRankTable>(m, "LodRankTable")
       .def("items", [](framework::LoDRankTable &table) {
diff --git a/paddle/pybind/tensor_py.h b/paddle/pybind/tensor_py.h
index 41fa658502..67244d8260 100644
--- a/paddle/pybind/tensor_py.h
+++ b/paddle/pybind/tensor_py.h
@@ -1,21 +1,22 @@
 /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at

-   http://www.apache.org/licenses/LICENSE-2.0
+    http://www.apache.org/licenses/LICENSE-2.0

-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License. */
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */

 #pragma once
 #include
 #include "paddle/framework/tensor.h"
 #include "paddle/memory/memcpy.h"
+#include "paddle/platform/device_context.h"
 #include "pybind11/numpy.h"
 #include "pybind11/pybind11.h"
@@ -61,13 +62,16 @@ struct CastToPyBufferImpl {
       auto *src_ptr = static_cast<const void *>(tensor.data<CUR_TYPE>());
       auto *dst_ptr = static_cast<void *>(dst_tensor.mutable_data<CUR_TYPE>(
           tensor.dims(), platform::CPUPlace()));
-      // TODO(qijun): Here we use default CUDA stream to set GPU Tensor to
-      // a Python numpy array. It's better to manage CDUA stream unifiedly.
-      paddle::platform::GpuMemcpySync(dst_ptr, src_ptr,
-                                      sizeof(CUR_TYPE) * tensor.numel(),
-                                      cudaMemcpyDeviceToHost);
+
+      platform::DeviceContextPool &pool = platform::DeviceContextPool::Get();
+      auto dev_ctx = static_cast<const platform::CUDADeviceContext *>(
+          pool.Borrow(tensor.place()));
+
+      paddle::platform::GpuMemcpyAsync(
+          dst_ptr, src_ptr, sizeof(CUR_TYPE) * tensor.numel(),
+          cudaMemcpyDeviceToHost, dev_ctx->stream());
 #else
-      PADDLE_THROW("'GPUPlace' is not supported in CPU only device.");
+      PADDLE_THROW("'CUDAPlace' is not supported in CPU only device.");
 #endif
     } else if (paddle::platform::is_cpu_place(tensor.place())) {
       dst_tensor = tensor;
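[Editor's note] The hunk above (and the one below) replaces the removed GpuMemcpySync with GpuMemcpyAsync on the pooled context's stream. An async device-to-host copy is only safe to read once that stream has been synchronized, so callers should pair it with a wait. A sketch of the pattern, where num_bytes stands in for the sizeof(...) * numel() expressions in the surrounding code:

    // Device-to-host copy on the context's stream, then block until it lands.
    paddle::platform::GpuMemcpyAsync(dst_ptr, src_ptr, num_bytes,
                                     cudaMemcpyDeviceToHost, dev_ctx->stream());
    dev_ctx->Wait();  // cudaStreamSynchronize on the same stream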
@@ -123,7 +127,7 @@ template <typename T>
 void PyCUDATensorSetFromArray(
     framework::Tensor &self, py::array_t<T> array,
-    paddle::platform::GPUPlace &place) {
+    paddle::platform::CUDAPlace &place) {
   std::vector<int64_t> dims;
   dims.reserve(array.ndim());
   for (size_t i = 0; i < array.ndim(); ++i) {
@@ -132,10 +136,12 @@ void PyCUDATensorSetFromArray(
   self.Resize(framework::make_ddim(dims));
   auto *dst = self.mutable_data<T>(place);
-  // TODO(qijun): Here we use default CUDA stream to set a Python numpy
-  // array to a GPU Tensor. It's better to manage CDUA stream unifiedly.
-  paddle::platform::GpuMemcpySync(dst, array.data(), sizeof(T) * array.size(),
-                                  cudaMemcpyHostToDevice);
+
+  platform::DeviceContextPool &pool = platform::DeviceContextPool::Get();
+  auto dev_ctx =
+      static_cast<const platform::CUDADeviceContext *>(pool.Borrow(place));
+  paddle::platform::GpuMemcpyAsync(dst, array.data(), sizeof(T) * array.size(),
+                                   cudaMemcpyHostToDevice, dev_ctx->stream());
 }
 #endif
diff --git a/paddle/scripts/CMakeLists.txt b/paddle/scripts/CMakeLists.txt
index a52f06fe49..68cb5a19f9 100644
--- a/paddle/scripts/CMakeLists.txt
+++ b/paddle/scripts/CMakeLists.txt
@@ -5,11 +5,3 @@ configure_file(submit_local.sh.in
 install(FILES ${CMAKE_CURRENT_BINARY_DIR}/paddle DESTINATION bin
         PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ
                     GROUP_EXECUTE GROUP_READ WORLD_EXECUTE WORLD_READ)
-
-configure_file(tools/usage_stat/usage.sh
-               paddle_usage
-               @ONLY)
-
-install(FILES ${CMAKE_CURRENT_BINARY_DIR}/paddle_usage DESTINATION opt/paddle/bin
-        PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ
-                    GROUP_EXECUTE GROUP_READ WORLD_EXECUTE WORLD_READ)
diff --git a/paddle/scripts/cluster_train_v2/openmpi/docker_cluster/Dockerfile b/paddle/scripts/cluster_train_v2/openmpi/docker_cluster/Dockerfile
index 1a2d19e823..c2f631bdf4 100644
--- a/paddle/scripts/cluster_train_v2/openmpi/docker_cluster/Dockerfile
+++ b/paddle/scripts/cluster_train_v2/openmpi/docker_cluster/Dockerfile
@@ -1,7 +1,7 @@
 # Build this image: docker build -t mpi .
 #

-FROM paddledev/paddle:0.10.0rc3
+FROM paddlepaddle/paddle:0.10.0rc3

 ENV DEBIAN_FRONTEND noninteractive

diff --git a/paddle/scripts/docker/README.md b/paddle/scripts/docker/README.md
index f3a6f1dba7..f0620498cf 100644
--- a/paddle/scripts/docker/README.md
+++ b/paddle/scripts/docker/README.md
@@ -20,7 +20,7 @@ binaries.
## Run The Build -### Build Evironments +### Build Environments The pre-built build environment images are: @@ -192,7 +192,7 @@ For developers who are interested in the C++ source code, please use -e "WOBOQ=O - The following command builds PaddlePaddle, generates HTML pages from C++ source code, and writes HTML pages into `$HOME/woboq_out` on the host: ```bash -docker run -v $PWD:/paddle -v $HOME/woboq_out:/woboq_out -e "WITH_GPU=OFF" -e "WITH_AVX=ON" -e "WITH_TEST=ON" -e "WOBOQ=ON" paddlepaddle/paddle:latest-dev +docker run -v $PWD:/paddle -v $HOME/woboq_out:/woboq_out -e "WITH_GPU=OFF" -e "WITH_AVX=ON" -e "WITH_TESTING=ON" -e "WOBOQ=ON" paddlepaddle/paddle:latest-dev ``` - You can open the generated HTML files in your Web browser. Or, if you want to run a Nginx container to serve them for a wider audience, you can run: diff --git a/paddle/scripts/submit_local.sh.in b/paddle/scripts/submit_local.sh.in index 43d2d1b410..a94bc01b35 100755 --- a/paddle/scripts/submit_local.sh.in +++ b/paddle/scripts/submit_local.sh.in @@ -165,9 +165,6 @@ case "$1" in "make_diagram") python -m paddle.utils.make_model_diagram ${@:2} ;; - "usage") - $PADDLE_BIN_PATH/paddle_usage ${@:2} - ;; "version") version ;; diff --git a/paddle/scripts/tools/build_docs/build_docs.sh b/paddle/scripts/tools/build_docs/build_docs.sh index c6cbbc4eef..f9bc8bf63a 100755 --- a/paddle/scripts/tools/build_docs/build_docs.sh +++ b/paddle/scripts/tools/build_docs/build_docs.sh @@ -5,4 +5,4 @@ docker run --rm \ -e "WITH_AVX=ON" \ -e "WITH_DOC=ON" \ -e "WOBOQ=ON" \ - ${1:-"paddledev/paddle:dev"} + ${1:-"paddlepaddle/paddle:latest-dev"} diff --git a/paddle/scripts/tools/usage_stat/usage.sh b/paddle/scripts/tools/usage_stat/usage.sh deleted file mode 100755 index 7dbd1f5884..0000000000 --- a/paddle/scripts/tools/usage_stat/usage.sh +++ /dev/null @@ -1,168 +0,0 @@ -#!/bin/bash - -ARGPARSE=`getopt -o u:vin:l:e: --long git-user:,help,dry-run,task-name:,log-file:,exit-code: -- "$@"` -KEEP_ANONYMOUS="A_USER_DOES_NOT_TELL_US" -# paddle config home dir, same as paddle -PADDLE_CONF_HOME="$HOME/.config/paddle" -# api url, mirror url(s) will be append later -PD_URLS="http://api.paddlepaddle.org/version" - -usage() -{ - echo "Usage: `basename $0` [options]" - echo "Options:" - echo " -e, --exit-code=EXIT_CODE The train/predict process's exit code" - echo " -l, --log-file=LOG_FILE_PATH Read which log file to get the duration of process" - echo " -n, --task-name=TASK_NAME The name of demo or example" - echo " -u, --git-user=GITHUB_USER provide contact info, like username or email" - echo " -v, -i Verbose output and interact with user when necessary" - echo " --help display this help message" -} - -eval set -- "${ARGPARSE}" -while true; do - case "$1" in - -l|--log-file) - log_file=$2 - shift 2 - ;; - -e|--exit-code) - exit_code=$2 - shift 2 - ;; - -u|--git-user) - github_user=$2 - shift 2 - ;; - -n|--task-name) - task=$2 - shift 2 - ;; - -v|-i) - v=1 - shift - ;; - --dry-run) - dry_run=1 - shift - ;; - --) - shift - break - ;; - --help) - usage - exit 0 - ;; - *) - echo "Invalid option $1" - usage - exit 1 - ;; - esac -done - -# parse the log_file to get the time costs -if [ -s "${log_file}" ]; then - duration=`awk 'BEGIN{day=0;last_sec=0;min_sec=0;max_sec=0;} - {if(index($2,":")==3){ - t=substr($2,1,8); - sec=day*86400+substr(t,1,2)*3600+substr(t,4,2)*60+substr(t,7,2); - if(secsec){min_sec=sec;} - if(max_sec==0 || max_sec/dev/null` - git_url=`git config --get remote.origin.url 2>/dev/null` - if [ "`echo ${git_url} | cut -b 1-19`" = 
"https://github.com/" ]; then - # under a git url, like https://github.com/user_xxx/proj_yyy.git - if [ "${v}" = "1" ]; then echo " from github url..."; fi - github_user=`echo ${git_url} | cut -d "/" -f 4` - if [ "${github_user}" = "PaddlePaddle" ]; then - github_user= - fi - fi - if [ -n "${git_username}" -a -z "${github_user}" ]; then - if [ "${v}" = "1" ]; then echo " from global git username..."; fi - github_user=${git_username} - fi - fi -fi -# allow user to set the user name, if it's not found -if [ -z "${github_user}" -a "${v}" = "1" ]; then - read -p "Please input your github username or email, or just return to keep this feedback anonymous:" - github_user=${REPLY} - if [ -z "${github_user}" ]; then - # empty input, consider as one anonymous user - github_user="${KEEP_ANONYMOUS}" - fi -fi -if [ -n "${github_user}" -a -z "${dry_run}" ]; then - # valid user and not in dry-run mode, then save to cache - mkdir -p ${PADDLE_CONF_HOME} - echo "${github_user}" >${PADDLE_CONF_HOME}/github_user -fi -if [ "${v}" = "1" ]; then echo "username: ${github_user}"; fi -if [ "${github_user}" = "${KEEP_ANONYMOUS}" ]; then - # anonymous user should keep the var empty. - github_user= -fi - -# read local paddle version -paddle_version=`paddle version | grep PaddlePaddle | head -n1 | cut -d " " -f 2 | cut -d "," -f 1` -if [ "${v}" = "1" ]; then echo "version:${paddle_version}"; fi - -# read local system time -system_time=`date "+%Y%m%d%H%M%S"` -if [ "${v}" = "1" ]; then echo "system time:${system_time}"; fi - -# make empty job_name as default value. -if [ -z "${task}" ]; then - task="(unknown_task)" -fi -if [ "${v}" = "1" ]; then echo "task: ${task}"; fi - -# concat the curl command -params="content={\"data_type\":\"usage\",\ -\"system_time\":${system_time},\"paddle_version\":\"${paddle_version}\",\ -\"github_user\":\"${github_user}\",\"job_name\":\"${task}\",\ -\"duration\":${duration},\"exit_code\":\"${exit_code}\"\ -}&type=1" -curl_cmd_prefix="curl -m 5 -X POST -d ${params}\ - -b ${PADDLE_CONF_HOME}/paddle.cookie -c ${PADDLE_CONF_HOME}/paddle.cookie " - -if [ "${dry_run}" = "1" ]; then - first_url=`echo ${PD_URLS} | cut -d " " -f 1` - echo "(dry-run mode)curl command: ${curl_cmd_prefix} ${first_url}" - exit 0 -else - for u in ${PD_URLS}; do - curl_cmd="${curl_cmd_prefix} ${u}" - if [ "${v}" = "1" ]; then echo "run: ${curl_cmd}"; fi - ${curl_cmd} >/dev/null 2>&1 - if [ $? -eq 0 ]; then - if [ "${v}" = "1" ]; then echo "upload OK!"; fi - exit 0 - else - if [ "${v}" = "1" ]; then echo "upload failed...try next"; fi - fi - done - if [ "${v}" = "1" ]; then echo "all urls tried but all failed...exit"; fi - exit 1 -fi diff --git a/paddle/scripts/travis/build_doc.sh b/paddle/scripts/travis/build_doc.sh index ff0bac6a07..0db8d33bbc 100755 --- a/paddle/scripts/travis/build_doc.sh +++ b/paddle/scripts/travis/build_doc.sh @@ -14,9 +14,8 @@ make -j `nproc` print_operators_doc paddle/pybind/print_operators_doc > doc/en/html/operators.json # check websites for broken links -# It will be failed now! -#linkchecker doc/en/html/index.html -#linkchecker doc/cn/html/index.html +linkchecker doc/en/html/index.html +linkchecker doc/cn/html/index.html # Parse Github URL REPO=`git config remote.origin.url` diff --git a/paddle/string/to_string.h b/paddle/string/to_string.h index 4f478b6a36..3b3bcc69a4 100644 --- a/paddle/string/to_string.h +++ b/paddle/string/to_string.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. 
- Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once #include diff --git a/paddle/string/to_string_test.cc b/paddle/string/to_string_test.cc index 971484dd0c..4956bd96fa 100644 --- a/paddle/string/to_string_test.cc +++ b/paddle/string/to_string_test.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/string/to_string.h" #include diff --git a/paddle/testing/CMakeLists.txt b/paddle/testing/CMakeLists.txt index 8132742749..77f84cd43b 100644 --- a/paddle/testing/CMakeLists.txt +++ b/paddle/testing/CMakeLists.txt @@ -6,7 +6,6 @@ if(WITH_TESTING) add_library(paddle_test_util STATIC TestUtil.cpp) add_dependencies(paddle_test_util paddle_proto ${external_project_dependencies}) if(NOT MOBILE_INFERENCE) - add_library(paddle_gtest_main STATIC paddle_gtest_main.cc) - add_dependencies(paddle_gtest_main paddle_memory gtest gflags) + cc_library(paddle_gtest_main SRCS paddle_gtest_main.cc DEPS init paddle_memory gtest gflags) endif() endif() diff --git a/paddle/testing/paddle_gtest_main.cc b/paddle/testing/paddle_gtest_main.cc index a491322b7e..108ff335bf 100644 --- a/paddle/testing/paddle_gtest_main.cc +++ b/paddle/testing/paddle_gtest_main.cc @@ -13,8 +13,10 @@ See the License for the specific language governing permissions and limitations under the License. 
 */
 #include
+
 #include "gflags/gflags.h"
 #include "gtest/gtest.h"
+#include "paddle/framework/init.h"
 #include "paddle/memory/memory.h"

 int main(int argc, char** argv) {
@@ -32,8 +34,11 @@ int main(int argc, char** argv) {
   google::ParseCommandLineFlags(&new_argc, &new_argv_address, false);
   testing::InitGoogleTest(&argc, argv);
   paddle::memory::Used(paddle::platform::CPUPlace());
+  std::vector<std::string> devs = {"CPU"};
 #ifdef PADDLE_WITH_CUDA
-  paddle::memory::Used(paddle::platform::GPUPlace(0));
+  paddle::memory::Used(paddle::platform::CUDAPlace(0));
+  devs.push_back("GPU:0");
 #endif
+  paddle::framework::InitDevices(devs);
   return RUN_ALL_TESTS();
 }
diff --git a/python/.gitignore b/python/.gitignore
index cc7d0ece4a..1ba1d4c9b0 100644
--- a/python/.gitignore
+++ b/python/.gitignore
@@ -2,6 +2,7 @@
 build
 dist
 paddle.egg-info
+paddlepaddle_gpu.egg-info
 .idea
 paddle/proto/*.py
 paddle/proto/*.pyc
diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py
index 7e118b24a4..19e2ab1b7d 100644
--- a/python/paddle/trainer_config_helpers/layers.py
+++ b/python/paddle/trainer_config_helpers/layers.py
@@ -270,7 +270,7 @@ class LayerType(object):
     @staticmethod
     def is_layer_type(type_name):
         """
-        If type_name is a layer type.
+        Whether type_name is a layer type.

         :param type_name: layer type name. Because layer type enumerations are
                           strings.
@@ -441,7 +441,7 @@ def full_matrix_projection(input, size=0, param_attr=None):
       with mixed_layer(size=100) as m:
           m += full_matrix_projection(input=layer)

-    2. When used as an independant object like this, you must set the size:
+    2. When used as an independent object like this, you must set the size:

     .. code-block:: python

@@ -451,11 +451,11 @@ def full_matrix_projection(input, size=0, param_attr=None):

     :param input: The input of this layer.
     :type input: LayerOutput
-    :param size: The parameter size. Means the width of parameter.
+    :param size: The dimension of this layer.
     :type size: int
-    :param param_attr: Parameter config, None if use default.
+    :param param_attr: The parameter attribute. See ParameterAttribute for details.
     :type param_attr: ParameterAttribute
-    :return: A FullMatrixProjection Object.
+    :return: FullMatrixProjection Object.
     :rtype: FullMatrixProjection
     """
     proj = FullMatrixProjection(
@@ -468,12 +468,12 @@ def full_matrix_projection(input, size=0, param_attr=None):
 def trans_full_matrix_projection(input, size=0, param_attr=None):
     """
     Different from full_matrix_projection, this projection performs matrix
-    multiplication, using transpose of weight.
+    multiplication, using the transpose of weight.

     .. math::
        out.row[i] += in.row[i] * w^\mathrm{T}

-    :math:`w^\mathrm{T}` means transpose of weight.
+    :math:`w^\mathrm{T}` means the transpose of weight.
     The simply usage is:

     .. code-block:: python

       proj = trans_full_matrix_projection(input=layer,
                                           size=100,
                                           param_attr=ParamAttr(
                                                name='_proj',
                                                initial_mean=0.0,
                                                initial_std=0.01))

@@ -489,9 +489,9 @@ def trans_full_matrix_projection(input, size=0, param_attr=None):
     :type input: LayerOutput
     :param size: The parameter size. Means the width of parameter.
     :type size: int
-    :param param_attr: Parameter config, None if use default.
+    :param param_attr: The parameter attribute. See ParameterAttribute for details.
     :type param_attr: ParameterAttribute
-    :return: A TransposedFullMatrixProjection Object.
+    :return: TransposedFullMatrixProjection Object.
     :rtype: TransposedFullMatrixProjection
     """
     proj = TransposedFullMatrixProjection(
@@ -521,7 +521,7 @@ def table_projection(input, size=0, param_attr=None):
       with mixed_layer(size=100) as m:
           m += table_projection(input=layer)

-    2. When used as an independant object like this, you must set the size:
+    2. When used as an independent object like this, you must set the size:

     .. code-block:: python

@@ -532,11 +532,11 @@ def table_projection(input, size=0, param_attr=None):

     :param input: The input of this layer, which must contains id fields.
     :type input: LayerOutput
-    :param size: The parameter size. Means the width of parameter.
+    :param size: The dimension of the output.
     :type size: int
-    :param param_attr: Parameter config, None if use default.
+    :param param_attr: The parameter attribute. See ParameterAttribute for details.
     :type param_attr: ParameterAttribute
-    :return: A TableProjection Object.
+    :return: TableProjection Object.
     :rtype: TableProjection
     """
     proj = TableProjection(
@@ -547,7 +547,7 @@ def table_projection(input, size=0, param_attr=None):

 def identity_projection(input, offset=None, size=None):
     """
-    1. IdentityProjection if offset=None. It performs:
+    1. If offset=None, it performs IdentityProjection as follows:

     .. math::
        out.row[i] += in.row[i]

     The example usage is:

     .. code-block:: python

        proj = identity_projection(input=layer)

-    2. IdentityOffsetProjection if offset!=None. It likes IdentityProjection,
-    but layer size may be smaller than input size.
-    It select dimesions [offset, offset+layer_size) from input:
+    2. If offset!=None, it performs IdentityOffsetProjection and takes the
+    elements of the input in the range [offset, offset+size) as output.

     .. math::
        out.row[i] += in.row[i + \\textrm{offset}]

     The example usage is:

     .. code-block:: python

        proj = identity_projection(input=layer, offset=10)

-    Note that both of two projections should not have any parameter.
+    Note that neither of the projections has a trainable parameter.

     :param input: The input of this layer.
     :type input: LayerOutput
-    :param offset: Offset, None if use default.
+    :param offset: The offset from the start of the input. The input's
+                   elements in the range [offset, offset+size) will be
+                   taken as output. If this parameter is not set or set
+                   to None, the output will be the same as the input.
     :type offset: int
-    :return: A IdentityProjection or IdentityOffsetProjection object
-    :rtype: IdentityProjection or IdentityOffsetProjection
+    :param size: The dimension of this layer. It will be ignored
+                 when offset is None or not set.
+    :type size: int
+    :return: IdentityProjection or IdentityOffsetProjection object
+    :rtype: IdentityProjection | IdentityOffsetProjection
     """
     if offset is None:
         proj = IdentityProjection(input_layer_name=input.name)
@@ -596,8 +601,8 @@ def identity_projection(input, offset=None, size=None):

 def slice_projection(input, slices):
     """
-    slice_projection can slice the input value into multiple parts,
-    and then select some of them to merge into a new output.
+    slice_projection slices the input value into multiple parts,
+    then selects and merges some of them into a new output.

     .. math::
        output = [input.slices()]

     The example usage is:

     .. code-block:: python

        proj = slice_projection(input=layer, slices=[(0, 10), (20, 30)])

-    Note that slice_projection should not have any parameter.
+    Note that slice_projection has no trainable parameter.

     :param input: The input of this layer.
     :type input: LayerOutput
-    :param slices: An array of slice parameters.
-                   Each slice contains the start and end offsets based
-                   on the input.
- :type slices: pair of int - :return: A SliceProjection object + :param slices: A list of start and end offsets of each slice. + :type slices: list of tuple + :return: SliceProjection object. :rtype: SliceProjection """ assert len(slices) >= 1 @@ -636,8 +639,7 @@ def slice_projection(input, slices): @wrap_param_attr_default() def scaling_projection(input, param_attr=None): """ - scaling_projection multiplies the input with a scalar parameter and add to - the output. + scaling_projection multiplies the input with a scalar parameter. .. math:: out += w * in @@ -650,9 +652,9 @@ def scaling_projection(input, param_attr=None): :param input: The input of this layer. :type input: LayerOutput - :param param_attr: Parameter config, None if use default. + :param param_attr: The parameter attribute. See ParameterAttribute for details. :type param_attr: ParameterAttribute - :return: A ScalingProjection object + :return: ScalingProjection object. :rtype: ScalingProjection """ proj = ScalingProjection(input_layer_name=input.name, **param_attr.attr) @@ -663,8 +665,8 @@ def scaling_projection(input, param_attr=None): @wrap_param_attr_default() def dotmul_projection(input, param_attr=None): """ - DotMulProjection with a layer as input. - It performs element-wise multiplication with weight. + DotMulProjection takes a layer as input and performs + element-wise multiplication with weight. .. math:: out.row[i] += in.row[i] .* weight @@ -679,9 +681,9 @@ def dotmul_projection(input, param_attr=None): :param input: The input of this layer. :type input: LayerOutput - :param param_attr: Parameter config, None if use default. + :param param_attr: The parameter attribute. See ParameterAttribute for details. :type param_attr: ParameterAttribute - :return: A DotMulProjection Object. + :return: DotMulProjection object. :rtype: DotMulProjection """ proj = DotMulProjection( @@ -698,7 +700,7 @@ def dotmul_operator(a=None, b=None, scale=1, **kwargs): out.row[i] += scale * (a.row[i] .* b.row[i]) where :math:`.*` means element-wise multiplication, and - scale is a config scalar, its default value is one. + scale is a config scalar, its default value is 1. The example usage is: @@ -706,13 +708,13 @@ def dotmul_operator(a=None, b=None, scale=1, **kwargs): op = dotmul_operator(a=layer1, b=layer2, scale=0.5) - :param a: Input layer1 + :param a: The first input of this layer. :type a: LayerOutput - :param b: Input layer2 + :param b: The second input of this layer. :type b: LayerOutput - :param scale: config scalar, default value is one. + :param scale: A scalar to scale the product. Its default value is 1. :type scale: float - :return: A DotMulOperator Object. + :return: DotMulOperator object. :rtype: DotMulOperator """ if 'x' in kwargs or 'y' in kwargs: @@ -738,28 +740,29 @@ def context_projection(input, """ Context Projection. - It just simply reorganizes input sequence, combines "context_len" sequence - to one context from context_start. "context_start" will be set to - -(context_len - 1) / 2 by default. If context position out of sequence + It just reorganizes input sequence, combines "context_len" elements of the + sequence to one context from context_start. "context_start" will be set to + -(context_len - 1) / 2 by default. When context position is out of sequence length, padding will be filled as zero if padding_attr = False, otherwise it is trainable. 
- For example, origin sequence is [A B C D E F G], context len is 3, then - after context projection and not set padding_attr, sequence will + For example, origin sequence is [A B C D E F G], context len is 3, padding_attr + is not set, then after context projection, sequence will be [ 0AB ABC BCD CDE DEF EFG FG0 ]. :param input: The input of this layer, which should be a sequence. :type input: LayerOutput - :param context_len: context length. + :param context_len: The length of the context. :type context_len: int - :param context_start: context start position. Default is + :param context_start: The start position of the context. The default value is -(context_len - 1)/2 :type context_start: int - :param padding_attr: Padding Parameter Attribute. If false, it means padding - always be zero. Otherwise Padding is learnable, and - parameter attribute is set by this parameter. + :param padding_attr: Parameter attribute of the padding. If the parameter is + set to False, padding will be zero. In other cases, the + padding is trainable, and its parameter attribute is set + by this parameter. :type padding_attr: bool | ParameterAttribute - :return: Projection + :return: Projection object. :rtype: Projection """ context_start = -( @@ -791,10 +794,9 @@ class MixedLayerType(LayerOutput): def __init__(self, name, size, act, bias_attr, layer_attr, parents=None): """ - Ctor. - :param name: layer name. + :param name: The name of this layer. :type name: basestring - :param size: layer size. + :param size: The dimension of this layer. :type size: int :param act: Activation type. :type act: BaseActivation @@ -802,8 +804,9 @@ class MixedLayerType(LayerOutput): whose type is not ParameterAttribute, no bias is defined. If the parameter is set to True, the bias is initialized to zero. :type bias_attr: ParameterAttribute | None | bool | Any - :param layer_attr: Extra Layer Attribute. - :type layer_attr: ExtraLayerAttribute or None + :param layer_attr: The extra layer attribute. See ExtraLayerAttribute for + details. + :type layer_attr: ExtraLayerAttribute | None """ LayerOutput.__init__( self, @@ -868,12 +871,12 @@ def mixed_layer(size=0, bias_attr=False, layer_attr=None): """ - Mixed Layer. A mixed layer will add all inputs together, then activate. - Each inputs is a projection or operator. + Mixed Layer. A mixed layer will add all inputs together, then activate the sum. + Each input is a projection or operator. There are two styles of usages. - 1. When not set inputs parameter, use mixed_layer like this: + 1. When the parameter input is not set, use mixed_layer like this: .. code-block:: python @@ -889,21 +892,21 @@ def mixed_layer(size=0, input=[full_matrix_projection(input=layer1), full_matrix_projection(input=layer2)]) - :param name: mixed layer name. Can be referenced by other layer. + :param name: The name of this layer. It is optional. :type name: basestring - :param size: layer size. + :param size: The dimension of this layer. :type size: int - :param input: The input of this layer. It is an optional parameter. If set, - then this function will just return layer's name. + :param input: The input of this layer. It is an optional parameter. :param act: Activation Type. LinearActivation is the default activation. :type act: BaseActivation :param bias_attr: The bias attribute. If the parameter is set to False or an object whose type is not ParameterAttribute, no bias is defined. If the parameter is set to True, the bias is initialized to zero. 
:type bias_attr: ParameterAttribute | None | bool | Any - :param layer_attr: The extra layer config. Default is None. + :param layer_attr: The extra layer attribute. See ExtraLayerAttribute for + details. :type layer_attr: ExtraLayerAttribute - :return: MixedLayerType object can add inputs or layer name. + :return: MixedLayerType object. :rtype: MixedLayerType """ @@ -938,14 +941,15 @@ def data_layer(name, size, depth=None, height=None, width=None, :param name: The name of this layer. :type name: basestring - :param size: Size of this data layer. + :param size: The dimension of this data layer. :type size: int - :param height: Height of this data layer, used for image + :param height: The height of the input image data. :type height: int | None - :param width: Width of this data layer, used for image + :param width: The width of the input image data. :type width: int | None - :param layer_attr: Extra Layer Attribute. - :type layer_attr: ExtraLayerAttribute. + :param layer_attr: The extra layer attribute. See ExtraLayerAttribute for + details. + :type layer_attr: ExtraLayerAttribute :return: LayerOutput object. :rtype: LayerOutput """ @@ -978,14 +982,15 @@ def embedding_layer(input, size, name=None, param_attr=None, layer_attr=None): :param name: The name of this layer. It is optional. :type name: basestring - :param input: The input of this layer, which must be Index Data. + :param input: The input of this layer, whose type must be Index Data. :type input: LayerOutput - :param size: The embedding dimension. + :param size: The dimension of the embedding vector. :type size: int :param param_attr: The embedding parameter attribute. See ParameterAttribute for details. - :type param_attr: ParameterAttribute | None - :param layer_attr: Extra layer Config. Default is None. + :type param_attr: ParameterAttribute + :param layer_attr: The extra layer attribute. See ExtraLayerAttribute for + details. :type layer_attr: ExtraLayerAttribute | None :return: LayerOutput object. :rtype: LayerOutput @@ -1013,7 +1018,7 @@ def fc_layer(input, bias_attr=None, layer_attr=None): """ - Helper for declare fully connected layer. + The fully connected layer. The example usage is: @@ -1035,17 +1040,18 @@ def fc_layer(input, :type name: basestring :param input: The input of this layer. :type input: LayerOutput | list | tuple - :param size: The layer dimension. + :param size: The dimension of this layer. :type size: int :param act: Activation Type. TanhActivation is the default activation. :type act: BaseActivation - :param param_attr: The Parameter Attribute|list. + :param param_attr: The parameter attribute. See ParameterAttribute for details. :type param_attr: ParameterAttribute :param bias_attr: The bias attribute. If the parameter is set to False or an object whose type is not ParameterAttribute, no bias is defined. If the parameter is set to True, the bias is initialized to zero. :type bias_attr: ParameterAttribute | None | bool | Any - :param layer_attr: Extra Layer config. + :param layer_attr: The extra layer attribute. See ExtraLayerAttribute for + details. :type layer_attr: ExtraLayerAttribute | None :return: LayerOutput object. :rtype: LayerOutput @@ -1086,13 +1092,15 @@ def fc_layer(input, @wrap_name_default("print") def printer_layer(input, format=None, name=None): """ - Print the output value of input layers. This layer is useful for debugging. + Print the output value of the layers specified by the parameter input. + This layer is useful for debugging. :param name: The name of this layer. It is optional. 
    :type name: basestring
    :param input: The input of this layer.
    :type input: LayerOutput | list | tuple
-   :return: LayerOutput
+   :return: LayerOutput object.
+   :rtype: LayerOutput
    """
    if isinstance(input, LayerOutput):
        input = [input]
@@ -1135,11 +1143,12 @@ def priorbox_layer(input,
    :param aspect_ratio: The aspect ratio.
    :type aspect_ratio: list
    :param variance: The bounding box variance.
-   :type min_size: The min size of the priorbox width/height.
+   :type min_size: The minimum size of the priorbox width/height.
    :param min_size: list
-   :type max_size: The max size of the priorbox width/height. Could be NULL.
+   :type max_size: The maximum size of the priorbox width/height. It could be NULL.
    :param max_size: list
-   :return: LayerOutput
+   :return: LayerOutput object.
+   :rtype: LayerOutput
    """
    # plus one for ratio 1.
    num_filters = (len(aspect_ratio) * 2 + 1 + len(max_size)) * 4
@@ -1177,7 +1186,7 @@ def multibox_loss_layer(input_loc,

    :param name: The name of this layer. It is optional.
    :type name: basestring
-   :param input_loc: The input predict locations.
+   :param input_loc: The input predicted locations.
    :type input_loc: LayerOutput | List of LayerOutput
    :param input_conf: The input priorbox confidence.
    :type input_conf: LayerOutput | List of LayerOutput
@@ -1189,13 +1198,15 @@ def multibox_loss_layer(input_loc,
    :type num_classes: int
    :param overlap_threshold: The threshold of the overlap.
    :type overlap_threshold: float
-   :param neg_pos_ratio: The ratio of the negative bbox to the positive bbox.
+   :param neg_pos_ratio: The ratio of the negative bounding box to
+                         the positive bounding box.
    :type neg_pos_ratio: float
-   :param neg_overlap: The negative bbox overlap threshold.
+   :param neg_overlap: The negative bounding box overlap threshold.
    :type neg_overlap: float
    :param background_id: The background class index.
    :type background_id: int
-   :return: LayerOutput
+   :return: LayerOutput object.
+   :rtype: LayerOutput
    """
    if isinstance(input_loc, LayerOutput):
        input_loc = [input_loc]
@@ -1258,19 +1269,20 @@ def detection_output_layer(input_loc,
    :type input_conf: LayerOutput | List of LayerOutput.
    :param priorbox: The input priorbox location and the variance.
    :type priorbox: LayerOutput
-   :param num_classes: The number of the classification.
+   :param num_classes: The number of classes.
    :type num_classes: int
    :param nms_threshold: The Non-maximum suppression threshold.
    :type nms_threshold: float
-   :param nms_top_k: The bbox number kept of the NMS's output
+   :param nms_top_k: The number of bounding boxes kept in the NMS's output.
    :type nms_top_k: int
-   :param keep_top_k: The bbox number kept of the layer's output
+   :param keep_top_k: The number of bounding boxes kept in this layer's output.
    :type keep_top_k: int
-   :param confidence_threshold: The classification confidence threshold
+   :param confidence_threshold: The classification confidence threshold.
    :type confidence_threshold: float
    :param background_id: The background class index.
    :type background_id: int
-   :return: LayerOutput
+   :return: LayerOutput object.
+   :rtype: LayerOutput
    """
    if isinstance(input_loc, LayerOutput):
        input_loc = [input_loc]
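A minimal wiring sketch for the two detection layers documented above. The `loc`, `conf`, `priorbox` and `label` variables are hypothetical outputs of earlier layers, the parameter values are illustrative only, and the `label` argument name is an assumption not confirmed by this hunk:

.. code-block:: python

   # Training-time loss over predicted locations/confidences.
   loss = multibox_loss_layer(input_loc=[loc], input_conf=[conf],
                              priorbox=priorbox, label=label,
                              num_classes=21, overlap_threshold=0.5,
                              neg_pos_ratio=3.0, neg_overlap=0.5,
                              background_id=0)
   # Inference-time decoding with non-maximum suppression.
   detections = detection_output_layer(input_loc=[loc], input_conf=[conf],
                                       priorbox=priorbox, num_classes=21,
                                       nms_threshold=0.45, nms_top_k=400,
                                       keep_top_k=200,
                                       confidence_threshold=0.01,
                                       background_id=0)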
@@ -1326,7 +1338,7 @@ def roi_pool_layer(input,
    A layer used by Fast R-CNN to extract feature maps of ROIs from the
    last feature map.

-   :param name: The Layer Name.
+   :param name: The name of this layer. It is optional.
    :type name: basestring
    :param input: The input layer.
    :type input: LayerOutput.
@@ -1338,9 +1350,10 @@ def roi_pool_layer(input,
    :type pooled_height: int
    :param spatial_scale: The spatial scale between the image and feature map.
    :type spatial_scale: float
-   :param num_channels: number of input channel.
+   :param num_channels: The number of input channels.
    :type num_channels: int
-   :return: LayerOutput
+   :return: LayerOutput object.
+   :rtype: LayerOutput
    """
    if num_channels is None:
        assert input.num_filters is not None
@@ -1361,18 +1374,19 @@ def roi_pool_layer(input,
@wrap_name_default("cross_channel_norm")
def cross_channel_norm_layer(input, name=None, param_attr=None):
    """
-   Normalize a layer's output. This layer is necessary for ssd.
-   This layer applys normalize across the channels of each sample to
-   a conv layer's output and scale the output by a group of trainable
-   factors which dimensions equal to the channel's number.
+   Normalize a layer's output. This layer is necessary for ssd. This
+   layer applies normalization across the channels of each sample to
+   a convolutional layer's output and scales the output by a group of
+   trainable factors whose dimension equals the number of channels.

    :param name: The name of this layer. It is optional.
    :type name: basestring
    :param input: The input of this layer.
    :type input: LayerOutput
-   :param param_attr: The Parameter Attribute|list.
+   :param param_attr: The parameter attribute. See ParameterAttribute for details.
    :type param_attr: ParameterAttribute
-   :return: LayerOutput
+   :return: LayerOutput object.
+   :rtype: LayerOutput
    """
    assert input.num_filters is not None
    Layer(
@@ -1413,12 +1427,9 @@ def pooling_layer(input,
    Pooling layer for sequence inputs, not used for Image.

    If stride > 0, this layer slides a window whose size is determined by stride,
-   and return the pooling value of the window as the output. Thus, a long sequence
-   will be shorten.
-
-   The parameter stride specifies the intervals at which to apply the pooling
-   operation. Note that for sequence with sub-sequence, the default value
-   of stride is -1.
+   and returns the pooling value of the sequence in the window as the output. Thus,
+   a long sequence will be shortened. Note that for sequence with sub-sequence, the
+   default value of stride is -1.

    The example usage is:

@@ -1435,16 +1446,16 @@ def pooling_layer(input,
    :type name: basestring
    :param input: The input of this layer.
    :type input: LayerOutput
-   :param pooling_type: Type of pooling, MaxPooling(default), AvgPooling,
-                        SumPooling, SquareRootNPooling.
+   :param pooling_type: Type of pooling. MaxPooling is the default pooling.
    :type pooling_type: BasePoolingType | None
    :param stride: The step size between successive pooling regions.
-   :type stride: Int
+   :type stride: int
    :param bias_attr: The bias attribute. If the parameter is set to False or an object
                      whose type is not ParameterAttribute, no bias is defined. If the
                      parameter is set to True, the bias is initialized to zero.
    :type bias_attr: ParameterAttribute | None | bool | Any
-   :param layer_attr: The Extra Attributes for layer, such as dropout.
+   :param layer_attr: The extra layer attribute. See ExtraLayerAttribute for
+                      details.
    :type layer_attr: ExtraLayerAttribute | None
    :return: LayerOutput object.
    :rtype: LayerOutput
@@ -6618,7 +6629,7 @@ def row_conv_layer(input,
    .. math::
       r_{t,r} = \sum_{j=1}^{k + 1} {w_{i,j}h_{t+j-1, i}}
-               \quad \text{for} \quad (1 \leq i \leq d)
+               \quad \\text{for} \quad (1 \leq i \leq d)

    Note:
       The `context_len` is `k + 1`.
That is to say, the lookahead step
@@ -6767,7 +6778,7 @@ def gated_unit_layer(input,
    The gated unit layer implements a simple gating mechanism over the input.
    The input :math:`X` is first projected into a new space :math:`X'`, and
    it is also used to produce a gate weight :math:`\sigma`. Element-wise
-   product between :match:`X'` and :math:`\sigma` is finally returned.
+   product between :math:`X'` and :math:`\sigma` is finally returned.

    Reference:
        `Language Modeling with Gated Convolutional Networks
@@ -7463,7 +7474,7 @@ def factorization_machine(input,
    Factorization Machine with the formula:

    .. math::
-       y = \sum_{i=1}^{n-1}\sum_{j=i+1}^n\langle v_i, v_j \rangle x_i x_j
+       y = \sum_{i=1}^{n-1}\sum_{j=i+1}^n\langle v_i, v_j \\rangle x_i x_j

    Note:
        X is the input vector with size n. V is the factor matrix. Each row of V
diff --git a/python/paddle/trainer_config_helpers/networks.py b/python/paddle/trainer_config_helpers/networks.py
index 9776ae1805..b5cde7bac7 100644
--- a/python/paddle/trainer_config_helpers/networks.py
+++ b/python/paddle/trainer_config_helpers/networks.py
@@ -25,10 +25,10 @@ from paddle.trainer.config_parser import *
__all__ = [
    'sequence_conv_pool', 'simple_lstm', "simple_img_conv_pool",
    "img_conv_bn_pool", 'lstmemory_group', 'lstmemory_unit', 'small_vgg',
-   'img_conv_group', 'vgg_16_network', 'gru_unit', 'gru_group', 'simple_gru',
-   'simple_attention', 'dot_product_attention', 'multi_head_attention',
-   'simple_gru2', 'bidirectional_gru', 'text_conv_pool', 'bidirectional_lstm',
-   'inputs', 'outputs'
+   'img_conv_group', 'img_separable_conv', 'vgg_16_network', 'gru_unit',
+   'gru_group', 'simple_gru', 'simple_attention', 'dot_product_attention',
+   'multi_head_attention', 'simple_gru2', 'bidirectional_gru',
+   'text_conv_pool', 'bidirectional_lstm', 'inputs', 'outputs'
]

######################################################
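img_separable_conv, newly exported above and defined in the second hunk below, composes a depthwise and a pointwise convolution. A usage sketch; the data layer shape and parameter values are illustrative only:

.. code-block:: python

   img = data_layer(name='image', size=3 * 32 * 32, height=32, width=32)
   # 3x3 depthwise convolution over 3 channels, then a 1x1 pointwise
   # convolution mixing them into 16 output channels.
   sep = img_separable_conv(input=img, num_channels=3, num_out_channels=16,
                            filter_size=3, stride=1, padding=1,
                            depth_multiplier=1, act=ReluActivation())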
@@ -251,13 +251,13 @@ def img_conv_bn_pool(input,
                     pool_layer_attr=None):
    """
    Convolution, batch normalization, pooling group.
-
+
    Img input => Conv => BN => Pooling => Output.

    :param name: group name.
    :type name: basestring
    :param input: input layer.
-   :type input: LayerOutput
+   :type input: LayerOutput
    :param filter_size: see img_conv_layer for details.
    :type filter_size: int
    :param num_filters: see img_conv_layer for details.
@@ -435,6 +435,85 @@ def img_conv_group(input,
        input=tmp, stride=pool_stride, pool_size=pool_size, pool_type=pool_type)


+@wrap_name_default("separable_conv")
+def img_separable_conv(input,
+                       num_channels,
+                       num_out_channels,
+                       filter_size,
+                       stride=1,
+                       padding=0,
+                       depth_multiplier=1,
+                       act=None,
+                       bias_attr=None,
+                       param_attr=None,
+                       shared_bias=True,
+                       layer_type='exconv',
+                       name=None):
+    """
+    Separable Convolution.
+
+    The separable convolution module consists of a depthwise convolution
+    that acts separately on input channels, followed by a pointwise convolution
+    with 1*1 kernels that mixes channels. It is used in Xception:
+    https://arxiv.org/pdf/1610.02357.pdf
+
+    :param input: input layer.
+    :type input: LayerOutput
+    :param num_channels: the number of input channels.
+    :type num_channels: int
+    :param num_out_channels: the number of output channels.
+    :type num_out_channels: int
+    :param filter_size: the filter size for the depthwise convolution.
+    :type filter_size: int|tuple
+    :param stride: the stride size for the depthwise convolution.
+    :type stride: int|tuple
+    :param padding: the padding size for the depthwise convolution.
+    :type padding: int|tuple
+    :param depth_multiplier: the number of filters for one channel in the
+                             depthwise convolution.
+    :type depth_multiplier: int
+    :param act: the activation function for the output.
+    :type act: BaseActivation
+    :param bias_attr: see img_conv_layer for details.
+    :type bias_attr: ParameterAttribute
+    :param param_attr: see img_conv_layer for details.
+    :type param_attr: ParameterAttribute
+    :param shared_bias: see img_conv_layer for details.
+    :type shared_bias: bool
+    :param layer_type: see img_conv_layer for details.
+    :type layer_type: basestring
+    :return: layer's output
+    :rtype: LayerOutput
+    """
+    __depthwise_conv__ = img_conv_layer(
+        name="%s_depthwise_conv" % name,
+        input=input,
+        num_channels=num_channels,
+        num_filters=num_channels * depth_multiplier,
+        groups=num_channels,
+        filter_size=filter_size,
+        stride=stride,
+        padding=padding,
+        act=LinearActivation(),
+        bias_attr=bias_attr,
+        param_attr=param_attr,
+        shared_biases=shared_bias,
+        layer_type=layer_type)
+    __pointwise_conv__ = img_conv_layer(
+        name="%s_pointwise_conv" % name,
+        input=__depthwise_conv__,
+        num_channels=num_channels * depth_multiplier,
+        num_filters=num_out_channels,
+        filter_size=1,
+        stride=1,
+        padding=0,
+        act=act,
+        bias_attr=bias_attr,
+        param_attr=param_attr,
+        shared_biases=shared_bias)
+    return __pointwise_conv__
+
+
def small_vgg(input_image, num_channels, num_classes):
    def __vgg__(ipt, num_filter, times, dropouts, num_channels_=None):
        return img_conv_group(
@@ -648,7 +727,7 @@ def lstmemory_unit(input,
                   lstm_bias_attr=None,
                   lstm_layer_attr=None):
    """
-   lstmemory_unit defines the caculation process of a LSTM unit during a
+   lstmemory_unit defines the calculation process of an LSTM unit during a
    single time step. This function is not a recurrent layer, so it can not be
    directly used to process sequence input. This function is always used in
    recurrent_group (see layers.py for more details) to implement attention
@@ -869,7 +948,7 @@ def gru_unit(input,
             gru_layer_attr=None,
             naive=False):
    """
-   gru_unit defines the calculation process of a gated recurrent unit during a single
+   gru_unit defines the calculation process of a gated recurrent unit during a single
    time step. This function is not a recurrent layer, so it can not be directly used to
    process sequence input. This function is always used in the recurrent_group (see
    layers.py for more details) to implement attention
@@ -1012,7 +1091,7 @@ def simple_gru(input,
    simple_gru in network.py. The reason why there are so many interfaces is
    that we have two ways to implement recurrent neural network. One way is to
    use one complete layer to implement rnn (including simple rnn, gru and lstm)
-   with multiple time steps, such as recurrent_layer, lstmemory, grumemory. But
+   with multiple time steps, such as recurrent_layer, lstmemory, grumemory. But
    the multiplication operation :math:`W x_t` is not computed in these layers.
    See details in their interfaces in layers.py.
    The other implementation is to use an recurrent group which can ensemble a
@@ -1116,11 +1195,12 @@ def simple_gru2(input,
    :type act: BaseActivation
    :param gate_act: gate activiation type of gru
    :type gate_act: BaseActivation
-   :param gru_bias_attr: bias parameter attribute of gru layer,
+   :param gru_bias_attr: bias parameter attribute of gru layer,
                          False means no bias, None means default bias.
    :type gru_bias_attr: ParameterAttribute|False|None
-   :param gru_layer_attr: Extra attribute of the gru layer.
- :type gru_layer_attr: ExtraLayerAttribute + :param gru_param_attr: param parameter attribute of gru layer, + None means default param. + :type gru_param_attr: ParameterAttribute|None :return: the gru group. :rtype: LayerOutput """ @@ -1188,7 +1268,7 @@ def bidirectional_gru(input, :type size: int :param return_seq: If set False, the last time step of output are concatenated and returned. - If set True, the entire output sequences in forward + If set True, the entire output sequences in forward and backward directions are concatenated and returned. :type return_seq: bool :return: LayerOutput object. @@ -1277,7 +1357,7 @@ def bidirectional_lstm(input, :type size: int :param return_seq: If set False, the last time step of output are concatenated and returned. - If set True, the entire output sequences in forward + If set True, the entire output sequences in forward and backward directions are concatenated and returned. :type return_seq: bool :return: LayerOutput object. diff --git a/python/paddle/v2/dataset/imdb.py b/python/paddle/v2/dataset/imdb.py index cfc1c886e1..21ed7f7a5c 100644 --- a/python/paddle/v2/dataset/imdb.py +++ b/python/paddle/v2/dataset/imdb.py @@ -23,10 +23,9 @@ Besides, this module also provides API for building dictionary. import paddle.v2.dataset.common import collections import tarfile -import Queue import re import string -import threading +import random __all__ = ['build_dict', 'train', 'test', 'convert'] @@ -74,47 +73,21 @@ def build_dict(pattern, cutoff): return word_idx -def reader_creator(pos_pattern, neg_pattern, word_idx, buffer_size): +def reader_creator(pos_pattern, neg_pattern, word_idx): UNK = word_idx[''] + INS = [] - qs = [Queue.Queue(maxsize=buffer_size), Queue.Queue(maxsize=buffer_size)] - - def load(pattern, queue): + def load(pattern, out, label): for doc in tokenize(pattern): - queue.put(doc) - queue.put(None) + out.append(([word_idx.get(w, UNK) for w in doc], label)) + + load(pos_pattern, INS, 0) + load(neg_pattern, INS, 1) + random.shuffle(INS) def reader(): - # Creates two threads that loads positive and negative samples - # into qs. - t0 = threading.Thread( - target=load, args=( - pos_pattern, - qs[0], )) - t0.daemon = True - t0.start() - - t1 = threading.Thread( - target=load, args=( - neg_pattern, - qs[1], )) - t1.daemon = True - t1.start() - - # Read alternatively from qs[0] and qs[1]. - i = 0 - doc = qs[i].get() - while doc != None: - yield [word_idx.get(w, UNK) for w in doc], i % 2 - i += 1 - doc = qs[i % 2].get() - - # If any queue is empty, reads from the other queue. 
-            i += 1
-            doc = qs[i % 2].get()
-            while doc != None:
-                yield [word_idx.get(w, UNK) for w in doc], i % 2
-                doc = qs[i % 2].get()
+        for doc, label in INS:
+            yield doc, label

    return reader

@@ -133,7 +106,7 @@ def train(word_idx):
    """
    return reader_creator(
        re.compile("aclImdb/train/pos/.*\.txt$"),
-       re.compile("aclImdb/train/neg/.*\.txt$"), word_idx, 1000)
+       re.compile("aclImdb/train/neg/.*\.txt$"), word_idx)


def test(word_idx):
@@ -150,7 +123,7 @@ def test(word_idx):
    """
    return reader_creator(
        re.compile("aclImdb/test/pos/.*\.txt$"),
-       re.compile("aclImdb/test/neg/.*\.txt$"), word_idx, 1000)
+       re.compile("aclImdb/test/neg/.*\.txt$"), word_idx)


def word_dict():
diff --git a/python/paddle/v2/fluid/__init__.py b/python/paddle/v2/fluid/__init__.py
index 59986c9f0c..c72b573069 100644
--- a/python/paddle/v2/fluid/__init__.py
+++ b/python/paddle/v2/fluid/__init__.py
@@ -15,13 +15,15 @@ import backward
import regularizer
from param_attr import ParamAttr
from data_feeder import DataFeeder
-from core import LoDTensor, CPUPlace, GPUPlace
+from core import LoDTensor, CPUPlace, CUDAPlace
+from distribute_transpiler import DistributeTranspiler
+import clip

Tensor = LoDTensor

__all__ = framework.__all__ + executor.__all__ + [
    'io', 'initializer', 'layers', 'nets', 'optimizer', 'backward',
-   'regularizer', 'LoDTensor', 'CPUPlace', 'GPUPlace', 'Tensor', 'ParamAttr'
-   'DataFeeder'
+   'regularizer', 'LoDTensor', 'CPUPlace', 'CUDAPlace', 'Tensor', 'ParamAttr',
+   'DataFeeder', 'clip', 'DistributeTranspiler'
]

@@ -40,5 +42,10 @@ def __read_gflags_from_env__():
    core.init_gflags([sys.argv[0]] +
                     ["--tryfromenv=" + ",".join(read_env_flags)])

+   if core.is_compile_gpu():
+       core.init_devices(["CPU", "GPU:0"])
+   else:
+       core.init_devices(["CPU"])
+

__read_gflags_from_env__()
diff --git a/python/paddle/v2/fluid/clip.py b/python/paddle/v2/fluid/clip.py
new file mode 100644
index 0000000000..d7ec2fbe13
--- /dev/null
+++ b/python/paddle/v2/fluid/clip.py
@@ -0,0 +1,61 @@
+import functools
+import layers
+
+__all__ = ['GradientClipByValue', 'append_gradient_clip_ops']
+
+
+class BaseGradientClipAttr(object):
+    def process_context(self, context, p_g):
+        raise NotImplementedError()
+
+    def create_operators(self, param, grad):
+        raise NotImplementedError()
+
+
+class NullGradientClipAttr(BaseGradientClipAttr):
+    def process_context(self, context, p_g):
+        pass
+
+    def create_operators(self, param, grad):
+        return param, grad
+
+
+class GradientClipByValue(BaseGradientClipAttr):
+    def __init__(self, max, min=None):
+        max = float(max)
+        if min is None:
+            min = -max
+        else:
+            min = float(min)
+        self.max = max
+        self.min = min
+
+    def process_context(self, context, p_g):
+        pass
+
+    def create_operators(self, param, grad):
+        new_grad = layers.clip(x=grad, min=self.min, max=self.max)
+        return param, new_grad
+
+
+def append_gradient_clip_ops(param_grad):
+    context = dict()
+    create_op_callbacks = []
+    for p, g in param_grad:
+        clip_attr = getattr(p, 'clip_attr', NullGradientClipAttr())
+        if clip_attr is None:
+            clip_attr = NullGradientClipAttr()
+        if not isinstance(clip_attr, BaseGradientClipAttr):
+            raise TypeError(
+                "clip attribute should be an instance of BaseGradientClipAttr")
+
+        clip_attr.process_context(context=context, p_g=param_grad)
+        create_op_callbacks.append(
+            functools.partial(
+                clip_attr.create_operators, param=p, grad=g))
+
+    return [each_callback() for each_callback in create_op_callbacks]
+
+
+ClipByValue = GradientClipByValue
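A usage sketch for the new clip module above. `param` and `params_grads` are hypothetical stand-ins for a Parameter and for the (parameter, gradient) pairs produced by the backward pass; setting `clip_attr` on a parameter relies on the framework.py change later in this patch:

.. code-block:: python

   from paddle.v2.fluid.clip import GradientClipByValue, append_gradient_clip_ops

   # Clip this parameter's gradient to [-5.0, 5.0]; min defaults to -max.
   param.clip_attr = GradientClipByValue(max=5.0)
   # Rewrites each gradient through a clip op wherever a clip_attr is set.
   params_grads = append_gradient_clip_ops(params_grads)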
diff --git a/python/paddle/v2/fluid/distribute_transpiler.py b/python/paddle/v2/fluid/distribute_transpiler.py
new file mode 100644
index 0000000000..49ece7b725
--- /dev/null
+++ b/python/paddle/v2/fluid/distribute_transpiler.py
@@ -0,0 +1,242 @@
+import framework
+from framework import Program, default_main_program, Parameter, Variable
+import optimizer
+from layer_helper import LayerHelper
+
+
+def hash_name_to_server(params_grads, pserver_endpoints):
+    """
+    :param param_grads:
+    :return: a map of pserver endpoint ->
+        params -> [param list]
+        grads -> [grad list]
+    """
+
+    def _hash_param(param_name, total):
+        return hash(param_name) % total
+
+    param_grad_map = dict()
+    for param, grad in params_grads:
+        if param.trainable is True and grad is not None:
+            server_id = _hash_param(param.name, len(pserver_endpoints))
+            server_for_param = pserver_endpoints[server_id]
+            if not param_grad_map.has_key(server_for_param):
+                param_grad_map[server_for_param] = {"params": [], "grads": []}
+            param_grad_map[server_for_param]["params"].append(param)
+            param_grad_map[server_for_param]["grads"].append(grad)
+
+    return param_grad_map
+
+
+def round_robin(params_grads, pserver_endpoints):
+    assert (len(params_grads) > len(pserver_endpoints))
+
+    param_grad_map = dict()
+    pserver_idx = 0
+    for param, grad in params_grads:
+        if param.trainable is True:
+            server_for_param = pserver_endpoints[pserver_idx]
+            if not param_grad_map.has_key(server_for_param):
+                param_grad_map[server_for_param] = {"params": [], "grads": []}
+
+            param_grad_map[server_for_param]["params"].append(param)
+            param_grad_map[server_for_param]["grads"].append(grad)
+
+            pserver_idx += 1
+            if pserver_idx >= len(pserver_endpoints):
+                pserver_idx = 0
+    return param_grad_map
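To illustrate the split that round_robin performs, a sketch with stand-in objects; real callers pass fluid Parameters and gradient Variables:

.. code-block:: python

   class FakeParam(object):
       # Only the fields round_robin reads.
       def __init__(self, name):
           self.name = name
           self.trainable = True

   endpoints = ["pserver0:6174", "pserver1:6174"]
   params_grads = [(FakeParam("w%d" % i), "w%d@GRAD" % i) for i in range(4)]
   pmap = round_robin(params_grads, endpoints)
   # pmap["pserver0:6174"]["params"] now holds w0 and w2,
   # pmap["pserver1:6174"]["params"] holds w1 and w3.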
+
+
+class DistributeTranspiler:
+    def transpile(self,
+                  optimize_ops,
+                  params_grads,
+                  program=None,
+                  pservers="127.0.0.1:6174",
+                  trainers=1,
+                  split_method=round_robin):
+        """
+        Transpile the program into distributed data-parallelism programs.
+
+        The main_program will be transformed to use a remote parameter server
+        to do parameter optimization. And the optimization graph will be put
+        into a parameter server program.
+
+        Use different methods to split trainable variables to different
+        parameter servers.
+
+        Example to run:
+
+        exe = fluid.Executor(place)
+        t = fluid.DistributeTranspiler()
+        t.transpile(optimize_ops, params_grads, pservers="127.0.0.1:6174", trainers=1)
+
+        pserver_endpoint = os.getenv("PSERVER")
+        if pserver_endpoint:
+            pserver_prog = t.get_pserver_program(pserver_endpoint, optimize_ops)
+            exe.run(fluid.default_startup_program())
+            exe.run(pserver_prog)
+        else:
+            feeder = fluid.DataFeeder(feed_list=[images, label], place=place)
+            exe.run(fluid.default_startup_program())
+
+            for pass_id in range(PASS_NUM):
+                ...
+
+        :param optimize_ops: op list of optimization, should be the
+                             return value of Optimizer.minimize
+        :type optimize_ops: list
+        :param program: program to optimize, default default_main_program
+        :param pservers: parameter server endpoints like "m1:6174,m2:6174"
+        :type pservers: string
+
+        :return: return a list of programs
+        """
+        if program is None:
+            program = default_main_program()
+        self.program = program
+        self.trainers = trainers
+        self.optimize_ops = optimize_ops
+        self._optimize_distributed(
+            optimize_ops,
+            program,
+            params_grads,
+            pservers=pservers,
+            trainers=trainers,
+            split_method=split_method)
+
+    def _clone_param(self, block, v):
+        assert isinstance(v, Parameter)
+        new_p = Parameter(
+            block=block,
+            shape=v.shape,
+            dtype=v.dtype,
+            type=v.type,
+            lod_level=v.lod_level,
+            stop_gradient=v.stop_gradient,
+            trainable=v.trainable,
+            optimize_attr=v.optimize_attr,
+            regularizer=v.regularizer,
+            name=v.name)
+        block.vars[new_p.name] = new_p
+
+    def _clone_var(self, block, var):
+        assert isinstance(var, Variable)
+        return block.create_var(
+            name=var.name,
+            shape=var.shape,
+            dtype=var.dtype,
+            type=var.type,
+            lod_level=var.lod_level,
+            persistable=var.persistable)
+
+    def _optimize_distributed(self, optimize_ops, program, params_and_grads,
+                              **kwargs):
+        if kwargs.has_key("split_method"):
+            split_method = kwargs["split_method"]
+        else:
+            split_method = round_robin
+
+        assert (callable(split_method))
+        pserver_endpoints = kwargs["pservers"].split(",")
+        self.param_grad_map = split_method(params_and_grads, pserver_endpoints)
+
+        send_op_ordered_inputs = []
+        send_op_ordered_outputs = []
+        epmap = []
+        for ep, v in self.param_grad_map.iteritems():
+            send_op_ordered_inputs.extend(v["grads"])
+            send_op_ordered_outputs.extend(v["params"])
+            for i in v["grads"]:
+                epmap.append(ep)
+        send_op = program.global_block().append_op(
+            type="send",
+            inputs={"X": send_op_ordered_inputs
+                    },  # inputs is a list of tensors to be sent
+            outputs={"Out": send_op_ordered_outputs},
+            attrs={"endpoints": pserver_endpoints,
+                   "epmap": epmap})
+
+    def get_trainer_program(self):
+        # remove optimize ops and add a send op to main_program
+        self.program.global_block().delete_ops(self.optimize_ops)
+        return self.program
+
+    def _create_var_for_trainers(self, block, var, trainers):
+        var_list = []
+        for i in xrange(trainers):
+            var_each = block.create_var(
+                name="%s.trainer_%d" % (var.name, i),
+                persistable=var.persistable,
+                dtype=var.dtype,
+                shape=var.shape)
+            var_list.append(var_each)
+        return var_list
+
+    def get_pserver_program(self, endpoint, optimize_ops):
+        pserver_program = Program()
+        for v in self.param_grad_map[endpoint]["params"]:
+            self._clone_param(pserver_program.global_block(), v)
+
+        optimize_sub_program = Program()
+        grad_var_names = [
+            var.name for var in self.param_grad_map[endpoint]["grads"]
+        ]
+        for opt_op in optimize_ops:
+            for _, var in opt_op.inputs.iteritems():
+                # NOTE: append operators to merge gradients from multiple
+                # trainers. If trainers == 1, this is not needed.
+ if self.trainers > 1 and var.name in grad_var_names: + vars2merge = self._create_var_for_trainers( + optimize_sub_program.global_block(), var, self.trainers) + merged_var = optimize_sub_program.global_block().create_var( + name=var.name, + persistable=var.persistable, + dtype=var.dtype, + shape=var.shape) + optimize_sub_program.global_block().append_op( + type="sum", + inputs={"X": vars2merge}, + outputs={"Out": merged_var}) + optimize_sub_program.global_block().append_op( + type="scale", + inputs={"X": merged_var}, + outputs={"Out": merged_var}, + attrs={"scale": 1.0 / float(self.trainers)}) + else: + optimize_sub_program.global_block().create_var( + name=var.name, + persistable=var.persistable, + dtype=var.dtype, + shape=var.shape) + + if opt_op.inputs.has_key("Grad"): + if opt_op.inputs["Grad"].name in grad_var_names: + optimize_sub_program.global_block().append_op( + type=opt_op.type, + inputs=opt_op.inputs, + outputs=opt_op.outputs, + attrs=opt_op.attrs) + else: + optimize_sub_program.global_block().append_op( + type=opt_op.type, + inputs=opt_op.inputs, + outputs=opt_op.outputs, + attrs=opt_op.attrs) + pserver_program.global_block().append_op( + type="recv", + inputs={"RX": + self.param_grad_map[endpoint]["grads"]}, # grads to recv + outputs={}, + attrs={ + "OptimizeProgram": optimize_sub_program.desc, + "endpoint": endpoint, + "ParamList": + [p.name for p in self.param_grad_map[endpoint]["params"]], + "GradList": + [p.name for p in self.param_grad_map[endpoint]["grads"]], + "Trainers": self.trainers + }) + pserver_program.sync_with_cpp() + return pserver_program diff --git a/python/paddle/v2/fluid/evaluator.py b/python/paddle/v2/fluid/evaluator.py index 137c573622..e186ee96c3 100644 --- a/python/paddle/v2/fluid/evaluator.py +++ b/python/paddle/v2/fluid/evaluator.py @@ -1,10 +1,10 @@ import numpy as np import layers -from framework import Program, unique_name, Variable +from framework import Program, unique_name, Variable, program_guard from layer_helper import LayerHelper -__all__ = ['Accuracy'] +__all__ = ['Accuracy', 'ChunkEvaluator'] def _clone_var_(block, var): @@ -49,15 +49,12 @@ class Evaluator(object): if reset_program is None: reset_program = Program() - for var in self.states: - assert isinstance(var, Variable) - g_var = _clone_var_(reset_program.current_block(), var) - layers.fill_constant( - shape=g_var.shape, - value=0.0, - dtype=g_var.dtype, - out=g_var, - main_program=reset_program) + with program_guard(main_program=reset_program): + for var in self.states: + assert isinstance(var, Variable) + g_var = _clone_var_(reset_program.current_block(), var) + layers.fill_constant( + shape=g_var.shape, value=0.0, dtype=g_var.dtype, out=g_var) executor.run(reset_program) @@ -104,20 +101,14 @@ class Accuracy(Evaluator): self.total = self.create_state(dtype='int64', shape=[1], suffix='total') self.correct = self.create_state( dtype='int64', shape=[1], suffix='correct') - kwargs = {'main_program': main_program} total = self.helper.create_tmp_variable(dtype='int') correct = self.helper.create_tmp_variable(dtype='int') acc = layers.accuracy( - input=input, - label=label, - k=k, - total=total, - correct=correct, - **kwargs) - total = layers.cast(x=total, dtype='int64', **kwargs) - correct = layers.cast(x=correct, dtype='int64', **kwargs) - layers.sums(input=[self.total, total], out=self.total, **kwargs) - layers.sums(input=[self.correct, correct], out=self.correct, **kwargs) + input=input, label=label, k=k, total=total, correct=correct) + total = layers.cast(x=total, dtype='int64') + 
correct = layers.cast(x=correct, dtype='int64')
+       layers.sums(input=[self.total, total], out=self.total)
+       layers.sums(input=[self.correct, correct], out=self.correct)

        self.metrics.append(acc)

@@ -125,10 +116,75 @@ class Accuracy(Evaluator):
        if eval_program is None:
            eval_program = Program()
        block = eval_program.current_block()
-       kwargs = {'main_program': eval_program}
-       total = _clone_var_(block, self.total)
-       correct = _clone_var_(block, self.correct)
-       total = layers.cast(total, dtype='float32', **kwargs)
-       correct = layers.cast(correct, dtype='float32', **kwargs)
-       out = layers.elementwise_div(x=correct, y=total, **kwargs)
+       with program_guard(main_program=eval_program):
+           total = _clone_var_(block, self.total)
+           correct = _clone_var_(block, self.correct)
+           total = layers.cast(total, dtype='float32')
+           correct = layers.cast(correct, dtype='float32')
+           out = layers.elementwise_div(x=correct, y=total)
        return np.array(executor.run(eval_program, fetch_list=[out])[0])
+
+
+class ChunkEvaluator(Evaluator):
+    """
+    Accumulate counter numbers output by chunk_eval from mini-batches and
+    compute the precision, recall and F1-score using the accumulated counter
+    numbers.
+    """
+
+    def __init__(
+            self,
+            input,
+            label,
+            chunk_scheme,
+            num_chunk_types,
+            excluded_chunk_types=None, ):
+        super(ChunkEvaluator, self).__init__("chunk_eval")
+        main_program = self.helper.main_program
+        if main_program.current_block().idx != 0:
+            raise ValueError("You can only invoke Evaluator in root block")
+
+        self.num_infer_chunks = self.create_state(
+            dtype='int64', shape=[1], suffix='num_infer_chunks')
+        self.num_label_chunks = self.create_state(
+            dtype='int64', shape=[1], suffix='num_label_chunks')
+        self.num_correct_chunks = self.create_state(
+            dtype='int64', shape=[1], suffix='num_correct_chunks')
+        precision, recall, f1_score, num_infer_chunks, num_label_chunks, num_correct_chunks = layers.chunk_eval(
+            input=input,
+            label=label,
+            chunk_scheme=chunk_scheme,
+            num_chunk_types=num_chunk_types,
+            excluded_chunk_types=excluded_chunk_types, )
+        layers.sums(
+            input=[self.num_infer_chunks, num_infer_chunks],
+            out=self.num_infer_chunks)
+        layers.sums(
+            input=[self.num_label_chunks, num_label_chunks],
+            out=self.num_label_chunks)
+        layers.sums(
+            input=[self.num_correct_chunks, num_correct_chunks],
+            out=self.num_correct_chunks)
+
+        self.metrics.extend([precision, recall, f1_score])
+
+    def eval(self, executor, eval_program=None):
+        if eval_program is None:
+            eval_program = Program()
+        block = eval_program.current_block()
+        num_infer_chunks, num_label_chunks, num_correct_chunks = executor.run(
+            eval_program,
+            fetch_list=[_clone_var_(block, state) for state in self.states])
+        num_infer_chunks = num_infer_chunks[0]
+        num_label_chunks = num_label_chunks[0]
+        num_correct_chunks = num_correct_chunks[0]
+        precision = float(
+            num_correct_chunks) / num_infer_chunks if num_infer_chunks else 0
+        recall = float(
+            num_correct_chunks) / num_label_chunks if num_label_chunks else 0
+        f1_score = float(2 * precision * recall) / (
+            precision + recall) if num_correct_chunks else 0
+        return np.array(
+            [precision], dtype='float32'), np.array(
+                [recall], dtype='float32'), np.array(
+                    [f1_score], dtype='float32')
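A usage sketch for the new ChunkEvaluator; `crf_decode` and `label` are hypothetical variables from a sequence-labeling network, and `exe`, `feeder` and `train_reader` follow the usual fluid training-loop setup:

.. code-block:: python

   evaluator = ChunkEvaluator(input=crf_decode, label=label,
                              chunk_scheme="IOB", num_chunk_types=3)
   for data in train_reader():
       exe.run(fluid.default_main_program(), feed=feeder.feed(data))
   # Precision/recall/F1 computed over all accumulated mini-batch counters.
   precision, recall, f1_score = evaluator.eval(exe)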
diff --git a/python/paddle/v2/fluid/executor.py b/python/paddle/v2/fluid/executor.py
index bdc82eede9..2c91afb363 100644
--- a/python/paddle/v2/fluid/executor.py
+++ b/python/paddle/v2/fluid/executor.py
@@ -1,6 +1,6 @@
import numpy as np
from . import core
-from framework import Program, default_main_program
+from framework import Program, default_main_program, Parameter, Variable

__all__ = ['Executor', 'g_scope']

@@ -46,7 +46,15 @@ class Executor(object):
            p.set_place(each)
            act_places.append(p)

-       self.executor = core.Executor(act_places)
+       # TODO(dzhwinter) : consider that our fluid tests are all written in
+       # CUDAPlace(gpu_id), this will be changed in the future
+       if core.is_compile_gpu():
+           core.init_devices(["CPU", "GPU:0"])
+       else:
+           core.init_devices(["CPU"])
+
+       # TODO(dzhwinter) : only use the first place
+       self.executor = core.Executor(act_places[0])
        self.places = places

    def aslodtensor(self, data):
@@ -141,7 +149,7 @@ class Executor(object):
                outputs={'Out': [fetch_var]},
                attrs={'col': i})

-       self.executor.run(program.desc, scope, 0, True)
+       self.executor.run(program.desc, scope, 0, True, True)
        outs = [
            core.get_fetch_variable(scope, fetch_var_name, i)
            for i in xrange(len(fetch_list))
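With this change an Executor effectively runs on a single place and device initialization happens inside the constructor. A sketch of the usual setup; `use_cuda` is a hypothetical flag, not part of this patch:

.. code-block:: python

   import paddle.v2.fluid as fluid

   place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
   exe = fluid.Executor(place)
   exe.run(fluid.default_startup_program())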
diff --git a/python/paddle/v2/fluid/framework.py b/python/paddle/v2/fluid/framework.py
index bf0cd275b6..dbdf9a043c 100644
--- a/python/paddle/v2/fluid/framework.py
+++ b/python/paddle/v2/fluid/framework.py
@@ -1,10 +1,10 @@
import collections
+import contextlib

import numpy as np
-from . import core
+
import proto.framework_pb2 as framework_pb2
-import google.protobuf.message
-import contextlib
+from . import core

__all__ = [
    'Block', 'Variable', 'Program', 'Operator', 'default_startup_program',
@@ -12,6 +12,22 @@ __all__ = [
    'switch_main_program'
]

+EMPTY_VAR_NAME = core.kEmptyVarName()
+TEMP_VAR_NAME = core.kTempVarName()
+GRAD_VAR_SUFFIX = core.kGradVarSuffix()
+ZERO_VAR_SUFFIX = core.kZeroVarSuffix()
+
+USE_CPU = core.kUseCPU()
+USE_CUDNN = core.kUseCUDNN()
+USE_MKLDNN = core.kUseMKLDNN()
+
+
+def grad_var_name(var_name):
+    """
+    return gradient name for a certain var name
+    """
+    return var_name + GRAD_VAR_SUFFIX
+

def unique_name(prefix):
    """
@@ -347,6 +363,10 @@ class Operator(object):
        """
        self.block = block
        self.desc = desc
+       # for cloning a new operator
+       self.inputs = inputs
+       self.outputs = outputs
+       self.attrs = attrs
        if len(self.desc.type()) != 0:
            return
        if type is None:
@@ -377,7 +397,10 @@ class Operator(object):
                        % (in_proto.name, len(in_args)))
                in_arg_names = []
                for arg in in_args:
-                   in_arg_names.append(arg.name)
+                   if isinstance(arg, basestring):
+                       in_arg_names.append(arg)
+                   else:
+                       in_arg_names.append(arg.name)
                self.desc.set_input(in_proto.name, in_arg_names)
            else:
                self.desc.set_input(in_proto.name, [])
@@ -418,13 +441,18 @@ class Operator(object):
                    continue
                if isinstance(attrs[attr_name], Block):
                    self.desc.set_block_attr(attr_name, attrs[attr_name].desc)
+               elif isinstance(attrs[attr_name], core.BlockDesc) or \
+                       isinstance(attrs[attr_name], core.ProgramDesc):
+                   self.desc.set_serialized_attr(
+                       attr_name, attrs[attr_name].serialize_to_string())
                else:
                    self.desc.set_attr(attr_name, attrs[attr_name])

        self.desc.check_attrs()
        no_kernel_op_set = {
            'feed', 'fetch', 'save', 'load', 'recurrent',
-           'rnn_memory_helper_grad', 'conditional_block', 'while'
+           'rnn_memory_helper_grad', 'conditional_block', 'while', 'send',
+           'recv'
        }
        if type not in no_kernel_op_set:
            self.desc.infer_var_type(self.block.desc)
@@ -570,6 +598,7 @@ class Block(object):
        self.vars = dict()  # var_name --> var
        self.ops = collections.deque()  # operator list
        self.program = program
+       self.removed_vars = dict()

    def __str__(self):
        return self.to_string(True)
@@ -626,6 +655,16 @@ class Block(object):
        self.ops.append(op)
        return op

+   def delete_ops(self, ops):
+       # remove from cpp
+       # FIXME(typhoonzero): remove only the first occurrence.
+       try:
+           start = list(self.ops).index(ops[0])
+           end = list(self.ops).index(ops[-1])
+       except Exception, e:
+           raise e
+       self.desc.remove_op(start, end + 1)
+
    def prepend_op(self, *args, **kwargs):
        op_desc = self.desc.prepend_op()
        op = Operator(self, op_desc, *args, **kwargs)
@@ -704,6 +743,7 @@ class Block(object):
                trainable=p.trainable,
                optimize_attr=p.optimize_attr,
                regularizer=p.regularizer,
+               clip_attr=p.clip_attr,
                name=v.name)
            self.vars[new_p.name] = new_p

@@ -866,6 +906,8 @@ class Parameter(Variable):

        self.regularizer = kwargs.get('regularizer', None)

+       self.clip_attr = kwargs.get('clip_attr', None)
+

# program is a global instance.
_main_program_ = Program()
diff --git a/python/paddle/v2/fluid/io.py b/python/paddle/v2/fluid/io.py
index e147ac22ad..69a732fc45 100644
--- a/python/paddle/v2/fluid/io.py
+++ b/python/paddle/v2/fluid/io.py
@@ -180,10 +180,22 @@ def save_inference_model(dirname,
    :return: None
    """
+   if isinstance(feeded_var_names, basestring):
+       feeded_var_names = [feeded_var_names]
+   else:
+       if not (bool(feeded_var_names) and all(
+               isinstance(name, basestring) for name in feeded_var_names)):
+           raise ValueError("'feeded_var_names' should be a list of str.")
+
+   if isinstance(target_vars, Variable):
+       target_vars = [target_vars]
+   else:
+       if not (bool(target_vars) and all(
+               isinstance(var, Variable) for var in target_vars)):
+           raise ValueError("'target_vars' should be a list of Variable.")
+
    if main_program is None:
        main_program = default_main_program()
-   if not isinstance(target_vars, list):
-       target_vars = [target_vars]

    if not os.path.isdir(dirname):
        os.makedirs(dirname)
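A sketch of save_inference_model under the stricter validation above; `predict` is a hypothetical network output Variable and `exe` an Executor:

.. code-block:: python

   import paddle.v2.fluid as fluid

   # A bare string or Variable is also accepted and wrapped into a list.
   fluid.io.save_inference_model(dirname="./inference_model",
                                 feeded_var_names=["image"],
                                 target_vars=[predict],
                                 executor=exe)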
diff --git a/python/paddle/v2/fluid/layer_helper.py b/python/paddle/v2/fluid/layer_helper.py
index 3963e13222..4469f7285e 100644
--- a/python/paddle/v2/fluid/layer_helper.py
+++ b/python/paddle/v2/fluid/layer_helper.py
@@ -21,19 +21,11 @@ class LayerHelper(object):

    @property
    def main_program(self):
-       prog = self.kwargs.get('main_program', None)
-       if prog is None:
-           return default_main_program()
-       else:
-           return prog
+       return default_main_program()

    @property
    def startup_program(self):
-       prog = self.kwargs.get('startup_program', None)
-       if prog is None:
-           return default_startup_program()
-       else:
-           return prog
+       return default_startup_program()

    def append_op(self, *args, **kwargs):
        return self.main_program.current_block().append_op(*args, **kwargs)
@@ -151,13 +143,6 @@ class LayerHelper(object):
            persistable=True,
            initializer=initializer)

-   @property
-   def to_kwargs(self):
-       return {
-           'main_program': self.main_program,
-           'startup_program': self.startup_program
-       }
-
    def append_bias_op(self, input_var, dim_start=1, dim_end=None):
        """
        Append bias operator and return its output. If the user does not set
@@ -199,7 +184,7 @@ class LayerHelper(object):
        self.append_op(
            type=act_type,
            inputs={"X": [input_var]},
-           outputs={"Y": [tmp]},
+           outputs={"Out": [tmp]},
            attrs=act)
        return tmp

@@ -209,3 +194,9 @@ class LayerHelper(object):
        else:
            # For integer and boolean types, initialize with all zeros
            return Constant()
+
+   def is_instance(self, param_name, cls):
+       param = self.kwargs.get(param_name, None)
+       if not isinstance(param, cls):
+           raise TypeError("The input {0} parameter of method {1} must be {2}".
+                           format(param_name, self.layer_type, cls.__name__))
diff --git a/python/paddle/v2/fluid/layers.py b/python/paddle/v2/fluid/layers.py
deleted file mode 100644
index 1f45487902..0000000000
--- a/python/paddle/v2/fluid/layers.py
+++ /dev/null
@@ -1,2183 +0,0 @@
-import core
-import proto.framework_pb2 as framework_pb2
-from framework import OpProtoHolder, Variable, Program, Operator
-from initializer import Constant, Normal, Xavier, Initializer
-from paddle.v2.fluid.layer_helper import LayerHelper, unique_name
-import re
-import cStringIO
-from param_attr import ParamAttr
-import contextlib
-
-__all__ = [
-    'fc', 'data', 'cross_entropy', 'conv2d', 'pool2d', 'embedding', 'concat',
-    'StaticRNN', 'cast', 'sequence_conv', 'sequence_pool', 'sums', 'cos_sim',
-    'batch_norm', 'accuracy', 'split_lod_tensor', 'While'
-]
-
-
-def fc(input,
-      size,
-      num_flatten_dims=1,
-      param_attr=None,
-      bias_attr=None,
-      act=None,
-      name=None,
-      main_program=None,
-      startup_program=None):
-   """
-   Fully Connected Layer.
-
-   Args:
-       input: The input tensor to the function
-       size: The size of the layer
-       num_flatten_dims: Number of columns in input
-       param_attr: The parameters/weights to the FC Layer
-       param_initializer: Initializer used for the weight/parameter. If None, XavierInitializer() is used
-       bias_attr: The bias parameter for the FC layer
-       bias_initializer: Initializer used for the bias. If None, then ConstantInitializer() is used
-       act: Activation to be applied to the output of FC layer
-       name: Name/alias of the function
-       main_program: Name of the main program that calls this
-       startup_program: Name of the startup program
-
-   This function can take in multiple inputs and performs the Fully Connected
-   function (linear transformation) on top of each of them.
-   So for input x, the output will be : Wx + b. Where W is the parameter,
-   b the bias and x is the input.
-
-   The function also applies an activation (non-linearity) on top of the
-   output, if activation is passed in the input.
-
-   All the input variables of this function are passed in as local variables
-   to the LayerHelper constructor.
- - """ - helper = LayerHelper('fc', **locals()) - - dtype = helper.input_dtype() - - mul_results = [] - for input_var, param_attr in helper.iter_inputs_and_params(): - input_shape = input_var.shape - param_shape = [ - reduce(lambda a, b: a * b, input_shape[num_flatten_dims:], 1) - ] + [size] - w = helper.create_parameter( - attr=param_attr, shape=param_shape, dtype=dtype, is_bias=False) - tmp = helper.create_tmp_variable(dtype) - helper.append_op( - type="mul", - inputs={ - "X": input_var, - "Y": w, - }, - outputs={"Out": tmp}, - attrs={'x_num_col_dims': num_flatten_dims, - 'y_num_col_dims': 1}) - mul_results.append(tmp) - - # sum - if len(mul_results) == 1: - pre_bias = mul_results[0] - else: - pre_bias = helper.create_tmp_variable(dtype) - helper.append_op( - type="sum", inputs={"X": mul_results}, outputs={"Out": pre_bias}) - # add bias - pre_activation = helper.append_bias_op(pre_bias) - # add activation - return helper.append_activation(pre_activation) - - -def embedding(input, - size, - is_sparse=False, - param_attr=None, - dtype='float32', - main_program=None, - startup_program=None): - """ - Embedding Layer. - - Args: - param_initializer: - input: The input to the function - size: The size of the layer - is_sparse: A flag that decleares whether the input is sparse - param_attr: Parameters for this layer - dtype: The type of data : float32, float_16, int etc - main_program: Name of the main program that calls this - startup_program: Name of the startup program - - This function can take in the input (which is a vector of IDs) and - performs a lookup in the lookup_table using these IDs, to result into - the embedding of each ID in the input. - - All the input variables of this function are passed in as local variables - to the LayerHelper constructor. 
- - """ - - helper = LayerHelper('embedding', **locals()) - w = helper.create_parameter( - attr=helper.param_attr, shape=size, dtype=dtype, is_bias=False) - tmp = helper.create_tmp_variable(dtype) - helper.append_op( - type='lookup_table', - inputs={'Ids': input, - 'W': w}, - outputs={'Out': tmp}, - attrs={'is_sparse': is_sparse}) - return tmp - - -# TODO(qijun): expose H0 and C0 -def dynamic_lstm(input, - size, - param_attr=None, - bias_attr=None, - use_peepholes=True, - is_reverse=False, - gate_activation='sigmoid', - cell_activation='tanh', - candidate_activation='tanh', - dtype='float32', - main_program=None, - startup_program=None): - helper = LayerHelper('lstm', **locals()) - size = size / 4 - weight = helper.create_parameter( - attr=helper.param_attr, shape=[size, 4 * size], dtype=dtype) - bias_size = [1, 7 * size] - if not use_peepholes: - bias_size[1] = 4 * size - bias = helper.create_parameter( - attr=helper.bias_attr, shape=bias_size, dtype=dtype, is_bias=True) - - hidden = helper.create_tmp_variable(dtype) - cell = helper.create_tmp_variable(dtype) - batch_gate = helper.create_tmp_variable(dtype) - batch_cell_pre_act = helper.create_tmp_variable(dtype) - - helper.append_op( - type='lstm', - inputs={'Input': input, - 'Weight': weight, - 'Bias': bias}, - outputs={ - 'Hidden': hidden, - 'Cell': cell, - 'BatchGate': batch_gate, - 'BatchCellPreAct': batch_cell_pre_act - }, - attrs={ - 'use_peepholes': use_peepholes, - 'is_reverse': is_reverse, - 'gate_activation': gate_activation, - 'cell_activation': cell_activation, - 'candidate_activation': candidate_activation - }) - return hidden, cell - - -def gru_unit(input, - hidden, - size, - weight=None, - bias=None, - activation='tanh', - gate_activation='sigmoid', - main_program=None, - startup_program=None): - """ - GRUUnit Operator implements partial calculations of the GRU unit as following: - - $$ - update \ gate: u_t = actGate(xu_t + W_u * h_{t-1} + b_u) \\ - reset \ gate: r_t = actGate(xr_t + W_r * h_{t-1} + b_r) \\ - output \ candidate: {h}_t = actNode(xc_t + W_c * dot(r_t, h_{t-1}) + b_c) \\ - output: h_t = dot((1 - u_t), h_{t-1}) + dot(u_t, {h}_t) - $$ - - which is same as one time step of GRU Operator. - - @note To implement the complete GRU unit, fully-connected operator must be - used before to feed xu, xr and xc as the Input of GRUUnit operator. 
- - TODO(ChunweiYan) add more document here - """ - activation_dict = dict( - identity=0, - sigmoid=1, - tanh=2, - relu=3, ) - activation = activation_dict[activation] - gate_activation = activation_dict[gate_activation] - - helper = LayerHelper('gru_unit', **locals()) - dtype = helper.input_dtype() - size = size / 3 - - # create weight - if weight is None: - weight = helper.create_parameter( - attr=helper.param_attr, shape=[size, 3 * size], dtype=dtype) - - # create bias - if bias is None: - bias_size = [1, 3 * size] - bias = helper.create_parameter( - attr=helper.bias_attr, shape=bias_size, dtype=dtype, is_bias=True) - - gate = helper.create_tmp_variable(dtype) - reset_hidden_pre = helper.create_tmp_variable(dtype) - updated_hidden = helper.create_tmp_variable(dtype) - - helper.append_op( - type='gru_unit', - inputs={'Input': input, - 'HiddenPrev': hidden, - 'Weight': weight}, - outputs={ - 'Gate': gate, - 'ResetHiddenPrev': reset_hidden_pre, - 'Hidden': updated_hidden, - }, - attrs={ - 'activation': 0, - 'gate_activation': 1, - }) - - return updated_hidden, reset_hidden_pre, gate - - -def data(name, - shape, - append_batch_size=True, - dtype='float32', - lod_level=0, - type=core.VarDesc.VarType.LOD_TENSOR, - main_program=None, - startup_program=None, - stop_gradient=True): - """ - Data Layer. - - Args: - name: The name/alias of the function - shape: Tuple declaring the shape. - append_batch_size: Whether or not to append the data as a batch. - dtype: The type of data : float32, float_16, int etc - type: The output type. By default it is LOD_TENSOR. - lod_level(int): The LoD Level. 0 means the input data is not a sequence. - main_program: Name of the main program that calls this - startup_program: Name of the startup program - stop_gradient: A boolean that mentions whether gradient should flow. - - This function takes in input and based on whether data has - to be returned back as a minibatch, it creates the global variable using - the helper functions. The global variables can be accessed by all the - following operations and layers in the graph. - - All the input variables of this function are passed in as local variables - to the LayerHelper constructor. - - """ - helper = LayerHelper('data', **locals()) - shape = list(shape) - for i in xrange(len(shape)): - if shape[i] is None: - shape[i] = -1 - append_batch_size = False - elif shape[i] < 0: - append_batch_size = False - - if append_batch_size: - shape = [-1] + shape # append batch size as -1 - - return helper.create_global_variable( - name=name, - shape=shape, - dtype=dtype, - type=type, - stop_gradient=stop_gradient, - lod_level=lod_level) - - -def create_tensor(dtype, name=None, main_program=None, startup_program=None): - helper = LayerHelper("create_tensor", **locals()) - return helper.create_variable(name=helper.name, dtype=dtype) - - -def _convert_(name): - """ - Formatting. - - Args: - name: The name/alias - - This function takes in a name and converts it to a standard format of - group1_group2. Where as per the regular expression, group1 can have - alphabets and numbers and group2 has capital alphabets. 
- - """ - s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name) - return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower() - - -def _generate_doc_string_(op_proto): - """ - Generate docstring by OpProto - - Args: - op_proto (framework_pb2.OpProto): a protobuf message typed OpProto - - Returns: - str: the document string - """ - - def _type_to_str_(tp): - return framework_pb2.AttrType.Name(tp) - - if not isinstance(op_proto, framework_pb2.OpProto): - raise TypeError("OpProto should be `framework_pb2.OpProto`") - - buf = cStringIO.StringIO() - buf.write(op_proto.comment) - buf.write('\nArgs:\n') - for each_input in op_proto.inputs: - line_begin = ' {0}: '.format(_convert_(each_input.name)) - buf.write(line_begin) - buf.write(each_input.comment) - buf.write('\n') - buf.write(' ' * len(line_begin)) - buf.write('Duplicable: ') - buf.write(str(each_input.duplicable)) - buf.write(' Optional: ') - buf.write(str(each_input.dispensable)) - buf.write('\n') - - for each_attr in op_proto.attrs: - buf.write(' ') - buf.write(each_attr.name) - buf.write(' (') - buf.write(_type_to_str_(each_attr.type)) - buf.write('): ') - buf.write(each_attr.comment) - buf.write('\n') - - if len(op_proto.outputs) != 0: - buf.write('\nReturns:\n') - buf.write(' ') - for each_opt in op_proto.outputs: - if not each_opt.intermediate: - break - buf.write(each_opt.comment) - - return buf.getvalue() - - -def _create_op_func_(op_type): - """ - Create an Operator for a Function. - - Args: - op_type: The name of the operator to be created - - This function takes in the operator type (sigmoid, mean , average etc) and - creates the operator functionality. - - """ - op_proto = OpProtoHolder.instance().get_op_proto(op_type) - not_intermediate_outputs = \ - filter(lambda output: not output.intermediate, op_proto.outputs) - intermediate_outputs = \ - filter(lambda output: output.intermediate, op_proto.outputs) - - if len(not_intermediate_outputs) != 1: - raise ValueError("Only one non intermediate output operator can be", - "automatically generated") - - if not_intermediate_outputs[0].duplicable: - raise ValueError( - "Only non duplicable op can be automatically generated") - - for output in intermediate_outputs: - if output.duplicable: - raise ValueError("The op can be automatically generated only when ", - "all intermediate ops are not duplicable") - - o_name = not_intermediate_outputs[0].name - intermediate_output_names = [output.name for output in intermediate_outputs] - - def infer_and_check_dtype(op_proto, **kwargs): - """ - This function performs the sanity check for dtype and - instance type. - """ - dtype = None - for ipt in op_proto.inputs: - name = _convert_(ipt.name) - val = kwargs.pop(name, []) - if not isinstance(val, list) and not isinstance(val, tuple): - val = [val] - for each in val: - if not isinstance(each, Variable): - raise ValueError("input of {0} must be variable".format( - op_type)) - - if dtype is None: - dtype = each.dtype - elif dtype != each.dtype: - raise ValueError( - "operator {0} must input same dtype. 
{1} vs {2}".format( - op_type, dtype, each.dtype)) - - return dtype - - def func(**kwargs): - helper = LayerHelper(op_type, **kwargs) - - dtype = infer_and_check_dtype(op_proto, **kwargs) - - inputs = dict() - for ipt in op_proto.inputs: - name = _convert_(ipt.name) - val = kwargs.pop(name, []) - if not isinstance(val, list) and not isinstance(val, tuple): - val = [val] - inputs[ipt.name] = val - - outputs = dict() - out = helper.create_tmp_variable(dtype=dtype) - outputs[o_name] = [out] - for name in intermediate_output_names: - outputs[name] = [helper.create_tmp_variable(dtype=dtype)] - helper.append_op( - type=op_type, inputs=inputs, outputs=outputs, attrs=kwargs) - return helper.append_activation(out) - - func.__name__ = op_type - globals()[op_type] = func - func.__doc__ = _generate_doc_string_(op_proto) - global __all__ - __all__.append(op_type) - - -_create_op_func_('mean') -_create_op_func_('mul') -_create_op_func_('elementwise_add') -_create_op_func_('elementwise_div') -_create_op_func_('dropout') -_create_op_func_('reshape') -_create_op_func_('sigmoid') -_create_op_func_('scale') -_create_op_func_('reshape') -_create_op_func_('transpose') -_create_op_func_('sigmoid_cross_entropy_with_logits') - - -def cast(x, dtype, main_program=None): - """ - This function takes in the input with input_dtype - and casts it to the output_dtype as the output. - """ - helper = LayerHelper('cast', **locals()) - out = helper.create_tmp_variable(dtype=dtype) - helper.append_op( - type='cast', - inputs={'X': [x]}, - outputs={'Out': [out]}, - attrs={'in_dtype': x.dtype, - 'out_dtype': out.dtype}) - return out - - -def concat(input, axis, main_program=None, startup_program=None): - """ - This function concats the input along the axis mentioned - and returns that as the output. - """ - helper = LayerHelper('concat', **locals()) - out = helper.create_tmp_variable(dtype=helper.input_dtype()) - helper.append_op( - type='concat', - inputs={'X': input}, - outputs={'Out': [out]}, - attrs={'axis': axis}) - return out - - -def sums(input, out=None, main_program=None, startup_program=None): - """ - This function takes in the input and performs the sum operation on it - and returns that as the output. 
- """ - helper = LayerHelper('sum', **locals()) - if out is None: - out = helper.create_tmp_variable(dtype=helper.input_dtype()) - helper.append_op(type='sum', inputs={'X': input}, outputs={'Out': out}) - return out - - -def linear_chain_crf(input, - label, - param_attr=None, - main_program=None, - startup_program=None): - helper = LayerHelper('linear_chain_crf', **locals()) - size = input.shape[1] - transition = helper.create_parameter( - attr=helper.param_attr, - shape=[size + 2, size], - dtype=helper.input_dtype()) - alpha = helper.create_tmp_variable(dtype=helper.input_dtype()) - emission_exps = helper.create_tmp_variable(dtype=helper.input_dtype()) - transition_exps = helper.create_tmp_variable(dtype=helper.input_dtype()) - log_likelihood = helper.create_tmp_variable(dtype=helper.input_dtype()) - helper.append_op( - type='linear_chain_crf', - inputs={"Emission": [input], - "Transition": transition, - "Label": label}, - outputs={ - "Alpha": [alpha], - "EmissionExps": [emission_exps], - "TransitionExps": transition_exps, - "LogLikelihood": log_likelihood - }) - - return log_likelihood - - -def crf_decoding(input, - param_attr, - label=None, - main_program=None, - startup_program=None): - helper = LayerHelper('crf_decoding', **locals()) - transition = helper.get_parameter(param_attr.name) - viterbi_path = helper.create_tmp_variable(dtype=helper.input_dtype()) - helper.append_op( - type='crf_decoding', - inputs={"Emission": [input], - "Transition": transition, - "Label": label}, - outputs={"ViterbiPath": [viterbi_path]}) - - return viterbi_path - - -def assign(input, output, main_program=None, startup_program=None): - helper = LayerHelper('assign', **locals()) - helper.append_op( - type='scale', - inputs={'X': [input]}, - outputs={'Out': [output]}, - attrs={'scale': 1.0}) - return output - - -def split_lod_tensor(input, - mask, - level=0, - main_program=None, - startup_program=None): - helper = LayerHelper('split_lod_tensor', **locals()) - out_true = helper.create_tmp_variable(dtype=input.dtype) - out_false = helper.create_tmp_variable(dtype=input.dtype) - helper.append_op( - type='split_lod_tensor', - inputs={ - 'X': input, - 'Mask': mask, - }, - outputs={'OutTrue': out_true, - 'OutFalse': out_false}, - attrs={'level': level}) - return out_true, out_false - - -def merge_lod_tensor(in_true, - in_false, - x, - mask, - level=0, - main_program=None, - startup_program=None): - helper = LayerHelper('merge_lod_tensor', **locals()) - out = helper.create_tmp_variable(dtype=in_true.dtype) - helper.append_op( - type='merge_lod_tensor', - inputs={'X': x, - 'Mask': mask, - 'InTrue': in_true, - 'InFalse': in_false}, - outputs={'Out': out}, - attrs={'level': level}) - return out - - -def cos_sim(X, Y, **kwargs): - """ - This function performs the cosine similarity between two tensors - X and Y and returns that as the output. - """ - helper = LayerHelper('cos_sim', **kwargs) - out = helper.create_tmp_variable(dtype=X.dtype) - xnorm = helper.create_tmp_variable(dtype=X.dtype) - ynorm = helper.create_tmp_variable(dtype=X.dtype) - helper.append_op( - type='cos_sim', - inputs={'X': [X], - 'Y': [Y]}, - outputs={'Out': [out], - 'XNorm': [xnorm], - 'YNorm': [ynorm]}) - return out - - -def cross_entropy(input, label, **kwargs): - """ - This function computes cross_entropy using the input and label. 
- """ - helper = LayerHelper('cross_entropy', **kwargs) - out = helper.create_tmp_variable(dtype=input.dtype) - helper.append_op( - type='cross_entropy', - inputs={'X': [input], - 'Label': [label]}, - outputs={'Y': [out]}, - attrs=kwargs) - return out - - -def square_error_cost(input, label, **kwargs): - """ - This functions returns the squared error cost using the input and label. - The output is appending the op to do the above. - """ - helper = LayerHelper('square_error_cost', **kwargs) - minus_out = helper.create_tmp_variable(dtype=input.dtype) - helper.append_op( - type='elementwise_sub', - inputs={'X': [input], - 'Y': [label]}, - outputs={'Out': [minus_out]}) - - square_out = helper.create_tmp_variable(dtype=input.dtype) - helper.append_op( - type='square', inputs={'X': [minus_out]}, outputs={'Y': [square_out]}) - return square_out - - -def accuracy(input, label, k=1, correct=None, total=None, **kwargs): - """ - This function computes the accuracy using the input and label. - The output is the top_k inputs and their indices. - """ - helper = LayerHelper("accuracy", **kwargs) - topk_out = helper.create_tmp_variable(dtype=input.dtype) - topk_indices = helper.create_tmp_variable(dtype="int64") - helper.append_op( - type="top_k", - inputs={"X": [input]}, - outputs={"Out": [topk_out], - "Indices": [topk_indices]}, - attrs={"k": k}) - acc_out = helper.create_tmp_variable(dtype="float32") - if correct is None: - correct = helper.create_tmp_variable(dtype="int64") - if total is None: - total = helper.create_tmp_variable(dtype="int64") - helper.append_op( - type="accuracy", - inputs={ - "Out": [topk_out], - "Indices": [topk_indices], - "Label": [label] - }, - outputs={ - "Accuracy": [acc_out], - "Correct": [correct], - "Total": [total], - }) - return acc_out - - -def chunk_eval(input, - label, - chunk_scheme, - num_chunk_types, - excluded_chunk_types=None, - **kwargs): - """ - This function computes the accuracy using the input and label. - The output is the top_k inputs and their indices. - """ - helper = LayerHelper("chunk_eval", **kwargs) - - # prepare output - precision = helper.create_tmp_variable(dtype="float32") - recall = helper.create_tmp_variable(dtype="float32") - f1_score = helper.create_tmp_variable(dtype="float32") - - helper.append_op( - type="chunk_eval", - inputs={"Inference": [input], - "Label": [label]}, - outputs={ - "Precision": [precision], - "Recall": [recall], - "F1-Score": [f1_score] - }, - attrs={ - "num_chunk_types": num_chunk_types, - 'chunk_scheme': chunk_scheme, - 'excluded_chunk_types': excluded_chunk_types or [] - }) - return precision, recall, f1_score - - -def sequence_conv(input, - num_filters, - filter_size=3, - filter_stride=1, - padding=None, - bias_attr=None, - param_attr=None, - act=None, - main_program=None, - startup_program=None): - """ - This function creates the op for sequence_conv, using the inputs and - other convolutional configurations for the filters and stride as given - in the input parameters to the function. - """ - - # FIXME(dzh) : want to unify the argument of python layer - # function. So we ignore some unecessary attributes. - # such as, padding_trainable, context_start. 
-
-    helper = LayerHelper('sequence_conv', **locals())
-    dtype = helper.input_dtype()
-    filter_shape = [filter_size * input.shape[1], num_filters]
-    filter_param = helper.create_parameter(
-        attr=helper.param_attr, shape=filter_shape, dtype=dtype)
-    pre_bias = helper.create_tmp_variable(dtype)
-
-    helper.append_op(
-        type='sequence_conv',
-        inputs={
-            'X': [input],
-            'Filter': [filter_param],
-        },
-        outputs={"Out": pre_bias},
-        attrs={
-            'contextStride': filter_stride,
-            'contextStart': -int(filter_size / 2),
-            'contextLength': filter_size
-        })
-    pre_act = helper.append_bias_op(pre_bias)
-    return helper.append_activation(pre_act)
-
-
-def conv2d(input,
-           num_filters,
-           filter_size,
-           stride=None,
-           padding=None,
-           groups=None,
-           param_attr=None,
-           bias_attr=None,
-           act=None,
-           name=None,
-           main_program=None,
-           startup_program=None):
-    """
-    This function creates the op for a 2-dimensional Convolution.
-    It is performed using the filter parameters (size, dimensionality, etc.),
-    the stride and other configurations of a convolution operation.
-    This function can also append an activation on top of the
-    conv-2d output, if mentioned in the input parameters.
-    """
-
-    if stride is None:
-        stride = [1, 1]
-    helper = LayerHelper('conv2d', **locals())
-    dtype = helper.input_dtype()
-
-    num_channels = input.shape[1]
-    if groups is None:
-        num_filter_channels = num_channels
-    else:
-        if num_channels % groups != 0:
-            raise ValueError("num_channels must be divisible by groups.")
-        num_filter_channels = num_channels / groups
-
-    if isinstance(filter_size, int):
-        filter_size = [filter_size, filter_size]
-    if isinstance(stride, int):
-        stride = [stride, stride]
-    if isinstance(padding, int):
-        padding = [padding, padding]
-
-    input_shape = input.shape
-    filter_shape = [num_filters, num_filter_channels] + filter_size
-
-    def _get_default_param_initializer():
-        std = (2.0 / (filter_size[0]**2 * num_channels))**0.5
-        return Normal(0.0, std, 0)
-
-    filter_param = helper.create_parameter(
-        attr=helper.param_attr,
-        shape=filter_shape,
-        dtype=dtype,
-        default_initializer=_get_default_param_initializer())
-
-    pre_bias = helper.create_tmp_variable(dtype)
-
-    helper.append_op(
-        type='conv2d_cudnn',
-        inputs={
-            'Input': input,
-            'Filter': filter_param,
-        },
-        outputs={"Output": pre_bias},
-        attrs={'strides': stride,
-               'paddings': padding,
-               'groups': groups})
-
-    pre_act = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2)
-
-    return helper.append_activation(pre_act)
-
-
-def sequence_pool(input, pool_type, **kwargs):
-    """
-    This function adds the operator for sequence pooling.
-    It is applied on top of the input using the pool_type mentioned
-    in the parameters.
-    """
-    helper = LayerHelper('sequence_pool', input=input, **kwargs)
-    dtype = helper.input_dtype()
-    pool_out = helper.create_tmp_variable(dtype)
-    max_index = helper.create_tmp_variable(dtype)
-
-    helper.append_op(
-        type="sequence_pool",
-        inputs={"X": input},
-        outputs={"Out": pool_out,
-                 "MaxIndex": max_index},
-        attrs={"pooltype": pool_type.upper()})
-
-    return pool_out
-
-
-def pool2d(input,
-           pool_size,
-           pool_type,
-           pool_stride=None,
-           pool_padding=None,
-           global_pooling=False,
-           main_program=None,
-           startup_program=None):
-    """
-    This function adds the operator for pooling in 2 dimensions, using the
-    pooling configurations mentioned in the input parameters.
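A side note on conv2d above: its default filter initializer is a He-style (MSRA) normal with std = sqrt(2 / (k^2 * C_in)). A quick check of the arithmetic (editor's sketch):

```python
# std for a 3x3 filter over 64 input channels, as computed by
# _get_default_param_initializer in conv2d above.
filter_size = [3, 3]
num_channels = 64
std = (2.0 / (filter_size[0] ** 2 * num_channels)) ** 0.5
print(std)  # ~0.0589
```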
- """ - if pool_padding is None: - pool_padding = [0, 0] - if pool_stride is None: - pool_stride = [1, 1] - if pool_type not in ["max", "avg"]: - raise ValueError( - "Unknown pool_type: '%s'. It can only be 'max' or 'avg'.", - str(pool_type)) - if isinstance(pool_size, int): - pool_size = [pool_size, pool_size] - if isinstance(pool_stride, int): - pool_stride = [pool_stride, pool_stride] - if isinstance(pool_padding, int): - pool_padding = [pool_padding, pool_padding] - - helper = LayerHelper('pool2d', **locals()) - dtype = helper.input_dtype() - pool_out = helper.create_tmp_variable(dtype) - - helper.append_op( - type="pool2d", - inputs={"X": input}, - outputs={"Out": pool_out}, - attrs={ - "pooling_type": pool_type, - "ksize": pool_size, - "global_pooling": global_pooling, - "strides": pool_stride, - "paddings": pool_padding - }) - - return pool_out - - -def batch_norm(input, - act=None, - is_test=False, - momentum=0.9, - epsilon=1e-05, - param_attr=None, - bias_attr=None, - data_layout='NCHW', - main_program=None, - startup_program=None): - """ - This function helps create an operator to implement - the BatchNorm layer using the configurations from the input parameters. - """ - helper = LayerHelper('batch_norm', **locals()) - dtype = helper.input_dtype() - - input_shape = input.shape - if data_layout == 'NCHW': - channel_num = input_shape[1] - else: - if data_layout == 'NHWC': - channel_num = input_shape[-1] - else: - raise ValueError("unsupported data layout:" + data_layout) - - param_shape = [channel_num] - - # create parameter - scale = helper.create_parameter( - attr=helper.param_attr, - shape=param_shape, - dtype=dtype, - default_initializer=Constant(1.0)) - - bias = helper.create_parameter( - attr=helper.param_attr, shape=param_shape, dtype=dtype, is_bias=True) - - mean = helper.create_global_variable( - dtype=input.dtype, shape=param_shape, persistable=True) - helper.set_variable_initializer(var=mean, initializer=Constant(0.0)) - - variance = helper.create_global_variable( - dtype=input.dtype, shape=param_shape, persistable=True) - helper.set_variable_initializer(var=variance, initializer=Constant(1.0)) - - # create output - # mean and mean_out share the same memory - mean_out = mean - # variance and variance out share the same memory - variance_out = variance - saved_mean = helper.create_tmp_variable(dtype) - saved_variance = helper.create_tmp_variable(dtype) - - batch_norm_out = helper.create_tmp_variable(dtype) - - helper.append_op( - type="batch_norm", - inputs={ - "X": input, - "Scale": scale, - "Bias": bias, - "Mean": mean, - "Variance": variance - }, - outputs={ - "Y": batch_norm_out, - "MeanOut": mean_out, - "VarianceOut": variance_out, - "SavedMean": saved_mean, - "SavedVariance": saved_variance - }, - attrs={"momentum": momentum, - "epsilon": epsilon, - "is_test": is_test}) - - return helper.append_activation(batch_norm_out) - - -def beam_search_decode(ids, scores, main_program=None, startup_program=None): - helper = LayerHelper('beam_search_decode', **locals()) - sentence_ids = helper.create_tmp_variable(dtype=ids.dtype) - sentence_scores = helper.create_tmp_variable(dtype=ids.dtype) - - helper.append_op( - type="beam_search_decode", - inputs={"Ids": ids, - "Scores": scores}, - outputs={ - "SentenceIds": sentence_ids, - "SentenceScores": sentence_scores - }) - - return sentence_ids, sentence_scores - - -class BlockGuard(object): - """ - BlockGuard class. - - BlockGuard class is used to create a sub-block in a program by - using the Python `with` keyword. 
- """ - - def __init__(self, main_program): - if not isinstance(main_program, Program): - raise TypeError("BlockGuard takes a program") - self.main_program = main_program - - def __enter__(self): - self.main_program.create_block() - - def __exit__(self, exc_type, exc_val, exc_tb): - self.main_program.rollback() - if exc_type is not None: - return False # re-raise exception - return True - - -class StaticRNNGuard(BlockGuard): - """ - StaticRNNGuard class. - - StaticRNNGuard class is used to create a StaticRNN block in a program. - """ - - def __init__(self, rnn): - if not isinstance(rnn, StaticRNN): - raise TypeError("StaticRNNGuard takes a StaticRNN") - super(StaticRNNGuard, self).__init__(rnn.helper.main_program) - self.rnn = rnn - - def __enter__(self): - self.rnn.status = StaticRNN.IN_RNN_BLOCK - return super(StaticRNNGuard, self).__enter__() - - def __exit__(self, exc_type, exc_val, exc_tb): - if exc_type is not None: - return False - self.rnn.status = StaticRNN.AFTER_RNN_BLOCK - self.rnn.complete_rnn_op() - return super(StaticRNNGuard, self).__exit__(exc_type, exc_val, exc_tb) - - -class StaticRNNMemoryLink(object): - """ - StaticRNNMemoryLink class. - - Args: - init: the initial variable for Memory - init: Variable - pre_mem: the memory variable in previous time step - pre_mem: Variable - mem: the memory variable in current time step - mem: Variable - - StaticRNNMemoryLink class is used to create a link between two - memory cells of a StaticRNN. - """ - - def __init__(self, init, pre_mem, mem=None): - self.init = init - self.pre_mem = pre_mem - self.mem = mem - - -class StaticRNN(object): - """ - StaticRNN class. - - StaticRNN class is used to create a StaticRNN. The RNN will have its - own parameters like inputs, outputs, memories, status and length. - """ - BEFORE_RNN_BLOCK = 0 - IN_RNN_BLOCK = 1 - AFTER_RNN_BLOCK = 2 - - def __init__(self, name=None, main_program=None): - self.helper = LayerHelper( - "static_rnn", name=name, main_program=main_program) - self.memories = {} # memory map, from pre_mem.name --> MemoryLink - self.inputs = [] # input variable list in current block - self.outputs = [] # output variable list in parent block - self.status = StaticRNN.BEFORE_RNN_BLOCK # status flag. - # sequence length, since it is a static RNN, sequence length are fixed. 
- self.seq_len = None - - def step(self): - return StaticRNNGuard(self) - - def _assert_in_rnn_block_(self, method): - if self.status != StaticRNN.IN_RNN_BLOCK: - raise ValueError("You must invoke {0} in rnn block".format(method)) - - def memory(self, - init=None, - shape=None, - batch_ref=None, - init_value=0.0, - init_batch_dim_idx=0, - ref_batch_dim_idx=1): - """ - Args: - init: boot memory, if not set, a shape, batch_ref must be provided - shape: shape of the boot memory - batch_ref: batch size reference variable - init_value: the init value of boot memory - init_batch_dim_idx: the index of batch size in init's dimension - ref_batch_dim_idx: the index of batch size in batch_ref's dimension - """ - self._assert_in_rnn_block_('memory') - if init is None: - if shape is None or batch_ref is None: - raise ValueError( - "if init is None, memory at least need shape and batch_ref") - parent_block = self.parent_block() - var_name = unique_name("@".join([self.helper.name, "memory_boot"])) - boot_var = parent_block.create_var( - name=var_name, - shape=shape, - dtype=batch_ref.dtype, - persistable=False) - - parent_block.append_op( - type="fill_constant_batch_size_like", - inputs={'Input': [batch_ref]}, - outputs={'Out': [boot_var]}, - attrs={ - 'value': init_value, - 'shape': boot_var.shape, - 'dtype': boot_var.dtype, - 'input_dim_idx': ref_batch_dim_idx, - 'output_dim_idx': init_batch_dim_idx - }) - - return self.memory(init=boot_var) - else: - pre_mem = self.helper.create_variable( - name=unique_name("@".join([self.helper.name, "mem"])), - dtype=init.dtype, - shape=init.shape) - self.memories[pre_mem.name] = StaticRNNMemoryLink( - init=init, pre_mem=pre_mem) - return pre_mem - - def step_input(self, x): - self._assert_in_rnn_block_('step_input') - if not isinstance(x, Variable): - raise TypeError("step input takes a Variable") - if self.seq_len is None: - self.seq_len = x.shape[0] - elif self.seq_len != x.shape[0]: - raise ValueError("Static RNN only take fix seq_len input") - - ipt = self.helper.create_variable( - name=x.name, dtype=x.dtype, shape=list(x.shape[1:]), type=x.type) - self.inputs.append(ipt) - return ipt - - def step_output(self, o): - self._assert_in_rnn_block_('step_output') - if not isinstance(o, Variable): - raise TypeError("step output takes a Variable") - - tmp_o = self.helper.create_tmp_variable(dtype=o.dtype) - self.helper.append_op( - type='rnn_memory_helper', - inputs={'X': [o]}, - outputs={'Out': tmp_o}, - attrs={'dtype': o.dtype}) - - out_var = self.parent_block().create_var( - name=tmp_o.name, - shape=[self.seq_len] + list(tmp_o.shape), - dtype=tmp_o.dtype) - - self.outputs.append(out_var) - - def output(self, *outputs): - for each in outputs: - self.step_output(each) - - def update_memory(self, mem, var): - if not isinstance(mem, Variable) or not isinstance(var, Variable): - raise TypeError("update memory should take variables") - self.memories[mem.name].mem = var - - def parent_block(self): - prog = self.helper.main_program - parent_idx = prog.current_block().parent_idx - assert parent_idx >= 0 - parent_block = prog.block(parent_idx) - return parent_block - - def __call__(self, *args, **kwargs): - if self.status != StaticRNN.AFTER_RNN_BLOCK: - raise ValueError("RNN output can only be retrieved after rnn block") - if len(self.outputs) == 0: - raise ValueError("RNN has no output") - elif len(self.outputs) == 1: - return self.outputs[0] - else: - return self.outputs - - def complete_rnn_op(self): - main_program = self.helper.main_program - rnn_block = 
main_program.current_block() - parent_block = self.parent_block() - - local_inputs = set() - - for op in rnn_block.ops: - assert isinstance(op, Operator) - for oname in op.output_names: - for out_var_name in op.output(oname): - local_inputs.add(out_var_name) - - for var in self.inputs: - local_inputs.add(var.name) - for m in self.memories: - local_inputs.add(m) - - params = list() - for op in rnn_block.ops: - assert isinstance(op, Operator) - for iname in op.input_names: - for in_var_name in op.input(iname): - if in_var_name not in local_inputs: - params.append(in_var_name) - - parameters = [parent_block.var(name) for name in params] - - step_scope = parent_block.create_var( - type=core.VarDesc.VarType.STEP_SCOPES) - - inlinks = [parent_block.var(i.name) for i in self.inputs] - outlinks = self.outputs - - boot_memories = [] - pre_memories = [] - memories = [] - for _, mem in self.memories.iteritems(): - boot_memories.append(mem.init) - pre_memories.append(mem.pre_mem.name) - mem_var = rnn_block.var(mem.mem.name) - assert isinstance(mem_var, Variable) - new_mem = self.helper.create_tmp_variable(dtype=mem_var.dtype) - - rnn_block.append_op( - type='rnn_memory_helper', - inputs={'X': [mem_var]}, - outputs={'Out': [new_mem]}, - attrs={'dtype': mem_var.dtype}) - - memories.append(new_mem.name) - - parent_block.append_op( - type='recurrent', - inputs={ - 'inputs': inlinks, - 'initial_states': boot_memories, - 'parameters': parameters - }, - outputs={'outputs': outlinks, - 'step_scopes': [step_scope]}, - attrs={ - 'ex_states': pre_memories, - 'states': memories, - 'step_block': rnn_block - }) - - -class WhileGuard(BlockGuard): - def __init__(self, while_op): - if not isinstance(while_op, While): - raise TypeError("WhileGuard takes a while op") - super(WhileGuard, self).__init__(while_op.helper.main_program) - self.while_op = while_op - - def __enter__(self): - self.while_op.status = While.IN_WHILE_BLOCK - return super(WhileGuard, self).__enter__() - - def __exit__(self, exc_type, exc_val, exc_tb): - if exc_type is not None: - return False - self.while_op.status = While.AFTER_WHILE_BLOCK - self.while_op.complete() - return super(WhileGuard, self).__exit__(exc_type, exc_val, exc_tb) - - -class While(object): - BEFORE_WHILE_BLOCK = 0 - IN_WHILE_BLOCK = 1 - AFTER_WHILE_BLOCK = 2 - - def __init__(self, cond, name=None, main_program=None): - self.helper = LayerHelper("while", name=name, main_program=main_program) - self.status = While.BEFORE_WHILE_BLOCK - if not isinstance(cond, Variable): - raise TypeError("condition should be a variable") - assert isinstance(cond, Variable) - if cond.dtype != core.DataType.BOOL: - raise TypeError("condition should be a bool variable") - if reduce(lambda a, b: a * b, cond.shape, 1) != 1: - raise TypeError("condition should be a bool scalar") - self.cond_var = cond - - def block(self): - return WhileGuard(self) - - def complete(self): - main_program = self.helper.main_program - while_block = main_program.current_block() - parent_block = main_program.block(main_program.current_block() - .parent_idx) - - inner_outputs = {self.cond_var.name} - x_name_list = set() - for op in while_block.ops: - for iname in op.input_names: - for in_var_name in op.input(iname): - if in_var_name not in inner_outputs: - x_name_list.add(in_var_name) - - for oname in op.output_names: - for out_var_name in op.output(oname): - inner_outputs.add(out_var_name) - - out_vars = [] - for inner_out_name in inner_outputs: - if inner_out_name in parent_block.vars: - 
out_vars.append(parent_block.var(inner_out_name)) - - step_scope = parent_block.create_var( - type=core.VarDesc.VarType.STEP_SCOPES) - - parent_block.append_op( - type='while', - inputs={ - 'X': [parent_block.var(x_name) for x_name in x_name_list], - 'Condition': [self.cond_var] - }, - outputs={'Out': out_vars, - 'StepScopes': [step_scope]}, - attrs={'step_block': while_block}) - - -def lstm(x, - c_pre_init, - hidden_dim, - forget_bias=None, - main_program=None, - startup_program=None): - """ - This function helps create an operator for the LSTM (Long Short Term - Memory) cell that can be used inside an RNN. - """ - helper = LayerHelper('lstm_unit', **locals()) - rnn = StaticRNN() - with rnn.step(): - c_pre = rnn.memory(init=c_pre_init) - x_t = rnn.step_input(x) - - before_fc = concat( - input=[x_t, c_pre], - axis=1, - main_program=main_program, - startup_program=startup_program) - after_fc = fc(input=before_fc, - size=hidden_dim * 4, - main_program=main_program, - startup_program=startup_program) - - dtype = x.dtype - c = helper.create_tmp_variable(dtype) - h = helper.create_tmp_variable(dtype) - - helper.append_op( - type='lstm_unit', - inputs={"X": after_fc, - "C_prev": c_pre}, - outputs={"C": c, - "H": h}, - attrs={"forget_bias": forget_bias}) - - rnn.update_memory(c_pre, c) - rnn.output(h) - - return rnn() - - -def lod_rank_table(x, level=0, main_program=None): - """ - This function creates an operator for creating a LOD_RANK_TABLE - using the input x. - """ - helper = LayerHelper("lod_rank_table", **locals()) - table = helper.create_variable( - type=core.VarDesc.VarType.LOD_RANK_TABLE, - name=unique_name("lod_rank_table")) - helper.append_op( - type='lod_rank_table', - inputs={'X': x}, - outputs={'Out': table}, - attrs={'level': level}) - return table - - -def max_sequence_len(rank_table, main_program=None): - """ - This function creates an operator to calculate the length of - max seqence through input rank_table(should be a lod_rank_table) - """ - helper = LayerHelper("max_seqence_len", **locals()) - res = helper.create_tmp_variable(dtype="int64") - helper.append_op( - type="max_sequence_len", - inputs={"RankTable": rank_table}, - outputs={"Out": res}) - return res - - -def topk(input, k, main_program=None, startup_program=None): - helper = LayerHelper('topk', **locals()) - topk_out = helper.create_tmp_variable(dtype=input.data_type) - topk_indices = helper.create_tmp_variable(dtype='int64') - helper.append_op( - type='top_k', - inputs={'X': [input]}, - outputs={'Out': [topk_out], - 'Indices': [topk_indices]}, - attrs={'k': k}) - return topk_out, topk_indices - - -def lod_tensor_to_array(x, table, main_program=None): - """ - This function creates an operator to convert an LOD_Tensor to - an array. - """ - helper = LayerHelper("lod_tensor_to_array", **locals()) - array = helper.create_variable( - name=unique_name("lod_tensor_to_array"), - type=core.VarDesc.VarType.LOD_TENSOR_ARRAY, - dtype=x.dtype) - helper.append_op( - type='lod_tensor_to_array', - inputs={'X': x, - 'RankTable': table}, - outputs={'Out': array}) - return array - - -def array_to_lod_tensor(x, table, main_program=None, startup_program=None): - """ - This function creates an operator to convert an array to a - LOD_Tensor. 
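lod_rank_table and lod_tensor_to_array above reorder a batch of variable-length sequences so a dynamic RNN can shrink its working batch step by step. Conceptually (an editor's sketch; the rank table pairs each sequence index with its length, sorted by descending length):

```python
# LoD offsets of a batch of 3 sequences -> lengths 3, 2 and 4.
lod = [0, 3, 5, 9]
lengths = [lod[i + 1] - lod[i] for i in range(len(lod) - 1)]

# Rank table: (sequence index, length), longest first.
rank_table = sorted(enumerate(lengths), key=lambda t: -t[1])
print(rank_table)  # [(2, 4), (0, 3), (1, 2)]
```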
- """ - helper = LayerHelper("array_to_lod_tensor", **locals()) - tmp = helper.create_tmp_variable(dtype=x.dtype) - helper.append_op( - type="array_to_lod_tensor", - inputs={'X': x, - 'RankTable': table}, - outputs={'Out': tmp}) - return tmp - - -def fill_constant(shape, - dtype, - value, - out=None, - main_program=None, - startup_program=None): - """ - This function creates a tensor , with shape as mentioned in the input and - specified dtype and fills this up with a constant value that - comes in the input. It also sets the stop_gradient to be True. - """ - helper = LayerHelper("fill_constant", **locals()) - if out is None: - out = helper.create_tmp_variable(dtype=dtype) - helper.append_op( - type='fill_constant', - inputs={}, - outputs={'Out': [out]}, - attrs={'shape': shape, - 'dtype': out.dtype, - 'value': float(value)}) - out.stop_gradient = True - return out - - -def fill_constant_batch_size_like(input, - shape, - dtype, - value, - input_dim_idx=0, - output_dim_idx=0, - main_program=None, - startup_program=None): - helper = LayerHelper("fill_constant_batch_size_like", **locals()) - out = helper.create_tmp_variable(dtype=dtype) - helper.append_op( - type='fill_constant_batch_size_like', - inputs={'Input': input}, - outputs={'Out': [out]}, - attrs={ - 'shape': shape, - 'dtype': out.dtype, - 'value': float(value), - 'input_dim_idx': input_dim_idx, - 'output_dim_idx': output_dim_idx - }) - out.stop_gradient = True - return out - - -def ones(shape, dtype, main_program=None): - """ - This function performs the same function as fill_constant() declared above - with the constant value being 1.0. - """ - return fill_constant(value=1.0, **locals()) - - -def zeros(shape, dtype, main_program=None): - """ - This function performs the same function as fill_constant() declared above - with the constant value being 0.0. - """ - return fill_constant(value=0.0, **locals()) - - -def increment(x, - value=1.0, - in_place=True, - main_program=None, - startup_program=None): - """ - This function creates an operator to increment each value in the input - `x` by an amount: `value` as mentioned in the input parameter. This - operation is performed in-place by default. - """ - helper = LayerHelper("increment", **locals()) - if not in_place: - out = helper.create_tmp_variable(dtype=x.dtype) - else: - out = x - helper.append_op( - type='increment', - inputs={'X': [x]}, - outputs={'Out': [out]}, - attrs={'step': float(value)}) - return out - - -def array_write(x, i, array=None, main_program=None, startup_program=None): - """ - This function creates an operator to write the data out as a - LOD_TENSOR_ARRAY. 
- """ - helper = LayerHelper('array_write', **locals()) - if array is None: - array = helper.create_variable( - name="{0}.out".format(helper.name), - type=core.VarDesc.VarType.LOD_TENSOR_ARRAY, - dtype=x.dtype) - helper.append_op( - type='write_to_array', - inputs={'X': [x], - 'I': [i]}, - outputs={'Out': [array]}) - return array - - -def create_array(dtype, main_program=None): - helper = LayerHelper("array", **locals()) - return helper.create_variable( - name="{0}.out".format(helper.name), - type=core.VarDesc.VarType.LOD_TENSOR_ARRAY, - dtype=dtype) - - -def less_than(x, y, cond=None, main_program=None, **ignored): - helper = LayerHelper("less_than", **locals()) - if cond is None: - cond = helper.create_tmp_variable(dtype='bool') - cond.stop_gradient = True - - helper.append_op( - type='less_than', inputs={'X': [x], - 'Y': [y]}, outputs={'Out': [cond]}) - return cond - - -def array_read(array, i, main_program=None, startup_program=None): - """ - This function creates an operator to read the data in as a - LOD_TENSOR_ARRAY. - """ - helper = LayerHelper('array_read', **locals()) - if not isinstance( - array, - Variable) or array.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY: - raise TypeError("array should be tensor array vairable") - out = helper.create_tmp_variable(dtype=array.dtype) - helper.append_op( - type='read_from_array', - inputs={'X': [array], - 'I': [i]}, - outputs={'Out': [out]}) - return out - - -def shrink_memory(x, i, table, main_program=None, startup_program=None): - """ - This function creates an operator to shrink_rnn_memory using the RankTable - as mentioned in the input parameter. - """ - helper = LayerHelper('shrink_memory', **locals()) - out = helper.create_tmp_variable(dtype=x.dtype) - helper.append_op( - type='shrink_rnn_memory', - inputs={'X': [x], - 'I': [i], - 'RankTable': [table]}, - outputs={'Out': [out]}, - attrs={}) - return out - - -def array_length(array, main_program=None): - """ - This function creates an operator to find the length of the - LOD_TENSOR_ARRAY. - """ - helper = LayerHelper('array_length', **locals()) - tmp = helper.create_tmp_variable(dtype='int64') - tmp.stop_gradient = True - helper.append_op( - type='lod_array_length', inputs={'X': [array]}, outputs={'Out': [tmp]}) - return tmp - - -def conv2d_transpose(input, - num_filters, - output_size=None, - filter_size=None, - padding=None, - stride=None, - param_attr=None, - main_program=None, - startup_program=None): - """ - The transpose of conv2d layer. - - This layer is also known as deconvolution layer. - - Args: - input(Variable): The input image with [N, C, H, W] format. - num_filters(int): The number of filter. It is as same as the output - image channel. - output_size(int|tuple|None): The output image size. If output size is a - tuple, it must contain two integers, (image_H, image_W). This - parameter only works when filter_size is None. - filter_size(int|tuple|None): The filter size. If filter_size is a tuple, - it must contain two integers, (filter_size_H, filter_size_W). - Otherwise, the filter will be a square. None if use output size to - calculate filter_size - padding(int|tuple): The padding size. If padding is a tuple, it must - contain two integers, (padding_H, padding_W). Otherwise, the - padding_H = padding_W = padding. - stride(int|tuple): The stride size. If stride is a tuple, it must - contain two integers, (stride_H, stride_W). Otherwise, the - stride_H = stride_W = stride. - param_attr: Parameter Attribute. 
- main_program(Program): the main program - startup_program(Program): the startup program - - Returns: - Variable: Output image. - """ - helper = LayerHelper("conv2d_transpose", **locals()) - if not isinstance(input, Variable): - raise TypeError("Input of conv2d_transpose must be Variable") - input_channel = input.shape[1] - - op_attr = dict() - - if isinstance(padding, int): - op_attr['paddings'] = [padding, padding] - elif padding is not None: - op_attr['paddings'] = padding - - if isinstance(stride, int): - op_attr['strides'] = stride - elif stride is not None: - op_attr['strides'] = stride - - if filter_size is None: - if output_size is None: - raise ValueError("output_size must be set when filter_size is None") - if isinstance(output_size, int): - output_size = [output_size, output_size] - - padding = op_attr.get('paddings', [0, 0]) - stride = op_attr.get('strides', [1, 1]) - - h_in = input.shape[2] - w_in = input.shape[3] - filter_size_h = output_size[0] - \ - (h_in - 1) * stride[0] + 2 * padding[0] - filter_size_w = output_size[1] - \ - (w_in - 1) * stride[1] + 2 * padding[1] - filter_size = [filter_size_h, filter_size_w] - elif isinstance(filter_size, int): - filter_size = [filter_size, filter_size] - - filter_shape = [input_channel, num_filters] + filter_size - img_filter = helper.create_parameter( - dtype=input.dtype, shape=filter_shape, attr=helper.param_attr) - - out = helper.create_tmp_variable(dtype=input.dtype) - helper.append_op( - type='conv2d_transpose', - inputs={'Input': [input], - 'Filter': [img_filter]}, - outputs={'Output': out}, - attrs=op_attr) - - return out - - -class ConditionalBlockGuard(BlockGuard): - def __init__(self, block): - if not isinstance(block, ConditionalBlock): - raise TypeError("block should be conditional block") - super(ConditionalBlockGuard, self).__init__(block.helper.main_program) - self.block = block - - def __enter__(self): - return super(ConditionalBlockGuard, self).__enter__() - - def __exit__(self, exc_type, exc_val, exc_tb): - self.block.complete() - return super(ConditionalBlockGuard, self).__exit__(exc_type, exc_val, - exc_tb) - - -class ConditionalBlock(object): - def __init__(self, - inputs, - name=None, - main_program=None, - startup_program=None): - for each_input in inputs: - if not isinstance(each_input, Variable): - raise TypeError("Each input should be variable") - self.inputs = inputs - self.helper = LayerHelper( - 'conditional_block', - name=name, - main_program=main_program, - startup_program=startup_program) - - def block(self): - return ConditionalBlockGuard(self) - - def complete(self): - inside_block = self.helper.main_program.current_block() - parent_block = self.helper.main_program.block(inside_block.parent_idx) - - intermediate = set() - params = set() - - for each_op in inside_block.ops: - assert isinstance(each_op, Operator) - for iname in each_op.input_names: - for in_var_name in each_op.input(iname): - if in_var_name not in intermediate: - params.add(in_var_name) - - for oname in each_op.output_names: - for out_var_name in each_op.output(oname): - intermediate.add(out_var_name) - input_set = set([ipt.name for ipt in self.inputs]) - - param_list = [ - parent_block.var(each_name) for each_name in params - if each_name not in input_set - ] - - out_list = [ - parent_block.var(var_name) for var_name in parent_block.vars - if var_name not in intermediate - ] - - step_scope = parent_block.create_var( - type=core.VarDesc.VarType.STEP_SCOPES) - parent_block.append_op( - type='conditional_block', - inputs={ - 'X': 
self.inputs, - 'Params': param_list, - }, - outputs={'Out': out_list, - 'Scope': [step_scope]}, - attrs={'block': inside_block}) - - -class IfElseBlockGuard(object): - def __init__(self, is_true, ifelse): - if not isinstance(ifelse, IfElse): - raise TypeError("ifelse must be an instance of IfElse class") - - if ifelse.status != IfElse.OUT_IF_ELSE_BLOCKS: - raise ValueError("You cannot invoke IfElse.block() inside a block") - - self.is_true = is_true - self.ie = ifelse - if is_true: - self.cond_block = ifelse.conditional_true_block - else: - self.cond_block = ifelse.conditional_false_block - - if not isinstance(self.cond_block, ConditionalBlock): - raise TypeError("Unexpected situation") - - self.cond_block = self.cond_block.block() - - def __enter__(self): - self.ie.status = IfElse.IN_IF_ELSE_TRUE_BLOCKS if self.is_true else IfElse.IN_IF_ELSE_FALSE_BLOCKS - self.cond_block.__enter__() - - def __exit__(self, exc_type, exc_val, exc_tb): - if not self.cond_block.__exit__(exc_type, exc_val, exc_tb): - # re-raise inside exception - return False - if len(self.ie.output_table[1 if self.is_true else 0]) == 0: - raise ValueError("Must set output inside block") - self.ie.status = IfElse.OUT_IF_ELSE_BLOCKS - - -class IfElse(object): - OUT_IF_ELSE_BLOCKS = 0 - IN_IF_ELSE_TRUE_BLOCKS = 1 - IN_IF_ELSE_FALSE_BLOCKS = 2 - - def __init__(self, cond, name=None, main_program=None, - startup_program=None): - if not isinstance(cond, Variable): - raise TypeError("cond must be a Variable") - self.helper = LayerHelper( - 'ifelse', - name=name, - main_program=main_program, - startup_program=startup_program) - self.cond = cond - self.input_table = {} - self.status = IfElse.OUT_IF_ELSE_BLOCKS - self.conditional_true_block = ConditionalBlock(inputs=[self.cond]) - self.conditional_false_block = ConditionalBlock(inputs=[self.cond]) - self.output_table = ([], []) # (true_outs, false_outs) - - def input(self, x): - if self.status == IfElse.OUT_IF_ELSE_BLOCKS: - raise ValueError("input must in true/false blocks") - if id(x) not in self.input_table: - parent_block = self.parent_block() - out_true = parent_block.create_var( - name=unique_name('ifelse_input' + self.helper.name), - dtype=x.dtype) - - out_false = parent_block.create_var( - name=unique_name('ifelse_input' + self.helper.name), - dtype=x.dtype) - parent_block.append_op( - type='split_lod_tensor', - inputs={ - 'X': x, - 'Mask': self.cond, - }, - outputs={'OutTrue': out_true, - 'OutFalse': out_false}, - attrs={'level': 0}) - self.input_table[id(x)] = (out_true, out_false) - else: - out_true, out_false = self.input_table[id(x)] - - if self.status == IfElse.IN_IF_ELSE_TRUE_BLOCKS: - return out_true - else: - return out_false - - def parent_block(self): - current_block = self.helper.main_program.current_block() - return self.helper.main_program.block(current_block.parent_idx) - - def true_block(self): - return IfElseBlockGuard(True, self) - - def false_block(self): - return IfElseBlockGuard(False, self) - - def output(self, *outs): - if self.status == self.OUT_IF_ELSE_BLOCKS: - raise ValueError("output can only be invoked in the sub-block") - - out_table = self.output_table[1 if self.status == - self.IN_IF_ELSE_TRUE_BLOCKS else 0] - parent_block = self.parent_block() - for each_out in outs: - if not isinstance(each_out, Variable): - raise TypeError("Each output should be a variable") - # create outside tensor - outside_out = parent_block.create_var( - name=unique_name("_".join([self.helper.name, 'output'])), - dtype=each_out.dtype) - out_table.append(outside_out) - - 
# assign local var to outside - assign( - input=each_out, - output=outside_out, - main_program=self.helper.main_program, - startup_program=self.helper.startup_program) - - def __call__(self): - if self.status != self.OUT_IF_ELSE_BLOCKS: - raise ValueError("IfElse::__call__ must be out of sub-block") - false_len, true_len = map(len, self.output_table) - if false_len == 0 and true_len == 0: - raise ValueError("Must invoke true_block/false_block before " - "__call__") - elif false_len != true_len and false_len != 0 and true_len != 0: - raise ValueError("The output side must be same") - elif false_len == 0 or true_len == 0: - return self.output_table[0 if false_len != 0 else 1] - - # else none of false_len/true_len is zero - # merge together - rlist = [] - for false_var, true_var in zip(*self.output_table): - rlist.append( - merge_lod_tensor( - in_true=true_var, - in_false=false_var, - mask=self.cond, - x=self.cond, - level=0, - main_program=self.helper.main_program, - startup_program=self.helper.startup_program)) - return rlist - - -class DynamicRNN(object): - BEFORE_RNN = 0 - IN_RNN = 1 - AFTER_RNN = 2 - - def __init__(self, name=None, main_program=None, startup_program=None): - self.helper = LayerHelper( - 'dynamic_rnn', - name=name, - main_program=main_program, - startup_program=startup_program) - self.status = DynamicRNN.BEFORE_RNN - self.lod_rank_table = None - self.max_seq_len = None - self.step_idx = None - self.zero_idx = fill_constant(shape=[1], value=0, dtype='int64') - self.mem_dict = dict() - self.output_array = [] - self.outputs = [] - self.cond = self.helper.create_tmp_variable(dtype='bool') - self.cond.stop_gradient = False - self.while_op = While(self.cond) - self.input_array = [] - self.mem_link = [] - - def step_input(self, x): - self._assert_in_rnn_block_("step_input") - if not isinstance(x, Variable): - raise TypeError( - "step_input() can only take a Variable as its input") - parent_block = self._parent_block_() - if self.lod_rank_table is None: - self.lod_rank_table = parent_block.create_var( - name=unique_name('lod_rank_table'), - type=core.VarDesc.VarType.LOD_RANK_TABLE) - self.lod_rank_table.stop_gradient = True - parent_block.append_op( - type='lod_rank_table', - inputs={"X": x}, - outputs={"Out": self.lod_rank_table}) - self.max_seq_len = parent_block.create_var( - name=unique_name('dynamic_rnn_max_seq_len'), dtype='int64') - self.max_seq_len.stop_gradient = False - parent_block.append_op( - type='max_sequence_len', - inputs={'RankTable': self.lod_rank_table}, - outputs={"Out": self.max_seq_len}) - self.cond.stop_gradient = True - parent_block.append_op( - type='less_than', - inputs={'X': self.step_idx, - 'Y': self.max_seq_len}, - outputs={'Out': self.cond}) - - input_array = parent_block.create_var( - name=unique_name('dynamic_rnn_input_array'), - type=core.VarDesc.VarType.LOD_TENSOR_ARRAY, - dtype=x.dtype) - self.input_array.append((input_array, x.dtype)) - parent_block.append_op( - type='lod_tensor_to_array', - inputs={'X': x, - 'RankTable': self.lod_rank_table}, - outputs={'Out': input_array}) - return array_read( - array=input_array, i=self.step_idx, **self.helper.to_kwargs) - - @contextlib.contextmanager - def block(self): - if self.status != DynamicRNN.BEFORE_RNN: - raise ValueError("rnn.block() can only be invoke once") - self.step_idx = fill_constant(shape=[1], dtype='int64', value=0) - self.step_idx.stop_gradient = False - self.status = DynamicRNN.IN_RNN - with self.while_op.block(): - yield - increment( - x=self.step_idx, - value=1.0, - in_place=True, - 
**self.helper.to_kwargs) - - for new_mem, mem_array in self.mem_link: - array_write( - x=new_mem, - i=self.step_idx, - array=mem_array, - **self.helper.to_kwargs) - - less_than( - x=self.step_idx, - y=self.max_seq_len, - cond=self.cond, - **self.helper.to_kwargs) - - self.status = DynamicRNN.AFTER_RNN - for each_array in self.output_array: - self.outputs.append( - array_to_lod_tensor( - x=each_array, - table=self.lod_rank_table, - **self.helper.to_kwargs)) - - def __call__(self, *args, **kwargs): - if self.status != DynamicRNN.AFTER_RNN: - raise ValueError( - "Dynamic RNN outputs can only be retrieved after rnn block") - if len(self.outputs) == 1: - return self.outputs[0] - else: - return self.outputs - - def memory(self, init=None, shape=None, value=0.0, dtype='float32'): - self._assert_in_rnn_block_('memory') - if init is not None: - if not isinstance(init, Variable): - raise TypeError( - "The input arg `init` of memory() must be a Variable") - parent_block = self._parent_block_() - mem_array = parent_block.create_var( - name=unique_name('dynamic_rnn_mem_array'), - type=core.VarDesc.VarType.LOD_TENSOR_ARRAY, - dtype=init.dtype) - parent_block.append_op( - type='write_to_array', - inputs={'X': init, - 'I': self.zero_idx}, - outputs={'Out': mem_array}) - retv = array_read( - array=mem_array, i=self.step_idx, **self.helper.to_kwargs) - retv = shrink_memory( - x=retv, - i=self.step_idx, - table=self.lod_rank_table, - **self.helper.to_kwargs) - self.mem_dict[retv.name] = mem_array - return retv - else: - if len(self.input_array) == 0: - raise ValueError( - "step_input should be invoked before memory(shape=..., value=...)" - ) - parent_block = self._parent_block_() - init = parent_block.create_var( - name=unique_name('mem_init'), dtype=dtype) - arr, dtype = self.input_array[0] - in0 = parent_block.create_var(name=unique_name('in0'), dtype=dtype) - parent_block.append_op( - type='read_from_array', - inputs={'X': [arr], - 'I': [self.zero_idx]}, - outputs={'Out': [in0]}) - parent_block.append_op( - type='fill_constant_batch_size_like', - inputs={'Input': [in0]}, - outputs={'Out': [init]}, - attrs={ - 'shape': [-1] + shape, - 'value': float(value), - 'dtype': init.dtype - }) - return self.memory(init=init) - - def update_memory(self, ex_mem, new_mem): - self._assert_in_rnn_block_('update_memory') - if not isinstance(ex_mem, Variable): - raise TypeError("The input arg `ex_mem` of update_memory() must " - "be a Variable") - if not isinstance(new_mem, Variable): - raise TypeError("The input arg `new_mem` of update_memory() must " - "be a Variable") - - mem_array = self.mem_dict.get(ex_mem.name, None) - if mem_array is None: - raise ValueError("Please invoke memory before update_memory") - if self.lod_rank_table is None: - raise ValueError("Please invoke step_input before update_memory") - - self.mem_link.append((new_mem, mem_array)) - - def output(self, *outputs): - self._assert_in_rnn_block_('output') - parent_block = self._parent_block_() - for each in outputs: - outside_array = parent_block.create_var( - name=unique_name("_".join( - [self.helper.name, "output_array", each.name])), - type=core.VarDesc.VarType.LOD_TENSOR_ARRAY, - dtype=each.dtype) - array_write(x=each, i=self.step_idx, array=outside_array) - self.output_array.append(outside_array) - - def _parent_block_(self): - prog = self.helper.main_program - parent_idx = prog.current_block().parent_idx - assert parent_idx >= 0 - parent_block = prog.block(parent_idx) - - return parent_block - - def _assert_in_rnn_block_(self, method): - if 
self.status != DynamicRNN.IN_RNN: - raise ValueError("{0} can only be invoked inside rnn block.".format( - method)) diff --git a/python/paddle/v2/fluid/layers/__init__.py b/python/paddle/v2/fluid/layers/__init__.py new file mode 100644 index 0000000000..249f570e13 --- /dev/null +++ b/python/paddle/v2/fluid/layers/__init__.py @@ -0,0 +1,17 @@ +import ops +from ops import * +import nn +from nn import * +import io +from io import * +import tensor +from tensor import * +import control_flow +from control_flow import * + +__all__ = [] +__all__ += nn.__all__ +__all__ += io.__all__ +__all__ += tensor.__all__ +__all__ += control_flow.__all__ +__all__ += ops.__all__ diff --git a/python/paddle/v2/fluid/layers/control_flow.py b/python/paddle/v2/fluid/layers/control_flow.py new file mode 100644 index 0000000000..22a37c22c3 --- /dev/null +++ b/python/paddle/v2/fluid/layers/control_flow.py @@ -0,0 +1,1100 @@ +from ..layer_helper import LayerHelper, unique_name +from ..framework import Program, Variable, Operator +from .. import core +from tensor import assign, fill_constant +import contextlib +from ..registry import autodoc + +__all__ = [ + 'split_lod_tensor', 'merge_lod_tensor', 'BlockGuard', 'StaticRNNGuard', + 'StaticRNNMemoryLink', 'WhileGuard', 'While', 'lod_rank_table', + 'max_sequence_len', 'topk', 'lod_tensor_to_array', 'array_to_lod_tensor', + 'increment', 'array_write', 'create_array', 'less_than', 'array_read', + 'shrink_memory', 'array_length', 'IfElse', 'DynamicRNN', 'ConditionalBlock', + 'StaticRNN', 'reorder_lod_tensor_by_rank' +] + + +def split_lod_tensor(input, mask, level=0): + helper = LayerHelper('split_lod_tensor', **locals()) + out_true = helper.create_tmp_variable(dtype=input.dtype) + out_false = helper.create_tmp_variable(dtype=input.dtype) + helper.append_op( + type='split_lod_tensor', + inputs={ + 'X': input, + 'Mask': mask, + }, + outputs={'OutTrue': out_true, + 'OutFalse': out_false}, + attrs={'level': level}) + return out_true, out_false + + +def merge_lod_tensor(in_true, in_false, x, mask, level=0): + helper = LayerHelper('merge_lod_tensor', **locals()) + out = helper.create_tmp_variable(dtype=in_true.dtype) + helper.append_op( + type='merge_lod_tensor', + inputs={'X': x, + 'Mask': mask, + 'InTrue': in_true, + 'InFalse': in_false}, + outputs={'Out': out}, + attrs={'level': level}) + return out + + +class BlockGuard(object): + """ + BlockGuard class. + + BlockGuard class is used to create a sub-block in a program by + using the Python `with` keyword. + """ + + def __init__(self, main_program): + if not isinstance(main_program, Program): + raise TypeError("BlockGuard takes a program") + self.main_program = main_program + + def __enter__(self): + self.main_program.create_block() + + def __exit__(self, exc_type, exc_val, exc_tb): + self.main_program.rollback() + if exc_type is not None: + return False # re-raise exception + return True + + +class StaticRNNGuard(BlockGuard): + """ + StaticRNNGuard class. + + StaticRNNGuard class is used to create a StaticRNN block in a program. 
+ """ + + def __init__(self, rnn): + if not isinstance(rnn, StaticRNN): + raise TypeError("StaticRNNGuard takes a StaticRNN") + super(StaticRNNGuard, self).__init__(rnn.helper.main_program) + self.rnn = rnn + + def __enter__(self): + self.rnn.status = StaticRNN.IN_RNN_BLOCK + return super(StaticRNNGuard, self).__enter__() + + def __exit__(self, exc_type, exc_val, exc_tb): + if exc_type is not None: + return False + self.rnn.status = StaticRNN.AFTER_RNN_BLOCK + self.rnn.complete_rnn_op() + return super(StaticRNNGuard, self).__exit__(exc_type, exc_val, exc_tb) + + +class StaticRNNMemoryLink(object): + """ + StaticRNNMemoryLink class. + + Args: + init: the initial variable for Memory + init: Variable + pre_mem: the memory variable in previous time step + pre_mem: Variable + mem: the memory variable in current time step + mem: Variable + + StaticRNNMemoryLink class is used to create a link between two + memory cells of a StaticRNN. + """ + + def __init__(self, init, pre_mem, mem=None): + self.init = init + self.pre_mem = pre_mem + self.mem = mem + + +class StaticRNN(object): + """ + StaticRNN class. + + StaticRNN class is used to create a StaticRNN. The RNN will have its + own parameters like inputs, outputs, memories, status and length. + """ + BEFORE_RNN_BLOCK = 0 + IN_RNN_BLOCK = 1 + AFTER_RNN_BLOCK = 2 + + def __init__(self, name=None): + self.helper = LayerHelper("static_rnn", name=name) + self.memories = {} # memory map, from pre_mem.name --> MemoryLink + self.inputs = [] # input variable list in current block + self.outputs = [] # output variable list in parent block + self.status = StaticRNN.BEFORE_RNN_BLOCK # status flag. + # sequence length, since it is a static RNN, sequence length are fixed. + self.seq_len = None + + def step(self): + return StaticRNNGuard(self) + + def _assert_in_rnn_block_(self, method): + if self.status != StaticRNN.IN_RNN_BLOCK: + raise ValueError("You must invoke {0} in rnn block".format(method)) + + def memory(self, + init=None, + shape=None, + batch_ref=None, + init_value=0.0, + init_batch_dim_idx=0, + ref_batch_dim_idx=1): + """ + Args: + init: boot memory, if not set, a shape, batch_ref must be provided + shape: shape of the boot memory + batch_ref: batch size reference variable + init_value: the init value of boot memory + init_batch_dim_idx: the index of batch size in init's dimension + ref_batch_dim_idx: the index of batch size in batch_ref's dimension + """ + self._assert_in_rnn_block_('memory') + if init is None: + if shape is None or batch_ref is None: + raise ValueError( + "if init is None, memory at least need shape and batch_ref") + parent_block = self.parent_block() + var_name = unique_name("@".join([self.helper.name, "memory_boot"])) + boot_var = parent_block.create_var( + name=var_name, + shape=shape, + dtype=batch_ref.dtype, + persistable=False) + + parent_block.append_op( + type="fill_constant_batch_size_like", + inputs={'Input': [batch_ref]}, + outputs={'Out': [boot_var]}, + attrs={ + 'value': init_value, + 'shape': boot_var.shape, + 'dtype': boot_var.dtype, + 'input_dim_idx': ref_batch_dim_idx, + 'output_dim_idx': init_batch_dim_idx + }) + + return self.memory(init=boot_var) + else: + pre_mem = self.helper.create_variable( + name=unique_name("@".join([self.helper.name, "mem"])), + dtype=init.dtype, + shape=init.shape) + self.memories[pre_mem.name] = StaticRNNMemoryLink( + init=init, pre_mem=pre_mem) + return pre_mem + + def step_input(self, x): + self._assert_in_rnn_block_('step_input') + if not isinstance(x, Variable): + raise 
TypeError("step input takes a Variable") + if self.seq_len is None: + self.seq_len = x.shape[0] + elif self.seq_len != x.shape[0]: + raise ValueError("Static RNN only take fix seq_len input") + + ipt = self.helper.create_variable( + name=x.name, dtype=x.dtype, shape=list(x.shape[1:]), type=x.type) + self.inputs.append(ipt) + return ipt + + def step_output(self, o): + self._assert_in_rnn_block_('step_output') + if not isinstance(o, Variable): + raise TypeError("step output takes a Variable") + + tmp_o = self.helper.create_tmp_variable(dtype=o.dtype) + self.helper.append_op( + type='rnn_memory_helper', + inputs={'X': [o]}, + outputs={'Out': tmp_o}, + attrs={'dtype': o.dtype}) + + out_var = self.parent_block().create_var( + name=tmp_o.name, + shape=[self.seq_len] + list(tmp_o.shape), + dtype=tmp_o.dtype) + + self.outputs.append(out_var) + + def output(self, *outputs): + for each in outputs: + self.step_output(each) + + def update_memory(self, mem, var): + if not isinstance(mem, Variable) or not isinstance(var, Variable): + raise TypeError("update memory should take variables") + self.memories[mem.name].mem = var + + def parent_block(self): + prog = self.helper.main_program + parent_idx = prog.current_block().parent_idx + assert parent_idx >= 0 + parent_block = prog.block(parent_idx) + return parent_block + + def __call__(self, *args, **kwargs): + if self.status != StaticRNN.AFTER_RNN_BLOCK: + raise ValueError("RNN output can only be retrieved after rnn block") + if len(self.outputs) == 0: + raise ValueError("RNN has no output") + elif len(self.outputs) == 1: + return self.outputs[0] + else: + return self.outputs + + def complete_rnn_op(self): + main_program = self.helper.main_program + rnn_block = main_program.current_block() + parent_block = self.parent_block() + + local_inputs = set() + + for op in rnn_block.ops: + assert isinstance(op, Operator) + for oname in op.output_names: + for out_var_name in op.output(oname): + local_inputs.add(out_var_name) + + for var in self.inputs: + local_inputs.add(var.name) + for m in self.memories: + local_inputs.add(m) + + params = list() + for op in rnn_block.ops: + assert isinstance(op, Operator) + for iname in op.input_names: + for in_var_name in op.input(iname): + if in_var_name not in local_inputs: + params.append(in_var_name) + + parameters = [parent_block.var(name) for name in params] + + step_scope = parent_block.create_var( + type=core.VarDesc.VarType.STEP_SCOPES) + + inlinks = [parent_block.var(i.name) for i in self.inputs] + outlinks = self.outputs + + boot_memories = [] + pre_memories = [] + memories = [] + for _, mem in self.memories.iteritems(): + boot_memories.append(mem.init) + pre_memories.append(mem.pre_mem.name) + mem_var = rnn_block.var(mem.mem.name) + assert isinstance(mem_var, Variable) + new_mem = self.helper.create_tmp_variable(dtype=mem_var.dtype) + + rnn_block.append_op( + type='rnn_memory_helper', + inputs={'X': [mem_var]}, + outputs={'Out': [new_mem]}, + attrs={'dtype': mem_var.dtype}) + + memories.append(new_mem.name) + + parent_block.append_op( + type='recurrent', + inputs={ + 'inputs': inlinks, + 'initial_states': boot_memories, + 'parameters': parameters + }, + outputs={'outputs': outlinks, + 'step_scopes': [step_scope]}, + attrs={ + 'ex_states': pre_memories, + 'states': memories, + 'sub_block': rnn_block + }) + + +class WhileGuard(BlockGuard): + def __init__(self, while_op): + if not isinstance(while_op, While): + raise TypeError("WhileGuard takes a while op") + super(WhileGuard, self).__init__(while_op.helper.main_program) 
+        self.while_op = while_op
+
+    def __enter__(self):
+        self.while_op.status = While.IN_WHILE_BLOCK
+        return super(WhileGuard, self).__enter__()
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        if exc_type is not None:
+            return False
+        self.while_op.status = While.AFTER_WHILE_BLOCK
+        self.while_op.complete()
+        return super(WhileGuard, self).__exit__(exc_type, exc_val, exc_tb)
+
+
+class While(object):
+    BEFORE_WHILE_BLOCK = 0
+    IN_WHILE_BLOCK = 1
+    AFTER_WHILE_BLOCK = 2
+
+    def __init__(self, cond, name=None):
+        self.helper = LayerHelper("while", name=name)
+        self.status = While.BEFORE_WHILE_BLOCK
+        if not isinstance(cond, Variable):
+            raise TypeError("condition should be a variable")
+        if cond.dtype != core.DataType.BOOL:
+            raise TypeError("condition should be a bool variable")
+        if reduce(lambda a, b: a * b, cond.shape, 1) != 1:
+            raise TypeError("condition should be a bool scalar")
+        self.cond_var = cond
+
+    def block(self):
+        return WhileGuard(self)
+
+    def complete(self):
+        main_program = self.helper.main_program
+        while_block = main_program.current_block()
+        parent_block = main_program.block(main_program.current_block()
+                                          .parent_idx)
+
+        inner_outputs = {self.cond_var.name}
+        x_name_list = set()
+        for op in while_block.ops:
+            for iname in op.input_names:
+                for in_var_name in op.input(iname):
+                    if in_var_name not in inner_outputs:
+                        x_name_list.add(in_var_name)
+
+            for oname in op.output_names:
+                for out_var_name in op.output(oname):
+                    inner_outputs.add(out_var_name)
+
+        out_vars = []
+        for inner_out_name in inner_outputs:
+            if inner_out_name in parent_block.vars:
+                out_vars.append(parent_block.var(inner_out_name))
+
+        step_scope = parent_block.create_var(
+            type=core.VarDesc.VarType.STEP_SCOPES)
+
+        parent_block.append_op(
+            type='while',
+            inputs={
+                'X': [parent_block.var(x_name) for x_name in x_name_list],
+                'Condition': [self.cond_var]
+            },
+            outputs={'Out': out_vars,
+                     'StepScopes': [step_scope]},
+            attrs={'sub_block': while_block})
+
+
+def lod_rank_table(x, level=0):
+    """
+    This function creates a lod_rank_table operator, which builds a
+    LOD_RANK_TABLE that ranks the sequences of the input x at the given LoD
+    level by their lengths, in descending order.
+    """
+    helper = LayerHelper("lod_rank_table", **locals())
+    table = helper.create_variable(
+        type=core.VarDesc.VarType.LOD_RANK_TABLE,
+        name=unique_name("lod_rank_table"))
+    helper.append_op(
+        type='lod_rank_table',
+        inputs={'X': x},
+        outputs={'Out': table},
+        attrs={'level': level})
+    return table
+
+
+def max_sequence_len(rank_table):
+    """
+    This function creates an operator to calculate the length of the longest
+    sequence recorded in the input rank_table (which should be a
+    lod_rank_table).
+    """
+    helper = LayerHelper("max_sequence_len", **locals())
+    res = helper.create_tmp_variable(dtype="int64")
+    helper.append_op(
+        type="max_sequence_len",
+        inputs={"RankTable": rank_table},
+        outputs={"Out": res})
+    return res
+
+
+def topk(input, k):
+    helper = LayerHelper('topk', **locals())
+    topk_out = helper.create_tmp_variable(dtype=input.dtype)
+    topk_indices = helper.create_tmp_variable(dtype='int64')
+    helper.append_op(
+        type='top_k',
+        inputs={'X': [input]},
+        outputs={'Out': [topk_out],
+                 'Indices': [topk_indices]},
+        attrs={'k': k})
+    return topk_out, topk_indices
+
+
+def lod_tensor_to_array(x, table):
+    """This function performs the operation that converts an LOD_Tensor to
+    an array.
+
+    Args:
+        x (Variable|list): The tensor that needs to be converted to an array.
+        table (Variable): The lod_rank_table variable that stores the level
+                          of lod, which is ordered by sequence length in
+                          descending order.
+
+    Returns:
+        Variable: The variable of type array that has been converted from a
+                  tensor.
+
+    Examples:
+        .. code-block:: python
+
+          x = fluid.layers.data(name='x', shape=[10])
+          table = fluid.layers.lod_rank_table(x, level=0)
+          array = fluid.layers.lod_tensor_to_array(x, table)
+    """
+    helper = LayerHelper("lod_tensor_to_array", **locals())
+    array = helper.create_variable(
+        name=unique_name("lod_tensor_to_array"),
+        type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
+        dtype=x.dtype)
+    helper.append_op(
+        type='lod_tensor_to_array',
+        inputs={'X': x,
+                'RankTable': table},
+        outputs={'Out': array})
+    return array
+
+
+def array_to_lod_tensor(x, table):
+    """This function performs the operation that converts an array to
+    an LOD_Tensor.
+
+    Args:
+        x (Variable|list): The array that needs to be converted to a tensor.
+        table (Variable): The lod_rank_table variable that stores the level
+                          of lod, which is ordered by sequence length in
+                          descending order.
+
+    Returns:
+        Variable: The variable of type tensor that has been converted
+                  from an array.
+
+    Examples:
+        .. code-block:: python
+
+          x = fluid.layers.data(name='x', shape=[10])
+          table = fluid.layers.lod_rank_table(x, level=0)
+          array = fluid.layers.lod_tensor_to_array(x, table)
+          lod_tensor = fluid.layers.array_to_lod_tensor(array, table)
+    """
+    helper = LayerHelper("array_to_lod_tensor", **locals())
+    tmp = helper.create_tmp_variable(dtype=x.dtype)
+    helper.append_op(
+        type="array_to_lod_tensor",
+        inputs={'X': x,
+                'RankTable': table},
+        outputs={'Out': tmp})
+    return tmp
+
+
+def increment(x, value=1.0, in_place=True):
+    """This function performs an operation that increments each value in the
+    input :math:`x` by the given amount :math:`value`. The operation is
+    performed in-place by default.
+
+    Args:
+        x (Variable|list): The tensor that has the input values.
+        value (float): The amount by which the values should be incremented.
+        in_place (bool): If the increment should be performed in-place.
+
+    Returns:
+        Variable: The tensor variable storing the element-wise increment of
+                  each value in the input.
+
+    Examples:
+        .. code-block:: python
+
+          data = fluid.layers.data(name='data', shape=[32, 32], dtype='float32')
+          data = fluid.layers.increment(x=data, value=3.0, in_place=True)
+    """
+    helper = LayerHelper("increment", **locals())
+    if not in_place:
+        out = helper.create_tmp_variable(dtype=x.dtype)
+    else:
+        out = x
+    helper.append_op(
+        type='increment',
+        inputs={'X': [x]},
+        outputs={'Out': [out]},
+        attrs={'step': float(value)})
+    return out
+
+
+def array_write(x, i, array=None):
+    """This function writes the given input variable into the specified
+    position of an LOD_TENSOR_ARRAY.
+
+    Args:
+        x (Variable|list): The input tensor whose data will be written.
+        i (Variable|list): The subscript index in the tensor array that
+                           points to the place where the data will be
+                           written.
+        array (Variable|list): The array into which the data is written. If
+                               not given, a new array is created and
+                               returned.
+    Returns:
+        Variable: The tensor array that the input has been written to.
+
+    Examples:
+        .. code-block:: python
+
+          tmp = fluid.layers.zeros(shape=[10], dtype='int32')
+          i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
+          arr = fluid.layers.array_write(tmp, i=i)
+    """
+    helper = LayerHelper('array_write', **locals())
+    if array is None:
+        array = helper.create_variable(
+            name="{0}.out".format(helper.name),
+            type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
+            dtype=x.dtype)
+    helper.append_op(
+        type='write_to_array',
+        inputs={'X': [x],
+                'I': [i]},
+        outputs={'Out': [array]})
+    return array
+
+
+def create_array(dtype):
+    """This function creates an array of type :math:`LOD_TENSOR_ARRAY` using the
+    LayerHelper.
+
+    Args:
+        dtype (str): The data type of the elements in the array, e.g.
+                     'float32' or 'int64'.
+
+    Returns:
+        Variable: The tensor array variable holding elements of the given
+                  data type.
+
+    Examples:
+        .. code-block:: python
+
+          data = fluid.layers.create_array(dtype='float32')
+
+    """
+    helper = LayerHelper("array", **locals())
+    return helper.create_variable(
+        name="{0}.out".format(helper.name),
+        type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
+        dtype=dtype)
+
+
+def less_than(x, y, cond=None, **ignored):
+    """
+    **Less than**
+
+    This layer returns the truth value of :math:`x < y` elementwise.
+
+    Args:
+        x(Variable): First operand of *less_than*
+        y(Variable): Second operand of *less_than*
+        cond(Variable|None): Optional output variable to store the result of *less_than*
+
+    Returns:
+        Variable: The tensor variable storing the output of *less_than*.
+
+    Examples:
+        .. code-block:: python
+
+          less = fluid.layers.less_than(x=label, y=limit)
+    """
+    helper = LayerHelper("less_than", **locals())
+    if cond is None:
+        cond = helper.create_tmp_variable(dtype='bool')
+        cond.stop_gradient = True
+
+    helper.append_op(
+        type='less_than', inputs={'X': [x],
+                                  'Y': [y]}, outputs={'Out': [cond]})
+    return cond
+
+
+def array_read(array, i):
+    """This function reads one element from the specified position of the
+    input LOD_TENSOR_ARRAY.
+
+    Args:
+        array (Variable|list): The tensor array from which the data will be
+                               read.
+        i (Variable|list): The subscript index in the tensor array that
+                           points to the place from which the data will be
+                           read.
+
+    Returns:
+        Variable: The tensor variable storing the data read from the array.
+
+    Examples:
+        .. code-block:: python
+
+          tmp = fluid.layers.zeros(shape=[10], dtype='int32')
+          i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
+          arr = fluid.layers.array_write(tmp, i=i)
+          item = fluid.layers.array_read(arr, i=i)
+    """
+    helper = LayerHelper('array_read', **locals())
+    if not isinstance(
+            array,
+            Variable) or array.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY:
+        raise TypeError("array should be a tensor array variable")
+    out = helper.create_tmp_variable(dtype=array.dtype)
+    helper.append_op(
+        type='read_from_array',
+        inputs={'X': [array],
+                'I': [i]},
+        outputs={'Out': [out]})
+    return out
+
+
+def shrink_memory(x, i, table):
+    """
+    This function creates a shrink_rnn_memory operator, which shrinks the
+    RNN memory x at step i according to the given RankTable.
+    """
+    helper = LayerHelper('shrink_memory', **locals())
+    out = helper.create_tmp_variable(dtype=x.dtype)
+    helper.append_op(
+        type='shrink_rnn_memory',
+        inputs={'X': [x],
+                'I': [i],
+                'RankTable': [table]},
+        outputs={'Out': [out]},
+        attrs={})
+    return out
+
+
+def array_length(array):
+    """This function computes the length of the input LOD_TENSOR_ARRAY.
+
+    Args:
+        array (LOD_TENSOR_ARRAY): The input array whose length will be
+                                  computed.
+
+    Returns:
+        Variable: The length of the input LoDTensorArray.
+
+    Examples:
+        .. code-block:: python
+
+          tmp = fluid.layers.zeros(shape=[10], dtype='int32')
+          i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
+          arr = fluid.layers.array_write(tmp, i=i)
+          arr_len = fluid.layers.array_length(arr)
+    """
+    helper = LayerHelper('array_length', **locals())
+    tmp = helper.create_tmp_variable(dtype='int64')
+    tmp.stop_gradient = True
+    helper.append_op(
+        type='lod_array_length', inputs={'X': [array]}, outputs={'Out': [tmp]})
+    return tmp
+
+
+class ConditionalBlockGuard(BlockGuard):
+    def __init__(self, block):
+        if not isinstance(block, ConditionalBlock):
+            raise TypeError("block should be conditional block")
+        super(ConditionalBlockGuard, self).__init__(block.helper.main_program)
+        self.block = block
+
+    def __enter__(self):
+        return super(ConditionalBlockGuard, self).__enter__()
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.block.complete()
+        return super(ConditionalBlockGuard, self).__exit__(exc_type, exc_val,
+                                                           exc_tb)
+
+
+class ConditionalBlock(object):
+    def __init__(self, inputs, name=None):
+        for each_input in inputs:
+            if not isinstance(each_input, Variable):
+                raise TypeError("Each input should be a variable")
+        self.inputs = inputs
+        self.helper = LayerHelper('conditional_block', name=name)
+
+    def block(self):
+        return ConditionalBlockGuard(self)
+
+    def complete(self):
+        inside_block = self.helper.main_program.current_block()
+        parent_block = self.helper.main_program.block(inside_block.parent_idx)
+
+        intermediate = set()
+        params = set()
+
+        for each_op in inside_block.ops:
+            assert isinstance(each_op, Operator)
+            for iname in each_op.input_names:
+                for in_var_name in each_op.input(iname):
+                    if in_var_name not in intermediate:
+                        params.add(in_var_name)
+
+            for oname in each_op.output_names:
+                for out_var_name in each_op.output(oname):
+                    intermediate.add(out_var_name)
+        input_set = set([ipt.name for ipt in self.inputs])
+
+        param_list = [
+            parent_block.var(each_name) for each_name in params
+            if each_name not in input_set
+        ]
+
+        out_list = [
+            parent_block.var(var_name) for var_name in parent_block.vars
+            if var_name not in intermediate
+        ]
+
+        step_scope = parent_block.create_var(
+            type=core.VarDesc.VarType.STEP_SCOPES)
+        parent_block.append_op(
+            type='conditional_block',
+            inputs={
+                'X': self.inputs,
+                'Params': param_list,
+            },
+            outputs={'Out': out_list,
+                     'Scope': [step_scope]},
+            attrs={'sub_block': inside_block})
+
+
+class IfElseBlockGuard(object):
+    def __init__(self, is_true, ifelse):
+        if not isinstance(ifelse, IfElse):
+            raise TypeError("ifelse must be an instance of IfElse class")
+
+        if ifelse.status != IfElse.OUT_IF_ELSE_BLOCKS:
+            raise ValueError("You cannot invoke IfElse.block() inside a block")
+
+        self.is_true = is_true
+        self.ie = ifelse
+        if is_true:
+            self.cond_block = ifelse.conditional_true_block
+        else:
+            self.cond_block = ifelse.conditional_false_block
+
+        if not isinstance(self.cond_block, ConditionalBlock):
+            raise TypeError("Unexpected situation")
+
+        self.cond_block = self.cond_block.block()
+
+    def __enter__(self):
+        self.ie.status = IfElse.IN_IF_ELSE_TRUE_BLOCKS if self.is_true else IfElse.IN_IF_ELSE_FALSE_BLOCKS
+        self.cond_block.__enter__()
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        if not self.cond_block.__exit__(exc_type, exc_val, exc_tb):
+            # re-raise the exception raised inside the block
+            return False
+        if len(self.ie.output_table[1 if self.is_true else 0]) == 0:
+            raise ValueError("Must set output inside block")
+        self.ie.status = IfElse.OUT_IF_ELSE_BLOCKS
+
+
+class IfElse(object):
+    OUT_IF_ELSE_BLOCKS
= 0 + IN_IF_ELSE_TRUE_BLOCKS = 1 + IN_IF_ELSE_FALSE_BLOCKS = 2 + + def __init__(self, cond, name=None): + if not isinstance(cond, Variable): + raise TypeError("cond must be a Variable") + self.helper = LayerHelper('ifelse', name=name) + self.cond = cond + self.input_table = {} + self.status = IfElse.OUT_IF_ELSE_BLOCKS + self.conditional_true_block = ConditionalBlock(inputs=[self.cond]) + self.conditional_false_block = ConditionalBlock(inputs=[self.cond]) + self.output_table = ([], []) # (true_outs, false_outs) + + def input(self, x): + if self.status == IfElse.OUT_IF_ELSE_BLOCKS: + raise ValueError("input must in true/false blocks") + if id(x) not in self.input_table: + parent_block = self.parent_block() + out_true = parent_block.create_var( + name=unique_name('ifelse_input' + self.helper.name), + dtype=x.dtype) + + out_false = parent_block.create_var( + name=unique_name('ifelse_input' + self.helper.name), + dtype=x.dtype) + parent_block.append_op( + type='split_lod_tensor', + inputs={ + 'X': x, + 'Mask': self.cond, + }, + outputs={'OutTrue': out_true, + 'OutFalse': out_false}, + attrs={'level': 0}) + self.input_table[id(x)] = (out_true, out_false) + else: + out_true, out_false = self.input_table[id(x)] + + if self.status == IfElse.IN_IF_ELSE_TRUE_BLOCKS: + return out_true + else: + return out_false + + def parent_block(self): + current_block = self.helper.main_program.current_block() + return self.helper.main_program.block(current_block.parent_idx) + + def true_block(self): + return IfElseBlockGuard(True, self) + + def false_block(self): + return IfElseBlockGuard(False, self) + + def output(self, *outs): + if self.status == self.OUT_IF_ELSE_BLOCKS: + raise ValueError("output can only be invoked in the sub-block") + + out_table = self.output_table[1 if self.status == + self.IN_IF_ELSE_TRUE_BLOCKS else 0] + parent_block = self.parent_block() + for each_out in outs: + if not isinstance(each_out, Variable): + raise TypeError("Each output should be a variable") + # create outside tensor + outside_out = parent_block.create_var( + name=unique_name("_".join([self.helper.name, 'output'])), + dtype=each_out.dtype) + out_table.append(outside_out) + + # assign local var to outside + assign(input=each_out, output=outside_out) + + def __call__(self): + if self.status != self.OUT_IF_ELSE_BLOCKS: + raise ValueError("IfElse::__call__ must be out of sub-block") + false_len, true_len = map(len, self.output_table) + if false_len == 0 and true_len == 0: + raise ValueError("Must invoke true_block/false_block before " + "__call__") + elif false_len != true_len and false_len != 0 and true_len != 0: + raise ValueError("The output side must be same") + elif false_len == 0 or true_len == 0: + return self.output_table[0 if false_len != 0 else 1] + + # else none of false_len/true_len is zero + # merge together + rlist = [] + for false_var, true_var in zip(*self.output_table): + rlist.append( + merge_lod_tensor( + in_true=true_var, + in_false=false_var, + mask=self.cond, + x=self.cond, + level=0)) + return rlist + + +class DynamicRNN(object): + BEFORE_RNN = 0 + IN_RNN = 1 + AFTER_RNN = 2 + + def __init__(self, name=None): + self.helper = LayerHelper('dynamic_rnn', name=name) + self.status = DynamicRNN.BEFORE_RNN + self.lod_rank_table = None + self.max_seq_len = None + self.step_idx = None + self.zero_idx = fill_constant(shape=[1], value=0, dtype='int64') + self.mem_dict = dict() + self.output_array = [] + self.outputs = [] + self.cond = self.helper.create_tmp_variable(dtype='bool') + self.cond.stop_gradient = False + 
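+        # Note: this condition is re-evaluated at the end of every iteration
+        # inside block() (via less_than), so the While op below keeps
+        # looping until step_idx reaches the maximum sequence length.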
self.while_op = While(self.cond) + self.input_array = [] + self.mem_link = [] + + def step_input(self, x): + self._assert_in_rnn_block_("step_input") + if not isinstance(x, Variable): + raise TypeError( + "step_input() can only take a Variable as its input") + parent_block = self._parent_block_() + if self.lod_rank_table is None: + self.lod_rank_table = parent_block.create_var( + name=unique_name('lod_rank_table'), + type=core.VarDesc.VarType.LOD_RANK_TABLE) + self.lod_rank_table.stop_gradient = True + parent_block.append_op( + type='lod_rank_table', + inputs={"X": x}, + outputs={"Out": self.lod_rank_table}) + self.max_seq_len = parent_block.create_var( + name=unique_name('dynamic_rnn_max_seq_len'), dtype='int64') + self.max_seq_len.stop_gradient = False + parent_block.append_op( + type='max_sequence_len', + inputs={'RankTable': self.lod_rank_table}, + outputs={"Out": self.max_seq_len}) + self.cond.stop_gradient = True + parent_block.append_op( + type='less_than', + inputs={'X': self.step_idx, + 'Y': self.max_seq_len}, + outputs={'Out': self.cond}) + + input_array = parent_block.create_var( + name=unique_name('dynamic_rnn_input_array'), + type=core.VarDesc.VarType.LOD_TENSOR_ARRAY, + dtype=x.dtype) + self.input_array.append((input_array, x.dtype)) + parent_block.append_op( + type='lod_tensor_to_array', + inputs={'X': x, + 'RankTable': self.lod_rank_table}, + outputs={'Out': input_array}) + return array_read(array=input_array, i=self.step_idx) + + @contextlib.contextmanager + def block(self): + if self.status != DynamicRNN.BEFORE_RNN: + raise ValueError("rnn.block() can only be invoke once") + self.step_idx = fill_constant(shape=[1], dtype='int64', value=0) + self.step_idx.stop_gradient = False + self.status = DynamicRNN.IN_RNN + with self.while_op.block(): + yield + increment(x=self.step_idx, value=1.0, in_place=True) + + for new_mem, mem_array in self.mem_link: + array_write(x=new_mem, i=self.step_idx, array=mem_array) + + less_than(x=self.step_idx, y=self.max_seq_len, cond=self.cond) + + self.status = DynamicRNN.AFTER_RNN + for each_array in self.output_array: + self.outputs.append( + array_to_lod_tensor( + x=each_array, table=self.lod_rank_table)) + + def __call__(self, *args, **kwargs): + if self.status != DynamicRNN.AFTER_RNN: + raise ValueError( + "Dynamic RNN outputs can only be retrieved after rnn block") + if len(self.outputs) == 1: + return self.outputs[0] + else: + return self.outputs + + def memory(self, init=None, shape=None, value=0.0, dtype='float32'): + self._assert_in_rnn_block_('memory') + if init is not None: + if not isinstance(init, Variable): + raise TypeError( + "The input arg `init` of memory() must be a Variable") + parent_block = self._parent_block_() + mem_array = parent_block.create_var( + name=unique_name('dynamic_rnn_mem_array'), + type=core.VarDesc.VarType.LOD_TENSOR_ARRAY, + dtype=init.dtype) + parent_block.append_op( + type='write_to_array', + inputs={'X': init, + 'I': self.zero_idx}, + outputs={'Out': mem_array}) + retv = array_read(array=mem_array, i=self.step_idx) + retv = shrink_memory( + x=retv, i=self.step_idx, table=self.lod_rank_table) + self.mem_dict[retv.name] = mem_array + return retv + else: + if len(self.input_array) == 0: + raise ValueError( + "step_input should be invoked before memory(shape=..., value=...)" + ) + parent_block = self._parent_block_() + init = parent_block.create_var( + name=unique_name('mem_init'), dtype=dtype) + arr, dtype = self.input_array[0] + in0 = parent_block.create_var(name=unique_name('in0'), dtype=dtype) + 
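+            # Read step 0 of the first step-input as a batch-size reference,
+            # then fill the boot memory to match its batch dimension.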
parent_block.append_op( + type='read_from_array', + inputs={'X': [arr], + 'I': [self.zero_idx]}, + outputs={'Out': [in0]}) + parent_block.append_op( + type='fill_constant_batch_size_like', + inputs={'Input': [in0]}, + outputs={'Out': [init]}, + attrs={ + 'shape': [-1] + shape, + 'value': float(value), + 'dtype': init.dtype + }) + return self.memory(init=init) + + def update_memory(self, ex_mem, new_mem): + self._assert_in_rnn_block_('update_memory') + if not isinstance(ex_mem, Variable): + raise TypeError("The input arg `ex_mem` of update_memory() must " + "be a Variable") + if not isinstance(new_mem, Variable): + raise TypeError("The input arg `new_mem` of update_memory() must " + "be a Variable") + + mem_array = self.mem_dict.get(ex_mem.name, None) + if mem_array is None: + raise ValueError("Please invoke memory before update_memory") + if self.lod_rank_table is None: + raise ValueError("Please invoke step_input before update_memory") + + self.mem_link.append((new_mem, mem_array)) + + def output(self, *outputs): + self._assert_in_rnn_block_('output') + parent_block = self._parent_block_() + for each in outputs: + outside_array = parent_block.create_var( + name=unique_name("_".join( + [self.helper.name, "output_array", each.name])), + type=core.VarDesc.VarType.LOD_TENSOR_ARRAY, + dtype=each.dtype) + array_write(x=each, i=self.step_idx, array=outside_array) + self.output_array.append(outside_array) + + def _parent_block_(self): + prog = self.helper.main_program + parent_idx = prog.current_block().parent_idx + assert parent_idx >= 0 + parent_block = prog.block(parent_idx) + + return parent_block + + def _assert_in_rnn_block_(self, method): + if self.status != DynamicRNN.IN_RNN: + raise ValueError("{0} can only be invoked inside rnn block.".format( + method)) + + +@autodoc +def reorder_lod_tensor_by_rank(x, rank_table): + helper = LayerHelper('reorder_lod_tensor_by_rank', **locals()) + helper.is_instance('x', Variable) + helper.is_instance('rank_table', Variable) + + out = helper.create_tmp_variable(dtype=x.dtype) + helper.append_op( + type='reorder_lod_tensor_by_rank', + inputs={'X': [x], + 'RankTable': [rank_table]}, + outputs={'Out': [out]}) + return out diff --git a/python/paddle/v2/fluid/layers/io.py b/python/paddle/v2/fluid/layers/io.py new file mode 100644 index 0000000000..56c3f7b7b7 --- /dev/null +++ b/python/paddle/v2/fluid/layers/io.py @@ -0,0 +1,62 @@ +from .. import core +from ..layer_helper import LayerHelper + +__all__ = ['data'] + + +def data(name, + shape, + append_batch_size=True, + dtype='float32', + lod_level=0, + type=core.VarDesc.VarType.LOD_TENSOR, + stop_gradient=True): + """ + **Data Layer** + + This function takes in the input and based on whether data has + to be returned back as a minibatch, it creates the global variable using + the helper functions. The global variables can be accessed by all the + following operations and layers in the graph. + + All the input variables of this function are passed in as local variables + to the LayerHelper constructor. + + Args: + name(str): The name/alias of the function + shape(list): Tuple declaring the shape. + append_batch_size(bool): Whether or not to append the data as a batch. + dtype(int|float): The type of data : float32, float_16, int etc + type(VarType): The output type. By default it is LOD_TENSOR. + lod_level(int): The LoD Level. 0 means the input data is not a sequence. 
+ main_program(Program): Name of the main program that calls this + startup_program(Program): Name of the startup program + stop_gradient(bool): A boolean that mentions whether gradient should flow. + + Returns: + Variable: The global variable that gives access to the data. + + Examples: + .. code-block:: python + + data = fluid.layers.data(name='x', shape=[784], dtype='float32') + """ + helper = LayerHelper('data', **locals()) + shape = list(shape) + for i in xrange(len(shape)): + if shape[i] is None: + shape[i] = -1 + append_batch_size = False + elif shape[i] < 0: + append_batch_size = False + + if append_batch_size: + shape = [-1] + shape # append batch size as -1 + + return helper.create_global_variable( + name=name, + shape=shape, + dtype=dtype, + type=type, + stop_gradient=stop_gradient, + lod_level=lod_level) diff --git a/python/paddle/v2/fluid/layers/nn.py b/python/paddle/v2/fluid/layers/nn.py new file mode 100644 index 0000000000..2a462ee6cb --- /dev/null +++ b/python/paddle/v2/fluid/layers/nn.py @@ -0,0 +1,1292 @@ +""" +All layers just related to the neural network. +""" + +from ..layer_helper import LayerHelper +from ..initializer import Normal, Constant +from ..framework import Variable +from ..param_attr import ParamAttr +from tensor import concat + +__all__ = [ + 'fc', 'embedding', 'dynamic_lstm', 'gru_unit', 'linear_chain_crf', + 'crf_decoding', 'cos_sim', 'cross_entropy', 'square_error_cost', 'accuracy', + 'chunk_eval', 'sequence_conv', 'conv2d', 'sequence_pool', 'pool2d', + 'batch_norm', 'beam_search_decode', 'conv2d_transpose', 'sequence_expand', + 'lstm_unit', 'reduce_sum', 'reduce_mean', 'reduce_max', 'reduce_min', + 'sequence_first_step', 'sequence_last_step' +] + + +def fc(input, + size, + num_flatten_dims=1, + param_attr=None, + bias_attr=None, + act=None, + name=None): + """ + **Fully Connected Layer** + + The fully connected layer can take multiple tensors as its inputs. It + creates a variable (one for each input tensor) called weights for each input + tensor, which represents a fully connected weight matrix from each input + unit to each output unit. The fully connected layer multiplies each input + tensor with its coresponding weight to produce an output Tensor. If + multiple input tensors are given, the results of multiple multiplications + will be sumed up. If bias_attr is not None, a biases variable will be + created and added to the output. Finally, if activation is not None, + it will be applied to the output as well. + + This process can be formulated as follows: + + .. math:: + + Out = Act({\sum_{i=0}^{N-1}W_iX_i + b}) + + In the above equation: + + * :math:`N`: Number of the input. + * :math:`X_i`: The input tensor. + * :math:`W`: The weights created by this layer. + * :math:`b`: The bias parameter created by this layer (if needed). + * :math:`Act`: The activation funtion. + * :math:`Out`: The output tensor. + + Args: + input(Variable|list): The input tensor(s) to the fully connected layer. + size(int): The number of output units in the fully connected layer. + num_flatten_dims(int): The fc layer can accept an input tensor with more + than two dimensions. If this happens, the + multidimensional tensor will first be flattened + into a 2-dimensional matrix. 
The parameter + `num_flatten_dims` determines how the input tensor + is flattened: the first `num_flatten_dims` + dimensions will be flatten to form the first + dimension of the final matrix (height of the + matrix), and the rest `rank(X) - num_col_dims` + dimensions are flattened to form the second + dimension of the final matrix (width of the matrix). + For example, suppose `X` is a 6-dimensional tensor + with a shape [2, 3, 4, 5, 6], and + `x_num_col_dims` = 3. Then, the flattened matrix + will have a shape [2 x 3 x 4, 5 x 6] = [24, 30]. + By default, `x_num_col_dims` is set to 1. + param_attr(ParamAttr|list): The parameter attribute for learnable + parameters/weights of the fully connected + layer. + param_initializer(ParamAttr|list): The initializer used for the + weight/parameter. If set None, + XavierInitializer() will be used. + bias_attr(ParamAttr|list): The parameter attribute for the bias parameter + for this layer. If set None, no bias will be + added to the output units. + bias_initializer(ParamAttr|list): The initializer used for the bias. + If set None, then ConstantInitializer() + will be used. + act(str): Activation to be applied to the output of the fully connected + layer. + name(str): Name/alias of the fully connected layer. + + + Returns: + Variable: The output tensor variable. + + Raises: + ValueError: If rank of the input tensor is less than 2. + + Examples: + .. code-block:: python + + data = fluid.layers.data(name="data", shape=[32, 32], dtype="float32") + fc = fluid.layers.fc(input=data, size=1000, act="tanh") + """ + + helper = LayerHelper("fc", **locals()) + + dtype = helper.input_dtype() + + mul_results = [] + for input_var, param_attr in helper.iter_inputs_and_params(): + input_shape = input_var.shape + param_shape = [ + reduce(lambda a, b: a * b, input_shape[num_flatten_dims:], 1) + ] + [size] + w = helper.create_parameter( + attr=param_attr, shape=param_shape, dtype=dtype, is_bias=False) + tmp = helper.create_tmp_variable(dtype) + helper.append_op( + type="mul", + inputs={ + "X": input_var, + "Y": w, + }, + outputs={"Out": tmp}, + attrs={"x_num_col_dims": num_flatten_dims, + "y_num_col_dims": 1}) + mul_results.append(tmp) + + # sum + if len(mul_results) == 1: + pre_bias = mul_results[0] + else: + pre_bias = helper.create_tmp_variable(dtype) + helper.append_op( + type="sum", inputs={"X": mul_results}, outputs={"Out": pre_bias}) + # add bias + pre_activation = helper.append_bias_op(pre_bias) + # add activation + return helper.append_activation(pre_activation) + + +def embedding(input, size, is_sparse=False, param_attr=None, dtype='float32'): + """ + **Embedding Layer** + + This layer is used to lookup a vector of IDs, provided by *input*, in a lookup table. + The result of this lookup is the embedding of each ID in the *input*. + + All the input variables are passed in as local variables to the LayerHelper + constructor. + + Args: + input(Variable): Input to the function + size(tuple|list|None): Shape of the look up table parameter + is_sparse(bool): Boolean flag that specifying whether the input is sparse + param_attr(ParamAttr): Parameters for this layer + dtype(np.dtype|core.DataType|str): The type of data : float32, float_16, int etc + + Returns: + Variable: The tensor variable storing the embeddings of the \ + supplied inputs. + + Examples: + .. 
code-block:: python + + dict_size = len(dataset.ids) + data = fluid.layers.data(name='ids', shape=[32, 32], dtype='float32') + fc = fluid.layers.embedding(input=data, size=[dict_size, 16]) + """ + + helper = LayerHelper('embedding', **locals()) + w = helper.create_parameter( + attr=helper.param_attr, shape=size, dtype=dtype, is_bias=False) + tmp = helper.create_tmp_variable(dtype) + helper.append_op( + type='lookup_table', + inputs={'Ids': input, + 'W': w}, + outputs={'Out': tmp}, + attrs={'is_sparse': is_sparse}) + return tmp + + +# TODO(qijun): expose H0 and C0 +def dynamic_lstm(input, + size, + param_attr=None, + bias_attr=None, + use_peepholes=True, + is_reverse=False, + gate_activation='sigmoid', + cell_activation='tanh', + candidate_activation='tanh', + dtype='float32'): + helper = LayerHelper('lstm', **locals()) + size = size / 4 + weight = helper.create_parameter( + attr=helper.param_attr, shape=[size, 4 * size], dtype=dtype) + bias_size = [1, 7 * size] + if not use_peepholes: + bias_size[1] = 4 * size + bias = helper.create_parameter( + attr=helper.bias_attr, shape=bias_size, dtype=dtype, is_bias=True) + + hidden = helper.create_tmp_variable(dtype) + cell = helper.create_tmp_variable(dtype) + batch_gate = helper.create_tmp_variable(dtype) + batch_cell_pre_act = helper.create_tmp_variable(dtype) + + helper.append_op( + type='lstm', + inputs={'Input': input, + 'Weight': weight, + 'Bias': bias}, + outputs={ + 'Hidden': hidden, + 'Cell': cell, + 'BatchGate': batch_gate, + 'BatchCellPreAct': batch_cell_pre_act + }, + attrs={ + 'use_peepholes': use_peepholes, + 'is_reverse': is_reverse, + 'gate_activation': gate_activation, + 'cell_activation': cell_activation, + 'candidate_activation': candidate_activation + }) + return hidden, cell + + +def gru_unit(input, + hidden, + size, + weight=None, + bias=None, + activation='tanh', + gate_activation='sigmoid'): + """ + GRUUnit Operator implements partial calculations of the GRU unit as following: + + $$ + update \ gate: u_t = actGate(xu_t + W_u * h_{t-1} + b_u) \\ + reset \ gate: r_t = actGate(xr_t + W_r * h_{t-1} + b_r) \\ + output \ candidate: {h}_t = actNode(xc_t + W_c * dot(r_t, h_{t-1}) + b_c) \\ + output: h_t = dot((1 - u_t), h_{t-1}) + dot(u_t, {h}_t) + $$ + + which is same as one time step of GRU Operator. + + @note To implement the complete GRU unit, fully-connected operator must be + used before to feed xu, xr and xc as the Input of GRUUnit operator. 
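+
+    A minimal usage sketch (assuming `word_emb` is an existing input,
+    `prev_hidden` an existing hidden state of width `hidden_dim`; the fc
+    layer supplies the xu, xr and xc projections mentioned above):
+
+    .. code-block:: python
+
+        x_proj = fluid.layers.fc(input=word_emb, size=hidden_dim * 3)
+        hidden, _, _ = fluid.layers.gru_unit(
+            input=x_proj, hidden=prev_hidden, size=hidden_dim * 3)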
+
+    TODO(ChunweiYan) add more document here
+    """
+    activation_dict = dict(
+        identity=0,
+        sigmoid=1,
+        tanh=2,
+        relu=3, )
+    activation = activation_dict[activation]
+    gate_activation = activation_dict[gate_activation]
+
+    helper = LayerHelper('gru_unit', **locals())
+    dtype = helper.input_dtype()
+    size = size / 3
+
+    # create weight
+    if weight is None:
+        weight = helper.create_parameter(
+            attr=helper.param_attr, shape=[size, 3 * size], dtype=dtype)
+
+    # create bias
+    if bias is None:
+        bias_size = [1, 3 * size]
+        bias = helper.create_parameter(
+            attr=helper.bias_attr, shape=bias_size, dtype=dtype, is_bias=True)
+
+    gate = helper.create_tmp_variable(dtype)
+    reset_hidden_pre = helper.create_tmp_variable(dtype)
+    updated_hidden = helper.create_tmp_variable(dtype)
+
+    helper.append_op(
+        type='gru_unit',
+        inputs={'Input': input,
+                'HiddenPrev': hidden,
+                'Weight': weight},
+        outputs={
+            'Gate': gate,
+            'ResetHiddenPrev': reset_hidden_pre,
+            'Hidden': updated_hidden,
+        },
+        attrs={
+            # pass the user-selected activations through instead of
+            # hard-coding identity/sigmoid
+            'activation': activation,
+            'gate_activation': gate_activation,
+        })
+
+    return updated_hidden, reset_hidden_pre, gate
+
+
+def linear_chain_crf(input, label, param_attr=None):
+    helper = LayerHelper('linear_chain_crf', **locals())
+    size = input.shape[1]
+    transition = helper.create_parameter(
+        attr=helper.param_attr,
+        shape=[size + 2, size],
+        dtype=helper.input_dtype())
+    alpha = helper.create_tmp_variable(dtype=helper.input_dtype())
+    emission_exps = helper.create_tmp_variable(dtype=helper.input_dtype())
+    transition_exps = helper.create_tmp_variable(dtype=helper.input_dtype())
+    log_likelihood = helper.create_tmp_variable(dtype=helper.input_dtype())
+    helper.append_op(
+        type='linear_chain_crf',
+        inputs={"Emission": [input],
+                "Transition": transition,
+                "Label": label},
+        outputs={
+            "Alpha": [alpha],
+            "EmissionExps": [emission_exps],
+            "TransitionExps": transition_exps,
+            "LogLikelihood": log_likelihood
+        })
+
+    return log_likelihood
+
+
+def crf_decoding(input, param_attr, label=None):
+    helper = LayerHelper('crf_decoding', **locals())
+    transition = helper.get_parameter(param_attr.name)
+    viterbi_path = helper.create_tmp_variable(dtype=helper.input_dtype())
+    helper.append_op(
+        type='crf_decoding',
+        inputs={"Emission": [input],
+                "Transition": transition,
+                "Label": label},
+        outputs={"ViterbiPath": [viterbi_path]})
+
+    return viterbi_path
+
+
+def cos_sim(X, Y, **kwargs):
+    """
+    This function computes the cosine similarity between the tensors
+    X and Y and returns it as the output.
+    """
+    helper = LayerHelper('cos_sim', **kwargs)
+    out = helper.create_tmp_variable(dtype=X.dtype)
+    xnorm = helper.create_tmp_variable(dtype=X.dtype)
+    ynorm = helper.create_tmp_variable(dtype=X.dtype)
+    helper.append_op(
+        type='cos_sim',
+        inputs={'X': [X],
+                'Y': [Y]},
+        outputs={'Out': [out],
+                 'XNorm': [xnorm],
+                 'YNorm': [ynorm]})
+    return out
+
+
+def cross_entropy(input, label, **kwargs):
+    """
+    This function computes cross_entropy using the input and label.
+    """
+    helper = LayerHelper('cross_entropy', **kwargs)
+    out = helper.create_tmp_variable(dtype=input.dtype)
+    helper.append_op(
+        type='cross_entropy',
+        inputs={'X': [input],
+                'Label': [label]},
+        outputs={'Y': [out]},
+        attrs=kwargs)
+    return out
+
+
+def square_error_cost(input, label, **kwargs):
+    """
+    This function returns the squared error cost between the input and the
+    label by appending the elementwise_sub and square operators shown below.
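+
+    A minimal usage sketch (assuming `y_predict` and `y` are existing
+    variables of the same shape):
+
+    .. code-block:: python
+
+        cost = fluid.layers.square_error_cost(input=y_predict, label=y)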
+ """ + helper = LayerHelper('square_error_cost', **kwargs) + minus_out = helper.create_tmp_variable(dtype=input.dtype) + helper.append_op( + type='elementwise_sub', + inputs={'X': [input], + 'Y': [label]}, + outputs={'Out': [minus_out]}) + + square_out = helper.create_tmp_variable(dtype=input.dtype) + helper.append_op( + type='square', inputs={'X': [minus_out]}, + outputs={'Out': [square_out]}) + return square_out + + +def accuracy(input, label, k=1, correct=None, total=None, **kwargs): + """ + This function computes the accuracy using the input and label. + The output is the top_k inputs and their indices. + """ + helper = LayerHelper("accuracy", **kwargs) + topk_out = helper.create_tmp_variable(dtype=input.dtype) + topk_indices = helper.create_tmp_variable(dtype="int64") + helper.append_op( + type="top_k", + inputs={"X": [input]}, + outputs={"Out": [topk_out], + "Indices": [topk_indices]}, + attrs={"k": k}) + acc_out = helper.create_tmp_variable(dtype="float32") + if correct is None: + correct = helper.create_tmp_variable(dtype="int64") + if total is None: + total = helper.create_tmp_variable(dtype="int64") + helper.append_op( + type="accuracy", + inputs={ + "Out": [topk_out], + "Indices": [topk_indices], + "Label": [label] + }, + outputs={ + "Accuracy": [acc_out], + "Correct": [correct], + "Total": [total], + }) + return acc_out + + +def chunk_eval(input, + label, + chunk_scheme, + num_chunk_types, + excluded_chunk_types=None, + **kwargs): + """ + This function computes and outputs the precision, recall and + F1-score of chunk detection. + """ + helper = LayerHelper("chunk_eval", **kwargs) + + # prepare output + precision = helper.create_tmp_variable(dtype="float32") + recall = helper.create_tmp_variable(dtype="float32") + f1_score = helper.create_tmp_variable(dtype="float32") + num_infer_chunks = helper.create_tmp_variable(dtype="int64") + num_label_chunks = helper.create_tmp_variable(dtype="int64") + num_correct_chunks = helper.create_tmp_variable(dtype="int64") + + helper.append_op( + type="chunk_eval", + inputs={"Inference": [input], + "Label": [label]}, + outputs={ + "Precision": [precision], + "Recall": [recall], + "F1-Score": [f1_score], + "NumInferChunks": [num_infer_chunks], + "NumLabelChunks": [num_label_chunks], + "NumCorrectChunks": [num_correct_chunks] + }, + attrs={ + "num_chunk_types": num_chunk_types, + "chunk_scheme": chunk_scheme, + "excluded_chunk_types": excluded_chunk_types or [] + }) + return precision, recall, f1_score, num_infer_chunks, num_label_chunks, num_correct_chunks + + +def sequence_conv(input, + num_filters, + filter_size=3, + filter_stride=1, + padding=None, + bias_attr=None, + param_attr=None, + act=None): + """ + This function creates the op for sequence_conv, using the inputs and + other convolutional configurations for the filters and stride as given + in the input parameters to the function. + """ + + # FIXME(dzh) : want to unify the argument of python layer + # function. So we ignore some unecessary attributes. + # such as, padding_trainable, context_start. 
+ + helper = LayerHelper('sequence_conv', **locals()) + dtype = helper.input_dtype() + filter_shape = [filter_size * input.shape[1], num_filters] + filter_param = helper.create_parameter( + attr=helper.param_attr, shape=filter_shape, dtype=dtype) + pre_bias = helper.create_tmp_variable(dtype) + + helper.append_op( + type='sequence_conv', + inputs={ + 'X': [input], + 'Filter': [filter_param], + }, + outputs={"Out": pre_bias}, + attrs={ + 'contextStride': filter_stride, + 'contextStart': -int(filter_size / 2), + 'contextLength': filter_size + }) + pre_act = helper.append_bias_op(pre_bias) + return helper.append_activation(pre_act) + + +def conv2d(input, + num_filters, + filter_size, + stride=None, + padding=None, + groups=None, + param_attr=None, + bias_attr=None, + act=None, + name=None): + """ + This function creates the op for a 2-dimensional Convolution. + This is performed using the parameters of filters(size, dimensionality etc) + , stride and other configurations for a Convolution operation. + This funciton can also append an activation on top of the + conv-2d output, if mentioned in the input parameters. + """ + + if stride is None: + stride = [1, 1] + helper = LayerHelper('conv2d', **locals()) + dtype = helper.input_dtype() + + num_channels = input.shape[1] + if groups is None: + num_filter_channels = num_channels + else: + if num_channels % groups != 0: + raise ValueError("num_channels must be divisible by groups.") + num_filter_channels = num_channels / groups + + if isinstance(filter_size, int): + filter_size = [filter_size, filter_size] + if isinstance(stride, int): + stride = [stride, stride] + if isinstance(padding, int): + padding = [padding, padding] + + input_shape = input.shape + filter_shape = [num_filters, num_filter_channels] + filter_size + + def _get_default_param_initializer(): + std = (2.0 / (filter_size[0]**2 * num_channels))**0.5 + return Normal(0.0, std, 0) + + filter_param = helper.create_parameter( + attr=helper.param_attr, + shape=filter_shape, + dtype=dtype, + default_initializer=_get_default_param_initializer()) + + pre_bias = helper.create_tmp_variable(dtype) + + helper.append_op( + type='conv2d_cudnn', + inputs={ + 'Input': input, + 'Filter': filter_param, + }, + outputs={"Output": pre_bias}, + attrs={'strides': stride, + 'paddings': padding, + 'groups': groups}) + + pre_act = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2) + + return helper.append_activation(pre_act) + + +def sequence_pool(input, pool_type, **kwargs): + """ + This function add the operator for sequence pooling. + It pools features of all time-steps of each instance, and is applied + on top of the input using pool_type mentioned in the parameters. + + It supports four pool_type: + + - average: :math:`Out[i] = \\frac{\sum_i X_i}{N}` + - sum: :math:`Out[i] = \sum_jX_{ij}` + - sqrt: :math:`Out[i] = \\frac{\sum_jX_{ij}}{\sqrt{len(X_i)}}` + - max: :math:`Out[i] = max(X_i)` + + .. 
code-block:: text
+
+       x is a 1-level LoDTensor:
+         x.lod = [[0, 2, 5, 7]]
+         x.data = [1, 3, 2, 4, 6, 5, 1]
+         x.dims = [7, 1]
+
+       then output is a Tensor:
+         out.dim = [3, 1]
+         with condition len(x.lod[-1]) - 1 == out.dims[0]
+
+       for different pool_type:
+         average: out.data = [2, 4, 3], where 2=(1+3)/2, 4=(2+4+6)/3, 3=(5+1)/2
+         sum    : out.data = [4, 12, 6], where 4=1+3, 12=2+4+6, 6=5+1
+         sqrt   : out.data = [2.82, 6.93, 4.24], where 2.82=(1+3)/sqrt(2),
+                  6.93=(2+4+6)/sqrt(3), 4.24=(5+1)/sqrt(2)
+         max    : out.data = [3, 6, 5], where 3=max(1,3), 6=max(2,4,6), 5=max(5,1)
+
+    Args:
+        input(variable): The input variable which is a LoDTensor.
+        pool_type (string): The pooling type of sequence_pool.
+            It supports average, sum, sqrt and max.
+
+    Returns:
+        The sequence pooling variable which is a Tensor.
+
+    Examples:
+
+        .. code-block:: python
+
+             x = fluid.layers.data(name='x', shape=[7, 1],
+                              dtype='float32', lod_level=1)
+             avg_x = fluid.layers.sequence_pool(input=x, pool_type='average')
+             sum_x = fluid.layers.sequence_pool(input=x, pool_type='sum')
+             sqrt_x = fluid.layers.sequence_pool(input=x, pool_type='sqrt')
+             max_x = fluid.layers.sequence_pool(input=x, pool_type='max')
+    """
+    helper = LayerHelper('sequence_pool', input=input, **kwargs)
+    dtype = helper.input_dtype()
+    pool_out = helper.create_tmp_variable(dtype)
+    max_index = helper.create_tmp_variable(dtype)
+
+    helper.append_op(
+        type="sequence_pool",
+        inputs={"X": input},
+        outputs={"Out": pool_out,
+                 "MaxIndex": max_index},
+        attrs={"pooltype": pool_type.upper()})
+
+    return pool_out
+
+
+def sequence_first_step(input, **kwargs):
+    """
+    This function gets the first step of a sequence.
+
+    .. code-block:: text
+
+       x is a 1-level LoDTensor:
+         x.lod = [[0, 2, 5, 7]]
+         x.data = [1, 3, 2, 4, 6, 5, 1]
+         x.dims = [7, 1]
+
+       then output is a Tensor:
+         out.dim = [3, 1]
+         with condition len(x.lod[-1]) - 1 == out.dims[0]
+         out.data = [1, 2, 5], where 1=first(1,3), 2=first(2,4,6), 5=first(5,1)
+
+    Args:
+        input(variable): The input variable which is a LoDTensor.
+
+    Returns:
+        The sequence's first step variable which is a Tensor.
+
+    Examples:
+
+        .. code-block:: python
+
+             x = fluid.layers.data(name='x', shape=[7, 1],
+                              dtype='float32', lod_level=1)
+             x_first_step = fluid.layers.sequence_first_step(input=x)
+    """
+    return sequence_pool(input=input, pool_type="first")
+
+
+def sequence_last_step(input, **kwargs):
+    """
+    This function gets the last step of a sequence.
+
+    .. code-block:: text
+
+       x is a 1-level LoDTensor:
+         x.lod = [[0, 2, 5, 7]]
+         x.data = [1, 3, 2, 4, 6, 5, 1]
+         x.dims = [7, 1]
+
+       then output is a Tensor:
+         out.dim = [3, 1]
+         with condition len(x.lod[-1]) - 1 == out.dims[0]
+         out.data = [3, 6, 1], where 3=last(1,3), 6=last(2,4,6), 1=last(5,1)
+
+    Args:
+        input(variable): The input variable which is a LoDTensor.
+
+    Returns:
+        The sequence's last step variable which is a Tensor.
+
+    Examples:
+
+        .. code-block:: python
+
+             x = fluid.layers.data(name='x', shape=[7, 1],
+                              dtype='float32', lod_level=1)
+             x_last_step = fluid.layers.sequence_last_step(input=x)
+    """
+    return sequence_pool(input=input, pool_type="last")
+
+
+def pool2d(input,
+           pool_size,
+           pool_type,
+           pool_stride=None,
+           pool_padding=None,
+           global_pooling=False):
+    """
+    This function adds the operator for pooling in 2 dimensions, using the
+    pooling configurations mentioned in the input parameters.
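+
+    A minimal usage sketch (assuming `conv` is the output of a preceding
+    conv2d layer in NCHW layout):
+
+    .. code-block:: python
+
+        pool = fluid.layers.pool2d(input=conv, pool_size=2,
+                                   pool_type='max', pool_stride=2)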
+ """ + if pool_padding is None: + pool_padding = [0, 0] + if pool_stride is None: + pool_stride = [1, 1] + if pool_type not in ["max", "avg"]: + raise ValueError( + "Unknown pool_type: '%s'. It can only be 'max' or 'avg'.", + str(pool_type)) + if isinstance(pool_size, int): + pool_size = [pool_size, pool_size] + if isinstance(pool_stride, int): + pool_stride = [pool_stride, pool_stride] + if isinstance(pool_padding, int): + pool_padding = [pool_padding, pool_padding] + + helper = LayerHelper('pool2d', **locals()) + dtype = helper.input_dtype() + pool_out = helper.create_tmp_variable(dtype) + + helper.append_op( + type="pool2d", + inputs={"X": input}, + outputs={"Out": pool_out}, + attrs={ + "pooling_type": pool_type, + "ksize": pool_size, + "global_pooling": global_pooling, + "strides": pool_stride, + "paddings": pool_padding + }) + + return pool_out + + +def batch_norm(input, + act=None, + is_test=False, + momentum=0.9, + epsilon=1e-05, + param_attr=None, + bias_attr=None, + data_layout='NCHW'): + """ + This function helps create an operator to implement + the BatchNorm layer using the configurations from the input parameters. + """ + helper = LayerHelper('batch_norm', **locals()) + dtype = helper.input_dtype() + + input_shape = input.shape + if data_layout == 'NCHW': + channel_num = input_shape[1] + else: + if data_layout == 'NHWC': + channel_num = input_shape[-1] + else: + raise ValueError("unsupported data layout:" + data_layout) + + param_shape = [channel_num] + + # create parameter + scale = helper.create_parameter( + attr=helper.param_attr, + shape=param_shape, + dtype=dtype, + default_initializer=Constant(1.0)) + + bias = helper.create_parameter( + attr=helper.param_attr, shape=param_shape, dtype=dtype, is_bias=True) + + mean = helper.create_global_variable( + dtype=input.dtype, shape=param_shape, persistable=True) + helper.set_variable_initializer(var=mean, initializer=Constant(0.0)) + + variance = helper.create_global_variable( + dtype=input.dtype, shape=param_shape, persistable=True) + helper.set_variable_initializer(var=variance, initializer=Constant(1.0)) + + # create output + # mean and mean_out share the same memory + mean_out = mean + # variance and variance out share the same memory + variance_out = variance + saved_mean = helper.create_tmp_variable(dtype) + saved_variance = helper.create_tmp_variable(dtype) + + batch_norm_out = helper.create_tmp_variable(dtype) + + helper.append_op( + type="batch_norm", + inputs={ + "X": input, + "Scale": scale, + "Bias": bias, + "Mean": mean, + "Variance": variance + }, + outputs={ + "Y": batch_norm_out, + "MeanOut": mean_out, + "VarianceOut": variance_out, + "SavedMean": saved_mean, + "SavedVariance": saved_variance + }, + attrs={"momentum": momentum, + "epsilon": epsilon, + "is_test": is_test}) + + return helper.append_activation(batch_norm_out) + + +def beam_search_decode(ids, scores): + helper = LayerHelper('beam_search_decode', **locals()) + sentence_ids = helper.create_tmp_variable(dtype=ids.dtype) + sentence_scores = helper.create_tmp_variable(dtype=ids.dtype) + + helper.append_op( + type="beam_search_decode", + inputs={"Ids": ids, + "Scores": scores}, + outputs={ + "SentenceIds": sentence_ids, + "SentenceScores": sentence_scores + }) + + return sentence_ids, sentence_scores + + +def conv2d_transpose(input, + num_filters, + output_size=None, + filter_size=None, + padding=None, + stride=None, + dilation=None, + param_attr=None): + """ + The transpose of conv2d layer. + + This layer is also known as deconvolution layer. 
+ + Args: + input(Variable): The input image with [N, C, H, W] format. + num_filters(int): The number of filter. It is as same as the output + image channel. + output_size(int|tuple|None): The output image size. If output size is a + tuple, it must contain two integers, (image_H, image_W). This + parameter only works when filter_size is None. + filter_size(int|tuple|None): The filter size. If filter_size is a tuple, + it must contain two integers, (filter_size_H, filter_size_W). + Otherwise, the filter will be a square. None if use output size to + calculate filter_size + padding(int|tuple): The padding size. If padding is a tuple, it must + contain two integers, (padding_H, padding_W). Otherwise, the + padding_H = padding_W = padding. + stride(int|tuple): The stride size. If stride is a tuple, it must + contain two integers, (stride_H, stride_W). Otherwise, the + stride_H = stride_W = stride. + dilation(int|tuple): The dilation size. If dilation is a tuple, it must + contain two integers, (dilation_H, dilation_W). Otherwise, the + dilation_H = dilation_W = dilation. + param_attr: Parameter Attribute. + main_program(Program): the main program + startup_program(Program): the startup program + + Returns: + Variable: Output image. + """ + helper = LayerHelper("conv2d_transpose", **locals()) + if not isinstance(input, Variable): + raise TypeError("Input of conv2d_transpose must be Variable") + input_channel = input.shape[1] + + op_attr = dict() + + if isinstance(padding, int): + op_attr['paddings'] = [padding, padding] + elif padding is not None: + op_attr['paddings'] = padding + + if isinstance(stride, int): + op_attr['strides'] = [stride, stride] + elif stride is not None: + op_attr['strides'] = stride + + if isinstance(dilation, int): + op_attr['dilations'] = [dilation, dilation] + elif dilation is not None: + op_attr['dilations'] = dilation + + if filter_size is None: + if output_size is None: + raise ValueError("output_size must be set when filter_size is None") + if isinstance(output_size, int): + output_size = [output_size, output_size] + + padding = op_attr.get('paddings', [0, 0]) + stride = op_attr.get('strides', [1, 1]) + dilation = op_attr.get('dilations', [1, 1]) + + h_in = input.shape[2] + w_in = input.shape[3] + + filter_size_h = (output_size[0] - (h_in - 1) * stride[0] + 2 * + padding[0] - 1) / dilation[0] + 1 + filter_size_w = (output_size[1] - (w_in - 1) * stride[1] + 2 * + padding[1] - 1) / dilation[1] + 1 + filter_size = [filter_size_h, filter_size_w] + + elif isinstance(filter_size, int): + filter_size = [filter_size, filter_size] + + filter_shape = [input_channel, num_filters] + filter_size + img_filter = helper.create_parameter( + dtype=input.dtype, shape=filter_shape, attr=helper.param_attr) + + out = helper.create_tmp_variable(dtype=input.dtype) + helper.append_op( + type='conv2d_transpose', + inputs={'Input': [input], + 'Filter': [img_filter]}, + outputs={'Output': out}, + attrs=op_attr) + + return out + + +def sequence_expand(x, y): + """Sequence Expand Layer. This layer will expand the input variable **x** + according to LoD information of **y**. And the following examples will + explain how sequence_expand works: + + .. 
code-block:: text + + * Case 1 + x is a LoDTensor: + x.lod = [[0, 2, 3], + [0, 1, 3, 4]] + x.data = [a, b, c, d] + x.dims = [4, 1] + + y is a LoDTensor: + y.lod = [[0, 2, 4], + [0, 3, 6, 7, 8]] + + with condition len(y.lod[-1]) - 1 == x.dims[0] + + then output is a 2-level LoDTensor: + out.lod = [[0, 2, 4], + [0, 3, 6, 7, 8]] + out.data = [a, a, a, b, b, b, c, d] + out.dims = [8, 1] + + * Case 2 + x is a Tensor: + x.data = [a, b, c] + x.dims = [3, 1] + + y is a LoDTensor: + y.lod = [[0, 2, 3, 6]] + + with condition len(y.lod[-1]) - 1 == x.dims[0] + + then output is a 1-level LoDTensor: + out.lod = [[0, 2, 3, 6]] + out.data = [a, a, b, c, c, c] + out.dims = [6, 1] + + Args: + x (Variable): The input variable which is a Tensor or LoDTensor. + y (Variable): The input variable which is a LoDTensor. + + Returns: + Variable: The expanded variable which is a LoDTensor. + + Examples: + .. code-block:: python + + x = fluid.layers.data(name='x', shape=[10], dtype='float32') + y = fluid.layers.data(name='y', shape=[10, 20], + dtype='float32', lod_level=1) + out = layers.sequence_expand(x=x, y=y) + """ + helper = LayerHelper('sequence_expand', input=x, **locals()) + dtype = helper.input_dtype() + tmp = helper.create_tmp_variable(dtype) + helper.append_op( + type='sequence_expand', inputs={'X': x, + 'Y': y}, outputs={'Out': tmp}) + return tmp + + +def lstm_unit(x_t, + hidden_t_prev, + cell_t_prev, + forget_bias=0.0, + param_attr=None, + bias_attr=None): + """Lstm unit layer. The equation of a lstm step is: + + .. math:: + + i_t & = \sigma(W_{x_i}x_{t} + W_{h_i}h_{t-1} + W_{c_i}c_{t-1} + b_i) + + f_t & = \sigma(W_{x_f}x_{t} + W_{h_f}h_{t-1} + W_{c_f}c_{t-1} + b_f) + + c_t & = f_tc_{t-1} + i_t tanh (W_{x_c}x_t+W_{h_c}h_{t-1} + b_c) + + o_t & = \sigma(W_{x_o}x_{t} + W_{h_o}h_{t-1} + W_{c_o}c_t + b_o) + + h_t & = o_t tanh(c_t) + + The inputs of lstm unit includes :math:`x_t`, :math:`h_{t-1}` and + :math:`c_{t-1}`. The implementation separates the linear transformation + and non-linear transformation apart. Here, we take :math:`i_t` as an + example. The linear transformation is applied by calling a `fc` layer and + the equation is: + + .. math:: + + L_{i_t} = W_{x_i}x_{t} + W_{h_i}h_{t-1} + W_{c_i}c_{t-1} + b_i + + The non-linear transformation is applied by calling `lstm_unit_op` and the + equation is: + + .. math:: + + i_t = \sigma(L_{i_t}) + + This layer has two outputs including :math:`h_t` and :math:`o_t`. + + Args: + x_t (Variable): The input value of current step. + hidden_t_prev (Variable): The hidden value of lstm unit. + cell_t_prev (Variable): The cell value of lstm unit. + forget_bias (float): The forget bias of lstm unit. + param_attr (ParamAttr): The attributes of parameter weights, used to set + initializer, name etc. + bias_attr (ParamAttr): The attributes of bias weights, if not False, + bias weights will be created and be set to default value. + + Returns: + tuple: The hidden value and cell value of lstm unit. + + Raises: + ValueError: The ranks of **x_t**, **hidden_t_prev** and **cell_t_prev**\ + not be 2 or the 1st dimensions of **x_t**, **hidden_t_prev** \ + and **cell_t_prev** not be the same. + + Examples: + + .. 
+
+
+def reduce_sum(input, dim=None, keep_dim=False):
+    """
+    Computes the sum of tensor elements over the given dimension.
+
+    Args:
+        input (Variable): The input variable which is a Tensor or LoDTensor.
+        dim (int|None): The dimension along which the sum is performed. If
+            :attr:`None`, sum all elements of :attr:`input` and return a
+            Tensor variable with a single element, otherwise it must be in the
+            range :math:`[-rank(input), rank(input))`. If :math:`dim < 0`,
+            the dimension to reduce is :math:`rank + dim`.
+        keep_dim (bool): Whether to reserve the reduced dimension in the
+            output Tensor. The result tensor will have one fewer dimension
+            than the :attr:`input` unless :attr:`keep_dim` is true.
+
+    Returns:
+        Variable: The reduced Tensor variable.
+
+    Examples:
+        .. code-block:: python
+
+            # x is a Tensor variable with the following elements:
+            #    [[0.2, 0.3, 0.5, 0.9]
+            #     [0.1, 0.2, 0.6, 0.7]]
+            # Each example is followed by the corresponding output tensor.
+            fluid.layers.reduce_sum(x)  # [3.5]
+            fluid.layers.reduce_sum(x, dim=0)  # [0.3, 0.5, 1.1, 1.6]
+            fluid.layers.reduce_sum(x, dim=-1)  # [1.9, 1.6]
+            fluid.layers.reduce_sum(x, dim=1, keep_dim=True)  # [[1.9], [1.6]]
+    """
+    helper = LayerHelper('reduce_sum', **locals())
+    out = helper.create_tmp_variable(dtype=helper.input_dtype())
+    helper.append_op(
+        type='reduce_sum',
+        inputs={'X': input},
+        outputs={'Out': out},
+        attrs={
+            'dim': dim if dim is not None else 0,
+            'keep_dim': keep_dim,
+            'reduce_all': True if dim is None else False
+        })
+    return out
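The dim/keep_dim handling above maps directly onto NumPy reduction semantics, and the same pattern repeats for reduce_mean, reduce_max and reduce_min below. A small equivalence sketch, using the values from the docstring example:

.. code-block:: python

    import numpy as np

    x = np.array([[0.2, 0.3, 0.5, 0.9],
                  [0.1, 0.2, 0.6, 0.7]], dtype='float32')

    # dim=None maps to reduce_all=True: collapse every dimension.
    assert np.isclose(x.sum(), 3.5)
    # dim=-1 reduces over rank + dim, i.e. the last axis here.
    assert np.allclose(x.sum(axis=-1), [1.9, 1.6])
    # keep_dim=True keeps the reduced axis with size 1.
    assert x.sum(axis=1, keepdims=True).shape == (2, 1)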
+
+
+def reduce_mean(input, dim=None, keep_dim=False):
+    """
+    Computes the mean of tensor elements over the given dimension.
+
+    Args:
+        input (Variable): The input variable which is a Tensor or LoDTensor.
+        dim (int|None): The dimension along which the mean is computed. If
+            :attr:`None`, compute the mean over all elements of :attr:`input`
+            and return a Tensor variable with a single element, otherwise it
+            must be in the range :math:`[-rank(input), rank(input))`. If
+            :math:`dim < 0`, the dimension to reduce is :math:`rank + dim`.
+        keep_dim (bool): Whether to reserve the reduced dimension in the
+            output Tensor. The result tensor will have one fewer dimension
+            than the :attr:`input` unless :attr:`keep_dim` is true.
+
+    Returns:
+        Variable: The reduced Tensor variable.
+
+    Examples:
+        .. code-block:: python
+
+            # x is a Tensor variable with the following elements:
+            #    [[0.2, 0.3, 0.5, 0.9]
+            #     [0.1, 0.2, 0.6, 0.7]]
+            # Each example is followed by the corresponding output tensor.
+            fluid.layers.reduce_mean(x)  # [0.4375]
+            fluid.layers.reduce_mean(x, dim=0)  # [0.15, 0.25, 0.55, 0.8]
+            fluid.layers.reduce_mean(x, dim=-1)  # [0.475, 0.4]
+            fluid.layers.reduce_mean(x, dim=1, keep_dim=True)  # [[0.475], [0.4]]
+    """
+    helper = LayerHelper('reduce_mean', **locals())
+    out = helper.create_tmp_variable(dtype=helper.input_dtype())
+    helper.append_op(
+        type='reduce_mean',
+        inputs={'X': input},
+        outputs={'Out': out},
+        attrs={
+            'dim': dim if dim is not None else 0,
+            'keep_dim': keep_dim,
+            'reduce_all': True if dim is None else False
+        })
+    return out
+
+
+def reduce_max(input, dim=None, keep_dim=False):
+    """
+    Computes the maximum of tensor elements over the given dimension.
+
+    Args:
+        input (Variable): The input variable which is a Tensor or LoDTensor.
+        dim (int|None): The dimension along which the maximum is computed.
+            If :attr:`None`, compute the maximum over all elements of
+            :attr:`input` and return a Tensor variable with a single element,
+            otherwise it must be in the range :math:`[-rank(input), rank(input))`.
+            If :math:`dim < 0`, the dimension to reduce is :math:`rank + dim`.
+        keep_dim (bool): Whether to reserve the reduced dimension in the
+            output Tensor. The result tensor will have one fewer dimension
+            than the :attr:`input` unless :attr:`keep_dim` is true.
+
+    Returns:
+        Variable: The reduced Tensor variable.
+
+    Examples:
+        .. code-block:: python
+
+            # x is a Tensor variable with the following elements:
+            #    [[0.2, 0.3, 0.5, 0.9]
+            #     [0.1, 0.2, 0.6, 0.7]]
+            # Each example is followed by the corresponding output tensor.
+            fluid.layers.reduce_max(x)  # [0.9]
+            fluid.layers.reduce_max(x, dim=0)  # [0.2, 0.3, 0.6, 0.9]
+            fluid.layers.reduce_max(x, dim=-1)  # [0.9, 0.7]
+            fluid.layers.reduce_max(x, dim=1, keep_dim=True)  # [[0.9], [0.7]]
+    """
+    helper = LayerHelper('reduce_max', **locals())
+    out = helper.create_tmp_variable(dtype=helper.input_dtype())
+    helper.append_op(
+        type='reduce_max',
+        inputs={'X': input},
+        outputs={'Out': out},
+        attrs={
+            'dim': dim if dim is not None else 0,
+            'keep_dim': keep_dim,
+            'reduce_all': True if dim is None else False
+        })
+    return out
+
+
+def reduce_min(input, dim=None, keep_dim=False):
+    """
+    Computes the minimum of tensor elements over the given dimension.
+
+    Args:
+        input (Variable): The input variable which is a Tensor or LoDTensor.
+        dim (int|None): The dimension along which the minimum is computed.
+            If :attr:`None`, compute the minimum over all elements of
+            :attr:`input` and return a Tensor variable with a single element,
+            otherwise it must be in the range :math:`[-rank(input), rank(input))`.
+            If :math:`dim < 0`, the dimension to reduce is :math:`rank + dim`.
+        keep_dim (bool): Whether to reserve the reduced dimension in the
+            output Tensor. The result tensor will have one fewer dimension
+            than the :attr:`input` unless :attr:`keep_dim` is true.
+
+    Returns:
+        Variable: The reduced Tensor variable.
+
+    Examples:
+        .. code-block:: python
+
+            # x is a Tensor variable with the following elements:
+            #    [[0.2, 0.3, 0.5, 0.9]
+            #     [0.1, 0.2, 0.6, 0.7]]
+            # Each example is followed by the corresponding output tensor.
+            fluid.layers.reduce_min(x)  # [0.1]
+            fluid.layers.reduce_min(x, dim=0)  # [0.1, 0.2, 0.5, 0.7]
+            fluid.layers.reduce_min(x, dim=-1)  # [0.2, 0.1]
+            fluid.layers.reduce_min(x, dim=1, keep_dim=True)  # [[0.2], [0.1]]
+    """
+    helper = LayerHelper('reduce_min', **locals())
+    out = helper.create_tmp_variable(dtype=helper.input_dtype())
+    helper.append_op(
+        type='reduce_min',
+        inputs={'X': input},
+        outputs={'Out': out},
+        attrs={
+            'dim': dim if dim is not None else 0,
+            'keep_dim': keep_dim,
+            'reduce_all': True if dim is None else False
+        })
+    return out
diff --git a/python/paddle/v2/fluid/layers/ops.py b/python/paddle/v2/fluid/layers/ops.py
new file mode 100644
index 0000000000..d2ff6841a3
--- /dev/null
+++ b/python/paddle/v2/fluid/layers/ops.py
@@ -0,0 +1,9 @@
+from ..registry import register_layer
+__all__ = [
+    'mean', 'mul', 'dropout', 'reshape', 'sigmoid', 'scale', 'transpose',
+    'sigmoid_cross_entropy_with_logits', 'elementwise_add', 'elementwise_div',
+    'elementwise_sub', 'elementwise_mul', 'clip', 'abs', 'sequence_softmax'
+]
+
+for _OP in set(__all__):
+    globals()[_OP] = register_layer(_OP)
diff --git a/python/paddle/v2/fluid/layers/tensor.py b/python/paddle/v2/fluid/layers/tensor.py
new file mode 100644
index 0000000000..e5820d24cd
--- /dev/null
+++ b/python/paddle/v2/fluid/layers/tensor.py
@@ -0,0 +1,215 @@
+from ..layer_helper import LayerHelper
+
+__all__ = [
+    'create_tensor', 'cast', 'concat', 'sums', 'assign',
+    'fill_constant_batch_size_like', 'fill_constant', 'ones', 'zeros'
+]
+
+
+def create_tensor(dtype, name=None):
+    helper = LayerHelper("create_tensor", **locals())
+    return helper.create_variable(name=helper.name, dtype=dtype)
+
+
+def cast(x, dtype):
+    """
+    This function takes in the input with input_dtype
+    and casts it to the output_dtype as the output.
+    """
+    helper = LayerHelper('cast', **locals())
+    out = helper.create_tmp_variable(dtype=dtype)
+    helper.append_op(
+        type='cast',
+        inputs={'X': [x]},
+        outputs={'Out': [out]},
+        attrs={'in_dtype': x.dtype,
+               'out_dtype': out.dtype})
+    return out
+
+
+def concat(input, axis=0):
+    """
+    **Concat**
+
+    This function concatenates the input tensors along the given axis
+    and returns the result as the output.
+
+    Args:
+        input(list): List of tensors to be concatenated
+        axis(int): Integer axis along which the tensors will be concatenated
+
+    Returns:
+        Variable: Output variable of the concatenation
+
+    Examples:
+        .. code-block:: python
+
+            out = fluid.layers.concat(input=[Efirst, Esecond, Ethird, Efourth])
+    """
+    helper = LayerHelper('concat', **locals())
+    out = helper.create_tmp_variable(dtype=helper.input_dtype())
+    helper.append_op(
+        type='concat',
+        inputs={'X': input},
+        outputs={'Out': [out]},
+        attrs={'axis': axis})
+    return out
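cast above has no Examples section; a minimal usage sketch (layer names a and b are hypothetical) that combines it with concat to merge inputs of different dtypes:

.. code-block:: python

    import paddle.v2.fluid as fluid

    a = fluid.layers.data(name='a', shape=[16], dtype='float32')
    b = fluid.layers.data(name='b', shape=[16], dtype='float64')
    # Bring both inputs to a common dtype before concatenating on axis 1.
    b32 = fluid.layers.cast(x=b, dtype='float32')
    ab = fluid.layers.concat(input=[a, b32], axis=1)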
+
+
+def sums(input, out=None):
+    """This function performs the sum operation on the input and returns the
+    result as the output.
+
+    Args:
+        input (Variable|list): The input tensor that has the elements
+                               that need to be summed up.
+        out (Variable|None): The output variable to write the sum into. A new
+                             temporary variable is created if it is None.
+
+    Returns:
+        Variable: The tensor type variable that has the sum of the input
+                  written to it.
+
+    Examples:
+        .. code-block:: python
+
+            tmp = fluid.layers.zeros(shape=[10], dtype='int32')
+            i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
+            a0 = layers.array_read(array=tmp, i=i)
+            i = layers.increment(x=i)
+            a1 = layers.array_read(array=tmp, i=i)
+            mean_a0 = layers.mean(x=a0)
+            mean_a1 = layers.mean(x=a1)
+            a_sum = layers.sums(input=[mean_a0, mean_a1])
+    """
+    helper = LayerHelper('sum', **locals())
+    if out is None:
+        out = helper.create_tmp_variable(dtype=helper.input_dtype())
+    helper.append_op(type='sum', inputs={'X': input}, outputs={'Out': out})
+    return out
+
+
+def assign(input, output):
+    """
+    **Assign**
+
+    This function copies the *input* Variable to the *output* Variable.
+
+    Args:
+        input(Variable): The source variable
+        output(Variable): The destination variable
+
+    Returns:
+        Variable: The destination variable that was supplied as the *output*.
+
+    Examples:
+        .. code-block:: python
+
+            out = fluid.layers.create_tensor(dtype='float32')
+            hidden = fluid.layers.fc(input=data, size=10)
+            fluid.layers.assign(hidden, out)
+    """
+    helper = LayerHelper('assign', **locals())
+    helper.append_op(
+        type='scale',
+        inputs={'X': [input]},
+        outputs={'Out': [output]},
+        attrs={'scale': 1.0})
+    return output
+
+
+def fill_constant(shape, dtype, value, out=None):
+    """
+    **fill_constant**
+
+    This function creates a tensor of the specified *shape* and
+    *dtype*, and initializes it with a constant supplied in *value*.
+
+    It also sets *stop_gradient* to True.
+
+    Args:
+        shape(tuple|list|None): Shape of the output tensor
+        dtype(np.dtype|core.DataType|str): Data type of the output tensor
+        value(float): Constant value to initialize the output tensor
+        out(Variable): Output Variable to initialize
+
+    Returns:
+        Variable: The tensor variable storing the output
+
+    Examples:
+        .. code-block:: python
+
+            data = fluid.layers.fill_constant(shape=[1], value=0, dtype='int64')
+    """
+    helper = LayerHelper("fill_constant", **locals())
+    if out is None:
+        out = helper.create_tmp_variable(dtype=dtype)
+    helper.append_op(
+        type='fill_constant',
+        inputs={},
+        outputs={'Out': [out]},
+        attrs={'shape': shape,
+               'dtype': out.dtype,
+               'value': float(value)})
+    out.stop_gradient = True
+    return out
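fill_constant can also write into an existing variable through out, which pairs naturally with create_tensor and assign above. A hedged sketch of seeding and copying a counter; the names step and snapshot are hypothetical:

.. code-block:: python

    import paddle.v2.fluid as fluid

    # Seed a step counter once, then copy it with assign().
    step = fluid.layers.create_tensor(dtype='int64')
    fluid.layers.fill_constant(shape=[1], dtype='int64', value=0, out=step)
    snapshot = fluid.layers.create_tensor(dtype='int64')
    fluid.layers.assign(input=step, output=snapshot)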
+
+
+def fill_constant_batch_size_like(input,
+                                  shape,
+                                  dtype,
+                                  value,
+                                  input_dim_idx=0,
+                                  output_dim_idx=0):
+    """
+    **fill_constant_batch_size_like**
+
+    This function creates a tensor of the specified *shape*, *dtype* and batch
+    size, and initializes it with a constant supplied in *value*. The batch
+    size is obtained from the *input* tensor.
+
+    It also sets *stop_gradient* to True.
+
+    Args:
+        input(Variable): Tensor whose dimensions will be used to get the batch size
+        shape(tuple|list|None): Shape of the output tensor
+        dtype(np.dtype|core.DataType|str): Data type of the output tensor
+        value(float): Constant value to initialize the output tensor
+        input_dim_idx(int): Index of the input's batch size dimension
+        output_dim_idx(int): Index of the output's batch size dimension
+
+    Returns:
+        Variable: The tensor variable storing the output
+
+    Examples:
+        .. code-block:: python
+
+            # 'like' is an existing variable supplying the batch dimension
+            data = fluid.layers.fill_constant_batch_size_like(
+                input=like, shape=[1], value=0, dtype='int64')
+    """
+    helper = LayerHelper("fill_constant_batch_size_like", **locals())
+    out = helper.create_tmp_variable(dtype=dtype)
+    helper.append_op(
+        type='fill_constant_batch_size_like',
+        inputs={'Input': input},
+        outputs={'Out': [out]},
+        attrs={
+            'shape': shape,
+            'dtype': out.dtype,
+            'value': float(value),
+            'input_dim_idx': input_dim_idx,
+            'output_dim_idx': output_dim_idx
+        })
+    out.stop_gradient = True
+    return out
+
+
+def ones(shape, dtype):
+    """
+    This function performs the same function as fill_constant() declared above
+    with the constant value being 1.0.
+    """
+    return fill_constant(value=1.0, **locals())
+
+
+def zeros(shape, dtype):
+    """
+    This function performs the same function as fill_constant() declared above
+    with the constant value being 0.0.
+    """
+    return fill_constant(value=0.0, **locals())
diff --git a/python/paddle/v2/fluid/nets.py b/python/paddle/v2/fluid/nets.py
index 7ef524318e..54886a8f2c 100644
--- a/python/paddle/v2/fluid/nets.py
+++ b/python/paddle/v2/fluid/nets.py
@@ -10,25 +10,19 @@ def simple_img_conv_pool(input,
                          pool_stride,
                          act,
                          param_attr=None,
-                         pool_type='max',
-                         main_program=None,
-                         startup_program=None):
+                         pool_type='max'):
     conv_out = layers.conv2d(
         input=input,
         num_filters=num_filters,
         filter_size=filter_size,
         param_attr=param_attr,
-        act=act,
-        main_program=main_program,
-        startup_program=startup_program)
+        act=act)
 
     pool_out = layers.pool2d(
         input=conv_out,
         pool_size=pool_size,
         pool_type=pool_type,
-        pool_stride=pool_stride,
-        main_program=main_program,
-        startup_program=startup_program)
+        pool_stride=pool_stride)
     return pool_out
 
 
@@ -42,9 +36,7 @@ def img_conv_group(input,
                    conv_with_batchnorm=False,
                    conv_batchnorm_drop_rate=None,
                    pool_stride=1,
-                   pool_type=None,
-                   main_program=None,
-                   startup_program=None):
+                   pool_type=None):
     """
     Image Convolution Group, Used for vgg net.
""" @@ -75,31 +67,19 @@ def img_conv_group(input, filter_size=conv_filter_size[i], padding=conv_padding[i], param_attr=param_attr[i], - act=local_conv_act, - main_program=main_program, - startup_program=startup_program) + act=local_conv_act) if conv_with_batchnorm[i]: - tmp = layers.batch_norm( - input=tmp, - act=conv_act, - main_program=main_program, - startup_program=startup_program) + tmp = layers.batch_norm(input=tmp, act=conv_act) drop_rate = conv_batchnorm_drop_rate[i] if abs(drop_rate) > 1e-5: - tmp = layers.dropout( - x=tmp, - dropout_prob=drop_rate, - main_program=main_program, - startup_program=startup_program) + tmp = layers.dropout(x=tmp, dropout_prob=drop_rate) pool_out = layers.pool2d( input=tmp, pool_size=pool_size, pool_type=pool_type, - pool_stride=pool_stride, - main_program=main_program, - startup_program=startup_program) + pool_stride=pool_stride) return pool_out @@ -108,21 +88,13 @@ def sequence_conv_pool(input, filter_size, param_attr=None, act="sigmoid", - pool_type="max", - main_program=None, - startup_program=None): + pool_type="max"): conv_out = layers.sequence_conv( input=input, num_filters=num_filters, filter_size=filter_size, param_attr=param_attr, - act=act, - main_program=main_program, - startup_program=startup_program) + act=act) - pool_out = layers.sequence_pool( - input=conv_out, - pool_type=pool_type, - main_program=main_program, - startup_program=startup_program) + pool_out = layers.sequence_pool(input=conv_out, pool_type=pool_type) return pool_out diff --git a/python/paddle/v2/fluid/optimizer.py b/python/paddle/v2/fluid/optimizer.py index bbdfab2df9..c56a531ed5 100644 --- a/python/paddle/v2/fluid/optimizer.py +++ b/python/paddle/v2/fluid/optimizer.py @@ -2,10 +2,11 @@ from collections import defaultdict import framework from backward import append_backward_ops -from framework import unique_name +from framework import unique_name, program_guard from initializer import Constant from layer_helper import LayerHelper from regularizer import append_regularization_ops +from clip import append_gradient_clip_ops __all__ = ['SGD', 'Momentum', 'Adagrad', 'Adam', 'Adamax', 'DecayedAdagrad'] @@ -159,34 +160,32 @@ class Optimizer(object): # Create any accumulators program = loss.block.program - self.helper = LayerHelper( - self.__class__.__name__, - main_program=program, - startup_program=startup_program) - self._create_accumulators(loss.block, - [p[0] for p in parameters_and_grads]) - - optimize_ops = [] - for param_and_grad in parameters_and_grads: - if param_and_grad[0].trainable is True and param_and_grad[ - 1] is not None: - optimize_op = self._append_optimize_op(loss.block, - param_and_grad) - optimize_ops.append(optimize_op) - - # Returned list of ops can include more ops in addition - # to optimization ops - return_ops = optimize_ops - - # Get custom finish ops for subclasses - # FIXME: Need to fix this once we figure out how to handle dependencies - finish_ops = self._finish_update(loss.block) - if finish_ops is not None: - return_ops += finish_ops - - if self._global_step is not None: - return_ops.append(self._increment_global_step(loss.block)) - return return_ops + with program_guard(program, startup_program): + self.helper = LayerHelper(self.__class__.__name__) + self._create_accumulators(loss.block, + [p[0] for p in parameters_and_grads]) + + optimize_ops = [] + for param_and_grad in parameters_and_grads: + if param_and_grad[0].trainable is True and param_and_grad[ + 1] is not None: + optimize_op = self._append_optimize_op(loss.block, + param_and_grad) + 
optimize_ops.append(optimize_op) + + # Returned list of ops can include more ops in addition + # to optimization ops + return_ops = optimize_ops + + # Get custom finish ops for subclasses + # FIXME: Need to fix this once we figure out how to handle dependencies + finish_ops = self._finish_update(loss.block) + if finish_ops is not None: + return_ops += finish_ops + + if self._global_step is not None: + return_ops.append(self._increment_global_step(loss.block)) + return return_ops def minimize(self, loss, @@ -199,12 +198,16 @@ class Optimizer(object): `create_optimization_pass()` into one. """ params_grads = append_backward_ops(loss, parameter_list, no_grad_set) + + params_grads = append_gradient_clip_ops(params_grads) + # Add regularization if any params_grads = append_regularization_ops(params_grads, self.regularization) + optimize_ops = self.create_optimization_pass(params_grads, loss, startup_program) - return optimize_ops + return optimize_ops, params_grads class SGDOptimizer(Optimizer): diff --git a/python/paddle/v2/fluid/param_attr.py b/python/paddle/v2/fluid/param_attr.py index 86088fdd7c..ab4561b042 100644 --- a/python/paddle/v2/fluid/param_attr.py +++ b/python/paddle/v2/fluid/param_attr.py @@ -1,6 +1,8 @@ from initializer import Initializer, Xavier, Constant from regularizer import WeightDecayRegularizer +__all__ = ['ParamAttr'] + class ParamAttr(object): def __init__(self, @@ -8,12 +10,14 @@ class ParamAttr(object): initializer=None, learning_rate=1.0, regularizer=None, - trainable=True): + trainable=True, + clip=None): self.name = name self.initializer = initializer self.learning_rate = learning_rate self.regularizer = regularizer self.trainable = trainable + self.clip = clip def set_default_initializer(self, initializer): if initializer is None: @@ -36,6 +40,8 @@ class ParamAttr(object): def to_attr(arg): if arg is None: return ParamAttr() + elif isinstance(arg, list) or isinstance(arg, tuple): + return [ParamAttr.to_attr(a) for a in arg] elif isinstance(arg, ParamAttr): return arg elif isinstance(arg, str) or isinstance(arg, unicode): @@ -52,9 +58,12 @@ class ParamAttr(object): def to_kwargs(self, with_initializer=False): kwargs = { 'name': self.name, - 'learning_rate': self.learning_rate, + 'optimize_attr': { + 'learning_rate': self.learning_rate + }, 'regularizer': self.regularizer, - 'trainable': self.trainable + 'trainable': self.trainable, + 'clip_attr': self.clip } if with_initializer: kwargs['initializer'] = self.initializer diff --git a/python/paddle/v2/fluid/profiler.py b/python/paddle/v2/fluid/profiler.py index 2069b713fa..dcecd76224 100644 --- a/python/paddle/v2/fluid/profiler.py +++ b/python/paddle/v2/fluid/profiler.py @@ -1,5 +1,6 @@ import paddle.v2.fluid.core as core from contextlib import contextmanager +import os __all__ = ['CudaProfiler'] @@ -30,17 +31,21 @@ def cuda_profiler(output_file, output_mode=None, config=None): written into this file. output_mode (string) : The output mode has Key-Value pair format and Comma separated values format. It should be 'kvp' or 'csv'. - config (string) : The profiler options and counters can refer to - "Compute Command Line Profiler User Guide". + config (list of string) : The profiler options and counters can refer + to "Compute Command Line Profiler User Guide". 
""" if output_mode is None: output_mode = 'csv' if output_mode not in ['kvp', 'csv']: raise ValueError("The output mode must be 'kvp' or 'csv'.") config = NVPROF_CONFIG if config is None else config - core.nvprof_init(output_file, output_mode, config) + config_file = 'nvprof_config_file' + with open(config_file, 'wb') as fp: + fp.writelines(["%s\n" % item for item in config]) + core.nvprof_init(output_file, output_mode, config_file) # Enables profiler collection by the active CUDA profiling tool. core.nvprof_start() yield # Disables profiler collection. core.nvprof_stop() + os.remove(config_file) diff --git a/python/paddle/v2/fluid/registry.py b/python/paddle/v2/fluid/registry.py new file mode 100644 index 0000000000..7aa8290611 --- /dev/null +++ b/python/paddle/v2/fluid/registry.py @@ -0,0 +1,192 @@ +import re +import cStringIO +import warnings +import functools +import inspect + +import proto.framework_pb2 as framework_pb2 +from framework import OpProtoHolder, Variable, Program, Operator +from paddle.v2.fluid.layer_helper import LayerHelper, unique_name + +__all__ = ['deprecated', 'register_layer', 'autodoc'] + + +def _convert_(name): + """ + Formatting. + + Args: + name: The name/alias + + This function takes in a name and converts it to a standard format of + group1_group2. Where as per the regular expression, group1 can have + alphabets and numbers and group2 has capital alphabets. + + """ + s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name) + return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower() + + +def _generate_doc_string_(op_proto): + """ + Generate docstring by OpProto + + Args: + op_proto (framework_pb2.OpProto): a protobuf message typed OpProto + + Returns: + str: the document string + """ + + def _type_to_str_(tp): + return framework_pb2.AttrType.Name(tp) + + if not isinstance(op_proto, framework_pb2.OpProto): + raise TypeError("OpProto should be `framework_pb2.OpProto`") + + buf = cStringIO.StringIO() + buf.write(op_proto.comment) + buf.write('\nArgs:\n') + for each_input in op_proto.inputs: + line_begin = ' {0}: '.format(_convert_(each_input.name)) + buf.write(line_begin) + buf.write(each_input.comment) + buf.write('\n') + buf.write(' ' * len(line_begin)) + buf.write('Duplicable: ') + buf.write(str(each_input.duplicable)) + buf.write(' Optional: ') + buf.write(str(each_input.dispensable)) + buf.write('\n') + + for each_attr in op_proto.attrs: + buf.write(' ') + buf.write(each_attr.name) + buf.write(' (') + buf.write(_type_to_str_(each_attr.type)) + buf.write('): ') + buf.write(each_attr.comment) + buf.write('\n') + + if len(op_proto.outputs) != 0: + buf.write('\nReturns:\n') + buf.write(' ') + for each_opt in op_proto.outputs: + if not each_opt.intermediate: + break + buf.write(each_opt.comment) + + return buf.getvalue() + + +def register_layer(op_type): + """ + Register an Python layer for an Operator + + Args: + op_type: The name of the operator to be created + + This function takes in the operator type (sigmoid, mean , average etc) and + creates the operator functionality. 
+ + """ + op_proto = OpProtoHolder.instance().get_op_proto(op_type) + not_intermediate_outputs = \ + filter(lambda output: not output.intermediate, op_proto.outputs) + intermediate_outputs = \ + filter(lambda output: output.intermediate, op_proto.outputs) + + if len(not_intermediate_outputs) != 1: + raise ValueError("Only one non intermediate output operator can be", + "automatically generated") + + if not_intermediate_outputs[0].duplicable: + raise ValueError( + "Only non duplicable op can be automatically generated") + + for output in intermediate_outputs: + if output.duplicable: + raise ValueError("The op can be automatically generated only when ", + "all intermediate ops are not duplicable") + + o_name = not_intermediate_outputs[0].name + intermediate_output_names = [output.name for output in intermediate_outputs] + + def infer_and_check_dtype(op_proto, **kwargs): + """ + This function performs the sanity check for dtype and + instance type. + """ + dtype = None + for ipt in op_proto.inputs: + name = _convert_(ipt.name) + val = kwargs.pop(name, []) + if not isinstance(val, list) and not isinstance(val, tuple): + val = [val] + for each in val: + if not isinstance(each, Variable): + raise ValueError("input of {0} must be variable".format( + op_type)) + + if dtype is None: + dtype = each.dtype + elif dtype != each.dtype: + raise ValueError( + "operator {0} must input same dtype. {1} vs {2}".format( + op_type, dtype, each.dtype)) + + return dtype + + def func(**kwargs): + helper = LayerHelper(op_type, **kwargs) + + dtype = infer_and_check_dtype(op_proto, **kwargs) + + inputs = dict() + for ipt in op_proto.inputs: + name = _convert_(ipt.name) + val = kwargs.pop(name, []) + if not isinstance(val, list) and not isinstance(val, tuple): + val = [val] + inputs[ipt.name] = val + + outputs = dict() + out = helper.create_tmp_variable(dtype=dtype) + outputs[o_name] = [out] + for name in intermediate_output_names: + outputs[name] = [helper.create_tmp_variable(dtype=dtype)] + helper.append_op( + type=op_type, inputs=inputs, outputs=outputs, attrs=kwargs) + return helper.append_activation(out) + + func.__name__ = op_type + func.__doc__ = _generate_doc_string_(op_proto) + return func + + +def deprecated(func_or_class): + """ + Deprecated warning decorator. It will result a warning message. 
+ Should be used before class or function, member function + """ + + @functools.wraps(func) + def func_wrapper(*args, **kwargs): + """ + Wrap func with deprecated warning + """ + warnings.simplefilter('always', DeprecationWarning) # turn off filter + warnings.warn( + "Call to deprecated function {}.".format(func.__name__), + category=DeprecationWarning, + stacklevel=2) + warnings.simplefilter('default', DeprecationWarning) # reset filter + return func(*args, **kwargs) + + return func_wrapper + + +def autodoc(func): + func.__doc__ = _generate_doc_string_(OpProtoHolder.instance().get_op_proto( + func.__name__)) + return func diff --git a/python/paddle/v2/fluid/tests/.gitignore b/python/paddle/v2/fluid/tests/.gitignore index a648f2b387..62f82151eb 100644 --- a/python/paddle/v2/fluid/tests/.gitignore +++ b/python/paddle/v2/fluid/tests/.gitignore @@ -1,3 +1,4 @@ image/ fit_a_line.model/ tmp +cuda_profiler.txt diff --git a/python/paddle/v2/fluid/tests/__init__.py b/python/paddle/v2/fluid/tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/python/paddle/v2/fluid/tests/book/test_image_classification_train.py b/python/paddle/v2/fluid/tests/book/test_image_classification_train.py index 4e71b6f345..3d336ffe95 100644 --- a/python/paddle/v2/fluid/tests/book/test_image_classification_train.py +++ b/python/paddle/v2/fluid/tests/book/test_image_classification_train.py @@ -1,9 +1,9 @@ from __future__ import print_function -import numpy as np +import sys + import paddle.v2 as paddle import paddle.v2.fluid as fluid -import sys def resnet_cifar10(input, depth=32): diff --git a/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py b/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py index d2693b602e..c3591a613a 100644 --- a/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py +++ b/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py @@ -150,7 +150,7 @@ def main(): crf_decode = fluid.layers.crf_decoding( input=feature_out, param_attr=fluid.ParamAttr(name='crfw')) - precision, recall, f1_score = fluid.layers.chunk_eval( + chunk_evaluator = fluid.evaluator.ChunkEvaluator( input=crf_decode, label=target, chunk_scheme="IOB", @@ -176,20 +176,21 @@ def main(): batch_id = 0 for pass_id in xrange(PASS_NUM): + chunk_evaluator.reset(exe) for data in train_data(): - outs = exe.run(fluid.default_main_program(), - feed=feeder.feed(data), - fetch_list=[avg_cost, precision, recall, f1_score]) - avg_cost_val = np.array(outs[0]) - precision_val = np.array(outs[1]) - recall_val = np.array(outs[2]) - f1_score_val = np.array(outs[3]) + cost, precision, recall, f1_score = exe.run( + fluid.default_main_program(), + feed=feeder.feed(data), + fetch_list=[avg_cost] + chunk_evaluator.metrics) + pass_precision, pass_recall, pass_f1_score = chunk_evaluator.eval( + exe) if batch_id % 10 == 0: - print("avg_cost=" + str(avg_cost_val)) - print("precision_val=" + str(precision_val)) - print("recall_val:" + str(recall_val)) - print("f1_score_val:" + str(f1_score_val)) + print("avg_cost:" + str(cost) + " precision:" + str( + precision) + " recall:" + str(recall) + " f1_score:" + str( + f1_score) + " pass_precision:" + str( + pass_precision) + " pass_recall:" + str(pass_recall) + + " pass_f1_score:" + str(pass_f1_score)) # exit early for CI exit(0) diff --git a/python/paddle/v2/fluid/tests/book/test_machine_translation.py b/python/paddle/v2/fluid/tests/book/test_machine_translation.py index 80ffc5a544..e79864b397 100644 --- 
a/python/paddle/v2/fluid/tests/book/test_machine_translation.py +++ b/python/paddle/v2/fluid/tests/book/test_machine_translation.py @@ -33,7 +33,7 @@ def encoder_decoder(): fc1 = fluid.layers.fc(input=src_embedding, size=hidden_dim * 4, act='tanh') lstm_hidden0, lstm_0 = layers.dynamic_lstm(input=fc1, size=hidden_dim * 4) - encoder_out = layers.sequence_pool(input=lstm_hidden0, pool_type="last") + encoder_out = layers.sequence_last_step(input=lstm_hidden0) # decoder trg_language_word = layers.data( diff --git a/python/paddle/v2/fluid/tests/book/test_recognize_digits_mlp.py b/python/paddle/v2/fluid/tests/book/test_recognize_digits_mlp.py index 4dc2c50e1c..fc073f6be8 100644 --- a/python/paddle/v2/fluid/tests/book/test_recognize_digits_mlp.py +++ b/python/paddle/v2/fluid/tests/book/test_recognize_digits_mlp.py @@ -11,7 +11,9 @@ regularizer = fluid.regularizer.L2Decay(0.0005 * BATCH_SIZE) hidden1 = fluid.layers.fc(input=image, size=128, act='relu', - param_attr=regularizer) + param_attr=fluid.ParamAttr( + regularizer=regularizer, + clip=fluid.clip.ClipByValue(10))) hidden2 = fluid.layers.fc(input=hidden1, size=64, act='relu', @@ -33,11 +35,10 @@ opts = optimizer.minimize(avg_cost) accuracy = fluid.evaluator.Accuracy(input=predict, label=label) inference_program = fluid.default_main_program().clone() -test_accuracy = fluid.evaluator.Accuracy( - input=predict, label=label, main_program=inference_program) -test_target = [avg_cost] + test_accuracy.metrics + test_accuracy.states -inference_program = fluid.io.get_inference_program( - test_target, main_program=inference_program) +with fluid.program_guard(inference_program): + test_accuracy = fluid.evaluator.Accuracy(input=predict, label=label) + test_target = [avg_cost] + test_accuracy.metrics + test_accuracy.states + inference_program = fluid.io.get_inference_program(test_target) train_reader = paddle.batch( paddle.reader.shuffle( diff --git a/python/paddle/v2/fluid/tests/book/test_recommender_system.py b/python/paddle/v2/fluid/tests/book/test_recommender_system.py index db91ca4f9c..e3cc2a8937 100644 --- a/python/paddle/v2/fluid/tests/book/test_recommender_system.py +++ b/python/paddle/v2/fluid/tests/book/test_recommender_system.py @@ -125,10 +125,11 @@ def model(): # need cos sim inference = layers.cos_sim(X=usr_combined_features, Y=mov_combined_features) + scale_infer = layers.scale(x=inference, scale=5.0) label = layers.data(name='score', shape=[1], dtype='float32') - square_cost = layers.square_error_cost(input=inference, label=label) + square_cost = layers.square_error_cost(input=scale_infer, label=label) avg_cost = layers.mean(x=square_cost) @@ -141,7 +142,7 @@ def main(): opts = sgd_optimizer.minimize(cost) if USE_GPU: - place = core.GPUPlace(0) + place = core.CUDAPlace(0) else: place = core.CPUPlace() diff --git a/python/paddle/v2/fluid/tests/book/test_understand_sentiment_lstm.py b/python/paddle/v2/fluid/tests/book/test_understand_sentiment_lstm.py index 80f8599679..633de66bea 100644 --- a/python/paddle/v2/fluid/tests/book/test_understand_sentiment_lstm.py +++ b/python/paddle/v2/fluid/tests/book/test_understand_sentiment_lstm.py @@ -1,6 +1,39 @@ import numpy as np import paddle.v2 as paddle import paddle.v2.fluid as fluid +from paddle.v2.fluid.layer_helper import LayerHelper + + +def lstm(x, c_pre_init, hidden_dim, forget_bias=None): + """ + This function helps create an operator for the LSTM (Long Short Term + Memory) cell that can be used inside an RNN. 
+ """ + helper = LayerHelper('lstm_unit', **locals()) + rnn = fluid.layers.StaticRNN() + with rnn.step(): + c_pre = rnn.memory(init=c_pre_init) + x_t = rnn.step_input(x) + + before_fc = fluid.layers.concat(input=[x_t, c_pre], axis=1) + after_fc = fluid.layers.fc(input=before_fc, size=hidden_dim * 4) + + dtype = x.dtype + c = helper.create_tmp_variable(dtype) + h = helper.create_tmp_variable(dtype) + + helper.append_op( + type='lstm_unit', + inputs={"X": after_fc, + "C_prev": c_pre}, + outputs={"C": c, + "H": h}, + attrs={"forget_bias": forget_bias}) + + rnn.update_memory(c_pre, c) + rnn.output(h) + + return rnn() def lstm_net(dict_dim, class_dim=2, emb_dim=32, seq_len=80, batch_size=50): @@ -23,8 +56,7 @@ def lstm_net(dict_dim, class_dim=2, emb_dim=32, seq_len=80, batch_size=50): c_pre_init = fluid.layers.fill_constant( dtype=emb.dtype, shape=[batch_size, emb_dim], value=0.0) c_pre_init.stop_gradient = False - layer_1_out = fluid.layers.lstm( - emb, c_pre_init=c_pre_init, hidden_dim=emb_dim) + layer_1_out = lstm(emb, c_pre_init=c_pre_init, hidden_dim=emb_dim) layer_1_out = fluid.layers.transpose(x=layer_1_out, axis=[1, 0, 2]) prediction = fluid.layers.fc(input=layer_1_out, diff --git a/python/paddle/v2/fluid/tests/book_distribute/notest_recognize_digits_conv_dist.py b/python/paddle/v2/fluid/tests/book_distribute/notest_recognize_digits_conv_dist.py new file mode 100644 index 0000000000..20b4a8b34c --- /dev/null +++ b/python/paddle/v2/fluid/tests/book_distribute/notest_recognize_digits_conv_dist.py @@ -0,0 +1,80 @@ +from __future__ import print_function +import numpy as np +import paddle.v2 as paddle +import paddle.v2.fluid as fluid +import os + +images = fluid.layers.data(name='pixel', shape=[1, 28, 28], dtype='float32') +label = fluid.layers.data(name='label', shape=[1], dtype='int64') +conv_pool_1 = fluid.nets.simple_img_conv_pool( + input=images, + filter_size=5, + num_filters=20, + pool_size=2, + pool_stride=2, + act="relu") +conv_pool_2 = fluid.nets.simple_img_conv_pool( + input=conv_pool_1, + filter_size=5, + num_filters=50, + pool_size=2, + pool_stride=2, + act="relu") + +predict = fluid.layers.fc(input=conv_pool_2, size=10, act="softmax") +cost = fluid.layers.cross_entropy(input=predict, label=label) +avg_cost = fluid.layers.mean(x=cost) +optimizer = fluid.optimizer.Adam(learning_rate=0.01) +optimize_ops, params_grads = optimizer.minimize(avg_cost) + +accuracy = fluid.evaluator.Accuracy(input=predict, label=label) + +BATCH_SIZE = 50 +PASS_NUM = 3 +train_reader = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.mnist.train(), buf_size=500), + batch_size=BATCH_SIZE) + +place = fluid.CPUPlace() +exe = fluid.Executor(place) + +t = fluid.DistributeTranspiler() +# all parameter server endpoints list for spliting parameters +pserver_endpoints = os.getenv("PSERVERS") +# server endpoint for current node +current_endpoint = os.getenv("SERVER_ENDPOINT") +# run as trainer or parameter server +training_role = os.getenv("TRAINING_ROLE", + "TRAINER") # get the training role: trainer/pserver +t.transpile(optimize_ops, params_grads, pservers=pserver_endpoints, trainers=2) + +if training_role == "PSERVER": + if not current_endpoint: + print("need env SERVER_ENDPOINT") + exit(1) + pserver_prog = t.get_pserver_program(current_endpoint, optimize_ops) + exe.run(fluid.default_startup_program()) + exe.run(pserver_prog) +elif training_role == "TRAINER": + trainer_prog = t.get_trainer_program() + feeder = fluid.DataFeeder(feed_list=[images, label], place=place) + 
diff --git a/python/paddle/v2/fluid/tests/op_test.py b/python/paddle/v2/fluid/tests/op_test.py
index e83c4a0622..8dbfbd547a 100644
--- a/python/paddle/v2/fluid/tests/op_test.py
+++ b/python/paddle/v2/fluid/tests/op_test.py
@@ -90,12 +90,10 @@ def get_numeric_gradient(scope,
     def product(dim):
         return reduce(lambda a, b: a * b, dim, 1)
 
-    ctx = core.DeviceContext.create(core.CPUPlace())
-
     def get_output():
         sum = []
         for output_name in output_names:
-            op.run(scope, ctx)
+            op.run(scope, core.CPUPlace())
             sum.append(
                 np.array(scope.find_var(output_name).get_tensor()).mean())
         return np.array(sum).mean()
@@ -318,7 +316,7 @@ class OpTest(unittest.TestCase):
     def check_output(self, atol=1e-5):
         places = [core.CPUPlace()]
         if core.is_compile_gpu() and core.op_support_gpu(self.op_type):
-            places.append(core.GPUPlace(0))
+            places.append(core.CUDAPlace(0))
         for place in places:
             self.check_output_with_place(place, atol)
@@ -381,7 +379,7 @@ class OpTest(unittest.TestCase):
                                 "Gradient Check On %s" % str(cpu_place))
 
         if core.is_compile_gpu() and self.op.support_gpu():
-            gpu_place = core.GPUPlace(0)
+            gpu_place = core.CUDAPlace(0)
             gpu_analytic_grads = self._get_gradient(inputs_to_check, gpu_place,
                                                     output_names, no_grad_set)
diff --git a/python/paddle/v2/fluid/tests/test_activation_op.py b/python/paddle/v2/fluid/tests/test_activation_op.py
index b052374dc7..03eb7deb9a 100644
--- a/python/paddle/v2/fluid/tests/test_activation_op.py
+++ b/python/paddle/v2/fluid/tests/test_activation_op.py
@@ -10,13 +10,13 @@ class TestExp(OpTest):
         self.inputs = {
             'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")
         }
-        self.outputs = {'Y': np.exp(self.inputs['X'])}
+        self.outputs = {'Out': np.exp(self.inputs['X'])}
 
     def test_check_output(self):
         self.check_output()
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.007)
+        self.check_grad(['X'], 'Out', max_relative_error=0.007)
 
 
 class TestSigmoid(OpTest):
@@ -25,13 +25,13 @@
         self.inputs = {
             'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")
         }
-        self.outputs = {'Y': 1 / (1 + np.exp(-self.inputs['X']))}
+        self.outputs = {'Out': 1 / (1 + np.exp(-self.inputs['X']))}
 
     def test_check_output(self):
         self.check_output()
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.008)
+        self.check_grad(['X'], 'Out', max_relative_error=0.008)
 
 
 class TestLogSigmoid(OpTest):
@@ -40,13 +40,13 @@
         self.inputs = {
             'X': np.random.uniform(-1, 1, [11, 17]).astype("float32")
         }
-        self.outputs = {'Y': np.log(1 / (1 + np.exp(-self.inputs['X'])))}
+        self.outputs = {'Out': np.log(1 / (1 + np.exp(-self.inputs['X'])))}
 
     def test_check_output(self):
         self.check_output()
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.008)
+        self.check_grad(['X'], 'Out', max_relative_error=0.008)
 
 
 class TestTanh(OpTest):
@@ -55,13 +55,13 @@
         self.inputs = {
             'X': np.random.uniform(0.1, 1, [11,
17]).astype("float32") } - self.outputs = {'Y': np.tanh(self.inputs['X'])} + self.outputs = {'Out': np.tanh(self.inputs['X'])} def test_check_output(self): self.check_output() def test_check_grad(self): - self.check_grad(['X'], 'Y', max_relative_error=0.007) + self.check_grad(['X'], 'Out', max_relative_error=0.007) class TestTanhShrink(OpTest): @@ -70,13 +70,13 @@ class TestTanhShrink(OpTest): self.inputs = { 'X': np.random.uniform(0.1, 1, [10, 17]).astype("float32") } - self.outputs = {'Y': self.inputs['X'] - np.tanh(self.inputs['X'])} + self.outputs = {'Out': self.inputs['X'] - np.tanh(self.inputs['X'])} def test_check_output(self): self.check_output() def test_check_grad(self): - self.check_grad(['X'], 'Y', max_relative_error=0.008) + self.check_grad(['X'], 'Out', max_relative_error=0.008) class TestHardShrink(OpTest): @@ -90,13 +90,13 @@ class TestHardShrink(OpTest): t = np.copy(x) t[(t >= -threshold) & (t <= threshold)] = 0 - self.outputs = {'Y': t} + self.outputs = {'Out': t} def test_check_output(self): self.check_output() def test_check_grad(self): - self.check_grad(['X'], 'Y', max_relative_error=0.005) + self.check_grad(['X'], 'Out', max_relative_error=0.005) class TestSoftShrink(OpTest): @@ -110,13 +110,13 @@ class TestSoftShrink(OpTest): y = np.copy(self.inputs['X']) y = (y < -lambda_val) * (y + lambda_val) + (y > lambda_val) * ( y - lambda_val) - self.outputs = {'Y': y} + self.outputs = {'Out': y} def test_check_output(self): self.check_output() def test_check_grad(self): - self.check_grad(['X'], 'Y', max_relative_error=0.007) + self.check_grad(['X'], 'Out', max_relative_error=0.007) class TestSqrt(OpTest): @@ -125,13 +125,13 @@ class TestSqrt(OpTest): self.inputs = { 'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32") } - self.outputs = {'Y': np.sqrt(self.inputs['X'])} + self.outputs = {'Out': np.sqrt(self.inputs['X'])} def test_check_output(self): self.check_output() def test_check_grad(self): - self.check_grad(['X'], 'Y', max_relative_error=0.007) + self.check_grad(['X'], 'Out', max_relative_error=0.007) class TestAbs(OpTest): @@ -144,13 +144,13 @@ class TestAbs(OpTest): # we should avoid this x[np.abs(x) < 0.005] = 0.02 self.inputs = {'X': x} - self.outputs = {'Y': np.abs(self.inputs['X'])} + self.outputs = {'Out': np.abs(self.inputs['X'])} def test_check_output(self): self.check_output() def test_check_grad(self): - self.check_grad(['X'], 'Y', max_relative_error=0.007) + self.check_grad(['X'], 'Out', max_relative_error=0.007) class TestCeil(OpTest): @@ -158,13 +158,13 @@ class TestCeil(OpTest): self.op_type = "ceil" x = np.random.uniform(-1, 1, [4, 4]).astype("float32") self.inputs = {'X': x} - self.outputs = {'Y': np.ceil(self.inputs['X'])} + self.outputs = {'Out': np.ceil(self.inputs['X'])} def test_check_output(self): self.check_output() def test_check_grad(self): - self.check_grad(['X'], 'Y', max_relative_error=0.007) + self.check_grad(['X'], 'Out', max_relative_error=0.007) class TestFloor(OpTest): @@ -173,13 +173,13 @@ class TestFloor(OpTest): x = np.random.uniform(-1, 1, [4, 4]).astype("float32") self.inputs = {'X': x} # numpy floor need +1 - self.outputs = {'Y': np.floor(self.inputs['X']) + 1.0} + self.outputs = {'Out': np.floor(self.inputs['X']) + 1.0} def test_check_output(self): self.check_output() def test_check_grad(self): - self.check_grad(['X'], 'Y', max_relative_error=0.007) + self.check_grad(['X'], 'Out', max_relative_error=0.007) class TestRound(OpTest): @@ -187,13 +187,13 @@ class TestRound(OpTest): self.op_type = "round" x = np.random.uniform(-1, 
1, [4, 4]).astype("float32") self.inputs = {'X': x} - self.outputs = {'Y': np.round(self.inputs['X'])} + self.outputs = {'Out': np.round(self.inputs['X'])} def test_check_output(self): self.check_output() def test_check_grad(self): - self.check_grad(['X'], 'Y', max_relative_error=0.007) + self.check_grad(['X'], 'Out', max_relative_error=0.007) class TestRelu(OpTest): @@ -203,13 +203,13 @@ class TestRelu(OpTest): # The same reason with TestAbs x[np.abs(x) < 0.005] = 0.02 self.inputs = {'X': x} - self.outputs = {'Y': np.maximum(self.inputs['X'], 0)} + self.outputs = {'Out': np.maximum(self.inputs['X'], 0)} def test_check_output(self): self.check_output() def test_check_grad(self): - self.check_grad(['X'], 'Y', max_relative_error=0.007) + self.check_grad(['X'], 'Out', max_relative_error=0.007) class TestBRelu(OpTest): @@ -227,13 +227,13 @@ class TestBRelu(OpTest): t = np.copy(x) t[t < t_min] = t_min t[t > t_max] = t_max - self.outputs = {'Y': t} + self.outputs = {'Out': t} def test_check_output(self): self.check_output() def test_check_grad(self): - self.check_grad(['X'], 'Y', max_relative_error=0.02) + self.check_grad(['X'], 'Out', max_relative_error=0.02) class TestRelu6(OpTest): @@ -248,14 +248,14 @@ class TestRelu6(OpTest): self.inputs = {'X': x} self.attrs = {'threshold': threshold} self.outputs = { - 'Y': np.minimum(np.maximum(self.inputs['X'], 0), threshold) + 'Out': np.minimum(np.maximum(self.inputs['X'], 0), threshold) } def test_check_output(self): self.check_output() def test_check_grad(self): - self.check_grad(['X'], 'Y', max_relative_error=0.02) + self.check_grad(['X'], 'Out', max_relative_error=0.02) class TestSoftRelu(OpTest): @@ -271,13 +271,13 @@ class TestSoftRelu(OpTest): t = np.copy(x) t[t < -threshold] = -threshold t[t > threshold] = threshold - self.outputs = {'Y': np.log((np.exp(t) + 1))} + self.outputs = {'Out': np.log((np.exp(t) + 1))} def test_check_output(self): self.check_output() def test_check_grad(self): - self.check_grad(['X'], 'Y', max_relative_error=0.02) + self.check_grad(['X'], 'Out', max_relative_error=0.02) class TestELU(OpTest): @@ -290,27 +290,27 @@ class TestELU(OpTest): self.inputs = {'X': x} self.attrs = {'alpha': alpha} self.outputs = { - 'Y': np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x) - 1)) + 'Out': np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x) - 1)) } def test_check_output(self): self.check_output() def test_check_grad(self): - self.check_grad(['X'], 'Y', max_relative_error=0.02) + self.check_grad(['X'], 'Out', max_relative_error=0.02) class TestReciprocal(OpTest): def setUp(self): self.op_type = "reciprocal" self.inputs = {'X': np.random.uniform(1, 2, [11, 17]).astype("float32")} - self.outputs = {'Y': np.reciprocal(self.inputs['X'])} + self.outputs = {'Out': np.reciprocal(self.inputs['X'])} def test_check_output(self): self.check_output() def test_check_grad(self): - self.check_grad(['X'], 'Y', max_relative_error=0.01) + self.check_grad(['X'], 'Out', max_relative_error=0.01) class TestLog(OpTest): @@ -319,13 +319,13 @@ class TestLog(OpTest): self.inputs = { 'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32") } - self.outputs = {'Y': np.log(self.inputs['X'])} + self.outputs = {'Out': np.log(self.inputs['X'])} def test_check_output(self): self.check_output() def test_check_grad(self): - self.check_grad(['X'], 'Y', max_relative_error=0.007) + self.check_grad(['X'], 'Out', max_relative_error=0.007) class TestSquare(OpTest): @@ -334,13 +334,13 @@ class TestSquare(OpTest): self.inputs = { 'X': np.random.uniform(0.1, 1, [11, 
17]).astype("float32") } - self.outputs = {'Y': np.square(self.inputs['X'])} + self.outputs = {'Out': np.square(self.inputs['X'])} def test_check_output(self): self.check_output() def test_check_grad(self): - self.check_grad(['X'], 'Y', max_relative_error=0.007) + self.check_grad(['X'], 'Out', max_relative_error=0.007) class TestPow(OpTest): @@ -348,13 +348,13 @@ class TestPow(OpTest): self.op_type = "pow" self.inputs = {'X': np.random.uniform(1, 2, [11, 17]).astype("float32")} self.attrs = {'factor': 3.0} - self.outputs = {'Y': np.power(self.inputs['X'], 3)} + self.outputs = {'Out': np.power(self.inputs['X'], 3)} def test_check_output(self): self.check_output() def test_check_grad(self): - self.check_grad(['X'], 'Y', max_relative_error=0.02) + self.check_grad(['X'], 'Out', max_relative_error=0.02) class TestSTanh(OpTest): @@ -366,13 +366,13 @@ class TestSTanh(OpTest): scale_a = 2.0 / 3.0 scale_b = 1.7159 self.attrs = {'scale_a': scale_a, 'scale_b': scale_b} - self.outputs = {'Y': scale_b * np.tanh(self.inputs['X'] * scale_a)} + self.outputs = {'Out': scale_b * np.tanh(self.inputs['X'] * scale_a)} def test_check_output(self): self.check_output() def test_check_grad(self): - self.check_grad(['X'], 'Y', max_relative_error=0.007) + self.check_grad(['X'], 'Out', max_relative_error=0.007) class TestSoftplus(OpTest): @@ -381,13 +381,13 @@ class TestSoftplus(OpTest): self.inputs = { 'X': np.random.uniform(-1, 1, [11, 17]).astype("float64") } - self.outputs = {'Y': np.log(1 + np.exp(self.inputs['X']))} + self.outputs = {'Out': np.log(1 + np.exp(self.inputs['X']))} def test_check_output(self): self.check_output() def test_check_grad(self): - self.check_grad(['X'], 'Y', max_relative_error=0.007) + self.check_grad(['X'], 'Out', max_relative_error=0.007) class TestSoftsign(OpTest): @@ -397,14 +397,14 @@ class TestSoftsign(OpTest): 'X': np.random.uniform(-1, 1, [11, 17]).astype("float32") } self.outputs = { - 'Y': np.divide(self.inputs['X'], 1 + np.abs(self.inputs['X'])) + 'Out': np.divide(self.inputs['X'], 1 + np.abs(self.inputs['X'])) } def test_check_output(self): self.check_output() def test_check_grad(self): - self.check_grad(['X'], 'Y', max_relative_error=0.007) + self.check_grad(['X'], 'Out', max_relative_error=0.007) class TestThresholdedRelu(OpTest): @@ -419,13 +419,13 @@ class TestThresholdedRelu(OpTest): self.inputs = {'X': X} self.attrs = {'threshold': threshold} - self.outputs = {'Y': (X > threshold) * X} + self.outputs = {'Out': (X > threshold) * X} def test_check_output(self): self.check_output() def test_check_grad(self): - self.check_grad(['X'], 'Y', max_relative_error=self.relative_error) + self.check_grad(['X'], 'Out', max_relative_error=self.relative_error) class TestHardSigmoid(OpTest): @@ -447,13 +447,13 @@ class TestHardSigmoid(OpTest): upper_threshold - 0.2 temp = X * slope + offset - self.outputs = {'Y': np.maximum(0.0, np.minimum(1.0, temp))} + self.outputs = {'Out': np.maximum(0.0, np.minimum(1.0, temp))} def test_check_output(self): self.check_output() def test_check_grad(self): - self.check_grad(['X'], 'Y', max_relative_error=0.002) + self.check_grad(['X'], 'Out', max_relative_error=0.002) class TestSwish(OpTest): @@ -462,13 +462,13 @@ class TestSwish(OpTest): X = np.random.uniform(0.1, 1, [11, 17]).astype("float32") self.inputs = {'X': X} self.attrs = {'beta': 2.3} - self.outputs = {'Y': X * expit(self.attrs['beta'] * X)} + self.outputs = {'Out': X * expit(self.attrs['beta'] * X)} def test_check_output(self): self.check_output() def test_check_grad(self): - 
self.check_grad(['X'], 'Y', max_relative_error=0.008) + self.check_grad(['X'], 'Out', max_relative_error=0.008) if __name__ == "__main__": diff --git a/python/paddle/v2/fluid/tests/test_adagrad_op.py b/python/paddle/v2/fluid/tests/test_adagrad_op.py index 903e84c328..7b2d02fbf4 100644 --- a/python/paddle/v2/fluid/tests/test_adagrad_op.py +++ b/python/paddle/v2/fluid/tests/test_adagrad_op.py @@ -113,8 +113,7 @@ class TestSparseAdagradOp(unittest.TestCase): LearningRate='LearningRate', epsilon=2.0) - ctx = core.DeviceContext.create(place) - adagrad_op.run(scope, ctx) + adagrad_op.run(scope, place) # get and compare moment result moment_result_array = np.array(moment) @@ -168,7 +167,7 @@ class TestSparseAdagradOp(unittest.TestCase): def test_sparse_adagrad(self): places = [core.CPUPlace()] if core.is_compile_gpu(): - places.append(core.GPUPlace(0)) + places.append(core.CUDAPlace(0)) for place in places: self.check_with_place(place) diff --git a/python/paddle/v2/fluid/tests/test_batch_norm_op.py b/python/paddle/v2/fluid/tests/test_batch_norm_op.py index e766a68c0e..abbd48d2b8 100644 --- a/python/paddle/v2/fluid/tests/test_batch_norm_op.py +++ b/python/paddle/v2/fluid/tests/test_batch_norm_op.py @@ -3,10 +3,7 @@ import numpy as np from op_test import OpTest import paddle.v2.fluid.core as core from paddle.v2.fluid.op import Operator - - -def grad_var_name(var_name): - return var_name + "@GRAD" +from paddle.v2.fluid.framework import grad_var_name def get_backward_op(scope, op, no_grad_set): @@ -211,7 +208,7 @@ class TestBatchNormOp(OpTest): print 'python: NHWC, NCHW, backward checking passed' def test_forward_backward(self): - def test_with_place(place, tensor_format, shape): + def test_with_place(place, data_layout, shape): # attr epsilon = 0.00001 momentum = 0.9 @@ -295,12 +292,11 @@ class TestBatchNormOp(OpTest): SavedVariance="saved_variance", # attrs is_test=False, - tensor_format=tensor_format, + data_layout=data_layout, momentum=momentum, epsilon=epsilon) - ctx = core.DeviceContext.create(place) - batch_norm_op.run(scope, ctx) + batch_norm_op.run(scope, place) # check forward result self.__assert_close(y_tensor, y_out, "y_out") @@ -308,13 +304,13 @@ class TestBatchNormOp(OpTest): self.__assert_close(saved_variance_tensor, saved_variance, "saved_variance") self.__assert_close(mean_out_tensor, mean_out, "mean_out") - if isinstance(place, core.GPUPlace): + if isinstance(place, core.CUDAPlace): atol = 5e-2 else: atol = 1e-4 self.__assert_close(variance_out_tensor, variance_out, "variance_out", atol) - print "op test forward passed: ", str(place), tensor_format + print "op test forward passed: ", str(place), data_layout # run backward batch_norm_op_grad = get_backward_op(scope, batch_norm_op, set()) @@ -323,7 +319,7 @@ class TestBatchNormOp(OpTest): ["y_out", "mean", "variance", "saved_mean", "saved_variance"], place, feed_dict={"y_out": y_grad}) - batch_norm_op_grad.run(scope, ctx) + batch_norm_op_grad.run(scope, place) x_grad_tensor = create_or_get_tensor(scope, grad_var_name("x_val"), None, @@ -339,11 +335,15 @@ class TestBatchNormOp(OpTest): self.__assert_close(x_grad_tensor, x_grad_ref, "x_grad") self.__assert_close(scale_grad_tensor, scale_grad_ref, "scale_grad") self.__assert_close(bias_grad_tensor, bias_grad_ref, "bias_grad") - print "op test backward passed: ", str(place), tensor_format + print "op test backward passed: ", str(place), data_layout places = [core.CPUPlace()] if core.is_compile_gpu() and core.op_support_gpu("batch_norm"): - places.append(core.GPUPlace(0)) + 
places.append(core.CUDAPlace(0)) + + core.init_devices(["CPU", "GPU:0"]) + else: + core.init_devices(["CPU"]) for place in places: for data_format in ["NCHW", "NHWC"]: test_with_place(place, data_format, [2, 3, 4, 5]) diff --git a/python/paddle/v2/fluid/tests/test_beam_search_decode_op.py b/python/paddle/v2/fluid/tests/test_beam_search_decode_op.py index 5fad7d8cce..f329214dce 100644 --- a/python/paddle/v2/fluid/tests/test_beam_search_decode_op.py +++ b/python/paddle/v2/fluid/tests/test_beam_search_decode_op.py @@ -57,8 +57,7 @@ class TestBeamSearchDecodeOp(unittest.TestCase): SentenceIds="sentence_ids", SentenceScores="sentence_scores") - ctx = core.DeviceContext.create(self.cpu_place) - beam_search_decode_op.run(self.scope, ctx) + beam_search_decode_op.run(self.scope, self.cpu_place) expected_lod = [[0, 4, 8], [0, 1, 3, 6, 9, 10, 13, 16, 19]] self.assertEqual(sentence_ids.lod(), expected_lod) diff --git a/python/paddle/v2/fluid/tests/test_beam_search_op.py b/python/paddle/v2/fluid/tests/test_beam_search_op.py index cc7c09bb59..595f132fa8 100644 --- a/python/paddle/v2/fluid/tests/test_beam_search_op.py +++ b/python/paddle/v2/fluid/tests/test_beam_search_op.py @@ -14,7 +14,6 @@ def create_tensor(scope, name, np_data): class BeamSearchOpTester(unittest.TestCase): def setUp(self): self.scope = core.Scope() - self.ctx = core.DeviceContext.create(core.CPUPlace()) self._create_ids() self._create_scores() self._create_pre_ids() @@ -32,7 +31,7 @@ class BeamSearchOpTester(unittest.TestCase): level=0, beam_size=2, end_id=0, ) - op.run(self.scope, self.ctx) + op.run(self.scope, core.CPUPlace()) selected_ids = self.scope.find_var("selected_ids").get_tensor() print 'selected_ids', np.array(selected_ids) print 'lod', selected_ids.lod() diff --git a/python/paddle/v2/fluid/tests/test_chunk_eval_op.py b/python/paddle/v2/fluid/tests/test_chunk_eval_op.py index 819e65a653..53bf6f815b 100644 --- a/python/paddle/v2/fluid/tests/test_chunk_eval_op.py +++ b/python/paddle/v2/fluid/tests/test_chunk_eval_op.py @@ -147,7 +147,13 @@ class TestChunkEvalOp(OpTest): 'Recall': np.asarray( [recall], dtype='float32'), 'F1-Score': np.asarray( - [f1], dtype='float32') + [f1], dtype='float32'), + 'NumInferChunks': np.asarray( + [self.num_infer_chunks], dtype='int64'), + 'NumLabelChunks': np.asarray( + [self.num_label_chunks], dtype='int64'), + 'NumCorrectChunks': np.asarray( + [self.num_correct_chunks], dtype='int64') } def setUp(self): diff --git a/python/paddle/v2/fluid/tests/test_cond_op.py b/python/paddle/v2/fluid/tests/test_cond_op.py index 9d1df44b90..32e54084e4 100644 --- a/python/paddle/v2/fluid/tests/test_cond_op.py +++ b/python/paddle/v2/fluid/tests/test_cond_op.py @@ -65,8 +65,7 @@ class TestCondOp(unittest.TestCase): self.create_global_variables() self.create_cond_op() self.create_sub_net() - ctx = core.DeviceContext.create(core.CPUPlace()) - self.condop.run(self.scope, ctx) + self.condop.run(self.scope, core.CPUPlace()) return np.array(self.scope.find_var("Out").get_tensor()) def create_global_variables(self): diff --git a/python/paddle/v2/fluid/tests/test_const_value.py b/python/paddle/v2/fluid/tests/test_const_value.py new file mode 100644 index 0000000000..f8c17c2c98 --- /dev/null +++ b/python/paddle/v2/fluid/tests/test_const_value.py @@ -0,0 +1,14 @@ +import unittest +import paddle.v2.fluid.framework as framework + + +class ConditionalBlock(unittest.TestCase): + def test_const_value(self): + self.assertEqual(framework.GRAD_VAR_SUFFIX, "@GRAD") + self.assertEqual(framework.TEMP_VAR_NAME, "@TEMP@") + 
self.assertEqual(framework.GRAD_VAR_SUFFIX, "@GRAD")
+        self.assertEqual(framework.ZERO_VAR_SUFFIX, "@ZERO")
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/python/paddle/v2/fluid/tests/test_conv2d_transpose_op.py b/python/paddle/v2/fluid/tests/test_conv2d_transpose_op.py
index d7b1f2f2a3..d59537b924 100644
--- a/python/paddle/v2/fluid/tests/test_conv2d_transpose_op.py
+++ b/python/paddle/v2/fluid/tests/test_conv2d_transpose_op.py
@@ -3,14 +3,17 @@ import numpy as np
 from op_test import OpTest
 
 
-def conv2dtranspose_forward_naive(input_, filter_, conv2dtranspose_param):
+def conv2dtranspose_forward_naive(input_, filter_, attrs):
     in_n, in_c, in_h, in_w = input_.shape
     f_c, out_c, f_h, f_w = filter_.shape
     assert in_c == f_c
 
-    stride, pad = conv2dtranspose_param['stride'], conv2dtranspose_param['pad']
-    out_h = (in_h - 1) * stride[0] + f_h
-    out_w = (in_w - 1) * stride[1] + f_w
+    stride, pad, dilations = attrs['strides'], attrs['paddings'], attrs[
+        'dilations']
+    d_block_h = dilations[0] * (f_h - 1) + 1
+    d_block_w = dilations[1] * (f_w - 1) + 1
+    out_h = (in_h - 1) * stride[0] + d_block_h
+    out_w = (in_w - 1) * stride[1] + d_block_w
 
     out = np.zeros((in_n, out_c, out_h, out_w))
 
@@ -23,9 +26,9 @@ def conv2dtranspose_forward_naive(input_, filter_, conv2dtranspose_param):
             for k in range(out_c):
                 tmp_out = np.sum(input_masked * filter_[:, k, :, :], axis=0)
 
-                i1, i2 = i * stride[0], i * stride[0] + f_h
-                j1, j2 = j * stride[0], j * stride[0] + f_w
-                out[n, k, i1:i2, j1:j2] += tmp_out
+                i1, i2 = i * stride[0], i * stride[0] + d_block_h
+                j1, j2 = j * stride[1], j * stride[1] + d_block_w
+                out[n, k, i1:i2:dilations[0], j1:j2:dilations[1]] += tmp_out
 
     out = out[:, :, pad[0]:out_h - pad[0], pad[1]:out_w - pad[1]]
     return out
@@ -37,11 +40,8 @@ class TestConv2dTransposeOp(OpTest):
         self.init_op_type()
         self.init_test_case()
 
-        conv2dtranspose_param = {'stride': self.stride, 'pad': self.pad}
         input_ = np.random.random(self.input_size).astype("float32")
         filter_ = np.random.random(self.filter_size).astype("float32")
-        output = conv2dtranspose_forward_naive(
-            input_, filter_, conv2dtranspose_param).astype('float32')
 
         self.inputs = {'Input': input_, 'Filter': filter_}
         self.attrs = {
@@ -49,6 +49,10 @@
             'paddings': self.pad,
             'dilations': self.dilations
         }
+
+        output = conv2dtranspose_forward_naive(input_, filter_,
+                                               self.attrs).astype('float32')
+
         self.outputs = {'Output': output}
 
     def test_check_output(self):
@@ -104,11 +108,60 @@ class TestWithStride(TestConv2dTransposeOp):
         self.filter_size = [f_c, 6, 3, 3]
 
 
+class TestWithDilation(TestConv2dTransposeOp):
+    def init_test_case(self):
+        self.pad = [1, 1]
+        self.stride = [1, 1]
+        self.dilations = [2, 2]
+        self.input_size = [2, 3, 5, 5]  # NCHW
+        f_c = self.input_size[1]
+        self.filter_size = [f_c, 6, 3, 3]
+
+
 # ------------ test_cudnn ------------
 class TestCudnn(TestConv2dTransposeOp):
     def init_op_type(self):
         self.op_type = "conv2d_transpose_cudnn"
 
 
+class TestCudnnWithPad(TestWithPad):
+    def init_test_case(self):
+        self.pad = [1, 1]
+        self.stride = [1, 1]
+        self.dilations = [1, 1]
+        self.input_size = [2, 3, 5, 5]  # NCHW
+        f_c = self.input_size[1]
+        self.filter_size = [f_c, 6, 3, 3]
+
+    def init_op_type(self):
+        self.op_type = "conv2d_transpose_cudnn"
+
+
+class TestCudnnWithStride(TestWithStride):
+    def init_test_case(self):
+        self.pad = [1, 1]
+        self.stride = [2, 2]
+        self.dilations = [1, 1]
+        self.input_size = [2, 3, 5, 5]  # NCHW
+        f_c = self.input_size[1]
+        self.filter_size = [f_c, 6, 3, 3]
+
+    def init_op_type(self):
+        self.op_type = "conv2d_transpose_cudnn"
+
+
+# #cudnn v5 does not support dilation conv.
+# class TestCudnnWithDilation(TestWithDilation):
+#     def init_test_case(self):
+#         self.pad = [1, 1]
+#         self.stride = [2, 2]
+#         self.dilations = [2, 2]
+#         self.input_size = [2, 3, 5, 5]  # NCHW
+#         f_c = self.input_size[1]
+#         self.filter_size = [f_c, 6, 3, 3]
+#
+#     def init_op_type(self):
+#         self.op_type = "conv2d_transpose_cudnn"
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/python/paddle/v2/fluid/tests/test_conv3d_transpose_op.py b/python/paddle/v2/fluid/tests/test_conv3d_transpose_op.py
index 8fd34b87bf..a353f9b4d4 100644
--- a/python/paddle/v2/fluid/tests/test_conv3d_transpose_op.py
+++ b/python/paddle/v2/fluid/tests/test_conv3d_transpose_op.py
@@ -3,15 +3,20 @@ import numpy as np
 from op_test import OpTest
 
 
-def conv3dtranspose_forward_naive(input_, filter_, conv3dtranspose_param):
+def conv3dtranspose_forward_naive(input_, filter_, attrs):
     in_n, in_c, in_d, in_h, in_w = input_.shape
     f_c, out_c, f_d, f_h, f_w = filter_.shape
     assert in_c == f_c
 
-    stride, pad = conv3dtranspose_param['stride'], conv3dtranspose_param['pad']
-    out_d = (in_d - 1) * stride[0] + f_d
-    out_h = (in_h - 1) * stride[1] + f_h
-    out_w = (in_w - 1) * stride[2] + f_w
+    stride, pad, dilations = attrs['strides'], attrs['paddings'], attrs[
+        'dilations']
+
+    d_block_d = dilations[0] * (f_d - 1) + 1
+    d_block_h = dilations[1] * (f_h - 1) + 1
+    d_block_w = dilations[2] * (f_w - 1) + 1
+    out_d = (in_d - 1) * stride[0] + d_block_d
+    out_h = (in_h - 1) * stride[1] + d_block_h
+    out_w = (in_w - 1) * stride[2] + d_block_w
 
     out = np.zeros((in_n, out_c, out_d, out_h, out_w))
 
     for n in range(in_n):
@@ -25,10 +30,11 @@ def conv3dtranspose_forward_naive(input_, filter_, conv3dtranspose_param):
                 for k in range(out_c):
                     tmp_out = np.sum(input_masked * filter_[:, k, :, :, :],
                                      axis=0)
-                    d1, d2 = d * stride[0], d * stride[0] + f_d
-                    i1, i2 = i * stride[1], i * stride[1] + f_h
-                    j1, j2 = j * stride[2], j * stride[2] + f_w
-                    out[n, k, d1:d2, i1:i2, j1:j2] += tmp_out
+                    d1, d2 = d * stride[0], d * stride[0] + d_block_d
+                    i1, i2 = i * stride[1], i * stride[1] + d_block_h
+                    j1, j2 = j * stride[2], j * stride[2] + d_block_w
+                    out[n, k, d1:d2:dilations[0], i1:i2:dilations[1], j1:j2:
+                        dilations[2]] += tmp_out
 
     out = out[:, :, pad[0]:out_d - pad[0], pad[1]:out_h - pad[1], pad[2]:out_w
               - pad[2]]
@@ -41,18 +47,19 @@ class TestConv3dTransposeOp(OpTest):
         self.init_op_type()
         self.init_test_case()
 
-        conv3dtranspose_param = {'stride': self.stride, 'pad': self.pad}
         input_ = np.random.random(self.input_size).astype("float32")
         filter_ = np.random.random(self.filter_size).astype("float32")
-        output = conv3dtranspose_forward_naive(
-            input_, filter_, conv3dtranspose_param).astype("float32")
 
         self.inputs = {'Input': input_, 'Filter': filter_}
         self.attrs = {
             'strides': self.stride,
             'paddings': self.pad,
-            # 'dilations': self.dilations
+            'dilations': self.dilations
         }
+
+        output = conv3dtranspose_forward_naive(input_, filter_,
+                                               self.attrs).astype("float32")
+
         self.outputs = {'Output': output}
 
     def test_check_output(self):
@@ -108,11 +115,60 @@ class TestWithStride(TestConv3dTransposeOp):
         self.filter_size = [f_c, 6, 3, 3, 3]
 
 
+class TestWithDilation(TestConv3dTransposeOp):
+    def init_test_case(self):
+        self.pad = [1, 1, 1]
+        self.stride = [1, 1, 1]
+        self.dilations = [2, 2, 2]
+        self.input_size = [2, 3, 5, 5, 5]  # NCDHW
+        f_c = self.input_size[1]
+        self.filter_size = [f_c, 6, 3, 3, 3]
+
+
 # ------------ test_cudnn ------------
 class TestCudnn(TestConv3dTransposeOp):
     def
init_op_type(self): self.op_type = "conv3d_transpose_cudnn" +class TestCudnnWithPad(TestWithPad): + def init_test_case(self): + self.pad = [1, 1, 1] + self.stride = [1, 1, 1] + self.dilations = [1, 1, 1] + self.input_size = [2, 3, 5, 5, 5] # NCDHW + f_c = self.input_size[1] + self.filter_size = [f_c, 6, 3, 3, 3] + + def init_op_type(self): + self.op_type = "conv3d_transpose_cudnn" + + +class TestCudnnWithStride(TestWithStride): + def init_test_case(self): + self.pad = [1, 1, 1] + self.stride = [2, 2, 2] + self.dilations = [1, 1, 1] + self.input_size = [2, 3, 5, 5, 5] # NCDHW + f_c = self.input_size[1] + self.filter_size = [f_c, 6, 3, 3, 3] + + def init_op_type(self): + self.op_type = "conv3d_transpose_cudnn" + + +# #cudnn v5 does not support dilation conv. +# class TestCudnnWithDilation(TestWithDilation): +# def init_test_case(self): +# self.pad = [1, 1, 1] +# self.stride = [2, 2, 2] +# self.dilations = [2, 2, 2] +# self.input_size = [2, 3, 5, 5, 5] # NCDHW +# f_c = self.input_size[1] +# self.filter_size = [f_c, 6, 3, 3, 3] +# +# def init_op_type(self): +# self.op_type = "conv3d_transpose_cudnn" + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/v2/fluid/tests/test_dropout_op.py b/python/paddle/v2/fluid/tests/test_dropout_op.py index 4f5ea836b4..2483200212 100644 --- a/python/paddle/v2/fluid/tests/test_dropout_op.py +++ b/python/paddle/v2/fluid/tests/test_dropout_op.py @@ -47,7 +47,9 @@ class TestDropoutOp4(OpTest): self.op_type = "dropout" self.inputs = {'X': np.random.random((32, 64)).astype("float32")} self.attrs = {'dropout_prob': 0.35, 'is_test': True} - self.outputs = {'Out': self.inputs['X'] * self.attrs['dropout_prob']} + self.outputs = { + 'Out': self.inputs['X'] * (1.0 - self.attrs['dropout_prob']) + } def test_check_output(self): self.check_output() @@ -58,7 +60,9 @@ class TestDropoutOp5(OpTest): self.op_type = "dropout" self.inputs = {'X': np.random.random((32, 64, 3)).astype("float32")} self.attrs = {'dropout_prob': 0.75, 'is_test': True} - self.outputs = {'Out': self.inputs['X'] * self.attrs['dropout_prob']} + self.outputs = { + 'Out': self.inputs['X'] * (1.0 - self.attrs['dropout_prob']) + } def test_check_output(self): self.check_output() diff --git a/python/paddle/v2/fluid/tests/test_dyn_rnn.py b/python/paddle/v2/fluid/tests/test_dyn_rnn.py index 034266c26f..8090c5f478 100644 --- a/python/paddle/v2/fluid/tests/test_dyn_rnn.py +++ b/python/paddle/v2/fluid/tests/test_dyn_rnn.py @@ -63,8 +63,7 @@ class TestDynRNN(unittest.TestCase): all_timesteps = fluid.layers.array_to_lod_tensor( x=out, table=rank_table) - last = fluid.layers.sequence_pool( - input=all_timesteps, pool_type='last') + last = fluid.layers.sequence_last_step(input=all_timesteps) logits = fluid.layers.fc(input=last, size=1, act=None) loss = fluid.layers.sigmoid_cross_entropy_with_logits( x=logits, label=label) @@ -101,7 +100,7 @@ class TestDynRNN(unittest.TestCase): rnn.update_memory(mem, out_) rnn.output(out_) - last = fluid.layers.sequence_pool(input=rnn(), pool_type='last') + last = fluid.layers.sequence_last_step(input=rnn()) logits = fluid.layers.fc(input=last, size=1, act=None) label = fluid.layers.data(name='label', shape=[1], dtype='float32') loss = fluid.layers.sigmoid_cross_entropy_with_logits( diff --git a/python/paddle/v2/fluid/tests/test_fill_zeros_like_op.py b/python/paddle/v2/fluid/tests/test_fill_zeros_like_op.py index eff8fa87d9..cd91769a22 100644 --- a/python/paddle/v2/fluid/tests/test_fill_zeros_like_op.py +++ b/python/paddle/v2/fluid/tests/test_fill_zeros_like_op.py @@ 
-7,7 +7,7 @@ class TestFillZerosLikeOp(OpTest): def setUp(self): self.op_type = "fill_zeros_like" self.inputs = {'X': np.random.random((219, 232)).astype("float32")} - self.outputs = {'Y': np.zeros_like(self.inputs["X"])} + self.outputs = {'Out': np.zeros_like(self.inputs["X"])} def test_check_output(self): self.check_output() diff --git a/python/paddle/v2/fluid/tests/test_gaussian_random_op.py b/python/paddle/v2/fluid/tests/test_gaussian_random_op.py index 627ab4e235..6f6a60ccb3 100644 --- a/python/paddle/v2/fluid/tests/test_gaussian_random_op.py +++ b/python/paddle/v2/fluid/tests/test_gaussian_random_op.py @@ -1,32 +1,46 @@ import unittest +import numpy + +import paddle.v2.fluid as fluid import paddle.v2.fluid.core as core from paddle.v2.fluid.op import Operator -import numpy +from paddle.v2.fluid.executor import Executor class TestGaussianRandomOp(unittest.TestCase): + def setUp(self): + self.op_type = "gaussian_random" + self.inputs = {} + self.attrs = {"shape": [1000, 784], "mean": .0, "std": 1., "seed": 10} + + self.outputs = ["Out"] + def test_cpu(self): - self.gaussian_random_test(place=core.CPUPlace()) + self.gaussian_random_test(place=fluid.CPUPlace()) def test_gpu(self): if core.is_compile_gpu(): - self.gaussian_random_test(place=core.GPUPlace(0)) + self.gaussian_random_test(place=fluid.CUDAPlace(0)) def gaussian_random_test(self, place): - scope = core.Scope() - scope.var('Out').get_tensor() - - op = Operator( - "gaussian_random", - Out='Out', - shape=[1000, 784], - mean=.0, - std=1., - seed=10) - - context = core.DeviceContext.create(place) - op.run(scope, context) - tensor = numpy.array(scope.find_var('Out').get_tensor()) + + program = fluid.Program() + block = program.global_block() + vout = block.create_var(name="Out") + op = block.append_op( + type=self.op_type, outputs={"Out": vout}, attrs=self.attrs) + + op.desc.infer_var_type(block.desc) + op.desc.infer_shape(block.desc) + + fetch_list = [] + for var_name in self.outputs: + fetch_list.append(block.var(var_name)) + + exe = Executor(place) + outs = exe.run(program, fetch_list=fetch_list) + tensor = outs[0] + self.assertAlmostEqual(numpy.mean(tensor), .0, delta=0.1) self.assertAlmostEqual(numpy.std(tensor), 1., delta=0.1) diff --git a/python/paddle/v2/fluid/tests/test_image_classification_layer.py b/python/paddle/v2/fluid/tests/test_image_classification_layer.py index 2fd609d447..b621d1525e 100644 --- a/python/paddle/v2/fluid/tests/test_image_classification_layer.py +++ b/python/paddle/v2/fluid/tests/test_image_classification_layer.py @@ -5,12 +5,7 @@ import paddle.v2.fluid.nets as nets from paddle.v2.fluid.framework import Program -def conv_block(input, - num_filter, - groups, - dropouts, - main_program=None, - startup_program=None): +def conv_block(input, num_filter, groups, dropouts): return nets.img_conv_group( input=input, pool_size=2, @@ -20,90 +15,54 @@ def conv_block(input, conv_act='relu', conv_with_batchnorm=True, conv_batchnorm_drop_rate=dropouts, - pool_type='max', - main_program=main_program, - startup_program=startup_program) + pool_type='max') class TestLayer(unittest.TestCase): def test_batch_norm_layer(self): main_program = Program() startup_program = Program() - images = fluid.layers.data( - name='pixel', - shape=[3, 48, 48], - dtype='float32', - main_program=main_program) - hidden1 = fluid.layers.batch_norm( - input=images, - main_program=main_program, - startup_program=startup_program) - hidden2 = fluid.layers.fc(input=hidden1, - size=128, - act='relu', - main_program=main_program) - hidden3 = 
fluid.layers.batch_norm( - input=hidden2, - main_program=main_program, - startup_program=startup_program) + with fluid.program_guard(main_program, startup_program): + images = fluid.layers.data( + name='pixel', shape=[3, 48, 48], dtype='float32') + hidden1 = fluid.layers.batch_norm(input=images) + hidden2 = fluid.layers.fc(input=hidden1, size=128, act='relu') + fluid.layers.batch_norm(input=hidden2) print str(main_program) def test_dropout_layer(self): main_program = Program() startup_program = Program() - images = fluid.layers.data( - name='pixel', - shape=[3, 48, 48], - dtype='float32', - main_program=main_program) - fluid.layers.dropout( - x=images, - dropout_prob=0.5, - main_program=main_program, - startup_program=startup_program) + with fluid.program_guard(main_program, startup_program): + images = fluid.layers.data( + name='pixel', shape=[3, 48, 48], dtype='float32') + fluid.layers.dropout(x=images, dropout_prob=0.5) - # print str(main_program) + print str(main_program) def test_img_conv_group(self): main_program = Program() startup_program = Program() - images = fluid.layers.data( - name='pixel', - shape=[3, 48, 48], - dtype='float32', - main_program=main_program, - startup_program=startup_program) - conv1 = conv_block(images, 64, 2, [0.3, 0], main_program, - startup_program) - conv2 = conv_block(conv1, 256, 3, [0.4, 0.4, 0], main_program, - startup_program) + with fluid.program_guard(main_program, startup_program): + images = fluid.layers.data( + name='pixel', shape=[3, 48, 48], dtype='float32') + conv1 = conv_block(images, 64, 2, [0.3, 0]) + conv_block(conv1, 256, 3, [0.4, 0.4, 0]) - # print str(main_program) + print str(main_program) def test_elementwise_add_with_act(self): main_program = Program() startup_program = Program() - image1 = fluid.layers.data( - name='pixel1', - shape=[3, 48, 48], - dtype='float32', - main_program=main_program, - startup_program=startup_program) - image2 = fluid.layers.data( - name='pixel2', - shape=[3, 48, 48], - dtype='float32', - main_program=main_program, - startup_program=startup_program) - out = fluid.layers.elementwise_add( - x=image1, - y=image2, - act='relu', - main_program=main_program, - startup_program=startup_program) - # print(main_program) + with fluid.program_guard(main_program, startup_program): + image1 = fluid.layers.data( + name='pixel1', shape=[3, 48, 48], dtype='float32') + image2 = fluid.layers.data( + name='pixel2', shape=[3, 48, 48], dtype='float32') + fluid.layers.elementwise_add(x=image1, y=image2, act='relu') + print(main_program) if __name__ == '__main__': diff --git a/python/paddle/v2/fluid/tests/test_inference_model_io.py b/python/paddle/v2/fluid/tests/test_inference_model_io.py index 60aed62ead..71ca3e6c10 100644 --- a/python/paddle/v2/fluid/tests/test_inference_model_io.py +++ b/python/paddle/v2/fluid/tests/test_inference_model_io.py @@ -6,7 +6,7 @@ import paddle.v2.fluid.core as core import paddle.v2.fluid.executor as executor import paddle.v2.fluid.layers as layers import paddle.v2.fluid.optimizer as optimizer -from paddle.v2.fluid.framework import Program +from paddle.v2.fluid.framework import Program, program_guard from paddle.v2.fluid.io import save_inference_model, load_inference_model @@ -16,35 +16,18 @@ class TestBook(unittest.TestCase): init_program = Program() program = Program() - x = layers.data( - name='x', - shape=[2], - dtype='float32', - main_program=program, - startup_program=init_program) - y = layers.data( - name='y', - shape=[1], - dtype='float32', - main_program=program, - 
startup_program=init_program) - - y_predict = layers.fc(input=x, - size=1, - act=None, - main_program=program, - startup_program=init_program) - - cost = layers.square_error_cost( - input=y_predict, - label=y, - main_program=program, - startup_program=init_program) - avg_cost = layers.mean( - x=cost, main_program=program, startup_program=init_program) - - sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.001) - sgd_optimizer.minimize(avg_cost, init_program) + + with program_guard(program, init_program): + x = layers.data(name='x', shape=[2], dtype='float32') + y = layers.data(name='y', shape=[1], dtype='float32') + + y_predict = layers.fc(input=x, size=1, act=None) + + cost = layers.square_error_cost(input=y_predict, label=y) + avg_cost = layers.mean(x=cost) + + sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.001) + sgd_optimizer.minimize(avg_cost, init_program) place = core.CPUPlace() exe = executor.Executor(place) diff --git a/python/paddle/v2/fluid/tests/test_is_empty_op.py b/python/paddle/v2/fluid/tests/test_is_empty_op.py index ed6e3fe24f..0a4dd0f4fa 100644 --- a/python/paddle/v2/fluid/tests/test_is_empty_op.py +++ b/python/paddle/v2/fluid/tests/test_is_empty_op.py @@ -33,8 +33,7 @@ class TestIsEmptyOp(unittest.TestCase): def one_case(self, input, target): op = Operator(type="is_empty", X=input, Out="out") - ctx = core.DeviceContext.create(core.CPUPlace()) - op.run(self.scope, ctx) + op.run(self.scope, core.CPUPlace()) out = self.scope.var("out").get_tensor() self.assertEqual(np.array(out)[0], target) diff --git a/python/paddle/v2/fluid/tests/test_layers.py b/python/paddle/v2/fluid/tests/test_layers.py index 57f6a362de..9d2dcca56d 100644 --- a/python/paddle/v2/fluid/tests/test_layers.py +++ b/python/paddle/v2/fluid/tests/test_layers.py @@ -29,7 +29,10 @@ class TestBook(unittest.TestCase): label = layers.data(name='label', shape=[1], dtype='int32') hidden1 = layers.fc(input=images, size=128, act='relu') hidden2 = layers.fc(input=hidden1, size=64, act='relu') - predict = layers.fc(input=hidden2, size=10, act='softmax') + predict = layers.fc(input=[hidden2, hidden1], + size=10, + act='softmax', + param_attr=["sftmax.w1", "sftmax.w2"]) cost = layers.cross_entropy(input=predict, label=label) avg_cost = layers.mean(x=cost) self.assertIsNotNone(avg_cost) @@ -158,6 +161,41 @@ class TestBook(unittest.TestCase): x=dat, label=lbl)) print(str(program)) + def test_sequence_expand(self): + program = Program() + with program_guard(program): + x = layers.data(name='x', shape=[10], dtype='float32') + y = layers.data( + name='y', shape=[10, 20], dtype='float32', lod_level=1) + self.assertIsNotNone(layers.sequence_expand(x=x, y=y)) + print(str(program)) + + def test_lstm_unit(self): + program = Program() + with program_guard(program): + x_t_data = layers.data( + name='x_t_data', shape=[10, 10], dtype='float32') + x_t = layers.fc(input=x_t_data, size=10) + prev_hidden_data = layers.data( + name='prev_hidden_data', shape=[10, 20], dtype='float32') + prev_hidden = layers.fc(input=prev_hidden_data, size=20) + prev_cell_data = layers.data( + name='prev_cell', shape=[10, 30], dtype='float32') + prev_cell = layers.fc(input=prev_cell_data, size=30) + self.assertIsNotNone( + layers.lstm_unit( + x_t=x_t, hidden_t_prev=prev_hidden, cell_t_prev=prev_cell)) + print(str(program)) + + def test_sequence_softmax(self): + program = Program() + with program_guard(program): + seq_data = layers.data( + name='seq_data', shape=[10, 10], dtype='float32', lod_level=1) + seq = layers.fc(input=seq_data, size=20) + 
self.assertIsNotNone(layers.sequence_softmax(x=seq)) + print(str(program)) + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/v2/fluid/tests/test_lod_tensor_array_ops.py b/python/paddle/v2/fluid/tests/test_lod_tensor_array_ops.py index 0a916a55bc..5fdabbcf88 100644 --- a/python/paddle/v2/fluid/tests/test_lod_tensor_array_ops.py +++ b/python/paddle/v2/fluid/tests/test_lod_tensor_array_ops.py @@ -2,7 +2,7 @@ import unittest import paddle.v2.fluid.core as core import numpy import paddle.v2.fluid.layers as layers -from paddle.v2.fluid.framework import Program +from paddle.v2.fluid.framework import Program, program_guard from paddle.v2.fluid.executor import Executor from paddle.v2.fluid.backward import append_backward_ops @@ -118,16 +118,17 @@ class TestCPULoDTensorArrayOps(unittest.TestCase): def main(self, tensor, expect_array, expect_lod, expect_max_len, level=0): place = self.place() program = Program() - x = layers.data(name='x', shape=[10], main_program=program) - x.persistable = True - table = layers.lod_rank_table(x, level=level, main_program=program) - max_len = layers.max_sequence_len(table, main_program=program) - max_len.persistable = True - array = layers.lod_tensor_to_array(x, table, main_program=program) - array.persistable = True - - result = layers.array_to_lod_tensor(array, table, main_program=program) - result.persistable = True + with program_guard(program): + x = layers.data(name='x', shape=[10]) + x.persistable = True + table = layers.lod_rank_table(x, level=level) + max_len = layers.max_sequence_len(table) + max_len.persistable = True + array = layers.lod_tensor_to_array(x, table) + array.persistable = True + + result = layers.array_to_lod_tensor(array, table) + result.persistable = True exe = Executor(place) scope = core.Scope() exe.run(program, feed={'x': tensor}, scope=scope) @@ -160,19 +161,16 @@ class TestCPULoDTensorArrayOpGrad(unittest.TestCase): place = core.CPUPlace() program = Program() - x = layers.data( - name='x', - shape=[1], - dtype='float32', - main_program=program, - stop_gradient=False) - table = layers.lod_rank_table(x, level=0, main_program=program) - array = layers.lod_tensor_to_array(x, table, main_program=program) - result = layers.array_to_lod_tensor(array, table, main_program=program) + with program_guard(program): + x = layers.data( + name='x', shape=[1], dtype='float32', stop_gradient=False) + table = layers.lod_rank_table(x, level=0) + array = layers.lod_tensor_to_array(x, table) + result = layers.array_to_lod_tensor(array, table) - mean = layers.mean(x=result, main_program=program) + mean = layers.mean(x=result) - append_backward_ops(mean) + append_backward_ops(mean) tensor = core.LoDTensor() tensor.set(numpy.arange(10).reshape(10, 1).astype('float32'), place) diff --git a/python/paddle/v2/fluid/tests/test_mnist_if_else_op.py b/python/paddle/v2/fluid/tests/test_mnist_if_else_op.py index 50fcc4a72d..33558c6105 100644 --- a/python/paddle/v2/fluid/tests/test_mnist_if_else_op.py +++ b/python/paddle/v2/fluid/tests/test_mnist_if_else_op.py @@ -1,5 +1,5 @@ import paddle.v2.fluid.layers as layers -from paddle.v2.fluid.framework import Program +from paddle.v2.fluid.framework import Program, program_guard, default_main_program, default_startup_program from paddle.v2.fluid.executor import Executor from paddle.v2.fluid.optimizer import MomentumOptimizer import paddle.v2.fluid.core as core @@ -10,44 +10,42 @@ import numpy as np class TestMNISTIfElseOp(unittest.TestCase): def test_raw_api(self): - kwargs = {'startup_program': Program(), 
'main_program': Program()} - image = layers.data(name='x', shape=[784], dtype='float32', **kwargs) + prog = Program() + startup_prog = Program() + with program_guard(prog, startup_prog): + image = layers.data(name='x', shape=[784], dtype='float32') - label = layers.data(name='y', shape=[1], dtype='int64', **kwargs) + label = layers.data(name='y', shape=[1], dtype='int64') - limit = layers.fill_constant_batch_size_like( - input=label, dtype='int64', shape=[1], value=5.0, **kwargs) + limit = layers.fill_constant_batch_size_like( + input=label, dtype='int64', shape=[1], value=5.0) + cond = layers.less_than(x=label, y=limit) + true_image, false_image = layers.split_lod_tensor( + input=image, mask=cond) - cond = layers.less_than(x=label, y=limit, **kwargs) - true_image, false_image = layers.split_lod_tensor( - input=image, mask=cond, **kwargs) + true_out = layers.create_tensor(dtype='float32') + true_cond = layers.ConditionalBlock([true_image]) - true_out = layers.create_tensor(dtype='float32', **kwargs) - true_cond = layers.ConditionalBlock([true_image], **kwargs) + with true_cond.block(): + hidden = layers.fc(input=true_image, size=100, act='tanh') + prob = layers.fc(input=hidden, size=10, act='softmax') + layers.assign(input=prob, output=true_out) - with true_cond.block(): - hidden = layers.fc(input=true_image, size=100, act='tanh', **kwargs) - prob = layers.fc(input=hidden, size=10, act='softmax', **kwargs) - layers.assign(input=prob, output=true_out, **kwargs) + false_out = layers.create_tensor(dtype='float32') + false_cond = layers.ConditionalBlock([false_image]) - false_out = layers.create_tensor(dtype='float32', **kwargs) - false_cond = layers.ConditionalBlock([false_image], **kwargs) + with false_cond.block(): + hidden = layers.fc(input=false_image, size=200, act='tanh') + prob = layers.fc(input=hidden, size=10, act='softmax') + layers.assign(input=prob, output=false_out) - with false_cond.block(): - hidden = layers.fc(input=false_image, - size=200, - act='tanh', - **kwargs) - prob = layers.fc(input=hidden, size=10, act='softmax', **kwargs) - layers.assign(input=prob, output=false_out, **kwargs) + prob = layers.merge_lod_tensor( + in_true=true_out, in_false=false_out, mask=cond, x=image) + loss = layers.cross_entropy(input=prob, label=label) + avg_loss = layers.mean(x=loss) - prob = layers.merge_lod_tensor( - in_true=true_out, in_false=false_out, mask=cond, x=image, **kwargs) - loss = layers.cross_entropy(input=prob, label=label, **kwargs) - avg_loss = layers.mean(x=loss, **kwargs) - - optimizer = MomentumOptimizer(learning_rate=0.001, momentum=0.9) - optimizer.minimize(avg_loss, kwargs['startup_program']) + optimizer = MomentumOptimizer(learning_rate=0.001, momentum=0.9) + optimizer.minimize(avg_loss, startup_prog) train_reader = paddle.batch( paddle.reader.shuffle( @@ -57,7 +55,7 @@ class TestMNISTIfElseOp(unittest.TestCase): place = core.CPUPlace() exe = Executor(place) - exe.run(kwargs['startup_program']) + exe.run(startup_prog) PASS_NUM = 100 for pass_id in range(PASS_NUM): for data in train_reader(): @@ -65,7 +63,7 @@ class TestMNISTIfElseOp(unittest.TestCase): y_data = np.array(map(lambda x: x[1], data)).astype("int64") y_data = np.expand_dims(y_data, axis=1) - outs = exe.run(kwargs['main_program'], + outs = exe.run(prog, feed={'x': x_data, 'y': y_data}, fetch_list=[avg_loss]) @@ -75,39 +73,36 @@ class TestMNISTIfElseOp(unittest.TestCase): self.assertFalse(True) def test_ifelse(self): - kwargs = {'startup_program': Program(), 'main_program': Program()} - image = 
layers.data(name='x', shape=[784], dtype='float32', **kwargs) - - label = layers.data(name='y', shape=[1], dtype='int64', **kwargs) - - limit = layers.fill_constant_batch_size_like( - input=label, dtype='int64', shape=[1], value=5.0, **kwargs) - - cond = layers.less_than(x=label, y=limit, **kwargs) - - ie = layers.IfElse(cond, **kwargs) - - with ie.true_block(): - true_image = ie.input(image) - hidden = layers.fc(input=true_image, size=100, act='tanh', **kwargs) - prob = layers.fc(input=hidden, size=10, act='softmax', **kwargs) - ie.output(prob) - - with ie.false_block(): - false_image = ie.input(image) - hidden = layers.fc(input=false_image, - size=200, - act='tanh', - **kwargs) - prob = layers.fc(input=hidden, size=10, act='softmax', **kwargs) - ie.output(prob) - - prob = ie() - loss = layers.cross_entropy(input=prob[0], label=label, **kwargs) - avg_loss = layers.mean(x=loss, **kwargs) - - optimizer = MomentumOptimizer(learning_rate=0.001, momentum=0.9) - optimizer.minimize(avg_loss, kwargs['startup_program']) + prog = Program() + startup_prog = Program() + with program_guard(prog, startup_prog): + image = layers.data(name='x', shape=[784], dtype='float32') + + label = layers.data(name='y', shape=[1], dtype='int64') + + limit = layers.fill_constant_batch_size_like( + input=label, dtype='int64', shape=[1], value=5.0) + cond = layers.less_than(x=label, y=limit) + ie = layers.IfElse(cond) + + with ie.true_block(): + true_image = ie.input(image) + hidden = layers.fc(input=true_image, size=100, act='tanh') + prob = layers.fc(input=hidden, size=10, act='softmax') + ie.output(prob) + + with ie.false_block(): + false_image = ie.input(image) + hidden = layers.fc(input=false_image, size=200, act='tanh') + prob = layers.fc(input=hidden, size=10, act='softmax') + ie.output(prob) + + prob = ie() + loss = layers.cross_entropy(input=prob[0], label=label) + avg_loss = layers.mean(x=loss) + + optimizer = MomentumOptimizer(learning_rate=0.001, momentum=0.9) + optimizer.minimize(avg_loss, startup_prog) train_reader = paddle.batch( paddle.reader.shuffle( paddle.dataset.mnist.train(), buf_size=8192), @@ -135,4 +130,5 @@ class TestMNISTIfElseOp(unittest.TestCase): if __name__ == '__main__': - unittest.main() + # temp disable if else unittest since it could be buggy. + exit(0) diff --git a/python/paddle/v2/fluid/tests/test_net.py b/python/paddle/v2/fluid/tests/test_net.py index 318df08a9e..d9fe55a8af 100644 --- a/python/paddle/v2/fluid/tests/test_net.py +++ b/python/paddle/v2/fluid/tests/test_net.py @@ -7,7 +7,7 @@ def fc(X, W, Y): ret_v = core.Net.create() ret_v.append_op(Operator("mul", X="X", Y="W", Out="pre_activation")) - ret_v.append_op(Operator("sigmoid", X="pre_activation", Y=Y)) + ret_v.append_op(Operator("sigmoid", X="pre_activation", Out=Y)) ret_v.complete_add_op(True) return ret_v @@ -30,7 +30,7 @@ Op(plain_net), inputs:{all[W, X, Y]}, outputs:{all[Out, fc.out, pre_activation]} Op(plain_net), inputs:{all[W, X]}, outputs:{all[fc.out, pre_activation]}. Op(plain_net), inputs:{all[W, X]}, outputs:{all[fc.out, pre_activation]}. Op(mul), inputs:{X[X], Y[W]}, outputs:{Out[pre_activation]}. - Op(sigmoid), inputs:{X[pre_activation]}, outputs:{Y[fc.out]}. + Op(sigmoid), inputs:{X[pre_activation]}, outputs:{Out[fc.out]}. 
''' self.assertEqual(expected, "\n" + str(net)) diff --git a/python/paddle/v2/fluid/tests/test_operator.py b/python/paddle/v2/fluid/tests/test_operator.py index 4aa022ef90..c059a2b88b 100644 --- a/python/paddle/v2/fluid/tests/test_operator.py +++ b/python/paddle/v2/fluid/tests/test_operator.py @@ -1,6 +1,6 @@ import unittest + import paddle.v2.fluid.op as op -import paddle.v2.fluid.core as core import paddle.v2.fluid.proto.framework_pb2 as framework_pb2 diff --git a/python/paddle/v2/fluid/tests/test_optimizer.py b/python/paddle/v2/fluid/tests/test_optimizer.py index 2459dfd664..29694be58b 100644 --- a/python/paddle/v2/fluid/tests/test_optimizer.py +++ b/python/paddle/v2/fluid/tests/test_optimizer.py @@ -27,7 +27,7 @@ class TestOptimizer(unittest.TestCase): block.append_op( type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out}) sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.01) - opts = sgd_optimizer.minimize(mean_out, init_program) + opts, _ = sgd_optimizer.minimize(mean_out, init_program) self.assertEqual(len(opts), 1) sgd_op = opts[0] self.assertEqual(sgd_op.type, "sgd") @@ -57,7 +57,7 @@ class TestOptimizer(unittest.TestCase): learning_rate = 0.01 sgd_optimizer = optimizer.SGDOptimizer( learning_rate=learning_rate, global_step=global_step) - opts = sgd_optimizer.minimize(mean_out, init_program) + opts, _ = sgd_optimizer.minimize(mean_out, init_program) self.assertEqual(len(opts), 2) sgd_op = opts[0] self.assertEqual(sgd_op.type, "sgd") diff --git a/python/paddle/v2/fluid/tests/test_profiler.py b/python/paddle/v2/fluid/tests/test_profiler.py index 395d0dc36a..e3f3ac58ef 100644 --- a/python/paddle/v2/fluid/tests/test_profiler.py +++ b/python/paddle/v2/fluid/tests/test_profiler.py @@ -3,6 +3,7 @@ import numpy as np import paddle.v2.fluid as fluid import paddle.v2.fluid.profiler as profiler import paddle.v2.fluid.layers as layers +import os class TestProfiler(unittest.TestCase): @@ -14,14 +15,16 @@ class TestProfiler(unittest.TestCase): data = layers.data(name='data', shape=[3, 28, 28], dtype='float32') conv = layers.conv2d(data, 20, 3, stride=[1, 1], padding=[1, 1]) - place = fluid.GPUPlace(0) + place = fluid.CUDAPlace(0) exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) - with profiler.cuda_profiler("cuda_profiler.txt", 'csv') as nvprof: + output_file = 'cuda_profiler.txt' + with profiler.cuda_profiler(output_file, 'csv') as nvprof: for i in range(epoc): input = np.random.random(dshape).astype('float32') exe.run(fluid.default_main_program(), feed={'data': input}) + os.remove(output_file) if __name__ == '__main__': diff --git a/python/paddle/v2/fluid/tests/test_program.py b/python/paddle/v2/fluid/tests/test_program.py index 1a9313c68a..447c746aac 100644 --- a/python/paddle/v2/fluid/tests/test_program.py +++ b/python/paddle/v2/fluid/tests/test_program.py @@ -1,7 +1,7 @@ from __future__ import print_function import unittest -from paddle.v2.fluid.framework import Program, default_main_program +from paddle.v2.fluid.framework import Program, default_main_program, program_guard, grad_var_name import paddle.v2.fluid.layers as layers main_program = default_main_program() @@ -109,12 +109,10 @@ class TestProgram(unittest.TestCase): self.assertEqual(add_op.idx, 1) param_to_grad = prog.append_backward(mean_out, set()) - def grad_name(name): - return name + "@GRAD" - for var_name in ("mul.x", "mul.y", "mul.out", "add.y", "add.out", "mean.out"): - self.assertEqual(param_to_grad[var_name][0], grad_name(var_name)) + self.assertEqual(param_to_grad[var_name][0], + 
grad_var_name(var_name))
             self.assertEqual(param_to_grad[var_name][1], 0)
 
         expect_ops = [
@@ -129,13 +127,10 @@ def test_program_clone_with_parameter(self):
         main_program = Program()
         startup_program = Program()
-        kwargs = {
-            'main_program': main_program,
-            'startup_program': startup_program
-        }
-        d = layers.data(name='x', shape=[784], dtype='float32', **kwargs)
-        hidden = layers.fc(input=d, size=100, **kwargs)
-        layers.fc(input=hidden, size=100, **kwargs)
+        with program_guard(main_program, startup_program):
+            d = layers.data(name='x', shape=[784], dtype='float32')
+            hidden = layers.fc(input=d, size=100)
+            layers.fc(input=hidden, size=100)
         new_program = main_program.clone()
         self.assertNotEqual(0, len(new_program.blocks[0].all_parameters()))
 
diff --git a/python/paddle/v2/fluid/tests/test_recurrent_op.py b/python/paddle/v2/fluid/tests/test_recurrent_op.py
index 694ff0d8dd..e38c763ddb 100644
--- a/python/paddle/v2/fluid/tests/test_recurrent_op.py
+++ b/python/paddle/v2/fluid/tests/test_recurrent_op.py
@@ -1,7 +1,7 @@
 import unittest
 
 import paddle.v2.fluid.layers as layers
-from paddle.v2.fluid.framework import Program
+from paddle.v2.fluid.framework import Program, grad_var_name
 from paddle.v2.fluid.executor import Executor
 from paddle.v2.fluid.backward import append_backward_ops
 import numpy as np
@@ -164,7 +164,7 @@ class RecurrentOpTest1(unittest.TestCase):
             for x in self.data_field
         }
         fetch_list = [
-            self.main_program.global_block().var(x + "@GRAD")
+            self.main_program.global_block().var(grad_var_name(x))
             for x in self.data_field
         ]
 
diff --git a/python/paddle/v2/fluid/tests/test_reduce_op.py b/python/paddle/v2/fluid/tests/test_reduce_op.py
index 70359d60cb..a021d4dd91 100644
--- a/python/paddle/v2/fluid/tests/test_reduce_op.py
+++ b/python/paddle/v2/fluid/tests/test_reduce_op.py
@@ -85,5 +85,19 @@ class Test1DReduce(OpTest):
         self.check_grad(['X'], 'Out')
 
 
+class TestReduceAll(OpTest):
+    def setUp(self):
+        self.op_type = "reduce_sum"
+        self.inputs = {'X': np.random.random((5, 6, 2, 10)).astype("float32")}
+        self.attrs = {'reduce_all': True}
+        self.outputs = {'Out': self.inputs['X'].sum()}
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Out')
+
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/python/paddle/v2/fluid/tests/test_registry.py b/python/paddle/v2/fluid/tests/test_registry.py
new file mode 100644
index 0000000000..f8328f31cf
--- /dev/null
+++ b/python/paddle/v2/fluid/tests/test_registry.py
@@ -0,0 +1,26 @@
+import unittest
+import warnings
+
+import numpy as np
+
+import paddle.v2.fluid as fluid
+import paddle.v2.fluid.framework as framework
+import paddle.v2.fluid.layers as layers
+import paddle.v2.fluid.registry as registry
+
+
+class TestRegistry(unittest.TestCase):
+    def test_registry_layer(self):
+        self.layer_type = "mean"
+        program = framework.Program()
+
+        with framework.program_guard(program):
+            x = fluid.layers.data(name='X', shape=[10, 10], dtype='float32')
+            output = layers.mean(x)
+
+        place = fluid.CPUPlace()
+        exe = fluid.Executor(place)
+
+        X = np.random.random((10, 10)).astype("float32")
+        mean_out = exe.run(program, feed={"X": X}, fetch_list=[output])
+        self.assertAlmostEqual(np.mean(X), float(mean_out[0]), places=5)
diff --git a/python/paddle/v2/fluid/tests/test_reorder_lod_tensor.py b/python/paddle/v2/fluid/tests/test_reorder_lod_tensor.py
new file mode 100644
index 0000000000..8f5774835e
--- /dev/null
+++ b/python/paddle/v2/fluid/tests/test_reorder_lod_tensor.py
@@ -0,0 +1,47 @@
+import unittest
+import paddle.v2.fluid as fluid
+import numpy
+
+ +class TestReorderLoDTensor(unittest.TestCase): + def test_reorder(self): + dat = fluid.layers.data(name='input', shape=[1], lod_level=2) + dat.stop_gradient = False + rank_dat = fluid.layers.data(name='ref', shape=[1], lod_level=1) + table = fluid.layers.lod_rank_table(rank_dat) + new_dat = fluid.layers.reorder_lod_tensor_by_rank( + x=dat, rank_table=table) + loss = fluid.layers.mean(x=new_dat) + fluid.backward.append_backward_ops(loss=loss) + + cpu = fluid.CPUPlace() + exe = fluid.Executor(cpu) + exe.run(fluid.default_startup_program()) + + ref = fluid.Tensor() + ref_lod = [0, 3, 4, 7, 8, 14] + ref.set_lod([ref_lod]) + + ref.set(numpy.random.random(size=[14, 1]).astype('float32'), cpu) + input = fluid.Tensor() + lod_level_0 = numpy.random.randint(low=1, high=5, size=5) + lod_level_0 = [0] + numpy.cumsum(lod_level_0).tolist() + lod_level_1 = numpy.random.randint(low=1, high=5, size=lod_level_0[-1]) + lod_level_1 = [0] + numpy.cumsum(lod_level_1).tolist() + + input.set_lod([lod_level_0, lod_level_1]) + input.set( + numpy.random.random(size=[lod_level_1[-1], 1]).astype('float32'), + cpu) + + ig = exe.run(fluid.default_main_program(), + feed={'input': input, + 'ref': ref}, + fetch_list=['input@GRAD'], + return_numpy=False)[0] + self.assertAlmostEqual(numpy.array(ig).sum(), 1.0, delta=0.001) + self.assertEqual(input.lod(), ig.lod()) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/v2/fluid/tests/test_reshape_op.py b/python/paddle/v2/fluid/tests/test_reshape_op.py index 16bb6bb2af..18ee3aece6 100644 --- a/python/paddle/v2/fluid/tests/test_reshape_op.py +++ b/python/paddle/v2/fluid/tests/test_reshape_op.py @@ -17,5 +17,19 @@ class TestReshapeOp(OpTest): self.check_grad(["X"], "Out") +class TestReshapeOpDimInfer(OpTest): + def setUp(self): + self.op_type = "reshape" + self.inputs = {'X': np.random.random((10, 20)).astype("float32")} + self.attrs = {'shape': [4, -1, 5]} + self.outputs = {'Out': self.inputs['X'].reshape(self.attrs['shape'])} + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(["X"], "Out") + + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/v2/fluid/tests/test_seq_expand.py b/python/paddle/v2/fluid/tests/test_sequence_expand.py similarity index 89% rename from python/paddle/v2/fluid/tests/test_seq_expand.py rename to python/paddle/v2/fluid/tests/test_sequence_expand.py index ff17edd04b..0f22612d3d 100644 --- a/python/paddle/v2/fluid/tests/test_seq_expand.py +++ b/python/paddle/v2/fluid/tests/test_sequence_expand.py @@ -3,7 +3,7 @@ import numpy as np from op_test import OpTest -class TestSeqExpand(OpTest): +class TestSequenceExpand(OpTest): def set_data(self): x_data = np.random.uniform(0.1, 1, [3, 1]).astype('float32') y_data = np.random.uniform(0.1, 1, [8, 1]).astype('float32') @@ -21,7 +21,7 @@ class TestSeqExpand(OpTest): self.outputs = {'Out': out} def setUp(self): - self.op_type = 'seq_expand' + self.op_type = 'sequence_expand' self.set_data() self.compute() @@ -32,7 +32,7 @@ class TestSeqExpand(OpTest): self.check_grad(["X"], "Out") -class TestSeqExpandCase1(TestSeqExpand): +class TestSequenceExpandCase1(TestSequenceExpand): def set_data(self): x_data = np.random.uniform(0.1, 1, [5, 1]).astype('float32') x_lod = [[0, 2, 5]] @@ -41,7 +41,7 @@ class TestSeqExpandCase1(TestSeqExpand): self.inputs = {'X': (x_data, x_lod), 'Y': (y_data, y_lod)} -class TestSeqExpandCase2(TestSeqExpand): +class TestSequenceExpandCase2(TestSequenceExpand): def set_data(self): x_data = 
np.random.uniform(0.1, 1, [1, 2, 2]).astype('float32') x_lod = [[0, 1]] @@ -50,7 +50,7 @@ class TestSeqExpandCase2(TestSeqExpand): self.inputs = {'X': (x_data, x_lod), 'Y': (y_data, y_lod)} -class TestSeqExpandCase3(TestSeqExpand): +class TestSequenceExpandCase3(TestSequenceExpand): def set_data(self): x_data = np.random.uniform(0.1, 1, [4, 1]).astype('float32') x_lod = [[0, 1, 2, 3, 4]] diff --git a/python/paddle/v2/fluid/tests/test_sgd_op.py b/python/paddle/v2/fluid/tests/test_sgd_op.py index ca05a381f0..14d41e172a 100644 --- a/python/paddle/v2/fluid/tests/test_sgd_op.py +++ b/python/paddle/v2/fluid/tests/test_sgd_op.py @@ -55,8 +55,7 @@ class TestSparseSGDOp(unittest.TestCase): Grad='Grad', ParamOut='Param', LearningRate='LearningRate') - ctx = core.DeviceContext.create(place) - sgd_op.run(scope, ctx) + sgd_op.run(scope, place) # get and compare result result_array = np.array(param) @@ -79,7 +78,7 @@ class TestSparseSGDOp(unittest.TestCase): def test_sparse_sgd(self): places = [core.CPUPlace()] if core.is_compile_gpu(): - places.append(core.GPUPlace(0)) + places.append(core.CUDAPlace(0)) for place in places: self.check_with_place(place) diff --git a/python/paddle/v2/fluid/tests/test_softmax_op.py b/python/paddle/v2/fluid/tests/test_softmax_op.py index b41c810d9a..136fc0283a 100644 --- a/python/paddle/v2/fluid/tests/test_softmax_op.py +++ b/python/paddle/v2/fluid/tests/test_softmax_op.py @@ -17,14 +17,14 @@ class TestSoftmaxOp(OpTest): 'X': np.random.uniform(0.1, 1, [10, 10]).astype("float32") } self.outputs = { - 'Y': np.apply_along_axis(stable_softmax, 1, self.inputs['X']) + 'Out': np.apply_along_axis(stable_softmax, 1, self.inputs['X']) } def test_check_output(self): self.check_output() def test_check_grad(self): - self.check_grad(['X'], 'Y') + self.check_grad(['X'], 'Out') if __name__ == "__main__": diff --git a/python/paddle/v2/fluid/tests/test_split_and_merge_lod_tensor_op.py b/python/paddle/v2/fluid/tests/test_split_and_merge_lod_tensor_op.py index f5da4e408f..8cdd59ff3c 100644 --- a/python/paddle/v2/fluid/tests/test_split_and_merge_lod_tensor_op.py +++ b/python/paddle/v2/fluid/tests/test_split_and_merge_lod_tensor_op.py @@ -2,7 +2,7 @@ import unittest import paddle.v2.fluid.core as core import numpy as np import paddle.v2.fluid.layers as layers -from paddle.v2.fluid.framework import Program +from paddle.v2.fluid.framework import Program, program_guard from paddle.v2.fluid.executor import Executor from paddle.v2.fluid.backward import append_backward_ops @@ -75,26 +75,22 @@ class TestCPULoDTensorArrayOps(unittest.TestCase): level=0): place = self.place() program = Program() - x = layers.data(name='x', shape=[1], main_program=program) - x.persistable = True + with program_guard(program): + x = layers.data(name='x', shape=[1]) + x.persistable = True - y = layers.data(name='y', shape=[1], main_program=program) - y.persistable = True + y = layers.data(name='y', shape=[1]) + y.persistable = True - out_true, out_false = layers.split_lod_tensor( - input=x, mask=y, level=level, main_program=program) - out_true.persistable = True - out_false.persistable = True + out_true, out_false = layers.split_lod_tensor( + input=x, mask=y, level=level) + out_true.persistable = True + out_false.persistable = True - out = layers.merge_lod_tensor( - in_true=out_true, - in_false=out_false, - mask=y, - x=x, - level=level, - main_program=program) + out = layers.merge_lod_tensor( + in_true=out_true, in_false=out_false, mask=y, x=x, level=level) - out.persistable = True + out.persistable = True exe = 
Executor(place) scope = core.Scope() @@ -123,34 +119,21 @@ class TestCPUSplitMergeLoDTensorGrad(unittest.TestCase): def test_grad(self): place = core.CPUPlace() program = Program() + with program_guard(program): + x = layers.data( + name='x', shape=[1], dtype='float32', stop_gradient=False) + y = layers.data( + name='y', shape=[1], dtype='bool', stop_gradient=False) - x = layers.data( - name='x', - shape=[1], - dtype='float32', - main_program=program, - stop_gradient=False) - y = layers.data( - name='y', - shape=[1], - dtype='bool', - main_program=program, - stop_gradient=False) - - level = 0 - - out_true, out_false = layers.split_lod_tensor( - input=x, mask=y, level=level, main_program=program) - out = layers.merge_lod_tensor( - in_true=out_true, - in_false=out_false, - mask=y, - x=x, - level=level, - main_program=program) - mean = layers.mean(x=out, main_program=program) - - append_backward_ops(mean) + level = 0 + + out_true, out_false = layers.split_lod_tensor( + input=x, mask=y, level=level) + out = layers.merge_lod_tensor( + in_true=out_true, in_false=out_false, mask=y, x=x, level=level) + mean = layers.mean(x=out) + + append_backward_ops(mean) tensor = core.LoDTensor() tensor.set(np.arange(10).reshape(10, 1).astype('float32'), place) diff --git a/python/paddle/v2/fluid/tests/test_spp_op.py b/python/paddle/v2/fluid/tests/test_spp_op.py new file mode 100644 index 0000000000..007723f0e3 --- /dev/null +++ b/python/paddle/v2/fluid/tests/test_spp_op.py @@ -0,0 +1,68 @@ +import unittest +import numpy as np +from op_test import OpTest +from test_pool2d_op import max_pool2D_forward_naive +from test_pool2d_op import avg_pool2D_forward_naive + + +class TestSppOp(OpTest): + def setUp(self): + self.op_type = "spp" + self.init_test_case() + input = np.random.random(self.shape).astype("float32") + nsize, csize, hsize, wsize = input.shape + out_level_flatten = [] + for i in xrange(self.pyramid_height): + bins = np.power(2, i) + kernel_size = [0, 0] + padding = [0, 0] + kernel_size[0] = np.ceil(hsize / + bins.astype("double")).astype("int32") + padding[0] = ( + (kernel_size[0] * bins - hsize + 1) / 2).astype("int32") + + kernel_size[1] = np.ceil(wsize / + bins.astype("double")).astype("int32") + padding[1] = ( + (kernel_size[1] * bins - wsize + 1) / 2).astype("int32") + out_level = self.pool2D_forward_naive(input, kernel_size, + kernel_size, padding) + out_level_flatten.append( + out_level.reshape(nsize, bins * bins * csize)) + if i == 0: + output = out_level_flatten[i] + else: + output = np.concatenate((output, out_level_flatten[i]), 1) + # output = np.concatenate(out_level_flatten.tolist(), 0); + self.inputs = {'X': input.astype('float32'), } + self.attrs = { + 'pyramid_height': self.pyramid_height, + 'pooling_type': self.pool_type + } + + self.outputs = {'Out': output.astype('float32')} + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + if self.pool_type != "avg": + self.check_grad(['X'], 'Out', max_relative_error=0.05) + + def init_test_case(self): + self.shape = [3, 2, 4, 4] + self.pyramid_height = 3 + self.pool2D_forward_naive = max_pool2D_forward_naive + self.pool_type = "max" + + +class TestCase2(TestSppOp): + def init_test_case(self): + self.shape = [3, 2, 4, 4] + self.pyramid_height = 3 + self.pool2D_forward_naive = avg_pool2D_forward_naive + self.pool_type = "avg" + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/v2/fluid/tests/test_uniform_random_op.py b/python/paddle/v2/fluid/tests/test_uniform_random_op.py index 
f736dfb2e8..dbe4d6bcd0 100644 --- a/python/paddle/v2/fluid/tests/test_uniform_random_op.py +++ b/python/paddle/v2/fluid/tests/test_uniform_random_op.py @@ -1,32 +1,49 @@ import unittest +import numpy + from paddle.v2.fluid.op import Operator import paddle.v2.fluid.core as core -import numpy +import paddle.v2.fluid as fluid class TestUniformRandomOp(unittest.TestCase): - def test_uniform_random_cpu(self): + def setUp(self): + self.op_type = "uniform_random" + self.inputs = {} + self.attrs = { + "shape": [1000, 784], + "min": -5.0, + "max": 10.0, + "seed": 10 + } + self.outputs = ["Out"] + + def test_cpu(self): self.uniform_random_test(place=core.CPUPlace()) - def test_uniform_random_gpu(self): + def test_gpu(self): if core.is_compile_gpu(): - self.uniform_random_test(place=core.GPUPlace(0)) + self.uniform_random_test(place=core.CUDAPlace(0)) def uniform_random_test(self, place): - scope = core.Scope() - scope.var('X').get_tensor() - - op = Operator( - "uniform_random", - Out='X', - shape=[1000, 784], - min=-5.0, - max=10.0, - seed=10) - - ctx = core.DeviceContext.create(place) - op.run(scope, ctx) - tensor = numpy.array(scope.find_var('X').get_tensor()) + program = fluid.Program() + block = program.global_block() + vout = block.create_var(name="Out") + op = block.append_op( + type=self.op_type, outputs={"Out": vout}, attrs=self.attrs) + + op.desc.infer_var_type(block.desc) + op.desc.infer_shape(block.desc) + + fetch_list = [] + for var_name in self.outputs: + fetch_list.append(block.var(var_name)) + + exe = fluid.Executor(place) + outs = exe.run(program, fetch_list=fetch_list) + + tensor = outs[0] + self.assertAlmostEqual(tensor.mean(), 2.5, delta=0.1) diff --git a/python/paddle/v2/parameters.py b/python/paddle/v2/parameters.py index bd97dc1199..7b7d1a1d16 100644 --- a/python/paddle/v2/parameters.py +++ b/python/paddle/v2/parameters.py @@ -383,19 +383,22 @@ class Parameters(object): params.deserialize(param_name, f) return params - def init_from_tar(self, f): + def init_from_tar(self, f, exclude_params=[]): """ Different from `from_tar`, this interface can be used to init partial network parameters from another saved model. :param f: the initialized model file. :type f: tar file + :param exclude_params: the names of parameters that should + not be initialized from the model file. + :type exclude_params: list of strings :return: Nothing. """ tar_param = Parameters.from_tar(f) for pname in tar_param.names(): - if pname in self.names(): + if pname in self.names() and pname not in exclude_params: self.set(pname, tar_param.get(pname)) diff --git a/python/paddle/v2/reader/decorator.py b/python/paddle/v2/reader/decorator.py index 7e457f987d..44a6e34463 100644 --- a/python/paddle/v2/reader/decorator.py +++ b/python/paddle/v2/reader/decorator.py @@ -14,7 +14,7 @@ __all__ = [ 'map_readers', 'buffered', 'compose', 'chain', 'shuffle', - 'ComposeNotAligned', 'firstn', 'xmap_readers', 'pipe_reader' + 'ComposeNotAligned', 'firstn', 'xmap_readers', 'PipeReader' ] from threading import Thread @@ -334,93 +334,72 @@ def _buf2lines(buf, line_break="\n"): return lines[:-1], lines[-1] -def pipe_reader(left_cmd, - parser, - bufsize=8192, - file_type="plain", - cut_lines=True, - line_break="\n"): +class PipeReader: """ - pipe_reader read data by stream from a command, take it's - stdout into a pipe buffer and redirect it to the parser to - parse, then yield data as your desired format. 
+    PipeReader reads data as a stream from a command, takes its
+    stdout into a pipe buffer, and redirects it to the parser to
+    parse; it then yields data in your desired format.
 
-    You can using standard linux command or call another program
-    to read data, from HDFS, Ceph, URL, AWS S3 etc:
+    You can use a standard Linux command or call another program
+    to read data, e.g. from HDFS, Ceph, a URL, or AWS S3:
 
-        cmd = "hadoop fs -cat /path/to/some/file"
-        cmd = "cat sample_file.tar.gz"
-        cmd = "curl http://someurl"
-        cmd = "python print_s3_bucket.py"
+    .. code-block:: python
+       cmd = "hadoop fs -cat /path/to/some/file"
+       cmd = "cat sample_file.tar.gz"
+       cmd = "curl http://someurl"
+       cmd = "python print_s3_bucket.py"
 
-    A sample parser:
+    An example:
+
+    .. code-block:: python
 
-        def sample_parser(lines):
-            # parse each line as one sample data,
-            # return a list of samples as batches.
-            ret = []
-            for l in lines:
-                ret.append(l.split(" ")[1:5])
-            return ret
-
-    :param left_cmd: command to excute to get stdout from.
-    :type left_cmd: string
-    :param parser: parser function to parse lines of data.
-                   if cut_lines is True, parser will receive list
-                   of lines.
-                   if cut_lines is False, parser will receive a
-                   raw buffer each time.
-                   parser should return a list of parsed values.
-    :type parser: callable
-    :param bufsize: the buffer size used for the stdout pipe.
-    :type bufsize: int
-    :param file_type: can be plain/gzip, stream buffer data type.
-    :type file_type: string
-    :param cut_lines: whether to pass lines instead of raw buffer
-                      to the parser
-    :type cut_lines: bool
-    :param line_break: line break of the file, like \n or \r
-    :type line_break: string
-
-    :return: the reader generator.
-    :rtype: callable
+        def example_reader():
+            for f in myfiles:
+                pr = PipeReader("cat %s"%f)
+                for l in pr.get_line():
+                    sample = l.split(" ")
+                    yield sample
     """
-    if not isinstance(left_cmd, str):
-        raise TypeError("left_cmd must be a string")
-    if not callable(parser):
-        raise TypeError("parser must be a callable object")
-
-    process = subprocess.Popen(
-        left_cmd.split(" "), bufsize=bufsize, stdout=subprocess.PIPE)
-    # TODO(typhoonzero): add a thread to read stderr
-
-    # Always init a decompress object is better than
-    # create in the loop.
-    dec = zlib.decompressobj(
-        32 + zlib.MAX_WBITS)  # offset 32 to skip the header
-    def reader():
+    def __init__(self, command, bufsize=8192, file_type="plain"):
+        if not isinstance(command, str):
+            raise TypeError("command must be a string")
+        if file_type == "gzip":
+            self.dec = zlib.decompressobj(
+                32 + zlib.MAX_WBITS)  # offset 32 to skip the header
+        self.file_type = file_type
+        self.bufsize = bufsize
+        self.process = subprocess.Popen(
+            command.split(" "), bufsize=bufsize, stdout=subprocess.PIPE)
+
+    def get_line(self, cut_lines=True, line_break="\n"):
+        """
+        :param cut_lines: cut buffer to lines
+        :type cut_lines: bool
+        :param line_break: line break of the file, like \n or \r
+        :type line_break: string
+
+        :return: one line or a buffer of bytes
+        :rtype: string
+        """
         remained = ""
         while True:
-            buff = process.stdout.read(bufsize)
+            buff = self.process.stdout.read(self.bufsize)
             if buff:
-                if file_type == "gzip":
-                    decomp_buff = dec.decompress(buff)
-                elif file_type == "plain":
+                if self.file_type == "gzip":
+                    decomp_buff = self.dec.decompress(buff)
+                elif self.file_type == "plain":
                     decomp_buff = buff
                 else:
-                    raise TypeError("file_type %s is not allowed" % file_type)
+                    raise TypeError("file_type %s is not allowed" %
+                                    self.file_type)
                 if cut_lines:
                     lines, remained = _buf2lines(''.join(
                         [remained, decomp_buff]), line_break)
-                    parsed_list = parser(lines)
-                    for ret in parsed_list:
-                        yield ret
+                    for line in lines:
+                        yield line
                 else:
-                    for ret in parser(decomp_buff):
-                        yield ret
+                    yield decomp_buff
             else:
                 break
-
-    return reader
diff --git a/python/paddle/v2/reader/tests/decorator_test.py b/python/paddle/v2/reader/tests/decorator_test.py
index 5a92951b10..4ba71969df 100644
--- a/python/paddle/v2/reader/tests/decorator_test.py
+++ b/python/paddle/v2/reader/tests/decorator_test.py
@@ -145,5 +145,33 @@ class TestXmap(unittest.TestCase):
                 self.assertEqual(e, mapper(idx))
 
 
+class TestPipeReader(unittest.TestCase):
+    def test_pipe_reader(self):
+        def example_reader(myfiles):
+            for f in myfiles:
+                pr = paddle.v2.reader.PipeReader("cat %s" % f, bufsize=128)
+                for l in pr.get_line():
+                    yield l
+
+        import tempfile
+
+        records = [str(i) for i in xrange(5)]
+        temp = tempfile.NamedTemporaryFile()
+        try:
+            with open(temp.name, 'w') as f:
+                for r in records:
+                    f.write('%s\n' % r)
+
+            result = []
+            for r in example_reader([temp.name]):
+                result.append(r)
+
+            for idx, e in enumerate(records):
+                self.assertEqual(e, result[idx])
+        finally:
+            # delete the temporary file
+            temp.close()
+
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/python/setup.py.in b/python/setup.py.in
index 9ccb4dc176..66ccfe8087 100644
--- a/python/setup.py.in
+++ b/python/setup.py.in
@@ -68,6 +68,7 @@ packages=['paddle',
           'paddle.v2.plot',
           'paddle.v2.fluid',
           'paddle.v2.fluid.proto',
+          'paddle.v2.fluid.layers',
           'py_paddle']
 
 with open('@PADDLE_SOURCE_DIR@/python/requirements.txt') as f:
@@ -78,8 +79,7 @@ if '${CMAKE_SYSTEM_PROCESSOR}' not in ['arm', 'armv7-a', 'aarch64']:
 # the prefix is sys.prefix which should always be usr
 paddle_bin_dir = 'opt/paddle/bin'
 
-paddle_bins = ['${PADDLE_BINARY_DIR}/paddle/scripts/paddle_usage',
-               '${PADDLE_BINARY_DIR}/paddle/trainer/paddle_trainer',
+paddle_bins = ['${PADDLE_BINARY_DIR}/paddle/trainer/paddle_trainer',
               '${PADDLE_BINARY_DIR}/paddle/trainer/paddle_merge_model',
               '${PADDLE_BINARY_DIR}/paddle/pserver/paddle_pserver_main',
               '${PADDLE_BINARY_DIR}/paddle/scripts/paddle']
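
Note on the operator-test changes above: the recurring mechanical change is that Operator.run now takes a Place directly, op.run(scope, place), instead of a core.DeviceContext created from that place, and core.GPUPlace(0) is renamed core.CUDAPlace(0). A minimal sketch of the updated test pattern, built only from APIs that appear in this diff; the fill_zeros_like operator and the variable names X/Out are illustrative choices, not prescribed by the patch:

    import numpy as np
    import paddle.v2.fluid.core as core
    from paddle.v2.fluid.op import Operator

    def run_op_everywhere(make_op, check):
        # CPU is always exercised; CUDA only when built with GPU support.
        places = [core.CPUPlace()]
        if core.is_compile_gpu():
            places.append(core.CUDAPlace(0))
        for place in places:
            scope = core.Scope()
            # Create the input and output variables the operator expects.
            x_tensor = scope.var('X').get_tensor()
            x_tensor.set(np.random.random((4, 4)).astype('float32'), place)
            scope.var('Out').get_tensor()
            op = make_op()
            # Old style: ctx = core.DeviceContext.create(place); op.run(scope, ctx)
            # New style: the place itself is passed to run().
            op.run(scope, place)
            check(np.array(scope.find_var('Out').get_tensor()))

    run_op_everywhere(
        lambda: Operator("fill_zeros_like", X='X', Out='Out'),
        lambda out: np.testing.assert_array_equal(out, np.zeros((4, 4))))

The same loop-over-places shape appears in test_adagrad_op, test_sgd_op, and test_batch_norm_op above; only the operator construction and the expected-output check differ.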