Merge branch 'develop' into feature/clean_memcpy_async

intel-update-authors
Yu Yang 7 years ago committed by GitHub
commit deabc8ca0b

@ -49,7 +49,11 @@ ENV PATH=${PATH}:${GOROOT}/bin:${GOPATH}/bin
RUN curl -s -q https://glide.sh/get | sh
# Install TensorRT
# The unnecessary files have been removed to make the library small. It only contains include and lib now.
# The following TensorRT.tar.gz is not the default official one; we made two minor changes:
# 1. Remove the unnecessary files to make the library small. TensorRT.tar.gz only contains include and lib now,
# and its size is only one-third of the official one.
# 2. Manually add ~IPluginFactory() to the IPluginFactory class in NvInfer.h; otherwise, it will not work in Paddle.
# See https://github.com/PaddlePaddle/Paddle/issues/10129 for details.
RUN wget -qO- http://paddlepaddledeps.bj.bcebos.com/TensorRT-4.0.0.3.Ubuntu-16.04.4.x86_64-gnu.cuda-8.0.cudnn7.0.tar.gz | \
tar -xz -C /usr/local && \
cp -rf /usr/local/TensorRT/include /usr && \

@ -21,7 +21,7 @@ import argparse
import time
import distutils.util
import paddle.v2 as paddle
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.fluid.framework as framework

@ -20,7 +20,7 @@ import numpy as np
import argparse
import time
import paddle.v2 as paddle
import paddle
import paddle.fluid as fluid
import paddle.fluid.profiler as profiler

@ -23,7 +23,7 @@ import time
import cProfile, pstats, StringIO
import paddle.v2 as paddle
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.fluid.profiler as profiler

@ -23,10 +23,10 @@ import random
import time
import numpy
import paddle.v2 as paddle
import paddle.v2.dataset.imdb as imdb
import paddle
import paddle.dataset.imdb as imdb
import paddle.fluid as fluid
from paddle.v2 import batch
import paddle.batch as batch
import paddle.fluid.profiler as profiler

@ -17,7 +17,7 @@ from __future__ import print_function
import sys
import time
import numpy as np
import paddle.v2 as paddle
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import argparse

@ -30,4 +30,6 @@ if(TENSORRT_FOUND)
message(STATUS "Current TensorRT header is ${TENSORRT_INCLUDE_DIR}/NvInfer.h. "
"Current TensorRT version is v${TENSORRT_MAJOR_VERSION}. ")
include_directories(${TENSORRT_INCLUDE_DIR})
list(APPEND EXTERNAL_LIBS ${TENSORRT_LIBRARY})
endif()

@ -0,0 +1,10 @@
==================================
Data Reader Interface and DataSets
==================================
.. toctree::
:maxdepth: 1
data/data_reader.rst
data/image.rst
data/dataset.rst

@ -0,0 +1,72 @@
=====================
Data Reader Interface
=====================
DataTypes
=========
.. autofunction:: paddle.v2.data_type.dense_array
:noindex:
.. autofunction:: paddle.v2.data_type.integer_value
:noindex:
.. autofunction:: paddle.v2.data_type.integer_value_sequence
:noindex:
.. autofunction:: paddle.v2.data_type.integer_value_sub_sequence
:noindex:
.. autofunction:: paddle.v2.data_type.sparse_binary_vector
:noindex:
.. autofunction:: paddle.v2.data_type.sparse_binary_vector_sequence
:noindex:
.. autofunction:: paddle.v2.data_type.sparse_binary_vector_sub_sequence
:noindex:
.. autofunction:: paddle.v2.data_type.sparse_float_vector
:noindex:
.. autofunction:: paddle.v2.data_type.sparse_float_vector_sequence
:noindex:
.. autofunction:: paddle.v2.data_type.sparse_float_vector_sub_sequence
:noindex:
.. autofunction:: paddle.v2.data_type.sparse_non_value_slot
:noindex:
.. autofunction:: paddle.v2.data_type.sparse_value_slot
:noindex:
.. autoclass:: paddle.v2.data_type.InputType
:members:
:noindex:
DataFeeder
==========
.. automodule:: paddle.v2.data_feeder
:members:
:noindex:
Reader
======
.. automodule:: paddle.reader
:members:
:noindex:
.. automodule:: paddle.reader.creator
:members:
:noindex:
minibatch
=========
.. automodule:: paddle.v2.minibatch
:members:
:noindex:

@ -0,0 +1,82 @@
Dataset
=======
.. automodule:: paddle.dataset
:members:
:noindex:
mnist
+++++
.. automodule:: paddle.dataset.mnist
:members:
:noindex:
cifar
+++++
.. automodule:: paddle.dataset.cifar
:members:
:noindex:
conll05
+++++++
.. automodule:: paddle.dataset.conll05
:members: get_dict,get_embedding,test
:noindex:
imdb
++++
.. automodule:: paddle.dataset.imdb
:members:
:noindex:
imikolov
++++++++
.. automodule:: paddle.dataset.imikolov
:members:
:noindex:
movielens
+++++++++
.. automodule:: paddle.dataset.movielens
:members:
:noindex:
.. autoclass:: paddle.dataset.movielens.MovieInfo
:noindex:
.. autoclass:: paddle.dataset.movielens.UserInfo
:noindex:
sentiment
+++++++++
.. automodule:: paddle.dataset.sentiment
:members:
:noindex:
uci_housing
+++++++++++
.. automodule:: paddle.dataset.uci_housing
:members:
:noindex:
wmt14
+++++
.. automodule:: paddle.dataset.wmt14
:members:
:noindex:
wmt16
+++++
.. automodule:: paddle.dataset.wmt16
:members:
:noindex:

@ -0,0 +1,5 @@
Image Interface
===============
.. automodule:: paddle.v2.image
:members:

@ -16,3 +16,4 @@ Fluid
profiler.rst
regularizer.rst
io.rst
data.rst

@ -479,6 +479,13 @@ label_smooth
.. autofunction:: paddle.fluid.layers.label_smooth
:noindex:
roi_pool
---------
.. autofunction:: paddle.fluid.layers.roi_pool
:noindex:
ops
===
@ -820,3 +827,5 @@ topk
.. autofunction:: paddle.fluid.layers.topk
:noindex:

@ -3,7 +3,7 @@
## Why float16
Half precision (float16) is a binary floating-point format that occupies 16 bits in memory. float16 is half the size of traditional 32-bit single precision format (float) and has lower precision and smaller range.
When high precision computation is not required, using float16 data type could potentially
When high precision computation is not required (which is usually the case at least in the deep learning inference stage), using float16 data type could potentially
- reduce storage space, memory bandwidth, and power usages;
- increase the chance of data fitting into a smaller cache of lower latency;
@ -12,7 +12,7 @@ When high precision computation is not required, using float16 data type could p
## Survey of current float16 support
A brief survey of float16 support on different compilers, hardwares, and libraries can be found below. Interested readers can refer to [link1](https://github.com/PaddlePaddle/Paddle/issues/4853) and [link2](https://github.com/Xreki/Xreki.github.io/blob/master/multi_data_types_in_dl_framework/ppt/float16_and_quantized_type.md) for more info.
The goal of float16 is to serve as a key for the executor to find and run the correct version of compute method specialized for float16 in operator kernel. It should be compatible with various natively supported float16 implementations including `__half` for cuda, `float16_t` for ARM, and `Eigen::half` for Eigen to make writing customized float16 kernels easier.
The goal of float16 is to serve as a key for the executor to find and run the correct version of compute method specialized for float16 in operator kernels. It should be compatible with various natively supported float16 implementations including `__half` for cuda, `float16_t` for ARM, and `Eigen::half` for Eigen to make writing customized float16 kernels easier.
### Compiler
- nvcc supports `__half` data type after CUDA 7.5.
@ -95,11 +95,89 @@ float half_to_float(float16 h);
```
which provide one-to-one conversion between float32 and float16. These two functions use different conversion routines depending on the current hardware: CUDA/ARM intrinsics are used when the corresponding hardware is available. If the hardware or compiler does not support float32-to-float16 conversion, software emulation is performed instead.
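As a hedged round-trip illustration (assuming `float_to_half_rn` is the float-to-float16 counterpart declared alongside `half_to_float` above):
```c++
// Round trip: float32 -> float16 -> float32. The value comes back with
// float16 precision (~3 decimal digits), not the original bit pattern.
float f = 3.14159265f;
float16 h = float_to_half_rn(f);  // round-to-nearest float32 -> float16
float back = half_to_float(h);    // ~3.1406, precision loss is expected
```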
## To do
After the float16 class is available, some future work items are listed below:
## float16 inference
In Fluid, a neural network is represented as a protobuf message called [ProgramDesc](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/concepts/program.md), whose Python wrapper is a [Program](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/modules/python_api.md#program). The basic structure of a program is some nested [blocks](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/modules/python_api.md#block), where each block consists of some [variable](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/modules/python_api.md#variable) definitions and a sequence of [operators](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/modules/python_api.md#operator). An [executor](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/concepts/executor.md) will run a given program desc by executing the sequence of operators in the entrance block of the program one by one.
- Update pybind/tensor_py.h to bind c++ float16 with numpy float16.
### Operator level requirement
Each operator has many kernels for different data types, devices, and library types. The operator will select the appropriate kernel to run based on, among other things, the data type of the input variables. By default, every Fluid operator has a float data type kernel that takes float variables as input and generates float output.
- Modify `GetKernelType()` method in `framework/operator.h` to make it compatible with float16.
This means that if we provide float input to the first operator in a program, then each operator will use the float kernel to compute a float output and send it as input to the next operator, triggering its float kernel in turn. Overall, the program will run in float mode and give us a final output of float data type.
- Create a type-casting operator that can convert tensor data between float16 and other data types.
The same principle applies if we want a program to run in float16 mode. We provide an input variable of float16 data type to the first operator, and then, one by one, each operator in the program will run its float16 kernel (provided that each operator in this program has float16 kernels registered) until we finally obtain a float16 output variable.
So the preliminary requirement for float16 inference is to add float16 kernels to the operators needed by a specific kind of program. For example, float16 inference on an image classification neural network like VGG or ResNet typically requires the following operators to have float16 kernels: convolution, pooling, multiplication, addition, batch norm, dropout, relu, and softmax. Please refer to [new_op_en](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/dev/new_op_en.md) for details of how to add new kernels to an operator.
### Variable level requirement
Operators such as convolution and multiplication (used in fully-connected layers) take as input not only the variables generated by the preceding operators but also [parameter](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/modules/python_api.md#parameter) variables, which contain the trained weights to apply to the input data. These weights are obtained in the Fluid training process and are by default of float data type.
When these operators are running in float16 mode, the float16 kernel requires those parameter variables to contain weights of Fluid float16 data type. Thus, we need a convenient way to convert the original float weights to float16 weights.
In Fluid, we use a tensor to hold the actual data of a variable on the c++ end. [Pybind](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/pybind/tensor_py.h) is used to bind c++ tensors of a certain data type with numpy arrays of the corresponding numpy data type on the Python end. Each common c++ built-in data type has a corresponding numpy data type of the same name. However, since there is no built-in float16 type in c++, we cannot directly bind the numpy float16 data type with the Fluid float16 class. Since both Fluid float16 and numpy float16 use uint16 as their internal data storage type, we use the c++ built-in type `uint16_t` and the corresponding numpy uint16 data type to bridge the gap via [Pybind](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/pybind/tensor_py.h).
The following code demonstrates how to do the tensor conversion.
```Python
# var is the variable of float weights
# tensor is a numpy array of data copied from the tensor data in var
# fp16_var is the variable that will contain float16 weights converted from var
tensor = numpy.array(var.get_tensor())
fp16_tensor = fp16_var.get_tensor()
# After the original tensor data is converted to numpy float16 data type,
# view(numpy.uint16) is used so that the internal memory of the numpy array
# will be reinterpreted to be of uint16 data type, which is bound to
# Fluid float16 class via pybind with the help of uint16_t built-in c++ type
fp16_tensor.set(tensor.astype(numpy.float16).view(numpy.uint16), GPUPlace)
```
### Consistent API requirement
The basic inference in float16 mode requires users to feed input and obtain output both of float16 data type. However, in this way, the inference APIs are not consistent between float16 mode and float mode, and users may find it confusing and difficult to use float16 inference since they need to do extra steps to provide float16 input data and to convert float16 output data back to float. To have a consistent API for different inference modes, we need to transpile the program desc in some way so that we can run float16 inference by feeding and fetching variables of float data type.
This problem can be solved by introducing a type-casting operator which takes an input variable of a certain data type, casts it to another specified data type, and puts the cast data into the output variable. Inserting cast operators where needed can make a program run internally in float16 mode.
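For illustration only, a minimal sketch of this idea in Fluid's Python API (here the `cast` layer stands in for the type-casting operator, and the `fc` layer is just a placeholder for the float16 region of the program):
```Python
import paddle.fluid as fluid

# Users keep providing float input ...
image = fluid.layers.data(name='image', shape=[3, 32, 32], dtype='float32')
# ... which is cast to float16 before entering the float16 region,
image_fp16 = fluid.layers.cast(x=image, dtype='float16')
# so the operators in between run their float16 kernels,
predict_fp16 = fluid.layers.fc(input=image_fp16, size=10, act='softmax')
# and the float16 result is cast back to float before users fetch it.
predict = fluid.layers.cast(x=predict_fp16, dtype='float32')
```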
### float16 transpiler
With all the above requirements in mind, we designed a float16 inference transpiler that can transpile a float32-mode inference program desc into a float16-mode one.
Given a float inference program and the corresponding variables of float32 weights in the [scope](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/concepts/scope.md),
this transpiler mainly does the following modifications:
1. Insert cast operators at the beginning of the program so that the input float data will be converted to float16 data type before feeding to subsequent operators to invoke the float16 kernel.
2. Insert cast operators at the end of the program so that the output float16 data will be converted back to float data type before users obtain the result.
3. For each parameter variable of float weights, create in the scope a corresponding variable of float16 weights which are converted from the corresponding float weights and add this new float16 variable to the program.
4. Update the operator information in the program so that each relevant operator uses the newly created float16 variable instead of its float counterpart.
Below is an example of usage:
```Python
# Get the float inference program
[float_inference_program, feed_target_names,
fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)
# Prepare the float input data
tensor_img = numpy.random.rand(1, 3, 32, 32).astype(numpy.float32)
# Running inference_program in float mode
float_results = exe.run(float_inference_program,
feed={feed_target_names[0]: tensor_img},
fetch_list=fetch_targets)
# Use float16 transpiler to speedup
float16_inference_program = float_inference_program.clone()
t = fluid.InferenceTranspiler()
t.float16_transpile(float16_inference_program, GPUPlace)
# Running
float16_results = exe.run(float16_inference_program,
feed={feed_target_names[0]: tensor_img},
fetch_list=fetch_targets)
```
As we can see from the example above, users can simply use the `float16_transpile` method provided by the inference transpiler class on an existing float inference program to run inference in float16 mode.
### Speedup on GPU
Currently, Fluid inference in float16 mode is only supported on Nvidia GPU device. There is no motivation to support float16 inference on non-ARM CPUs because float16 is not natively supported there and float16 calculation will only be slower than its float counterpart.
Nvidia started to support its native float16 data type (which has the same internal memory representation as the Fluid float16 class) on CUDA 7.5. Moreover, float16 speedups on common computationally intensive tasks including GEMM (general matrix-matrix multiplication) and convolution have been supported since cuBLAS 7.5 and cuDNN 5.0.
Recently, the introduction of [tensor cores](https://devblogs.nvidia.com/programming-tensor-cores-cuda-9/) in Volta architecture GPUs and the support of tensor core computation in CUDA 9.0 and cuDNN 7.0 make float16 truly superior to float in certain deep learning applications. Please refer to this [benchmark report](https://github.com/kexinzhao/Paddle_benchmark/blob/master/float16_benchmark.md) for more details.

@ -0,0 +1,175 @@
# Variable Length Supported RNN Design
For the learning of variable-length sequences, existing mainstream frameworks such as TensorFlow, PyTorch, Caffe2, and MXNet all use padding.
Different-length sequences in a mini-batch are padded with zeros and transformed to the same length.
The existing RNN implementation in PaddlePaddle is `RecurrentLayerGroup`,
which supports variable-length sequences without padding.
This doc designs Fluid's RNN based on this idea.
## Multi-layer sequence data format `LODTensor`
At present, Paddle stores the data of one mini-batch in a one-dimensional array.
`Argument.sequenceStartPositions` is used to store sequence information for each sentence.
In Paddle, `Argument.subSequenceStartPositions` is used to store 2 levels of sequence information, while higher-dimensional sequences cannot be supported.
In order to support the storage of `N-level` sequences, we define sequence information as the following data structure.
```c++
std::shared_ptr<std::vector<std::vector<int>>> lod_start_pos_;
```
Or, defined more explicitly:
```c++
typedef std::vector<int> level_t;
std::vector<level_t> lod_start_pos;
```
Each `level_t` here stores a level of offset information consistent with paddle's current practice.
In order to transmit sequence information more transparently, we have introduced a new tensor called `LODTensor`[1].
Its tensor-related interfaces all inherit directly from `Tensor`, but it also adds sequence-related interfaces.
Thus, an ordinary `Op` can use a `LODTensor` directly as a `Tensor`,
while an `Op` that operates on sequences will additionally use the variable-length sequence interfaces of `LODTensor`.
The definition of `LODTensor` is as follows:
```c++
class LODTensor : public Tensor {
public:
size_t Levels() const { return lod_start_pos_->size(); }
size_t Elements(int level = 0) const {
return (*lod_start_pos_)[level].size();
}
// slice of level[elem_begin: elem_end]
// NOTE low performance in slice seq_start_positions_.
// TODO should call Tensor's Slice.
LODTensor LODSlice(int level, int elem_begin, int elem_end) const;
// slice with tensor's data shared with this.
LODTensor LODSliceShared(int level, int elem_begin, int elem_end) const;
// copy other's lod_start_pos_, to share LOD info.
// NOTE the LOD info should not be changed.
void ShareConstLODFrom(const LODTensor &other) {
lod_start_pos_ = other.lod_start_pos_;
}
// copy other's lod_start_pos_'s content, free to mutate.
void ShareMutableLODFrom(const LODTensor &other) {
lod_start_pos_ = std::make_shared<std::vector<std::vector<int>>>(
other.lod_start_pos_->begin(), other.lod_start_pos_->end());
}
private:
std::shared_ptr<std::vector<std::vector<int>>> lod_start_pos_;
};
```
Among them, `lod_start_pos_` uses `shared_ptr` to reduce the cost of storage and replication.
`LODTensor` can be thought of as an extension of `Tensor`, and it is almost completely compatible with the original `Tensor`.
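A hypothetical usage sketch of the interface above (only the methods defined in the class are used; how `input` gets produced is elided):
```c++
void Demo(const LODTensor &input) {
  size_t levels = input.Levels();     // number of LOD levels
  size_t n_seqs = input.Elements(0);  // number of top-level sequences
  // slice out the first two top-level sequences
  LODTensor head = input.LODSlice(/*level=*/0, /*elem_begin=*/0,
                                  /*elem_end=*/2);
  // consumer-style sharing: copy the shared_ptr, treat LOD as read-only
  LODTensor output;
  output.ShareConstLODFrom(input);
}
```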
## How to support the framework
### Replace `Tensor` with `LoDTensor`
To implement the passing of `LODTensor`, most `Tensor` in the framework need to be replaced with `LODTensor`.
A simple implementation is to directly **replace all previous `Tensor` with `LODTensor`**; for this, you can directly modify the `Tensor` interface created in `pybind.cc`.
In addition, the user may need to perceive the existence of sequences (for example, visualization may need to parse the output sequence of the model), so some of the sequence-related APIs also need to be exposed to the Python layer.
### Transmit `lod_start_pos` along with the Op call chain
The framework needs to support the following features to implement the transmission of `lod_start_pos`:
1. Implement the transfer as a `shared_ptr`
- A consumer does not modify the contents of `lod_start_pos`
- A producer modifies the contents of `lod_start_pos`
- By convention, a consumer only needs to copy the `shared_ptr` passed over
- A producer needs to create its own independent memory to store its own modifications and expose a `shared_ptr` to subsequent consumers; a sketch of this copy-on-write convention follows the list
- Since the transfer process is implemented by copying the `shared_ptr`, the framework only needs to pass `lod_start_pos` once.
2. An Op is transparent enough not to sense `lod_start_pos`
3. A producer Op that needs to modify `lod_start_pos` can update its `lod_start_pos` data in `Run`
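A minimal sketch of this copy-on-write convention (standalone, with illustrative names rather than framework code):
```c++
#include <memory>
#include <vector>

using LoD = std::vector<std::vector<int>>;

// A consumer shares ownership and never mutates through the pointer.
std::shared_ptr<const LoD> Consume(const std::shared_ptr<const LoD> &in) {
  return in;  // just copy the shared_ptr
}

// A producer clones the contents into independent memory, mutates the
// clone, and exposes a new shared_ptr to subsequent consumers.
std::shared_ptr<const LoD> Produce(const std::shared_ptr<const LoD> &in) {
  auto out = std::make_shared<LoD>(*in);  // deep copy of the offsets
  // ... update *out to reflect the new sequence layout ...
  return out;
}
```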
## Sort by length
After sorting by length, the batch size at each forward time step will naturally decrease, and you can directly plug it into Net to do the batch calculation.
For example, the original input:
```
origin:
xxxx
xx
xxx
-> sorted:
xxxx
xxx
xx
```
After `SegmentInputs`, there will be 4 time steps; the input of each time step is as follows (arranged vertically):
```
0 1 2 3
x x x x
x x x
x x
```
In order to track the changes before and after sorting, we use
```c++
struct SortedSeqItem {
void *start{nullptr};
void *end{nullptr};
};
std::vector<SortedSeqItem> sorted_seqs;
```
to track the position of each sequence after sorting, and we add a new interface:
```c++
std::vector<SortedSeqItem> SortBySeqLen(const LODTensor& tensor);
```
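A hedged sketch of the sorting itself, working directly on level-0 offsets (the real `SortBySeqLen` would additionally record the `start`/`end` pointers of each sequence):
```c++
#include <algorithm>
#include <vector>

// Given level-0 offsets such as {0, 4, 6, 9} (sequence lengths 4, 2, 3),
// return the sequence indices sorted by length descending: {0, 2, 1}.
std::vector<int> SortIndicesBySeqLen(const std::vector<int> &offsets) {
  std::vector<int> idx(offsets.size() - 1);
  for (size_t i = 0; i < idx.size(); ++i) idx[i] = static_cast<int>(i);
  std::sort(idx.begin(), idx.end(), [&](int a, int b) {
    return offsets[a + 1] - offsets[a] > offsets[b + 1] - offsets[b];
  });
  return idx;
}
```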
Because the input sequences are reordered, the following existing interfaces need to be modified:
- InitMemories: memory needs to be rearranged according to `sorted_seqs`
- SegmentInputs
- ConcatOutputs
In addition, because `sorted_seqs` needs to be reused by `RecurrentGradientOp`, it will become a new output of `RecurrentOp`
and will be passed in as an input to `RecurrentGradientOp`.
## InitMemories
Due to the reordering, the elements of the `boot_memories` batch also need to be rearranged accordingly.
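As an illustration (plain vectors stand in for the real tensor rows), the rearrangement amounts to:
```c++
#include <vector>

// Apply the sorted sequence order (e.g. {0, 2, 1} from the example above)
// to the rows of the boot-memory batch.
std::vector<std::vector<float>> ReorderRows(
    const std::vector<std::vector<float>> &boot_memory,
    const std::vector<int> &sorted_idx) {
  std::vector<std::vector<float>> out;
  out.reserve(sorted_idx.size());
  for (int i : sorted_idx) out.push_back(boot_memory[i]);
  return out;
}
```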
## SegmentInputs
`SegmentInputs` relies on the information in `sorted_seqs` to cut the original sequences horizontally into the input of each step, in the sorted sequence order.
The transition is as follows:
```
origin:
xxxx
xx
xxx
  |
  |
 \ /
  v
0 1 2 3
x x x x
x x x
x x
```
## ConcatOutputs
`ConcatOutputs` needs to
- Restore the output of each time step back to the original input sequence order (to prevent the order in the Infer phase from being disturbed)
- Concat each sequence as a regular mini-batch representation
## References
1. [Level of details](https://en.wikipedia.org/wiki/Level_of_detail)


@ -0,0 +1,131 @@
# Background
[ONNX (Open Neural Network Exchange)](https://github.com/onnx/onnx) bridges different deep learning frameworks by providing an open-source graph format for models. The models trained in other frameworks can be converted into the ONNX format to execute inference by utilizing the built-in operators in ONNX - this is called a **frontend**. With the inverse conversion (called a **backend**), different frameworks can in principle share any models supported by ONNX. Now most mainstream frameworks have joined the ONNX community, e.g. Caffe2, PyTorch, and MXNet. And there is momentum driving more and more vendors to begin supporting ONNX or even choose ONNX as the only machine learning runtime in their devices.
Therefore, it is necessary to enable conversion between PaddlePaddle and ONNX. This design doc is aimed at implementing a convertor, mainly for converting between **Fluid** models and ONNX (it is very likely that we may support older v2 models in the future). A complete convertor should be bidirectional - with a frontend AND a backend - but considering the importance, we will start with the frontend, i.e. converting Fluid models to ONNX models.
# How it works
ONNX has a [working list of operators](https://github.com/onnx/onnx/blob/master/docs/Operators.md) which is versioned.
When prioritizing implementation of a frontend over a backend, the choice of which Fluid -> ONNX operators to cover comes down to the choice of models to be supported (see the `Supported models` section). Eventually, this will allow us to reach wide coverage of all operators.
Here are a few major considerations when it comes to converting models:
- **Op-level conversion**: How to map the inputs, attributes, and outputs of each Paddle operator to those of the ONNX operator. In several cases, these require transformations. For each direction (frontend vs. backend), a different conversion mapping is needed.
- **Parameters (weights) initialization**: Setting initial parameters on different nodes.
- **Tensor data type mapping** (Note: Some ONNX data types are not supported in Fluid)
- **Network representation adaption**: Fluid's `ProgramDesc` includes nested blocks. Since ONNX is free of nesting, the `ProgramDesc` ops need to be traversed so that only ops from the global scope in the root block are included. The variables used as inputs and outputs should also be in this scope.
- **Model validation**: There are two kinds of validations that are necessary:
1. We need to ensure that the inference outputs of the ops run inside a model are the same as those when running the converted ONNX ops through an alternative ONNX backend.
2. Checking to see if the generated nodes on the graph are validated by the internal ONNX checkers.
- **Versioning**: ONNX versions its operator list. In fact, it has versioning on 3 different levels: ops, graphs, and ONNX models. This requires that we be conscious about versioning the convertor and updating tests and op convertor logic for each release. It also implies that we release pre-trained ONNX models upon each version release.
One thing that makes this conversion more feasible in Fluid's case is the use of a static IR - the `ProgramDesc` - as opposed to the dynamic graphs created by frameworks like PyTorch.
# Project structure
<p align="center">
<img src="./images/project_structure.png"/>
</p>
The project contains four important parts:
* **fluid**: The directory that contains wrappers for fluid related APIs. Fluid has provided some low-level APIs to parse or generate the inference model. However, directly using these low-level APIs makes the code tediously long. This module wraps low-level APIs to provide simplified interfaces.
* **onnx**: This is a Python package provided by ONNX containing helpers for creating nodes, graphs, and eventually binary protobuf models with initializer parameters.
* **onnx_fluid**: Contains the two-way mapping (Fluid -> ONNX ops and ONNX -> Fluid ops). Called from `convert.py`, the program uses this mapping along with modifier functions to construct ONNX nodes with the help of ONNX's `make_node` helper (a sketch of this mapping follows the list). It also contains the mapping between datatypes and tensor deprecation / amplification logic.
* **convert.py**: The interface exposed to users. This will traverse the global program blocks/variables and construct the writable model.
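For a flavor of the op-level mapping, here is a hypothetical one-to-one conversion using ONNX's `make_node` helper (`fluid_op` stands for a parsed Fluid op description; its accessor names are illustrative, not the actual onnx_fluid code):
```python
from onnx import helper

def convert_relu(fluid_op):
    # Fluid's `relu` maps one-to-one onto ONNX's `Relu`: one input,
    # one output, and no attributes to translate.
    return helper.make_node(
        'Relu',
        inputs=fluid_op.input('X'),
        outputs=fluid_op.output('Out'))
```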
# Usage
The converter should be designed to be very easy to use. Bidirectional conversion between a Fluid inference model and an ONNX binary model will be supported. Model validation will also be provided to verify the correctness of the converted model.
* Convert Fluid inference model to ONNX binary model
```
python convert.py --fluid_model <fluid inference model> --onnx_model <ONNX model> validate True
```
* Validate the converted model
```
python validate.py --fluid_model <fluid inference model> --onnx_model <ONNX model>
```
The conversion and model validation will be completed consecutively, finally outputting a readable description of the model structure. For the reverse conversion, users only need to exchange the input and output.
# Challenges and mitigation
## Cycles
Cycles are unsupported in ONNX. In Paddle, the `while` op is the most prominent example of a cycle.
*Resolution*: We won't support models with `while`s which can't be substituted until ONNX adds support for such ops.
## Sequences
Sequence processing operators like `sequence_expand`, `sequence_reshape`, `sequence_concat`, and `sequence_pool` are likewise not supported by ONNX, because ONNX does not support non-padded datatypes like LoDTensor.
*Resolution*: Since the runtimes using our ONNX exported graphs won't be using LoDTensors in the first place, such sequence operators should be mapped to ONNX ops that perform the necessary transposes, with knowledge of the padding and shape of the Tensors.
## Ops that can't easily be mapped
There are ops that just aren't possible to map today:
**Control flow operators**
Paddle supports control flow ops like `If/Else` and `Switch` (if we ignore the CSP operations like `select` for now). ONNX has `If` support in the experimental phase.
*Resolution*: Map Paddle's `If/Else` to ONNX's `If`, but ignore other control flow operators until ONNX brings support for them.
**Non-existent in Fluid**
There are several ONNX operators that are not available in Fluid today, e.g. `InstanceNormalization`, `RandomUniform`, `Unsqueeze`, etc.
*Resolution*: For the initial phase, we can choose to not support ops that our models don't care for and are subsequently not available in Fluid. However, for ops that we think might be necessary for Fluid users also, we must implement them on our side and support the ONNX conversion to them. This list is TBD.
**Concurrency**
ONNX does not have any considerations for concurrency right now.
*Resolution*: There are two ways to approach this:
a. We choose to not support concurrent models.
b. We only support `go_op`s (basically threads) shallowly. This could mean that we enqueue `go_op` ops prior to gradient calculations OR even prior to the entire graph, and that's it - since `go_op`s do not have support for backprop anyway. One of the core target use cases of `go_op`, batch reading, can be handled through this approach.
**Overloaded in Fluid**
There are ops in ONNX whose job can't be accomplished by a single corresponding Paddle operator, but only by a collection of operators.
*Resolution*: Chain multiple Paddle operators.
## Lack of LoDTensors
As stated above, ONNX only supports simple Tensor values.
*Resolution*: Deprecate to plain old numpy-able tensors.
## Reconstruction from deprecated ONNX ops
For higher-level Fluid ops, such as a few offered by the `nn` layer that do not have direct corresponding mappings but can be converted to ONNX by chaining a series of ops without cycles, it would be useful to map them back to the higher-level Fluid ops once converted back from the deprecated ONNX graphs.
*Resolution*: Track the graphs that result from this deprecation when converting Paddle -> ONNX. When converting back from ONNX, if we encounter identical subgraphs by doing a forward search, we can replace them with the matching higher-level Fluid op.
# Supported models
As mentioned above, potential risks may come from the conversion of sequence-related models, including LoDTensor and the ```if/else``` and ```while``` operators. So a good choice is to focus on some important feedforward models first, then implement some simple recurrent models.
- Feedforward models: common models selected in PaddleBook, e.g. VGG, ResNet and some other models proposed by application teams.
- Recurrent models: language model, stacked LSTMs etc.

@ -56,11 +56,11 @@ DataFeeder
Reader
======
.. automodule:: paddle.v2.reader
.. automodule:: paddle.reader
:members:
:noindex:
.. automodule:: paddle.v2.reader.creator
.. automodule:: paddle.reader.creator
:members:
:noindex:

@ -1,82 +1,82 @@
Dataset
=======
.. automodule:: paddle.v2.dataset
.. automodule:: paddle.dataset
:members:
:noindex:
mnist
+++++
.. automodule:: paddle.v2.dataset.mnist
.. automodule:: paddle.dataset.mnist
:members:
:noindex:
cifar
+++++
.. automodule:: paddle.v2.dataset.cifar
.. automodule:: paddle.dataset.cifar
:members:
:noindex:
conll05
+++++++
.. automodule:: paddle.v2.dataset.conll05
.. automodule:: paddle.dataset.conll05
:members: get_dict,get_embedding,test
:noindex:
imdb
++++
.. automodule:: paddle.v2.dataset.imdb
.. automodule:: paddle.dataset.imdb
:members:
:noindex:
imikolov
++++++++
.. automodule:: paddle.v2.dataset.imikolov
.. automodule:: paddle.dataset.imikolov
:members:
:noindex:
movielens
+++++++++
.. automodule:: paddle.v2.dataset.movielens
.. automodule:: paddle.dataset.movielens
:members:
:noindex:
.. autoclass:: paddle.v2.dataset.movielens.MovieInfo
.. autoclass:: paddle.dataset.movielens.MovieInfo
:noindex:
.. autoclass:: paddle.v2.dataset.movielens.UserInfo
.. autoclass:: paddle.dataset.movielens.UserInfo
:noindex:
sentiment
+++++++++
.. automodule:: paddle.v2.dataset.sentiment
.. automodule:: paddle.dataset.sentiment
:members:
:noindex:
uci_housing
+++++++++++
.. automodule:: paddle.v2.dataset.uci_housing
.. automodule:: paddle.dataset.uci_housing
:members:
:noindex:
wmt14
+++++
.. automodule:: paddle.v2.dataset.wmt14
.. automodule:: paddle.dataset.wmt14
:members:
:noindex:
wmt16
+++++
.. automodule:: paddle.v2.dataset.wmt16
.. automodule:: paddle.dataset.wmt16
:members:
:noindex:

@ -6,6 +6,7 @@ PaddlePaddle adheres to the following three sections of code and document specif
PaddlePaddle uses git for version control and Docker for the build and test environment. The code includes CUDA, C++, Python, Shell, and other programming languages, which comply with the Google C++ Style Guide and PEP-8; the code base includes style checking by an automatic inspection tool. Code comments need to follow the Doxygen specification. Code that does not meet the style requirements will fail to compile. We provide the following guidelines for the use of Git, build tests, and code development.
.. toctree::
:maxdepth: 1

@ -1,19 +1,35 @@
Use different clusters
======================
PaddlePaddle supports running jobs on several platforms including:
- `Kubernetes <http://kubernetes.io>`_ open-source system for automating deployment, scaling, and management of containerized applications from Google.
- `OpenMPI <https://www.open-mpi.org>`_ Mature high performance parallel computing framework.
- `Fabric <http://www.fabfile.org>`_ A cluster management tool. Write scripts to submit jobs or manage the cluster.
Users' cluster environments differ. To facilitate deployment, we provide a variety of cluster deployment methods for submitting cluster training jobs, introduced as follows:
We'll introduce cluster job management on these platforms. The examples can be found under `cluster_train_v2 <https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/scripts/cluster_train_v2>`_ .
`Kubernetes <http://kubernetes.io>`_ is Google's open-source container cluster scheduling framework, supporting a complete cluster solution for large-scale production environments. The following guidelines show PaddlePaddle's support for Kubernetes:
These cluster platforms provide APIs or environment variables for training processes when a job is dispatched to different nodes, such as the node ID, IP, or total number of nodes.
.. toctree::
:maxdepth: 1
k8s_cn.md
k8s_distributed_cn.md
`OpenMPI <https://www.open-mpi.org>`_ is a mature high-performance parallel computing framework, widely used in the field of HPC. The following guide describes how to use OpenMPI to run PaddlePaddle's cluster training jobs:
.. toctree::
:maxdepth: 1
fabric_en.md
openmpi_en.md
k8s_en.md
k8s_aws_en.md
openmpi_cn.md
`Fabric <http://www.fabfile.org>`_ is a convenient tool for program deployment and management. We provide a way to deploy and manage with Fabric. If you want to know more about it, please read the following guidelines:
.. toctree::
:maxdepth: 1
fabric_cn.md
We also support the deployment of PaddlePaddle on AWS. Learn more about:
.. toctree::
:maxdepth: 1
k8s_aws_cn.md
The examples can be found under `cluster_train_v2 <https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/scripts/cluster_train_v2>`_ .

@ -0,0 +1,74 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <chrono>              // NOLINT
#include <condition_variable>  // NOLINT
#include <deque>
#include <mutex>  // NOLINT
#include <utility>
namespace paddle {
namespace framework {
template <typename T>
class BlockingQueue {
public:
void Push(const T &item) {
{
std::lock_guard<std::mutex> g(mutex_);
q_.emplace_back(item);
}
cv_.notify_one();
}
template <typename U>
void Extend(const U &items) {
{
std::lock_guard<std::mutex> g(mutex_);
for (auto &item : items) {
q_.emplace_back(item);
}
}
cv_.notify_all();
}
std::deque<T> PopAll(size_t ms, bool *timeout) {
auto time =
std::chrono::system_clock::now() + std::chrono::milliseconds(ms);
std::unique_lock<std::mutex> lock(mutex_);
*timeout = !cv_.wait_until(lock, time, [this] { return !q_.empty(); });
std::deque<T> ret;
if (!*timeout) {
std::swap(ret, q_);
}
return ret;
}
T Pop() {
std::unique_lock<std::mutex> lock(mutex_);
cv_.wait(lock, [=] { return !q_.empty(); });
T rc(std::move(q_.front()));
q_.pop_front();
return rc;
}
private:
std::mutex mutex_;
std::condition_variable cv_;
std::deque<T> q_;
};
} // namespace framework
} // namespace paddle
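A small usage sketch (not part of the header), assuming it lands at `paddle/fluid/framework/blocking_queue.h`: one producer thread pushes items while the main thread drains them with the timed `PopAll`.
```c++
#include <iostream>
#include <thread>  // NOLINT
#include <vector>

#include "paddle/fluid/framework/blocking_queue.h"

int main() {
  paddle::framework::BlockingQueue<int> q;
  std::thread producer([&q] {
    q.Push(1);
    q.Extend(std::vector<int>{2, 3, 4});
  });
  bool timeout = false;
  // Wait up to 100 ms; drains whatever is queued when the queue first
  // becomes non-empty (possibly fewer than all four items).
  std::deque<int> items = q.PopAll(100, &timeout);
  producer.join();
  std::cout << "popped " << items.size() << " items, timeout=" << timeout
            << std::endl;
  return 0;
}
```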

@ -63,16 +63,16 @@ void DataTransform(const OpKernelType& expected_kernel_type,
}
void CopyVariableWithTensor(const Variable& in_var, const Tensor& tensor,
Variable& out_var) {
Variable* out_var) {
if (in_var.IsType<LoDTensor>()) {
auto& in_lod_tensor = in_var.Get<LoDTensor>();
auto* tran_lod_tensor = out_var.GetMutable<LoDTensor>();
auto* tran_lod_tensor = out_var->GetMutable<LoDTensor>();
tran_lod_tensor->set_lod(in_lod_tensor.lod());
tran_lod_tensor->set_layout(in_lod_tensor.layout());
tran_lod_tensor->ShareDataWith(tensor);
} else if (in_var.IsType<SelectedRows>()) {
auto& in_selected_rows = in_var.Get<SelectedRows>();
auto* trans_selected_rows = out_var.GetMutable<SelectedRows>();
auto* trans_selected_rows = out_var->GetMutable<SelectedRows>();
trans_selected_rows->set_height(in_selected_rows.height());
trans_selected_rows->set_rows(in_selected_rows.rows());
trans_selected_rows->mutable_value()->ShareDataWith(tensor);

@ -35,7 +35,7 @@ void DataTransform(const OpKernelType& expected_kernel_type,
const Tensor& input_tensor, Tensor* out);
void CopyVariableWithTensor(const Variable& in_var, const Tensor& tensor,
Variable& out_var);
Variable* out_var);
} // namespace framework
} // namespace paddle

@ -139,7 +139,7 @@ struct TestBroadcastOpHandle {
PADDLE_ENFORCE_EQ(out_tensor.lod(), lod, "lod is not equal.");
f::Tensor result_tensor;
f::TensorCopy(out_tensor, cpu_place, *(ctxs_[j]), &result_tensor);
f::TensorCopySync(out_tensor, cpu_place, &result_tensor);
float* ct = result_tensor.mutable_data<float>(cpu_place);
for (int64_t i = 0; i < f::product(kDims); ++i) {
@ -185,7 +185,7 @@ struct TestBroadcastOpHandle {
}
f::Tensor result_tensor;
f::TensorCopy(rt, cpu_place, *(ctxs_[j]), &result_tensor);
f::TensorCopySync(rt, cpu_place, &result_tensor);
float* ct = result_tensor.data<float>();
for (int64_t i = 0; i < f::product(kDims); ++i) {
