From bd66d056c8404915d21b5214e27ea1b2ffdd2892 Mon Sep 17 00:00:00 2001 From: typhoonzero Date: Fri, 8 Sep 2017 15:09:45 +0800 Subject: [PATCH 001/342] fix row buffer mem free --- paddle/math/RowBuffer.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/paddle/math/RowBuffer.h b/paddle/math/RowBuffer.h index dbb829c4e2..9ef5b89680 100644 --- a/paddle/math/RowBuffer.h +++ b/paddle/math/RowBuffer.h @@ -99,7 +99,11 @@ public: /** * @brief clear local buffer. It only affect auto-growth buffer. */ - inline void clear() { rowStore_.clear(); } + inline void clear() { + // swap an empty vector to it to free the memory. + std::vector> empty; + rowStore_.swap(empty); + } /** * @brief get current number of rows. From 345499b0feeff2a09394529fe74f5a705bbdc5e2 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Tue, 12 Sep 2017 19:17:48 -0700 Subject: [PATCH 002/342] Add Skeleton of Python API design --- doc/design/python_api.md | 104 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 104 insertions(+) create mode 100644 doc/design/python_api.md diff --git a/doc/design/python_api.md b/doc/design/python_api.md new file mode 100644 index 0000000000..395341d37e --- /dev/null +++ b/doc/design/python_api.md @@ -0,0 +1,104 @@ +# Design Doc: Python API + + + +The top level user API in Python should be as same as API in `paddle.v2` after refactoring Paddle from a layer based framework to an operator based framework. There are many new classes in CPP in [compile time] for describing neural networks, such as `Variable`, `Operator`, `Block`. The issue about current design is how to give a proper way to wrap the C++ API to `paddle.v2` API and writing layers in Python. + + + +This implementation of Python API includes two steps. + +1. Implement the Python API using current C++ runtime concepts. +2. Replace the implementation by using compile-time concepts when they are completed. + +... + + +## Python Class about compile-time concepts + + +| Python Class | Compile-time protobuf | +| --- | --- | +| Block | BlockDesc | +| Operator | OpDesc | +| Variable | VarDesc | + + +### Block + + + +```python +class Block(objects): + def __init__(self, parent=None): + self.vars_ = map() + self.ops_ = vector() + if parent is None: + self.global_vars = map() + self.parent=None + else: + self.parent = parent + self.global_vars = None + + def create_global_vars(...): + if self.parent is not None: + return self.parent.create_global_vars(...) + else: + return self.global_vars.new() +``` + + +### Operator + + + +```python +class Operator(object): + def __init__(self, type, inputs, outputs, attrs): + # create OpDesc in Python + op_desc = ... + self.cpp_op_desc_ptr = cpp.to_cpp_op_desc(op_desc) + cpp.infer_shapes(self.cpp_op_desc_ptr, inputs, outputs) + outputs.op = self + + def type(self): + return self.cpp_op_desc_ptr.type() +``` + +### Variable + + + +```python +class Variable(object): + def __init__(self, shape, dtype="float32", name=None, block=None): + if name is None: + if prefix is not None: + name = unique_name_generator(prefix) + else: + name = unique_name_generator("unknown") + self.name = name + self.block = block + self.cpp_var_desc_ptr = ... + self.op = None + + def shape(self): + cpp_shape = self.cpp_var_desc_ptr.shape() + return [None if elem < 0 else elem for elem in cpp_shape] +``` + +### Parameter + + + + + +```python +class Parameter(Variable): + def __init__(self, trainable, initialize_attrs, optimize_attrs): + pass +``` + +## Layer Functions + + From 4b623c1e1178a2ee7dc27243cee2fbae6ea9eea4 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Tue, 12 Sep 2017 20:08:42 -0700 Subject: [PATCH 003/342] Update design --- doc/design/python_api.md | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/doc/design/python_api.md b/doc/design/python_api.md index 395341d37e..e314d1049c 100644 --- a/doc/design/python_api.md +++ b/doc/design/python_api.md @@ -1,22 +1,20 @@ # Design Doc: Python API - - The top level user API in Python should be as same as API in `paddle.v2` after refactoring Paddle from a layer based framework to an operator based framework. There are many new classes in CPP in [compile time] for describing neural networks, such as `Variable`, `Operator`, `Block`. The issue about current design is how to give a proper way to wrap the C++ API to `paddle.v2` API and writing layers in Python. - - This implementation of Python API includes two steps. 1. Implement the Python API using current C++ runtime concepts. 2. Replace the implementation by using compile-time concepts when they are completed. -... +The implementation of the first step is a temporary implementation. We should design our Python API concepts based on `compile-time` concepts. We just use `runtime` classes to implement it for now. + + +## Python Class and compile-time protobuf +As we design our Python API concepts based on `compile-time`, we try to map our Python classes to every compile-time result, i.e., the protobuf messages. They are: -## Python Class about compile-time concepts - | Python Class | Compile-time protobuf | | --- | --- | | Block | BlockDesc | From ea22f838290f2d44887b4e9da2b8b5f73669fd63 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Tue, 12 Sep 2017 20:41:21 -0700 Subject: [PATCH 004/342] Update design --- doc/design/python_api.md | 42 +++++++++++++++++++++++++++------------- 1 file changed, 29 insertions(+), 13 deletions(-) diff --git a/doc/design/python_api.md b/doc/design/python_api.md index e314d1049c..02bfcde04e 100644 --- a/doc/design/python_api.md +++ b/doc/design/python_api.md @@ -12,7 +12,7 @@ The implementation of the first step is a temporary implementation. We should de ## Python Class and compile-time protobuf -As we design our Python API concepts based on `compile-time`, we try to map our Python classes to every compile-time result, i.e., the protobuf messages. They are: +Since we design our Python API concepts based on `compile-time`, we try to map our Python classes to every compile-time result, i.e., the protobuf messages. They are: | Python Class | Compile-time protobuf | @@ -24,27 +24,43 @@ As we design our Python API concepts based on `compile-time`, we try to map our ### Block - +Block is just like programming languages `{}`, which contains many operators and variables. There are two data fields in `Block`. 1) An associate map, whose key is variable name and value is variable itself; 2) A list of operators. + +The block is hierarchical because PaddlePaddle supports RNN and IfElse. For example, RNN is like `for-loop` in programming languages. There is new `block` inside a `for-loop`. To represent hierarchies, `Block` stores the `parent Block` inside. If `parent=None`, the `Block` is the outermost block, i.e., the `global` block. + ```python class Block(objects): def __init__(self, parent=None): - self.vars_ = map() - self.ops_ = vector() - if parent is None: - self.global_vars = map() - self.parent=None - else: - self.parent = parent - self.global_vars = None + self.vars = map() + self.ops = vector() + self.parent = parent + + def create_var(self, ...): + # create variable in `self.vars` + return Variable(...) - def create_global_vars(...): + + def create_global_var(self, ...): if self.parent is not None: - return self.parent.create_global_vars(...) + return self.parent.create_global_var(...) else: - return self.global_vars.new() + return self.create_var(...) + + def create_parameter(self, ...): + return self.create_global_var(...) + + def append_operator(self, ...): + self.ops.append(...) + + def prepend_operator(self, ...): + self.ops.prepend(...) ``` +Users are able to create a global variable inside any block since they many create parameters inside a RNN or IfElseOp. All parameters should be stored in the global block, not the step block in RNN. + +Users can create local variables for outputs of operators. Users can also append and prepend an operator in current block. Prepending `random initialize` operator or `load` operator is very useful to initialize parameters before training. + ### Operator From c33a9bddfdd86a273a342c1be857423d0e331476 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Tue, 12 Sep 2017 20:52:22 -0700 Subject: [PATCH 005/342] Update doc --- doc/design/python_api.md | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/doc/design/python_api.md b/doc/design/python_api.md index 02bfcde04e..0a036d1613 100644 --- a/doc/design/python_api.md +++ b/doc/design/python_api.md @@ -1,6 +1,6 @@ # Design Doc: Python API -The top level user API in Python should be as same as API in `paddle.v2` after refactoring Paddle from a layer based framework to an operator based framework. There are many new classes in CPP in [compile time] for describing neural networks, such as `Variable`, `Operator`, `Block`. The issue about current design is how to give a proper way to wrap the C++ API to `paddle.v2` API and writing layers in Python. +The top level user API in Python should be as same as API in `paddle.v2` after refactoring Paddle from a layer based framework to an operator based framework. There are many new classes in C++ in [compile time] for describing neural networks, such as `Variable`, `Operator`, `Block`. The issue about current design is how to give a proper way to wrap the C++ API to `paddle.v2` API and writing layers in Python. This implementation of Python API includes two steps. @@ -64,21 +64,22 @@ Users can create local variables for outputs of operators. Users can also append ### Operator - +Operator class will take inputs, outputs and attributes of the operator into `protobuf` OpDesc and create a C++ `OpDesc` instance. The `infer_shape` perform on C++ objects. ```python class Operator(object): def __init__(self, type, inputs, outputs, attrs): # create OpDesc in Python op_desc = ... - self.cpp_op_desc_ptr = cpp.to_cpp_op_desc(op_desc) - cpp.infer_shapes(self.cpp_op_desc_ptr, inputs, outputs) - outputs.op = self + self.cpp_op_desc_ptr = core.OpDesc(op_desc) + cpp.infer_shape(self.cpp_op_desc_ptr, inputs, outputs) def type(self): return self.cpp_op_desc_ptr.type() ``` +After creating a C++ `OpDesc`, `Operator` in Python can only reads the attribute from C++ side. + ### Variable From 5f2bb1fdcb8435c39d8e74e10dc54083d7cb8f55 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Tue, 12 Sep 2017 20:55:26 -0700 Subject: [PATCH 006/342] Tab to space --- doc/design/python_api.md | 67 ++++++++++++++++++++-------------------- 1 file changed, 33 insertions(+), 34 deletions(-) diff --git a/doc/design/python_api.md b/doc/design/python_api.md index 0a036d1613..08255db6e9 100644 --- a/doc/design/python_api.md +++ b/doc/design/python_api.md @@ -31,30 +31,30 @@ The block is hierarchical because PaddlePaddle supports RNN and IfElse. For exam ```python class Block(objects): - def __init__(self, parent=None): - self.vars = map() - self.ops = vector() - self.parent = parent - - def create_var(self, ...): - # create variable in `self.vars` - return Variable(...) - - - def create_global_var(self, ...): - if self.parent is not None: - return self.parent.create_global_var(...) - else: - return self.create_var(...) - - def create_parameter(self, ...): - return self.create_global_var(...) - - def append_operator(self, ...): - self.ops.append(...) - - def prepend_operator(self, ...): - self.ops.prepend(...) + def __init__(self, parent=None): + self.vars = map() + self.ops = vector() + self.parent = parent + + def create_var(self, ...): + # create variable in `self.vars` + return Variable(...) + + + def create_global_var(self, ...): + if self.parent is not None: + return self.parent.create_global_var(...) + else: + return self.create_var(...) + + def create_parameter(self, ...): + return self.create_global_var(...) + + def append_operator(self, ...): + self.ops.append(...) + + def prepend_operator(self, ...): + self.ops.prepend(...) ``` Users are able to create a global variable inside any block since they many create parameters inside a RNN or IfElseOp. All parameters should be stored in the global block, not the step block in RNN. @@ -68,14 +68,14 @@ Operator class will take inputs, outputs and attributes of the operator into `pr ```python class Operator(object): - def __init__(self, type, inputs, outputs, attrs): - # create OpDesc in Python - op_desc = ... - self.cpp_op_desc_ptr = core.OpDesc(op_desc) - cpp.infer_shape(self.cpp_op_desc_ptr, inputs, outputs) - - def type(self): - return self.cpp_op_desc_ptr.type() + def __init__(self, type, inputs, outputs, attrs): + # create OpDesc in Python + op_desc = ... + self.cpp_op_desc_ptr = core.OpDesc(op_desc) + cpp.infer_shape(self.cpp_op_desc_ptr, inputs, outputs) + + def type(self): + return self.cpp_op_desc_ptr.type() ``` After creating a C++ `OpDesc`, `Operator` in Python can only reads the attribute from C++ side. @@ -108,8 +108,7 @@ class Variable(object): -```python -class Parameter(Variable): +```pythonVclass Parameter(Variable): def __init__(self, trainable, initialize_attrs, optimize_attrs): pass ``` From 889f5a41cacf155e89122df4f1722700ceb26464 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Tue, 12 Sep 2017 20:56:44 -0700 Subject: [PATCH 007/342] Update --- doc/design/python_api.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/design/python_api.md b/doc/design/python_api.md index 08255db6e9..05875eeb21 100644 --- a/doc/design/python_api.md +++ b/doc/design/python_api.md @@ -108,7 +108,8 @@ class Variable(object): -```pythonVclass Parameter(Variable): +```python +class Parameter(Variable): def __init__(self, trainable, initialize_attrs, optimize_attrs): pass ``` From 6bf1283cbebba1b795896c6dea4eb7f5a407e159 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Wed, 13 Sep 2017 13:48:39 -0700 Subject: [PATCH 008/342] Add doc for `Variable` --- doc/design/python_api.md | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/doc/design/python_api.md b/doc/design/python_api.md index 05875eeb21..e515f8594d 100644 --- a/doc/design/python_api.md +++ b/doc/design/python_api.md @@ -82,18 +82,16 @@ After creating a C++ `OpDesc`, `Operator` in Python can only reads the attribute ### Variable - +Operators' intputs, outputs and parameters are all variables. In our design, a variable has four key attributes: its name(`name`), the block it belongs to(`block`), a pointer pointed to its C++ Protobuf object(`cpp_var_desc_ptr`), and the operator it is created by(`op`). All of these attributes are initialized in constructor, except the `op`. The `op` will keep being `None` till the variable is taken as an operator's output. ```python class Variable(object): def __init__(self, shape, dtype="float32", name=None, block=None): if name is None: - if prefix is not None: - name = unique_name_generator(prefix) - else: - name = unique_name_generator("unknown") + name = unique_name_generator() self.name = name self.block = block + # build C++ Protobuf object self.cpp_var_desc_ptr = ... self.op = None @@ -102,6 +100,10 @@ class Variable(object): return [None if elem < 0 else elem for elem in cpp_shape] ``` +Protobuf object should be created in C++ not Python because it is needed by infershape, and infershape is implementated by C++ code. The C++ Protobuf object is accessible for Python through the `cpp_var_desc_ptr` pointer. + +The user is allowed to build an variable without specifying its name. If so, it is going to be assigned with an automatically generated unique name. + ### Parameter From 216b87abd894d09177eca9b4c72c953ba0e5c451 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Wed, 13 Sep 2017 14:41:14 -0700 Subject: [PATCH 009/342] Add doc for `Parameter` --- doc/design/python_api.md | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/doc/design/python_api.md b/doc/design/python_api.md index e515f8594d..4c012ddd19 100644 --- a/doc/design/python_api.md +++ b/doc/design/python_api.md @@ -82,7 +82,7 @@ After creating a C++ `OpDesc`, `Operator` in Python can only reads the attribute ### Variable -Operators' intputs, outputs and parameters are all variables. In our design, a variable has four key attributes: its name(`name`), the block it belongs to(`block`), a pointer pointed to its C++ Protobuf object(`cpp_var_desc_ptr`), and the operator it is created by(`op`). All of these attributes are initialized in constructor, except the `op`. The `op` will keep being `None` till the variable is taken as an operator's output. +Operators' inputs, outputs, and parameters are all variables. In our design, a variable has four key attributes: its name(`name`), the block it belongs to(`block`), a pointer pointed to its C++ Protobuf object(`cpp_var_desc_ptr`), and the operator it is created by(`op`). All of these attributes are initialized in the constructor, except the `op`. The `op` will keep being `None` till the variable is taken as an operator's output. ```python class Variable(object): @@ -100,15 +100,13 @@ class Variable(object): return [None if elem < 0 else elem for elem in cpp_shape] ``` -Protobuf object should be created in C++ not Python because it is needed by infershape, and infershape is implementated by C++ code. The C++ Protobuf object is accessible for Python through the `cpp_var_desc_ptr` pointer. +The Protobuf object should be created in C++ not Python because it is needed by infershape, and infershape is implemented by C++ code. The C++ Protobuf object is accessible for Python through the `cpp_var_desc_ptr`, just like how `shape()` function does. -The user is allowed to build an variable without specifying its name. If so, it is going to be assigned with an automatically generated unique name. +The user is allowed to build a variable without specifying its name. If so, it is going to be assigned with an automatically generated unique name. ### Parameter - - - +The parameter is a kind of special variable. They need to be initialized at the very beginning and updated after each batch training. So if a variable is a parameter, our compiler will add an initializer op and an optimizer op for it during the building process of computation graph. Apart from these, there is no more difference between variable and parameter. In other words, 'parameter' is only a label attached to variables, to tell the compiler these ones require additional processing. ```python class Parameter(Variable): @@ -116,6 +114,9 @@ class Parameter(Variable): pass ``` +The class `Parameter` is derived from class `Variable`. In addition to variables have, parameters are able to hold their initializing and updating information. A parameter's `self.op` will always be `None` because it can never be an operator's output. + + ## Layer Functions From 709a3481482b5b6f40e8df14949bfeb3c311738d Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Wed, 13 Sep 2017 16:07:20 -0700 Subject: [PATCH 010/342] Add doc for `Layer function` --- doc/design/python_api.md | 36 +++++++++++++++++++++++++++++++++++- 1 file changed, 35 insertions(+), 1 deletion(-) diff --git a/doc/design/python_api.md b/doc/design/python_api.md index 4c012ddd19..2ca6dffae8 100644 --- a/doc/design/python_api.md +++ b/doc/design/python_api.md @@ -119,4 +119,38 @@ The class `Parameter` is derived from class `Variable`. In addition to variables ## Layer Functions - +A layer is a Python function. When it is invoked, it creates a serise of operators and variables then inserts them into the block. It is something like the macro in C++. It is called 'Layer' because the combination of added operators acts just like what a neural network layer does. + +Here are examples about how to write a date layer and FC layer: + +### Data Layer + +```python +def data_layer(name, type, block=None): + if block is None: + block = g_block + # type = dense_vector(size=10) / integer_value(range=10) + return block.create_global_var( + name=name, + shape=[None] + type.dims(), + dtype=type.dtype) + +``` + +Before building new variables, we need to specify which block to use. If we don't, the default one `g_block` will be used. In the above `data_layer` code, a variable is created and be inserted into the root block to make it global. This varibale is going to be used as input data of the whole network. + +### FC Layer + +```python +def fc_layer(input, size, block=None, ...): + if block is None: + block = g_block + w = block.create_parameter(...) + b = block.create_parameter(...) + out = stack.create_var() + op = block.append_operator(Operator("FC", X=input, W=w, b=b, Out=out)) + out.op = op + return out +``` + +In the `fc_layer` code, we create two parameters(`w` and `b`), one variable(`out`) and one operator(`FC operator`), then insert all of them into specify block. From 6b222d550bbeeac6d1278ee6b7a4145f7ba8d6d3 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Wed, 13 Sep 2017 16:10:24 -0700 Subject: [PATCH 011/342] Fix typo --- doc/design/python_api.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/design/python_api.md b/doc/design/python_api.md index 2ca6dffae8..f0a300bed0 100644 --- a/doc/design/python_api.md +++ b/doc/design/python_api.md @@ -119,9 +119,9 @@ The class `Parameter` is derived from class `Variable`. In addition to variables ## Layer Functions -A layer is a Python function. When it is invoked, it creates a serise of operators and variables then inserts them into the block. It is something like the macro in C++. It is called 'Layer' because the combination of added operators acts just like what a neural network layer does. +A layer is a Python function. When it is invoked, it creates a series of operators and variables then inserts them into the block. It is something like the macro in C++. It is called 'Layer' because the combination of added operators acts just like what a neural network layer does. -Here are examples about how to write a date layer and FC layer: +Here are examples of how to write a data layer and FC layer: ### Data Layer @@ -137,7 +137,7 @@ def data_layer(name, type, block=None): ``` -Before building new variables, we need to specify which block to use. If we don't, the default one `g_block` will be used. In the above `data_layer` code, a variable is created and be inserted into the root block to make it global. This varibale is going to be used as input data of the whole network. +Before building new variables, we need to specify which block to use. If we don't, the default one `g_block` will be used. In the above `data_layer` code, a variable is created and be inserted into the root block to make it global. This variable is going to be used as input data of the whole network. ### FC Layer @@ -153,4 +153,4 @@ def fc_layer(input, size, block=None, ...): return out ``` -In the `fc_layer` code, we create two parameters(`w` and `b`), one variable(`out`) and one operator(`FC operator`), then insert all of them into specify block. +In the `fc_layer` code, we create two parameters(`w` and `b`), one variable(`out`) and one operator(`FC operator`), then insert all of them into the specified block. From 9e7c0b5ef7a1c6854be4c6a4db130a2d717d9d89 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Wed, 20 Sep 2017 16:25:41 +0800 Subject: [PATCH 012/342] Add pooling2d(max, ave) and pooling3d(max, ave) functor --- paddle/operators/math/pooling.cc | 317 +++++++++++++++++++++++ paddle/operators/math/pooling.cu | 423 +++++++++++++++++++++++++++++++ paddle/operators/math/pooling.h | 99 ++++++++ 3 files changed, 839 insertions(+) create mode 100644 paddle/operators/math/pooling.cc create mode 100644 paddle/operators/math/pooling.cu create mode 100644 paddle/operators/math/pooling.h diff --git a/paddle/operators/math/pooling.cc b/paddle/operators/math/pooling.cc new file mode 100644 index 0000000000..6146e710fc --- /dev/null +++ b/paddle/operators/math/pooling.cc @@ -0,0 +1,317 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/math/pooling.h" + +namespace paddle { +namespace operators { +namespace math { + +template +class Pool2dForwardFunctor { + public: + void operator()(const framework::Tensor& input, framework::Tensor& output, + std::vector& ksize, std::vector& strides, + std::vector& paddings, PoolProcess pool_process, + platform::DeviceContext* context) { + const int batch_size = input.dims()[0]; + const int input_height = input.dims()[2]; + const int input_width = input.dims()[3]; + const int output_channels = output.dims()[1]; + const int output_height = output.dims()[2]; + const int output_width = output.dims()[3]; + const int ksize_height = ksize[0]; + const int ksize_width = ksize[1]; + const int stride_height = strides[0]; + const int stride_width = strides[1]; + const int padding_height = paddings[0]; + const int padding_width = paddings[1]; + + const int input_stride = input_height * input_width; + const int output_stride = output_height * output_width; + + const T* input_data = input.data(); + T* output_data = output.mutable_data(context->GetPlace()); + + for (int i = 0; i < batch_size; i++) { + for (int c = 0; c < output_channels; ++c) { + for (int ph = 0; ph < output_height; ++ph) { + int hstart = ph * stride_height - padding_height; + int hend = std::min(hstart + ksize_height, input_height); + hstart = std::max(hstart, 0); + for (int pw = 0; pw < output_width; ++pw) { + int wstart = pw * stride_width - padding_width; + int wend = std::min(wstart + ksize_width, input_width); + wstart = std::max(wstart, 0); + T ele = pool_process.initial(); + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + pool_process.process(ele, input_data[h * input_width + w]); + } + } + int pool_size = (hend - hstart) * (wend - wstart); + pool_process.finalize(ele, (static_cast(pool_size))); + output_data[ph * output_width + pw] = ele; + } + } + input_data += input_stride; + output_data += output_stride; + } + } + } +}; + +template +class Pool2dBackwardFunctor { + public: + void operator()(const framework::Tensor& input, framework::Tensor& input_grad, + const framework::Tensor& output, + const framework::Tensor& output_grad, std::vector& ksize, + std::vector& strides, std::vector& paddings, + PoolProcess pool_process, platform::DeviceContext* context) { + const int batch_size = input.dims()[0]; + const int input_height = input.dims()[2]; + const int input_width = input.dims()[3]; + const int output_channels = output.dims()[1]; + const int output_height = output.dims()[2]; + const int output_width = output.dims()[3]; + const int ksize_height = ksize[0]; + const int ksize_width = ksize[1]; + const int stride_height = strides[0]; + const int stride_width = strides[1]; + const int padding_height = paddings[0]; + const int padding_width = paddings[1]; + const int input_stride = input_height * input_width; + const int output_stride = output_height * output_width; + + const T* input_data = input.data(); + const T* output_data = output.data(); + const T* output_grad_data = output_grad.data(); + T* input_grad_data = input_grad.mutable_data(context->GetPlace()); + + for (int i = 0; i < batch_size; i++) { + for (int c = 0; c < output_channels; ++c) { + for (int ph = 0; ph < output_height; ++ph) { + int hstart = ph * stride_height - padding_height; + int hend = std::min(hstart + ksize_height, input_height); + hstart = std::max(hstart, 0); + for (int pw = 0; pw < output_width; ++pw) { + int wstart = pw * stride_width - padding_width; + int wend = std::min(wstart + ksize_width, input_width); + wstart = std::max(wstart, 0); + int pool_size = (hend - hstart) * (wend - wstart); + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + pool_process.gradProcess( + input_data[h * input_width + w], + output_data[ph * output_width + pw], + output_grad_data[ph * output_width + pw], + input_grad_data[h * input_width + w], + static_cast(pool_size)); + } + } + } + } + input_data += input_stride; + output_data += output_stride; + input_grad_data += input_stride; + output_grad_data += output_stride; + } + } + } +}; + +template class Pool2dForwardFunctor< + platform::CPUPlace, paddle::operators::math::pool::maxPool, float>; +template class Pool2dForwardFunctor< + platform::CPUPlace, paddle::operators::math::pool::avePool, float>; +template class Pool2dBackwardFunctor< + platform::CPUPlace, paddle::operators::math::pool::maxPool, float>; +template class Pool2dBackwardFunctor< + platform::CPUPlace, paddle::operators::math::pool::avePool, float>; +template class Pool2dForwardFunctor< + platform::CPUPlace, paddle::operators::math::pool::maxPool, double>; +template class Pool2dForwardFunctor< + platform::CPUPlace, paddle::operators::math::pool::avePool, double>; +template class Pool2dBackwardFunctor< + platform::CPUPlace, paddle::operators::math::pool::maxPool, double>; +template class Pool2dBackwardFunctor< + platform::CPUPlace, paddle::operators::math::pool::avePool, double>; + +template +class Pool3dForwardFunctor { + public: + void operator()(const framework::Tensor& input, framework::Tensor& output, + std::vector& ksize, std::vector& strides, + std::vector& paddings, PoolProcess pool_process, + platform::DeviceContext* context) { + const int batch_size = input.dims()[0]; + const int input_depth = input.dims()[2]; + const int input_height = input.dims()[3]; + const int input_width = input.dims()[4]; + const int output_channels = output.dims()[1]; + const int output_depth = output.dims()[2]; + const int output_height = output.dims()[3]; + const int output_width = output.dims()[4]; + const int ksize_depth = ksize[0]; + const int ksize_height = ksize[1]; + const int ksize_width = ksize[2]; + const int stride_depth = strides[0]; + const int stride_height = strides[1]; + const int stride_width = strides[2]; + const int padding_depth = paddings[0]; + const int padding_height = paddings[1]; + const int padding_width = paddings[2]; + + const int input_stride = input_depth * input_height * input_width; + const int output_stride = output_depth * output_height * output_width; + + const T* input_data = input.data(); + T* output_data = output.mutable_data(context->GetPlace()); + + for (int i = 0; i < batch_size; i++) { + for (int c = 0; c < output_channels; ++c) { + for (int pd = 0; pd < output_depth; ++pd) { + int dstart = pd * stride_depth - padding_depth; + int dend = std::min(dstart + ksize_depth, input_depth); + dstart = std::max(dstart, 0); + for (int ph = 0; ph < output_height; ++ph) { + int hstart = ph * stride_height - padding_height; + int hend = std::min(hstart + ksize_height, input_height); + hstart = std::max(hstart, 0); + for (int pw = 0; pw < output_width; ++pw) { + int wstart = pw * stride_width - padding_width; + int wend = std::min(wstart + ksize_width, input_width); + wstart = std::max(wstart, 0); + int output_idx = (pd * output_height + ph) * output_width + pw; + T ele = pool_process.initial(); + for (int d = dstart; d < dend; ++d) { + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + pool_process.process( + ele, + input_data[(d * input_height + h) * input_width + w]); + } + } + } + int pool_size = + (dend - dstart) * (hend - hstart) * (wend - wstart); + pool_process.finalize(ele, static_cast(pool_size)); + output_data[output_idx] = ele; + } + } + } + input_data += input_stride; + output_data += output_stride; + } + } + } +}; + +template +class Pool3dBackwardFunctor { + public: + void operator()(const framework::Tensor& input, framework::Tensor& input_grad, + const framework::Tensor& output, + const framework::Tensor& output_grad, std::vector& ksize, + std::vector& strides, std::vector& paddings, + PoolProcess pool_process, platform::DeviceContext* context) { + const int batch_size = input.dims()[0]; + const int input_depth = input.dims()[2]; + const int input_height = input.dims()[3]; + const int input_width = input.dims()[4]; + const int output_channels = output.dims()[1]; + const int output_depth = output.dims()[2]; + const int output_height = output.dims()[3]; + const int output_width = output.dims()[4]; + const int ksize_depth = ksize[0]; + const int ksize_height = ksize[1]; + const int ksize_width = ksize[2]; + const int stride_depth = strides[0]; + const int stride_height = strides[1]; + const int stride_width = strides[2]; + const int padding_depth = paddings[0]; + const int padding_height = paddings[1]; + const int padding_width = paddings[2]; + + const int input_stride = input_depth * input_height * input_width; + const int output_stride = output_depth * output_height * output_width; + + const T* input_data = input.data(); + const T* output_data = output.data(); + const T* output_grad_data = output_grad.data(); + T* input_grad_data = input_grad.mutable_data(context->GetPlace()); + + for (int i = 0; i < batch_size; i++) { + for (int c = 0; c < output_channels; ++c) { + for (int pd = 0; pd < output_depth; ++pd) { + int dstart = pd * stride_depth - padding_depth; + int dend = std::min(dstart + ksize_depth, input_depth); + dstart = std::max(dstart, 0); + for (int ph = 0; ph < output_height; ++ph) { + int hstart = ph * stride_height - padding_height; + int hend = std::min(hstart + ksize_height, input_height); + hstart = std::max(hstart, 0); + + for (int pw = 0; pw < output_width; ++pw) { + int wstart = pw * stride_width - padding_width; + int wend = std::min(wstart + ksize_width, input_width); + wstart = std::max(wstart, 0); + + int pool_size = + (dend - dstart) * (hend - hstart) * (wend - wstart); + for (int d = dstart; d < dend; ++d) { + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + int input_idx = (d * input_height + h) * input_width + w; + int output_idx = + (pd * output_height + ph) * output_width + pw; + pool_process.gradProcess( + input_data[input_idx], output_data[output_idx], + output_grad_data[output_idx], + input_grad_data[input_idx], static_cast(pool_size)); + } + } + } + } + } + input_data += input_stride; + output_data += output_stride; + input_grad_data += input_stride; + output_grad_data += output_stride; + } + } + } + } +}; + +template class Pool3dForwardFunctor< + platform::CPUPlace, paddle::operators::math::pool::maxPool, float>; +template class Pool3dForwardFunctor< + platform::CPUPlace, paddle::operators::math::pool::avePool, float>; +template class Pool3dBackwardFunctor< + platform::CPUPlace, paddle::operators::math::pool::maxPool, float>; +template class Pool3dBackwardFunctor< + platform::CPUPlace, paddle::operators::math::pool::avePool, float>; +template class Pool3dForwardFunctor< + platform::CPUPlace, paddle::operators::math::pool::maxPool, double>; +template class Pool3dForwardFunctor< + platform::CPUPlace, paddle::operators::math::pool::avePool, double>; +template class Pool3dBackwardFunctor< + platform::CPUPlace, paddle::operators::math::pool::maxPool, double>; +template class Pool3dBackwardFunctor< + platform::CPUPlace, paddle::operators::math::pool::avePool, double>; +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/math/pooling.cu b/paddle/operators/math/pooling.cu new file mode 100644 index 0000000000..87bfb43c47 --- /dev/null +++ b/paddle/operators/math/pooling.cu @@ -0,0 +1,423 @@ +/* Copyright (c) 2016 paddlepaddle Authors. All Rights +Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/math/pooling.h" + +namespace paddle { +namespace operators { +namespace math { + +template +__global__ void KernelPool2dForward( + const int nthreads, const T* input_data, T* output_data, const int channels, + const int input_height, const int input_width, const int output_height, + const int output_width, const int ksize_height, const int ksize_width, + const int stride_height, const int stride_width, const int padding_height, + const int padding_width, PoolProcess pool_process) { + int index = blockIdx.x * blockDim.x + threadIdx.x; + if (index < nthreads) { + int pw = index % output_width; + int ph = (index / output_width) % output_height; + int c = (index / output_width / output_height) % channels; + int batch_idx = index / output_width / output_height / channels; + + int hstart = ph * stride_height - padding_height; + int hend = min(hstart + ksize_height, input_height); + hstart = max(hstart, 0); + + int wstart = pw * stride_width - padding_width; + int wend = min(wstart + ksize_width, input_width); + wstart = max(wstart, 0); + + input_data += (batch_idx * channels + c) * input_height * input_width; + T ele = pool_process.initial(); + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + pool_process.process(ele, input_data[h * input_width + w]); + } + } + int pool_size = (hend - hstart) * (wend - wstart); + pool_process.finalize(ele, (static_cast(pool_size))); + output_data[index] = ele; + } +} + +template +__global__ void KernelPool2dBackward( + const int nthreads, const T* input_data, const T* output_data, + const T* output_grad, T* input_grad, const int channels, + const int input_height, const int input_width, const int output_height, + const int output_width, const int ksize_height, const int ksize_width, + const int stride_height, const int stride_width, const int padding_height, + const int padding_width, PoolProcess pool_process) { + int index = blockIdx.x * blockDim.x + threadIdx.x; + if (index < nthreads) { + int offsetW = index % input_width + padding_width; + int offsetH = (index / input_width) % input_height + padding_height; + int offsetC = (index / input_width / input_height) % channels; + int batch_idx = index / input_width / input_height / channels; + + int phstart = (offsetH < ksize_height) + ? 0 + : (offsetH - ksize_height) / stride_height + 1; + int pwstart = (offsetW < ksize_width) + ? 0 + : (offsetW - ksize_width) / stride_width + 1; + int phend = min(offsetH / stride_height + 1, output_height); + int pwend = min(offsetW / stride_width + 1, output_width); + T gradient = 0; + T input = input_data[index]; + int output_idx = + (batch_idx * channels + offsetC) * output_height * output_width; + output_data += output_idx; + output_grad += output_idx; + for (int ph = phstart; ph < phend; ++ph) { + for (int pw = pwstart; pw < pwend; ++pw) { + int hstart = ph * stride_height - padding_height; + int wstart = pw * stride_width - padding_width; + int hend = min(hstart + ksize_height, input_height); + int wend = min(wstart + ksize_width, input_width); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + int pool_size = (hend - hstart) * (wend - wstart); + int output_sub_idx = ph * output_width + pw; + pool_process.gradProcess(input, output_data[output_sub_idx], + output_grad[output_sub_idx], gradient, + static_cast(pool_size)); + } + } + input_grad[index] = gradient; + } +} + +template +class Pool2dForwardFunctor { + public: + void operator()(const framework::Tensor& input, framework::Tensor& output, + std::vector& ksize, std::vector& strides, + std::vector& paddings, PoolProcess pool_process, + platform::DeviceContext* context) { + const int batch_size = input.dims()[0]; + const int input_channels = input.dims()[1]; + const int input_height = input.dims()[2]; + const int input_width = input.dims()[3]; + const int output_channels = output.dims()[1]; + const int output_height = output.dims()[2]; + const int output_width = output.dims()[3]; + const int ksize_height = ksize[0]; + const int ksize_width = ksize[1]; + const int stride_height = strides[0]; + const int stride_width = strides[1]; + const int padding_height = paddings[0]; + const int padding_width = paddings[1]; + + const T* input_data = input.data(); + T* output_data = output.mutable_data(context->GetPlace()); + + int nthreads = batch_size * output_channels * output_height * output_width; + int blocks = (nthreads + 1024 - 1) / 1024; + dim3 threads(1024, 1); + dim3 grid(blocks, 1); + + KernelPool2dForward<<>>( + nthreads, input_data, output_data, input_channels, input_height, + input_width, output_height, output_width, ksize_height, ksize_width, + stride_height, stride_width, padding_height, padding_width, + pool_process); + + // CHECK_SYNC("Pool2dForwardKernel failed"); + } +}; + +template +class Pool2dBackwardFunctor { + public: + void operator()(const framework::Tensor& input, framework::Tensor& input_grad, + const framework::Tensor& output, + const framework::Tensor& output_grad, std::vector& ksize, + std::vector& strides, std::vector& paddings, + PoolProcess pool_process, platform::DeviceContext* context) { + const int batch_size = input.dims()[0]; + const int input_channels = input.dims()[1]; + const int input_height = input.dims()[2]; + const int input_width = input.dims()[3]; + const int output_height = output.dims()[2]; + const int output_width = output.dims()[3]; + const int ksize_height = ksize[0]; + const int ksize_width = ksize[1]; + const int stride_height = strides[0]; + const int stride_width = strides[1]; + const int padding_height = paddings[0]; + const int padding_width = paddings[1]; + + const T* input_data = input.data(); + const T* output_data = output.data(); + const T* output_grad_data = output_grad.data(); + T* input_grad_data = input_grad.mutable_data(context->GetPlace()); + + int nthreads = batch_size * input_channels * input_height * input_width; + int blocks = (nthreads + 1024 - 1) / 1024; + dim3 threads(1024, 1); + dim3 grid(blocks, 1); + + KernelPool2dBackward<<>>( + nthreads, input_data, output_data, output_grad_data, input_grad_data, + input_channels, input_height, input_width, output_height, output_width, + ksize_height, ksize_width, stride_height, stride_width, padding_height, + padding_width, pool_process); + + // CHECK_SYNC("KernelPool2dBackward failed"); + } +}; + +template class Pool2dForwardFunctor< + platform::GPUPlace, paddle::operators::math::pool::maxPool, float>; +template class Pool2dForwardFunctor< + platform::GPUPlace, paddle::operators::math::pool::avePool, float>; +template class Pool2dBackwardFunctor< + platform::GPUPlace, paddle::operators::math::pool::maxPool, float>; +template class Pool2dBackwardFunctor< + platform::GPUPlace, paddle::operators::math::pool::avePool, float>; +template class Pool2dForwardFunctor< + platform::GPUPlace, paddle::operators::math::pool::maxPool, double>; +template class Pool2dForwardFunctor< + platform::GPUPlace, paddle::operators::math::pool::avePool, double>; +template class Pool2dBackwardFunctor< + platform::GPUPlace, paddle::operators::math::pool::maxPool, double>; +template class Pool2dBackwardFunctor< + platform::GPUPlace, paddle::operators::math::pool::avePool, double>; + +template +__global__ void KernelPool3DForward( + const int nthreads, const T* input_data, T* output_data, const int channels, + const int input_depth, const int input_height, const int input_width, + const int output_depth, const int output_height, const int output_width, + const int ksize_depth, const int ksize_height, const int ksize_width, + const int stride_depth, const int stride_height, const int stride_width, + const int padding_depth, const int padding_height, const int padding_width, + PoolProcess pool_process) { + for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (nthreads); + index += blockDim.x * gridDim.x) { + int pw = index % output_width; + int ph = (index / output_width) % output_height; + int pd = (index / output_width / output_height) % output_depth; + int c = (index / output_width / output_height / output_depth) % channels; + int batch_idx = + index / output_width / output_height / output_depth / channels; + int dstart = pd * stride_depth - padding_depth; + int hstart = ph * stride_height - padding_height; + int wstart = pw * stride_width - padding_width; + int dend = min(dstart + ksize_depth, input_depth); + int hend = min(hstart + ksize_height, input_height); + int wend = min(wstart + ksize_width, input_width); + dstart = max(dstart, 0); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + T ele = pool_process.initial(); + input_data += + (batch_idx * channels + c) * input_depth * input_height * input_width; + for (int d = dstart; d < dend; ++d) { + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + pool_process.process( + ele, input_data[(d * input_height + h) * input_width + w]); + } + } + } + int pool_size = (dend - dstart) * (hend - hstart) * (wend - wstart); + pool_process.finalize(ele, static_cast(pool_size)); + + output_data[index] = ele; + } +} + +template +__global__ void KernelPool3DBackward( + const int nthreads, const T* input_data, const T* output_data, + const T* output_grad, T* input_grad, const int channels, + const int input_depth, const int input_height, const int input_width, + const int output_depth, const int output_height, const int output_width, + const int ksize_depth, const int ksize_height, const int ksize_width, + const int stride_depth, const int stride_height, const int stride_width, + const int padding_depth, const int padding_height, const int padding_width, + PoolProcess pool_process) { + for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (nthreads); + index += blockDim.x * gridDim.x) { + int offsetW = index % input_width + padding_width; + int offsetH = (index / input_width) % input_height + padding_height; + int offsetD = + (index / input_width / input_height) % input_depth + padding_depth; + int offsetC = (index / input_width / input_height / input_depth) % channels; + int batch_idx = index / input_width / input_height / input_depth / channels; + + int pdstart = (offsetD < ksize_depth) + ? 0 + : (offsetD + ksize_depth) / stride_depth + 1; + int phstart = (offsetH < ksize_height) + ? 0 + : (offsetH - ksize_height) / stride_height + 1; + int pwstart = (offsetW < ksize_width) + ? 0 + : (offsetW - ksize_width) / stride_width + 1; + int pdend = min((offsetD) / stride_depth + 1, output_depth); + int phend = min((offsetH) / stride_height + 1, output_height); + int pwend = min((offsetW) / stride_width + 1, output_width); + + T gradient = 0; + T input = input_data[index]; + int output_idx = (batch_idx * channels + offsetC) * output_depth * + output_height * output_width; + output_data += output_idx; + output_grad += output_idx; + + for (int pd = pdstart; pd < pdend; ++pd) { + for (int ph = phstart; ph < phend; ++ph) { + for (int pw = pwstart; pw < pwend; ++pw) { + // figure out the pooling size + int dstart = pd * stride_depth - padding_depth; + int hstart = ph * stride_height - padding_height; + int wstart = pw * stride_width - padding_width; + int dend = min(dstart + ksize_depth, input_depth); + int hend = min(hstart + ksize_height, input_height); + int wend = min(wstart + ksize_width, input_width); + dstart = max(dstart, 0); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + int pool_size = (dend - dstart) * (hend - hstart) * (wend - wstart); + int output_sub_idx = ph * output_width + pw; + pool_process.gradProcess(input, output_data[output_sub_idx], + output_grad[output_sub_idx], gradient, + static_cast(pool_size)); + } + } + } + input_grad[index] = gradient; + } +} + +template +class Pool3dForwardFunctor { + public: + void operator()(const framework::Tensor& input, framework::Tensor& output, + std::vector& ksize, std::vector& strides, + std::vector& paddings, PoolProcess pool_process, + platform::DeviceContext* context) { + const int batch_size = input.dims()[0]; + const int input_channels = input.dims()[1]; + const int input_depth = input.dims()[2]; + const int input_height = input.dims()[3]; + const int input_width = input.dims()[4]; + const int output_channels = output.dims()[1]; + const int output_depth = output.dims()[2]; + const int output_height = output.dims()[3]; + const int output_width = output.dims()[4]; + const int ksize_depth = ksize[0]; + const int ksize_height = ksize[1]; + const int ksize_width = ksize[2]; + const int stride_depth = strides[0]; + const int stride_height = strides[1]; + const int stride_width = strides[2]; + const int padding_depth = paddings[0]; + const int padding_height = paddings[1]; + const int padding_width = paddings[2]; + + const T* input_data = input.data(); + T* output_data = output.mutable_data(context->GetPlace()); + + int nthreads = batch_size * output_channels * output_depth * output_height * + output_width; + int blocks = (nthreads + 1024 - 1) / 1024; + dim3 threads(1024, 1); + dim3 grid(blocks, 1); + + KernelPool3DForward<<>>( + nthreads, input_data, output_data, input_channels, input_depth, + input_height, input_width, output_depth, output_height, output_width, + ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, + stride_width, padding_depth, padding_height, padding_width, + pool_process); + + // CHECK_SYNC("Pool2dForwardKernel failed"); + } +}; + +template +class Pool3dBackwardFunctor { + public: + void operator()(const framework::Tensor& input, framework::Tensor& input_grad, + const framework::Tensor& output, + const framework::Tensor& output_grad, std::vector& ksize, + std::vector& strides, std::vector& paddings, + PoolProcess pool_process, platform::DeviceContext* context) { + const int batch_size = input.dims()[0]; + const int input_channels = input.dims()[1]; + const int input_depth = input.dims()[2]; + const int input_height = input.dims()[3]; + const int input_width = input.dims()[4]; + const int output_channels = output.dims()[1]; + const int output_depth = output.dims()[2]; + const int output_height = output.dims()[3]; + const int output_width = output.dims()[4]; + const int ksize_depth = ksize[0]; + const int ksize_height = ksize[1]; + const int ksize_width = ksize[2]; + const int stride_depth = strides[0]; + const int stride_height = strides[1]; + const int stride_width = strides[2]; + const int padding_depth = paddings[0]; + const int padding_height = paddings[1]; + const int padding_width = paddings[2]; + + const T* input_data = input.data(); + const T* output_data = output.data(); + const T* output_grad_data = output_grad.data(); + T* input_grad_data = input_grad.mutable_data(context->GetPlace()); + + int nthreads = batch_size * input_channels * input_height * input_width; + int blocks = (nthreads + 1024 - 1) / 1024; + dim3 threads(1024, 1); + dim3 grid(blocks, 1); + + KernelPool3DBackward<<>>( + nthreads, input_data, output_data, output_grad_data, input_grad_data, + input_channels, input_depth, input_height, input_width, output_depth, + output_height, output_width, ksize_depth, ksize_height, ksize_width, + stride_depth, stride_height, stride_width, padding_depth, + padding_height, padding_width, pool_process); + + // CHECK_SYNC("KernelPool2dBackward failed"); + } +}; + +template class Pool3dForwardFunctor< + platform::GPUPlace, paddle::operators::math::pool::maxPool, float>; +template class Pool3dForwardFunctor< + platform::GPUPlace, paddle::operators::math::pool::avePool, float>; +template class Pool3dBackwardFunctor< + platform::GPUPlace, paddle::operators::math::pool::maxPool, float>; +template class Pool3dBackwardFunctor< + platform::GPUPlace, paddle::operators::math::pool::avePool, float>; +template class Pool3dForwardFunctor< + platform::GPUPlace, paddle::operators::math::pool::maxPool, double>; +template class Pool3dForwardFunctor< + platform::GPUPlace, paddle::operators::math::pool::avePool, double>; +template class Pool3dBackwardFunctor< + platform::GPUPlace, paddle::operators::math::pool::maxPool, double>; +template class Pool3dBackwardFunctor< + platform::GPUPlace, paddle::operators::math::pool::avePool, double>; + +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/math/pooling.h b/paddle/operators/math/pooling.h new file mode 100644 index 0000000000..69c86c6187 --- /dev/null +++ b/paddle/operators/math/pooling.h @@ -0,0 +1,99 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once +#include "paddle/framework/eigen.h" +#include "paddle/framework/tensor.h" +#include "paddle/platform/device_context.h" + +namespace paddle { +namespace operators { +namespace math { + +////////////////////// +#ifdef __NVCC__ +#define HL_DEVICE __device__ +#else +#define HL_DEVICE +#endif +#define FLT_MAX __FLT_MAX__ +///////////////////// + +namespace pool { +template +class maxPool { + public: + HL_DEVICE inline T initial() { return -(T)(FLT_MAX); } + HL_DEVICE inline void process(T& y, const T& x) { y = y > x ? y : x; } + HL_DEVICE inline void finalize(T& y, const T& poo_size) {} + HL_DEVICE inline void gradProcess(const T& x, const T& y, const T& dy, T& dx, + T scale) { + dx += dy * (x == y); + } +}; + +template +class avePool { + public: + HL_DEVICE inline T initial() { return 0; } + HL_DEVICE inline void process(T& y, const T& x) { y += x; } + HL_DEVICE inline void finalize(T& y, const T& poo_size) { y /= poo_size; } + HL_DEVICE inline void gradProcess(const T& x, const T& y, const T& dy, T& dx, + T scale) { + dx += (scale * dy); + } +}; +} // namespace pool + +template +class Pool2dForwardFunctor { + public: + void operator()(const framework::Tensor& input, framework::Tensor& output, + std::vector& ksize, std::vector& strides, + std::vector& paddings, PoolProcess pool_process, + platform::DeviceContext* context); +}; + +template +class Pool2dBackwardFunctor { + public: + void operator()(const framework::Tensor& input, framework::Tensor& input_grad, + const framework::Tensor& output, + const framework::Tensor& output_grad, std::vector& ksize, + std::vector& strides, std::vector& paddings, + PoolProcess pool_process, platform::DeviceContext* context); +}; + +template +class Pool3dForwardFunctor { + public: + void operator()(const framework::Tensor& input, framework::Tensor& output, + std::vector& ksize, std::vector& strides, + std::vector& paddings, PoolProcess pool_process, + platform::DeviceContext* context); +}; + +template +class Pool3dBackwardFunctor { + public: + void operator()(const framework::Tensor& input, framework::Tensor& input_grad, + const framework::Tensor& output, + const framework::Tensor& output_grad, std::vector& ksize, + std::vector& strides, std::vector& paddings, + PoolProcess pool_process, platform::DeviceContext* context); +}; + +} // namespace math +} // namespace operators +} // namespace paddle From 3c63b0bc4bf90befcd08fe7620ed0329f79e5fdd Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 20 Sep 2017 10:25:54 -0700 Subject: [PATCH 013/342] Change typo --- doc/design/python_api.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/design/python_api.md b/doc/design/python_api.md index f0a300bed0..74d4d3239a 100644 --- a/doc/design/python_api.md +++ b/doc/design/python_api.md @@ -1,6 +1,6 @@ # Design Doc: Python API -The top level user API in Python should be as same as API in `paddle.v2` after refactoring Paddle from a layer based framework to an operator based framework. There are many new classes in C++ in [compile time] for describing neural networks, such as `Variable`, `Operator`, `Block`. The issue about current design is how to give a proper way to wrap the C++ API to `paddle.v2` API and writing layers in Python. +The top level user API in Python should be as same as API in `paddle.v2` after refactoring Paddle from a layer based framework to an operator based framework. There are many new classes in C++ in [compile time] for describing neural networks, such as `Variable`, `Operator`, `Block`. The issue about current design is how to give a proper way to wrap the C++ API to `paddle.v2` API and write layers in Python. This implementation of Python API includes two steps. @@ -57,7 +57,7 @@ class Block(objects): self.ops.prepend(...) ``` -Users are able to create a global variable inside any block since they many create parameters inside a RNN or IfElseOp. All parameters should be stored in the global block, not the step block in RNN. +Users are able to create a global variable inside any block since they many create parameters inside a RNN or IfElse. All parameters should be stored in the global block, not the step block in RNN. Users can create local variables for outputs of operators. Users can also append and prepend an operator in current block. Prepending `random initialize` operator or `load` operator is very useful to initialize parameters before training. @@ -147,7 +147,7 @@ def fc_layer(input, size, block=None, ...): block = g_block w = block.create_parameter(...) b = block.create_parameter(...) - out = stack.create_var() + out = block.create_var() op = block.append_operator(Operator("FC", X=input, W=w, b=b, Out=out)) out.op = op return out From 531455651b720e8bd3b8eabf7b461e8d4138c3a9 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Wed, 20 Sep 2017 17:40:15 -0700 Subject: [PATCH 014/342] Add `Program` to Python API design doc --- doc/design/python_api.md | 56 ++++++++++++++++++++++++++++++---------- 1 file changed, 43 insertions(+), 13 deletions(-) diff --git a/doc/design/python_api.md b/doc/design/python_api.md index 74d4d3239a..3122f5bfd8 100644 --- a/doc/design/python_api.md +++ b/doc/design/python_api.md @@ -17,24 +17,55 @@ Since we design our Python API concepts based on `compile-time`, we try to map o | Python Class | Compile-time protobuf | | --- | --- | +| Program | ProgramDesc | | Block | BlockDesc | | Operator | OpDesc | | Variable | VarDesc | +### Program + +`Program` is the description of the whole training process and there can only be one `Program` object, which is created automatically by the system at the very beginning. `Program` is formed by a series of `Block`. + +```python +class Program(objects): + def __init__(self): + self.blocks = vector() + self.blocks.append(Block(None)) + self.current_block_idx = 0 + + def get_block(block_idx): + return self.blocks[block_idx] + + def current_block(): + return self.get_block(self.current_block_idx) + + def fallback_current_block(): + self.current_block_idx = self.current_block().parent_idx + + + def create_block(): + new_block_idx = len(self.block) + self.blocks.append(Block(parent_idx=self.current_block_idx, + idx=new_block_idx)) + self.current_block_idx = new_block_idx +``` + +`Program` will create the first block in its constructor. The first block is called 'global block'. It is where all parameters are stored. ### Block Block is just like programming languages `{}`, which contains many operators and variables. There are two data fields in `Block`. 1) An associate map, whose key is variable name and value is variable itself; 2) A list of operators. -The block is hierarchical because PaddlePaddle supports RNN and IfElse. For example, RNN is like `for-loop` in programming languages. There is new `block` inside a `for-loop`. To represent hierarchies, `Block` stores the `parent Block` inside. If `parent=None`, the `Block` is the outermost block, i.e., the `global` block. +The block is hierarchical because PaddlePaddle supports RNN and IfElse. For example, RNN is like `for-loop` in programming languages. There is new `block` inside a `for-loop`. To represent hierarchies, `Block` stores the index of `parent Block` inside. The 'index' means the block's position in `Program`'s `blocks`. If `parent_idx=None`, the block itself is the outermost block, i.e., the 'global block'. ```python class Block(objects): - def __init__(self, parent=None): + def __init__(self, parent_idx, idx): self.vars = map() self.ops = vector() - self.parent = parent + self.idx = idx + self.parent_idx = parent_idx def create_var(self, ...): # create variable in `self.vars` @@ -42,8 +73,9 @@ class Block(objects): def create_global_var(self, ...): - if self.parent is not None: - return self.parent.create_global_var(...) + if self.parent_idx is not None: + parent_block = program.get_block(parent_idx) + return parent_block.create_global_var(...) else: return self.create_var(...) @@ -126,9 +158,8 @@ Here are examples of how to write a data layer and FC layer: ### Data Layer ```python -def data_layer(name, type, block=None): - if block is None: - block = g_block +def data_layer(name, type): + block = program.current_block() # type = dense_vector(size=10) / integer_value(range=10) return block.create_global_var( name=name, @@ -137,14 +168,13 @@ def data_layer(name, type, block=None): ``` -Before building new variables, we need to specify which block to use. If we don't, the default one `g_block` will be used. In the above `data_layer` code, a variable is created and be inserted into the root block to make it global. This variable is going to be used as input data of the whole network. +All the new variables and operators will be built in the `current block`. In the above `data_layer` code, a variable is created and be inserted into the root block to make it global. This variable is going to be used as input data of the whole network. ### FC Layer ```python -def fc_layer(input, size, block=None, ...): - if block is None: - block = g_block +def fc_layer(input, size, ...): + block = program.current_block() w = block.create_parameter(...) b = block.create_parameter(...) out = block.create_var() @@ -153,4 +183,4 @@ def fc_layer(input, size, block=None, ...): return out ``` -In the `fc_layer` code, we create two parameters(`w` and `b`), one variable(`out`) and one operator(`FC operator`), then insert all of them into the specified block. +In the `fc_layer` code, we create two parameters(`w` and `b`), one variable(`out`) and one operator(`FC operator`), then insert all of them into the `current block`. From f14a7966b0f982a703f24940b1e9ae5325ee83c9 Mon Sep 17 00:00:00 2001 From: Liu Yiqun Date: Thu, 21 Sep 2017 03:18:42 +0000 Subject: [PATCH 015/342] Initialize the sequence softmax operator. --- paddle/operators/sequence_avg_pool_op.cc | 2 +- paddle/operators/sequence_softmax_op.cc | 82 +++++++++++++++++++ paddle/operators/sequence_softmax_op.cu | 25 ++++++ paddle/operators/sequence_softmax_op.h | 62 ++++++++++++++ .../tests/test_sequence_softmax_op.py | 35 ++++++++ 5 files changed, 205 insertions(+), 1 deletion(-) create mode 100644 paddle/operators/sequence_softmax_op.cc create mode 100644 paddle/operators/sequence_softmax_op.cu create mode 100644 paddle/operators/sequence_softmax_op.h create mode 100644 python/paddle/v2/framework/tests/test_sequence_softmax_op.py diff --git a/paddle/operators/sequence_avg_pool_op.cc b/paddle/operators/sequence_avg_pool_op.cc index 9815b8f3a8..ff354c62b6 100644 --- a/paddle/operators/sequence_avg_pool_op.cc +++ b/paddle/operators/sequence_avg_pool_op.cc @@ -36,7 +36,7 @@ class SequenceAvgPoolOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_GE( dims[0], /*batch size = */ static_cast(lod[0].size() - 1), - "The first dimension of Input(X) must be large than batch size."); + "The first dimension of Input(X) must be larger than batch size."); dims[0] = lod[0].size() - 1; ctx.Output("Out")->Resize({dims}); } diff --git a/paddle/operators/sequence_softmax_op.cc b/paddle/operators/sequence_softmax_op.cc new file mode 100644 index 0000000000..0a99717440 --- /dev/null +++ b/paddle/operators/sequence_softmax_op.cc @@ -0,0 +1,82 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/sequence_softmax_op.h" + +namespace paddle { +namespace operators { + +class SequenceSoftmaxOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(const framework::InferShapeContext &ctx) const override { + PADDLE_ENFORCE_NOT_NULL( + ctx.InputVar("X"), "Input(X) of SequenceSoftmaxOp should not be null."); + PADDLE_ENFORCE_NOT_NULL( + ctx.OutputVar("Out"), + "Output(Out) of SequenceSoftmaxOp should not be null."); + + auto *x = ctx.Input("X"); + auto dims = x->dims(); + auto lod = x->lod(); + PADDLE_ENFORCE_EQ(lod.size(), 1UL, "Only support one level sequence now."); + PADDLE_ENFORCE_GE( + dims[0], + /* batch_size */ static_cast(lod[0].size() - 1), + "The first dimension of Input(X) should be larger than batch size."); + PADDLE_ENFORCE_EQ(x->numel(), static_cast(lod[0].size() - 1), + "The width of each timestep in Input(X) of " + "SequenceSoftmaxOp should be 1."); + + dims[0] = lod[0].size() - 1; + ctx.Output("Out")->Resize({dims}); + } +}; + +class SequenceSoftmaxOpMaker : public framework::OpProtoAndCheckerMaker { + public: + SequenceSoftmaxOpMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "(LoDTensor)"); + AddOutput("Out", "(LoDTensor)"); + AddComment(R"DOC( +Softmax of Sequence. +)DOC"); + } +}; + +class SequenceSoftmaxGradOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(const framework::InferShapeContext &ctx) const override {} +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP(sequence_softmax, ops::SequenceSoftmaxOp, + ops::SequenceSoftmaxOpMaker, sequence_softmax_grad, + ops::SequenceSoftmaxGradOp); +REGISTER_OP_CPU_KERNEL( + sequence_softmax, + ops::SequenceSoftmaxKernel); +REGISTER_OP_CPU_KERNEL( + sequence_softmax_grad, + ops::SequenceSoftmaxGradKernel); diff --git a/paddle/operators/sequence_softmax_op.cu b/paddle/operators/sequence_softmax_op.cu new file mode 100644 index 0000000000..f2a1e3d5e3 --- /dev/null +++ b/paddle/operators/sequence_softmax_op.cu @@ -0,0 +1,25 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#define EIGEN_USE_GPU + +#include "paddle/operators/sequence_softmax_op.h" + +namespace ops = paddle::operators; +REGISTER_OP_GPU_KERNEL( + sequence_softmax, + ops::SequenceSoftmaxKernel) +REGISTER_OP_GPU_KERNEL( + sequence_softmax_grad, + ops::SequenceSoftmaxGradKernel); diff --git a/paddle/operators/sequence_softmax_op.h b/paddle/operators/sequence_softmax_op.h new file mode 100644 index 0000000000..54d8265271 --- /dev/null +++ b/paddle/operators/sequence_softmax_op.h @@ -0,0 +1,62 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include "paddle/framework/eigen.h" +#include "paddle/framework/op_registry.h" +#include "paddle/operators/math/softmax_function.h" + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; +using LoDTensor = framework::LoDTensor; +template +using EigenVector = framework::EigenVector; +template +using EigenMatrix = framework::EigenMatrix; + +template +class SequenceSoftmaxKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto* x = ctx.Input("X"); + auto* out = ctx.Output("Out"); + + auto lod = x->lod(); + const size_t level = lod.size(); + + out->mutable_data(ctx.GetPlace()); + for (int i = 0; i < static_cast(lod[level].size()) - 1; ++i) { + int start_pos = static_cast(lod[level][i]); + int end_pos = static_cast(lod[level][i + 1]); + Tensor x_i = x->Slice(start_pos, end_pos); + Tensor out_i = out->Slice(start_pos, end_pos); + + math::SoftmaxFunctor()(&x_i, &out_i, ctx); + } + } +}; + +template +class SequenceSoftmaxGradKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override {} +}; + +} // namespace operators +} // namespace paddle diff --git a/python/paddle/v2/framework/tests/test_sequence_softmax_op.py b/python/paddle/v2/framework/tests/test_sequence_softmax_op.py new file mode 100644 index 0000000000..d0667c1308 --- /dev/null +++ b/python/paddle/v2/framework/tests/test_sequence_softmax_op.py @@ -0,0 +1,35 @@ +import unittest +import numpy as np +from op_test import OpTest + + +def stable_softmax(x): + """Compute the softmax of vector x in a numerically stable way.""" + shiftx = x - np.max(x) + exps = np.exp(shiftx) + return exps / np.sum(exps) + + +class TestSequenceSoftmaxOp(OpTest): + def setUp(self): + self.op_type = "sequence_softmax" + x = np.random.uniform(0.1, 1, (11, 1)).astype("float32") + lod = [[0, 4, 5, 8, 11]] + + out = np.zeros((11, 1)).astype("float32") + for i in range(4): + sub_x = x[lod[0][i]:lod[0][i + 1], :] + sub_x = sub_x.reshape(1, lod[0][i + 1] - lod[0][i]) + sub_out = stable_softmax(sub_x) + out[lod[0][i]:lod[0][i + 1], :] = sub_out.reshape( + lod[0][i + 1] - lod[0][i], 1) + + self.inputs = {"X": (x, lod)} + self.outputs = {"Out": out} + + def test_check_output(self): + self.check_output() + + +if __name__ == "__main__": + unittest.main() From a8477381ac87bb7369dfbd540011e1bbd5264998 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Wed, 20 Sep 2017 16:26:24 +0800 Subject: [PATCH 016/342] Add pooling2d(max, ave) and pooling3d(max, ave) Op --- paddle/operators/CMakeLists.txt | 7 ++ paddle/operators/math/CMakeLists.txt | 7 +- paddle/operators/math/pooling.cc | 15 +-- paddle/operators/math/pooling.cu | 11 +- paddle/operators/pool_op.cc | 165 +++++++++++++++++++++++++++ paddle/operators/pool_op.cu | 26 +++++ paddle/operators/pool_op.h | 157 +++++++++++++++++++++++++ 7 files changed, 372 insertions(+), 16 deletions(-) create mode 100644 paddle/operators/pool_op.cc create mode 100644 paddle/operators/pool_op.cu create mode 100644 paddle/operators/pool_op.h diff --git a/paddle/operators/CMakeLists.txt b/paddle/operators/CMakeLists.txt index e3e934bccc..429e5526db 100644 --- a/paddle/operators/CMakeLists.txt +++ b/paddle/operators/CMakeLists.txt @@ -55,6 +55,13 @@ function(op_library TARGET) set(pybind_flag 1) endif() + # activation_op contains several operators + if ("${TARGET}" STREQUAL "pool_op") + set(pybind_flag 1) + # It's enough to just adding one operator to pybind + file(APPEND ${pybind_file} "USE_OP(pool2d);\n") + endif() + # pybind USE_NO_KERNEL_OP file(READ ${TARGET}.cc TARGET_CONTENT) string(REGEX MATCH "OperatorWithKernel" regex_result "${TARGET_CONTENT}") diff --git a/paddle/operators/math/CMakeLists.txt b/paddle/operators/math/CMakeLists.txt index f8333f34f7..185708cdaa 100644 --- a/paddle/operators/math/CMakeLists.txt +++ b/paddle/operators/math/CMakeLists.txt @@ -1,9 +1,8 @@ - if(WITH_GPU) - nv_library(math_function SRCS math_function.cc math_function.cu im2col.cc - im2col.cu DEPS cblas device_context) + nv_library(math_function SRCS math_function.cc math_function.cu im2col.cc + im2col.cu pooling.cc pooling.cu DEPS cblas device_context) else() - cc_library(math_function SRCS math_function.cc im2col.cc DEPS cblas device_context) + cc_library(math_function SRCS math_function.cc im2col.cc pooling.cc DEPS cblas device_context) endif() nv_test(math_function_test SRCS math_function_test.cc DEPS math_function tensor) diff --git a/paddle/operators/math/pooling.cc b/paddle/operators/math/pooling.cc index 6146e710fc..a78c5f929c 100644 --- a/paddle/operators/math/pooling.cc +++ b/paddle/operators/math/pooling.cc @@ -111,6 +111,7 @@ class Pool2dBackwardFunctor { int wend = std::min(wstart + ksize_width, input_width); wstart = std::max(wstart, 0); int pool_size = (hend - hstart) * (wend - wstart); + float scale = 1.0 / pool_size; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { pool_process.gradProcess( @@ -118,7 +119,7 @@ class Pool2dBackwardFunctor { output_data[ph * output_width + pw], output_grad_data[ph * output_width + pw], input_grad_data[h * input_width + w], - static_cast(pool_size)); + static_cast(scale)); } } } @@ -244,7 +245,6 @@ class Pool3dBackwardFunctor { const int padding_depth = paddings[0]; const int padding_height = paddings[1]; const int padding_width = paddings[2]; - const int input_stride = input_depth * input_height * input_width; const int output_stride = output_depth * output_height * output_width; @@ -271,6 +271,7 @@ class Pool3dBackwardFunctor { int pool_size = (dend - dstart) * (hend - hstart) * (wend - wstart); + float scale = 1.0 / pool_size; for (int d = dstart; d < dend; ++d) { for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { @@ -280,17 +281,17 @@ class Pool3dBackwardFunctor { pool_process.gradProcess( input_data[input_idx], output_data[output_idx], output_grad_data[output_idx], - input_grad_data[input_idx], static_cast(pool_size)); + input_grad_data[input_idx], static_cast(scale)); } } } } } - input_data += input_stride; - output_data += output_stride; - input_grad_data += input_stride; - output_grad_data += output_stride; } + input_data += input_stride; + output_data += output_stride; + input_grad_data += input_stride; + output_grad_data += output_stride; } } } diff --git a/paddle/operators/math/pooling.cu b/paddle/operators/math/pooling.cu index 87bfb43c47..0a399f7ca0 100644 --- a/paddle/operators/math/pooling.cu +++ b/paddle/operators/math/pooling.cu @@ -95,7 +95,7 @@ __global__ void KernelPool2dBackward( int output_sub_idx = ph * output_width + pw; pool_process.gradProcess(input, output_data[output_sub_idx], output_grad[output_sub_idx], gradient, - static_cast(pool_size)); + static_cast(1.0 / pool_size)); } } input_grad[index] = gradient; @@ -264,7 +264,7 @@ __global__ void KernelPool3DBackward( int pdstart = (offsetD < ksize_depth) ? 0 - : (offsetD + ksize_depth) / stride_depth + 1; + : (offsetD - ksize_depth) / stride_depth + 1; int phstart = (offsetH < ksize_height) ? 0 : (offsetH - ksize_height) / stride_height + 1; @@ -296,10 +296,10 @@ __global__ void KernelPool3DBackward( hstart = max(hstart, 0); wstart = max(wstart, 0); int pool_size = (dend - dstart) * (hend - hstart) * (wend - wstart); - int output_sub_idx = ph * output_width + pw; + int output_sub_idx = (pd * output_height + ph) * output_width + pw; pool_process.gradProcess(input, output_data[output_sub_idx], output_grad[output_sub_idx], gradient, - static_cast(pool_size)); + static_cast(1.0 / pool_size)); } } } @@ -385,7 +385,8 @@ class Pool3dBackwardFunctor { const T* output_grad_data = output_grad.data(); T* input_grad_data = input_grad.mutable_data(context->GetPlace()); - int nthreads = batch_size * input_channels * input_height * input_width; + int nthreads = + batch_size * input_channels * input_depth * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); diff --git a/paddle/operators/pool_op.cc b/paddle/operators/pool_op.cc new file mode 100644 index 0000000000..984cbefabf --- /dev/null +++ b/paddle/operators/pool_op.cc @@ -0,0 +1,165 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/pool_op.h" + +namespace paddle { +namespace operators { + +int outputSize(int input_size, int filter_size, int padding, int stride) { + int output_size = (input_size - filter_size + 2 * padding) / stride + 1; + return output_size; +} + +class PoolOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(const framework::InferShapeContext &ctx) const override { + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Input"), + "Input(Input) of Pooling should not be null."); + PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Output"), + "Output(Output) of Pooling should not be null."); + // PADDLE_ENFORCE_NOT_NULL(Attr("pooling_type"), + // "pooling_type should not be null."); + // PADDLE_ENFORCE_NOT_NULL(Attr>("ksize"), "ksize should + // not be null."); + auto input = ctx.Input("Input"); + auto output = ctx.Output("Output"); + int global_pooling = Attr("global_pooling"); + std::string pooling_type = Attr("pooling_type"); + std::vector ksize = Attr>("ksize"); + std::vector strides = Attr>("strides"); + std::vector paddings = Attr>("paddings"); + + PADDLE_ENFORCE(pooling_type == "max" || pooling_type == "ave", + "pooling_type should be 'max' or 'ave'"); + PADDLE_ENFORCE(ksize.size() == 2 || ksize.size() == 3, + "Pooling ksize should be 2-D or 3-D"); + + if (global_pooling == 1) { + for (size_t i = 0; i < ksize.size(); ++i) ksize[i] = input->dims()[i + 2]; + } + if (ksize.size() == 2) { + PADDLE_ENFORCE_EQ(input->dims().size(), 4, + "Pool2DOp intput should be 4-D."); + PADDLE_ENFORCE_EQ(strides.size(), 2, "Pool2DOp strides should be 2-D."); + PADDLE_ENFORCE_EQ(paddings.size(), 2, "Pool2DOp paddings should be 2-D."); + } else { + PADDLE_ENFORCE_EQ(input->dims().size(), 5, + "Pool3DOp intput should be 5-D."); + PADDLE_ENFORCE_EQ(strides.size(), 3, "Pool3DOp strides should be 3-D."); + PADDLE_ENFORCE_EQ(paddings.size(), 3, "Pool3DOp paddings should be 3-D."); + } + std::vector output_shape({input->dims()[0], input->dims()[1]}); + for (size_t i = 0; i < ksize.size(); ++i) { + output_shape.push_back( + outputSize(input->dims()[i + 2], ksize[i], paddings[i], strides[i])); + } + output->Resize(framework::make_ddim(output_shape)); + } +}; + +class PoolOpGrad : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(const framework::InferShapeContext &ctx) const override { + auto in = ctx.Input("Input"); + auto d_in = + ctx.Output(framework::GradVarName("Input")); + if (d_in) d_in->Resize(in->dims()); + } +}; + +class Pool3dOpMaker : public framework::OpProtoAndCheckerMaker { + public: + Pool3dOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput( + "Input", + "The input tensor of pooling operator. " + "The format of input tensor is NCDHW. Where N is batch size, C is the " + "number of channels, D, H and W is the depth, height and width of " + "image."); + AddOutput("Output", + "The output tensor of pooling operator." + "The format of output tensor is also NCDHW."); + + AddAttr("pooling_type", + "pooling_type of pooling operator.['max' or 'ave']"); + AddAttr>("ksize", "strides of pooling operator."); + AddAttr("global_pooling", "whether to use the global_pooling.") + .SetDefault(0); + AddAttr>("strides", "strides of pooling operator.") + .SetDefault({1, 1, 1}); + AddAttr>("paddings", "paddings of pooling operator.") + .SetDefault({0, 0, 0}); + AddComment(R"DOC( +The pooling3d operation calculates the output based on +the input, pooling_type and ksize, strides, paddings parameters. +)DOC"); + } +}; + +class Pool2dOpMaker : public framework::OpProtoAndCheckerMaker { + public: + Pool2dOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput( + "Input", + "The input tensor of pooling operator. " + "The format of input tensor is NCHW. Where N is batch size, C is the " + "number of channels, H and W is the height and width of image."); + AddOutput("Output", + "The output tensor of pooling operator." + "The format of output tensor is also NCHW."); + + AddAttr("pooling_type", + "pooling_type of pooling operator.['max' or 'ave']"); + AddAttr>("ksize", "strides of pooling operator."); + AddAttr("global_pooling", "whether to use the global_pooling.") + .SetDefault(0); + AddAttr>("strides", "strides of pooling operator.") + .SetDefault({1, 1}); + AddAttr>("paddings", "paddings of pooling operator.") + .SetDefault({0, 0}); + AddComment(R"DOC( +The pooling2d operation calculates the output based on +the input, pooling_type and ksize, strides, paddings parameters. +)DOC"); + } +}; +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; + +REGISTER_OP(pool2d, ops::PoolOp, ops::Pool2dOpMaker, pool2d_grad, + ops::PoolOpGrad); + +REGISTER_OP_CPU_KERNEL(pool2d, + ops::PoolKernel); +REGISTER_OP_CPU_KERNEL(pool2d_grad, + ops::PoolGradKernel) + +REGISTER_OP(pool3d, ops::PoolOp, ops::Pool3dOpMaker, pool3d_grad, + ops::PoolOpGrad); + +REGISTER_OP_CPU_KERNEL(pool3d, + ops::PoolKernel); +REGISTER_OP_CPU_KERNEL(pool3d_grad, + ops::PoolGradKernel); diff --git a/paddle/operators/pool_op.cu b/paddle/operators/pool_op.cu new file mode 100644 index 0000000000..b011d20da5 --- /dev/null +++ b/paddle/operators/pool_op.cu @@ -0,0 +1,26 @@ +/* Copyright (c) 2016 PaddlePaddle Authors All Rights Reserve. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#include "paddle/operators/pool_op.h" + +namespace ops = paddle::operators; + +REGISTER_OP_GPU_KERNEL(pool2d, + ops::PoolKernel); +REGISTER_OP_GPU_KERNEL(pool2d_grad, + ops::PoolGradKernel); + +REGISTER_OP_GPU_KERNEL(pool3d, + ops::PoolKernel); +REGISTER_OP_GPU_KERNEL(pool3d_grad, + ops::PoolGradKernel); diff --git a/paddle/operators/pool_op.h b/paddle/operators/pool_op.h new file mode 100644 index 0000000000..aca1c5a137 --- /dev/null +++ b/paddle/operators/pool_op.h @@ -0,0 +1,157 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include "paddle/framework/eigen.h" +#include "paddle/framework/op_registry.h" +#include "paddle/operators/math/math_function.h" +#include "paddle/operators/math/pooling.h" + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; + +template +class PoolKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + const Tensor* input = context.Input("Input"); + Tensor* output = context.Output("Output"); + + int global_pooling = context.Attr("global_pooling"); + std::string pooling_type = context.Attr("pooling_type"); + std::vector ksize = context.Attr>("ksize"); + std::vector strides = context.Attr>("strides"); + std::vector paddings = context.Attr>("paddings"); + if (global_pooling == 1) { + for (size_t i = 0; i < ksize.size(); ++i) { + ksize[i] = input->dims()[i + 2]; + } + } + auto* device_context = + const_cast(context.device_context_); + + switch (ksize.size()) { + case 2: { + if (pooling_type == "max") { + paddle::operators::math::Pool2dForwardFunctor< + Place, paddle::operators::math::pool::maxPool, T> + pool2d_forward; + paddle::operators::math::pool::maxPool pool_process; + pool2d_forward(*input, *output, ksize, strides, paddings, + pool_process, device_context); + + } else if (pooling_type == "ave") { + paddle::operators::math::Pool2dForwardFunctor< + Place, paddle::operators::math::pool::avePool, T> + pool2d_forward; + paddle::operators::math::pool::avePool pool_process; + pool2d_forward(*input, *output, ksize, strides, paddings, + pool_process, device_context); + } + } break; + case 3: { + if (pooling_type == "max") { + paddle::operators::math::Pool3dForwardFunctor< + Place, paddle::operators::math::pool::maxPool, T> + pool3d_forward; + paddle::operators::math::pool::maxPool pool_process; + pool3d_forward(*input, *output, ksize, strides, paddings, + pool_process, device_context); + } else if (pooling_type == "ave") { + paddle::operators::math::Pool3dForwardFunctor< + Place, paddle::operators::math::pool::avePool, T> + pool3d_forward; + paddle::operators::math::pool::avePool pool_process; + pool3d_forward(*input, *output, ksize, strides, paddings, + pool_process, device_context); + } + } break; + } + } +}; + +template +class PoolGradKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + const Tensor* input = context.Input("Input"); + const Tensor* output = context.Input("Output"); + const Tensor* output_grad = + context.Input(framework::GradVarName("Output")); + Tensor* input_grad = + context.Output(framework::GradVarName("Input")); + + int global_pooling = context.Attr("global_pooling"); + std::string pooling_type = context.Attr("pooling_type"); + std::vector ksize = context.Attr>("ksize"); + std::vector strides = context.Attr>("strides"); + std::vector paddings = context.Attr>("paddings"); + + if (global_pooling == 1) { + for (size_t i = 0; i < ksize.size(); ++i) ksize[i] = input->dims()[i + 2]; + } + auto* device_context = + const_cast(context.device_context_); + + if (input_grad) { + input_grad->mutable_data(context.GetPlace()); + auto temp = framework::EigenVector::Flatten(*input_grad); + temp.device(context.GetEigenDevice()) = + temp.constant(static_cast(0)); + + switch (ksize.size()) { + case 2: { + if (pooling_type == "max") { + paddle::operators::math::Pool2dBackwardFunctor< + Place, paddle::operators::math::pool::maxPool, T> + pool2d_backward; + paddle::operators::math::pool::maxPool pool_process; + pool2d_backward(*input, *input_grad, *output, *output_grad, ksize, + strides, paddings, pool_process, device_context); + } else if (pooling_type == "ave") { + paddle::operators::math::Pool2dBackwardFunctor< + Place, paddle::operators::math::pool::avePool, T> + pool2d_backward; + paddle::operators::math::pool::avePool pool_process; + pool2d_backward(*input, *input_grad, *output, *output_grad, ksize, + strides, paddings, pool_process, device_context); + } + } break; + case 3: { + if (pooling_type == "max") { + paddle::operators::math::Pool3dBackwardFunctor< + Place, paddle::operators::math::pool::maxPool, T> + pool3d_backward; + paddle::operators::math::pool::maxPool pool_process; + pool3d_backward(*input, *input_grad, *output, *output_grad, ksize, + strides, paddings, pool_process, device_context); + } else if (pooling_type == "ave") { + paddle::operators::math::Pool3dBackwardFunctor< + Place, paddle::operators::math::pool::avePool, T> + pool3d_backward; + paddle::operators::math::pool::avePool pool_process; + pool3d_backward(*input, *input_grad, *output, *output_grad, ksize, + strides, paddings, pool_process, device_context); + } + } break; + } + } + } +}; + +} // namespace operators +} // namespace paddle From 33d99998900950e114f8d6d16496f59d534de976 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Thu, 21 Sep 2017 15:48:23 +0800 Subject: [PATCH 017/342] Add pool2d unit test --- .../v2/framework/tests/test_pool2d_op.py | 98 +++++++++++++++++++ 1 file changed, 98 insertions(+) create mode 100644 python/paddle/v2/framework/tests/test_pool2d_op.py diff --git a/python/paddle/v2/framework/tests/test_pool2d_op.py b/python/paddle/v2/framework/tests/test_pool2d_op.py new file mode 100644 index 0000000000..549898ec94 --- /dev/null +++ b/python/paddle/v2/framework/tests/test_pool2d_op.py @@ -0,0 +1,98 @@ +import unittest +import numpy as np +from op_test import OpTest + + +def max_pool2D_forward_naive(x, ksize, strides, paddings=[0, 0]): + + N, C, H, W = x.shape + H_out = (H - ksize[0] + 2 * paddings[0]) / strides[0] + 1 + W_out = (W - ksize[1] + 2 * paddings[1]) / strides[1] + 1 + out = np.zeros((N, C, H_out, W_out)) + for i in xrange(H_out): + for j in xrange(W_out): + r_start = np.max((i * strides[0] - paddings[0], 0)) + r_end = np.min((i * strides[0] + ksize[0] - paddings[0], H)) + c_start = np.max((j * strides[1] - paddings[1], 0)) + c_end = np.min((j * strides[1] + ksize[1] - paddings[1], W)) + x_masked = x[:, :, r_start:r_end, c_start:c_end] + + out[:, :, i, j] = np.max(x_masked, axis=(2, 3)) + return out + + +def ave_pool2D_forward_naive(x, ksize, strides, paddings=[0, 0]): + + N, C, H, W = x.shape + H_out = (H - ksize[0] + 2 * paddings[0]) / strides[0] + 1 + W_out = (W - ksize[1] + 2 * paddings[1]) / strides[1] + 1 + out = np.zeros((N, C, H_out, W_out)) + for i in xrange(H_out): + for j in xrange(W_out): + r_start = np.max((i * strides[0] - paddings[0], 0)) + r_end = np.min((i * strides[0] + ksize[0] - paddings[0], H)) + c_start = np.max((j * strides[1] - paddings[1], 0)) + c_end = np.min((j * strides[1] + ksize[1] - paddings[1], W)) + x_masked = x[:, :, r_start:r_end, c_start:c_end] + + out[:, :, i, j] = np.sum(x_masked, axis=(2, 3)) / ( + (r_end - r_start) * (c_end - c_start)) + return out + + +class TestPool2d_Op(OpTest): + def setUp(self): + self.initTestCase() + self.op_type = "pool2d" + input = np.random.random(self.shape).astype("float32") + output = self.pool2D_forward_naive(input, self.ksize, self.strides, + self.paddings) + self.inputs = {'Input': input} + + self.attrs = { + 'strides': self.strides, + 'paddings': self.paddings, + 'ksize': self.ksize, + 'pooling_type': self.pool_type, + } + + self.outputs = {'Output': output} + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(set(['Input']), 'Output', max_relative_error=0.07) + + def initTestCase(self): + self.pool_type = "ave" + self.pool2D_forward_naive = ave_pool2D_forward_naive + self.shape = [2, 3, 5, 5] + self.ksize = [3, 3] + self.strides = [1, 1] + self.paddings = [0, 0] + + +class TestCase2(TestPool2d_Op): + def initTestCase(self): + self.op_type = "pool2d" + self.pool_type = "ave" + self.pool2D_forward_naive = ave_pool2D_forward_naive + self.shape = [2, 3, 5, 5] + self.ksize = [3, 3] + self.strides = [1, 1] + self.paddings = [1, 1] + + +# class TestCase1(TestPool2d_Op): +# def initTestCase(self): +# self.op_type = "pool2d" +# self.pool_type = "max" +# self.pool2D_forward_naive = max_pool2D_forward_naive +# self.shape = [2, 3, 5, 5] +# self.ksize = [3, 3] +# self.strides = [1, 1] +# self.paddings = [1, 1] + +if __name__ == '__main__': + unittest.main() From 510f00800a43c8df18ac4551d2b35fba0a400d5d Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Thu, 21 Sep 2017 15:49:16 +0800 Subject: [PATCH 018/342] Add pool3d unit test --- .../v2/framework/tests/test_pool3d_op.py | 108 ++++++++++++++++++ 1 file changed, 108 insertions(+) create mode 100644 python/paddle/v2/framework/tests/test_pool3d_op.py diff --git a/python/paddle/v2/framework/tests/test_pool3d_op.py b/python/paddle/v2/framework/tests/test_pool3d_op.py new file mode 100644 index 0000000000..f8e9a768e0 --- /dev/null +++ b/python/paddle/v2/framework/tests/test_pool3d_op.py @@ -0,0 +1,108 @@ +import unittest +import numpy as np +from op_test import OpTest + + +def max_pool3D_forward_naive(x, ksize, strides, paddings=[0, 0]): + + N, C, D, H, W = x.shape + D_out = (D - ksize[0] + 2 * paddings[0]) / strides[0] + 1 + H_out = (H - ksize[1] + 2 * paddings[1]) / strides[1] + 1 + W_out = (W - ksize[2] + 2 * paddings[2]) / strides[2] + 1 + out = np.zeros((N, C, D_out, H_out, W_out)) + for k in xrange(D_out): + d_start = np.max((k * strides[0] - paddings[0], 0)) + d_end = np.min((k * strides[0] + ksize[0] - paddings[0], D)) + for i in xrange(H_out): + h_start = np.max((i * strides[0] - paddings[0], 0)) + h_end = np.min((i * strides[0] + ksize[0] - paddings[0], H)) + for j in xrange(W_out): + w_start = np.max((j * strides[1] - paddings[1], 0)) + w_end = np.min((j * strides[1] + ksize[1] - paddings[1], W)) + + x_masked = x[:, :, d_start:d_end, h_start:h_end, w_start:w_end] + + out[:, :, k, i, j] = np.max(x_masked, axis=(2, 3, 4)) + return out + + +def ave_pool3D_forward_naive(x, ksize, strides, paddings=[0, 0]): + + N, C, D, H, W = x.shape + D_out = (D - ksize[0] + 2 * paddings[0]) / strides[0] + 1 + H_out = (H - ksize[1] + 2 * paddings[1]) / strides[1] + 1 + W_out = (W - ksize[2] + 2 * paddings[2]) / strides[2] + 1 + out = np.zeros((N, C, D_out, H_out, W_out)) + for k in xrange(D_out): + d_start = np.max((k * strides[0] - paddings[0], 0)) + d_end = np.min((k * strides[0] + ksize[0] - paddings[0], D)) + for i in xrange(H_out): + h_start = np.max((i * strides[0] - paddings[0], 0)) + h_end = np.min((i * strides[0] + ksize[0] - paddings[0], H)) + for j in xrange(W_out): + w_start = np.max((j * strides[1] - paddings[1], 0)) + w_end = np.min((j * strides[1] + ksize[1] - paddings[1], W)) + + x_masked = x[:, :, d_start:d_end, h_start:h_end, w_start:w_end] + + out[:, :, k, i, j] = np.sum(x_masked, axis=(2, 3, 4)) / ( + (d_end - d_start) * (h_end - h_start) * (w_end - w_start)) + return out + + +class TestPool3d_Op(OpTest): + def setUp(self): + self.initTestCase() + self.op_type = "pool3d" + input = np.random.random(self.shape).astype("float32") + output = self.pool3D_forward_naive(input, self.ksize, self.strides, + self.paddings) + self.inputs = {'Input': input} + + self.attrs = { + 'strides': self.strides, + 'paddings': self.paddings, + 'ksize': self.ksize, + 'pooling_type': self.pool_type, + } + + self.outputs = {'Output': output} + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(set(['Input']), 'Output', max_relative_error=0.07) + + def initTestCase(self): + self.pool_type = "ave" + self.pool3D_forward_naive = ave_pool3D_forward_naive + self.shape = [2, 3, 5, 5, 5] + self.ksize = [3, 3, 3] + self.strides = [1, 1, 1] + self.paddings = [0, 0, 0] + + +class TestCase1(TestPool3d_Op): + def initTestCase(self): + self.op_type = "pool3d" + self.pool_type = "ave" + self.pool3D_forward_naive = ave_pool3D_forward_naive + self.shape = [2, 3, 7, 7, 7] + self.ksize = [3, 3, 3] + self.strides = [1, 1, 1] + self.paddings = [1, 1, 1] + + +# class TestCase2(TestPool3d_Op): +# def initTestCase(self): +# self.op_type = "pool3d" +# self.pool_type = "max" +# self.pool3D_forward_naive = max_pool3D_forward_naive +# self.shape = [2, 3, 5, 5, 5] +# self.ksize = [3, 3, 3] +# self.strides = [1, 1, 1] +# self.paddings = [1, 1, 1] + +if __name__ == '__main__': + unittest.main() From 50b8ec0564534b07fe5fdacd91db6a21fafd0559 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Thu, 21 Sep 2017 19:21:12 +0800 Subject: [PATCH 019/342] fix unit test --- .../v2/framework/tests/test_pool2d_op.py | 22 ++++++++++--------- .../v2/framework/tests/test_pool3d_op.py | 22 ++++++++++--------- 2 files changed, 24 insertions(+), 20 deletions(-) diff --git a/python/paddle/v2/framework/tests/test_pool2d_op.py b/python/paddle/v2/framework/tests/test_pool2d_op.py index 549898ec94..cf327508f7 100644 --- a/python/paddle/v2/framework/tests/test_pool2d_op.py +++ b/python/paddle/v2/framework/tests/test_pool2d_op.py @@ -62,7 +62,8 @@ class TestPool2d_Op(OpTest): self.check_output() def test_check_grad(self): - self.check_grad(set(['Input']), 'Output', max_relative_error=0.07) + if self.pool_type != "max": + self.check_grad(set(['Input']), 'Output', max_relative_error=0.07) def initTestCase(self): self.pool_type = "ave" @@ -84,15 +85,16 @@ class TestCase2(TestPool2d_Op): self.paddings = [1, 1] -# class TestCase1(TestPool2d_Op): -# def initTestCase(self): -# self.op_type = "pool2d" -# self.pool_type = "max" -# self.pool2D_forward_naive = max_pool2D_forward_naive -# self.shape = [2, 3, 5, 5] -# self.ksize = [3, 3] -# self.strides = [1, 1] -# self.paddings = [1, 1] +class TestCase1(TestPool2d_Op): + def initTestCase(self): + self.op_type = "pool2d" + self.pool_type = "max" + self.pool2D_forward_naive = max_pool2D_forward_naive + self.shape = [2, 3, 5, 5] + self.ksize = [3, 3] + self.strides = [1, 1] + self.paddings = [1, 1] + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/v2/framework/tests/test_pool3d_op.py b/python/paddle/v2/framework/tests/test_pool3d_op.py index f8e9a768e0..cfd0ced150 100644 --- a/python/paddle/v2/framework/tests/test_pool3d_op.py +++ b/python/paddle/v2/framework/tests/test_pool3d_op.py @@ -72,7 +72,8 @@ class TestPool3d_Op(OpTest): self.check_output() def test_check_grad(self): - self.check_grad(set(['Input']), 'Output', max_relative_error=0.07) + if self.pool_type != "max": + self.check_grad(set(['Input']), 'Output', max_relative_error=0.07) def initTestCase(self): self.pool_type = "ave" @@ -94,15 +95,16 @@ class TestCase1(TestPool3d_Op): self.paddings = [1, 1, 1] -# class TestCase2(TestPool3d_Op): -# def initTestCase(self): -# self.op_type = "pool3d" -# self.pool_type = "max" -# self.pool3D_forward_naive = max_pool3D_forward_naive -# self.shape = [2, 3, 5, 5, 5] -# self.ksize = [3, 3, 3] -# self.strides = [1, 1, 1] -# self.paddings = [1, 1, 1] +class TestCase2(TestPool3d_Op): + def initTestCase(self): + self.op_type = "pool3d" + self.pool_type = "max" + self.pool3D_forward_naive = max_pool3D_forward_naive + self.shape = [2, 3, 5, 5, 5] + self.ksize = [3, 3, 3] + self.strides = [1, 1, 1] + self.paddings = [1, 1, 1] + if __name__ == '__main__': unittest.main() From 84a2512b90f39854386fb03f437ac8a92a486437 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Fri, 22 Sep 2017 10:06:03 +0800 Subject: [PATCH 020/342] fix parameter name and function define --- paddle/operators/math/pooling.cc | 24 ++--- paddle/operators/math/pooling.cu | 24 ++--- paddle/operators/math/pooling.h | 24 ++--- paddle/operators/pool_op.cc | 87 +++++++++++-------- paddle/operators/pool_op.h | 72 ++++++++------- .../v2/framework/tests/test_pool2d_op.py | 8 +- .../v2/framework/tests/test_pool3d_op.py | 8 +- 7 files changed, 129 insertions(+), 118 deletions(-) diff --git a/paddle/operators/math/pooling.cc b/paddle/operators/math/pooling.cc index 671bead1b4..5ce748ff08 100644 --- a/paddle/operators/math/pooling.cc +++ b/paddle/operators/math/pooling.cc @@ -21,10 +21,10 @@ namespace math { template class Pool2dForwardFunctor { public: - void operator()(const framework::Tensor& input, framework::Tensor& output, + void operator()(const platform::DeviceContext& context, + const framework::Tensor& input, framework::Tensor& output, std::vector& ksize, std::vector& strides, - std::vector& paddings, PoolProcess pool_process, - const platform::DeviceContext& context) { + std::vector& paddings, PoolProcess pool_process) { const int batch_size = input.dims()[0]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; @@ -75,12 +75,12 @@ class Pool2dForwardFunctor { template class Pool2dBackwardFunctor { public: - void operator()(const framework::Tensor& input, framework::Tensor& input_grad, + void operator()(const platform::DeviceContext& context, + const framework::Tensor& input, framework::Tensor& input_grad, const framework::Tensor& output, const framework::Tensor& output_grad, std::vector& ksize, std::vector& strides, std::vector& paddings, - PoolProcess pool_process, - const platform::DeviceContext& context) { + PoolProcess pool_process) { const int batch_size = input.dims()[0]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; @@ -154,10 +154,10 @@ template class Pool2dBackwardFunctor< template class Pool3dForwardFunctor { public: - void operator()(const framework::Tensor& input, framework::Tensor& output, + void operator()(const platform::DeviceContext& context, + const framework::Tensor& input, framework::Tensor& output, std::vector& ksize, std::vector& strides, - std::vector& paddings, PoolProcess pool_process, - const platform::DeviceContext& context) { + std::vector& paddings, PoolProcess pool_process) { const int batch_size = input.dims()[0]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; @@ -224,12 +224,12 @@ class Pool3dForwardFunctor { template class Pool3dBackwardFunctor { public: - void operator()(const framework::Tensor& input, framework::Tensor& input_grad, + void operator()(const platform::DeviceContext& context, + const framework::Tensor& input, framework::Tensor& input_grad, const framework::Tensor& output, const framework::Tensor& output_grad, std::vector& ksize, std::vector& strides, std::vector& paddings, - PoolProcess pool_process, - const platform::DeviceContext& context) { + PoolProcess pool_process) { const int batch_size = input.dims()[0]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; diff --git a/paddle/operators/math/pooling.cu b/paddle/operators/math/pooling.cu index ce0a01776a..124006942c 100644 --- a/paddle/operators/math/pooling.cu +++ b/paddle/operators/math/pooling.cu @@ -105,10 +105,10 @@ __global__ void KernelPool2dBackward( template class Pool2dForwardFunctor { public: - void operator()(const framework::Tensor& input, framework::Tensor& output, + void operator()(const platform::DeviceContext& context, + const framework::Tensor& input, framework::Tensor& output, std::vector& ksize, std::vector& strides, - std::vector& paddings, PoolProcess pool_process, - const platform::DeviceContext& context) { + std::vector& paddings, PoolProcess pool_process) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; @@ -148,12 +148,12 @@ class Pool2dForwardFunctor { template class Pool2dBackwardFunctor { public: - void operator()(const framework::Tensor& input, framework::Tensor& input_grad, + void operator()(const platform::DeviceContext& context, + const framework::Tensor& input, framework::Tensor& input_grad, const framework::Tensor& output, const framework::Tensor& output_grad, std::vector& ksize, std::vector& strides, std::vector& paddings, - PoolProcess pool_process, - const platform::DeviceContext& context) { + PoolProcess pool_process) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; @@ -319,10 +319,10 @@ __global__ void KernelPool3DBackward( template class Pool3dForwardFunctor { public: - void operator()(const framework::Tensor& input, framework::Tensor& output, + void operator()(const platform::DeviceContext& context, + const framework::Tensor& input, framework::Tensor& output, std::vector& ksize, std::vector& strides, - std::vector& paddings, PoolProcess pool_process, - const platform::DeviceContext& context) { + std::vector& paddings, PoolProcess pool_process) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; @@ -369,12 +369,12 @@ class Pool3dForwardFunctor { template class Pool3dBackwardFunctor { public: - void operator()(const framework::Tensor& input, framework::Tensor& input_grad, + void operator()(const platform::DeviceContext& context, + const framework::Tensor& input, framework::Tensor& input_grad, const framework::Tensor& output, const framework::Tensor& output_grad, std::vector& ksize, std::vector& strides, std::vector& paddings, - PoolProcess pool_process, - const platform::DeviceContext& context) { + PoolProcess pool_process) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; diff --git a/paddle/operators/math/pooling.h b/paddle/operators/math/pooling.h index aad5a1837b..3f01c9dacb 100644 --- a/paddle/operators/math/pooling.h +++ b/paddle/operators/math/pooling.h @@ -59,41 +59,41 @@ class avePool { template class Pool2dForwardFunctor { public: - void operator()(const framework::Tensor& input, framework::Tensor& output, + void operator()(const platform::DeviceContext& context, + const framework::Tensor& input, framework::Tensor& output, std::vector& ksize, std::vector& strides, - std::vector& paddings, PoolProcess pool_process, - const platform::DeviceContext& context); + std::vector& paddings, PoolProcess pool_process); }; template class Pool2dBackwardFunctor { public: - void operator()(const framework::Tensor& input, framework::Tensor& input_grad, + void operator()(const platform::DeviceContext& context, + const framework::Tensor& input, framework::Tensor& input_grad, const framework::Tensor& output, const framework::Tensor& output_grad, std::vector& ksize, std::vector& strides, std::vector& paddings, - PoolProcess pool_process, - const platform::DeviceContext& context); + PoolProcess pool_process); }; template class Pool3dForwardFunctor { public: - void operator()(const framework::Tensor& input, framework::Tensor& output, + void operator()(const platform::DeviceContext& context, + const framework::Tensor& input, framework::Tensor& output, std::vector& ksize, std::vector& strides, - std::vector& paddings, PoolProcess pool_process, - const platform::DeviceContext& context); + std::vector& paddings, PoolProcess pool_process); }; template class Pool3dBackwardFunctor { public: - void operator()(const framework::Tensor& input, framework::Tensor& input_grad, + void operator()(const platform::DeviceContext& context, + const framework::Tensor& input, framework::Tensor& input_grad, const framework::Tensor& output, const framework::Tensor& output_grad, std::vector& ksize, std::vector& strides, std::vector& paddings, - PoolProcess pool_process, - const platform::DeviceContext& context); + PoolProcess pool_process); }; } // namespace math diff --git a/paddle/operators/pool_op.cc b/paddle/operators/pool_op.cc index 1d79629d73..c23adf63bf 100644 --- a/paddle/operators/pool_op.cc +++ b/paddle/operators/pool_op.cc @@ -28,18 +28,18 @@ class PoolOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { - PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Input"), - "Input(Input) of Pooling should not be null."); - PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Output"), - "Output(Output) of Pooling should not be null."); - // PADDLE_ENFORCE_NOT_NULL(Attr("pooling_type"), + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), + "X(Input) of Pooling should not be null."); + PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"), + "Out(Output) of Pooling should not be null."); + // PADDLE_ENFORCE_NOT_NULL(Attr("poolingType"), // "pooling_type should not be null."); // PADDLE_ENFORCE_NOT_NULL(Attr>("ksize"), "ksize should // not be null."); - auto input = ctx.Input("Input"); - auto output = ctx.Output("Output"); - int global_pooling = Attr("global_pooling"); - std::string pooling_type = Attr("pooling_type"); + auto in_X = ctx.Input("X"); + auto out = ctx.Output("Out"); + int global_pooling = Attr("globalPooling"); + std::string pooling_type = Attr("poolingType"); std::vector ksize = Attr>("ksize"); std::vector strides = Attr>("strides"); std::vector paddings = Attr>("paddings"); @@ -50,25 +50,25 @@ class PoolOp : public framework::OperatorWithKernel { "Pooling ksize should be 2-D or 3-D"); if (global_pooling == 1) { - for (size_t i = 0; i < ksize.size(); ++i) ksize[i] = input->dims()[i + 2]; + for (size_t i = 0; i < ksize.size(); ++i) ksize[i] = in_X->dims()[i + 2]; } if (ksize.size() == 2) { - PADDLE_ENFORCE_EQ(input->dims().size(), 4, + PADDLE_ENFORCE_EQ(in_X->dims().size(), 4, "Pool2DOp intput should be 4-D."); PADDLE_ENFORCE_EQ(strides.size(), 2, "Pool2DOp strides should be 2-D."); PADDLE_ENFORCE_EQ(paddings.size(), 2, "Pool2DOp paddings should be 2-D."); } else { - PADDLE_ENFORCE_EQ(input->dims().size(), 5, + PADDLE_ENFORCE_EQ(in_X->dims().size(), 5, "Pool3DOp intput should be 5-D."); PADDLE_ENFORCE_EQ(strides.size(), 3, "Pool3DOp strides should be 3-D."); PADDLE_ENFORCE_EQ(paddings.size(), 3, "Pool3DOp paddings should be 3-D."); } - std::vector output_shape({input->dims()[0], input->dims()[1]}); + std::vector output_shape({in_X->dims()[0], in_X->dims()[1]}); for (size_t i = 0; i < ksize.size(); ++i) { - output_shape.push_back(outputSize_pool(input->dims()[i + 2], ksize[i], + output_shape.push_back(outputSize_pool(in_X->dims()[i + 2], ksize[i], paddings[i], strides[i])); } - output->Resize(framework::make_ddim(output_shape)); + out->Resize(framework::make_ddim(output_shape)); } }; @@ -78,9 +78,8 @@ class PoolOpGrad : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { - auto in = ctx.Input("Input"); - auto d_in = - ctx.Output(framework::GradVarName("Input")); + auto in = ctx.Input("X"); + auto d_in = ctx.Output(framework::GradVarName("X")); if (d_in) d_in->Resize(in->dims()); } }; @@ -90,27 +89,36 @@ class Pool3dOpMaker : public framework::OpProtoAndCheckerMaker { Pool3dOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput( - "Input", + "X", "The input tensor of pooling operator. " "The format of input tensor is NCDHW. Where N is batch size, C is the " "number of channels, D, H and W is the depth, height and width of " "image."); - AddOutput("Output", + AddOutput("Out", "The output tensor of pooling operator." "The format of output tensor is also NCDHW."); - AddAttr("pooling_type", - "pooling_type of pooling operator.['max' or 'ave']"); - AddAttr>("ksize", "strides of pooling operator."); - AddAttr("global_pooling", "whether to use the global_pooling.") + AddAttr("poolingType", + "poolingType of pooling operator.['max' or 'ave']"); + AddAttr>( + "ksize", "pooling size(depth, height, width) of pooling operator."); + AddAttr("globalPooling", + "default 0" + "whether to use the globalPooling.") .SetDefault(0); - AddAttr>("strides", "strides of pooling operator.") + AddAttr>( + "strides", + "default {1,1,1}" + "strides(depth, height, width) of pooling operator.") .SetDefault({1, 1, 1}); - AddAttr>("paddings", "paddings of pooling operator.") + AddAttr>( + "paddings", + "default {0,0,0}" + "paddings(depth, height, width) of pooling operator.") .SetDefault({0, 0, 0}); AddComment(R"DOC( The pooling3d operation calculates the output based on -the input, pooling_type and ksize, strides, paddings parameters. +the input, poolingType and ksize, strides, paddings parameters. )DOC"); } }; @@ -120,26 +128,33 @@ class Pool2dOpMaker : public framework::OpProtoAndCheckerMaker { Pool2dOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput( - "Input", + "X", "The input tensor of pooling operator. " "The format of input tensor is NCHW. Where N is batch size, C is the " "number of channels, H and W is the height and width of image."); - AddOutput("Output", + AddOutput("Out", "The output tensor of pooling operator." "The format of output tensor is also NCHW."); - AddAttr("pooling_type", - "pooling_type of pooling operator.['max' or 'ave']"); - AddAttr>("ksize", "strides of pooling operator."); - AddAttr("global_pooling", "whether to use the global_pooling.") + AddAttr("poolingType", + "poolingType of pooling operator.['max' or 'ave']"); + AddAttr>( + "ksize", "pooling size(height, width) of pooling operator."); + AddAttr("globalPooling", + "default 0" + "whether to use the globalPooling.[0 or 1]") .SetDefault(0); - AddAttr>("strides", "strides of pooling operator.") + AddAttr>("strides", + "default {1, 1}" + "strides(height, width) of pooling operator.") .SetDefault({1, 1}); - AddAttr>("paddings", "paddings of pooling operator.") + AddAttr>("paddings", + "default {0, 0}" + "paddings(height, width) of pooling operator.") .SetDefault({0, 0}); AddComment(R"DOC( The pooling2d operation calculates the output based on -the input, pooling_type and ksize, strides, paddings parameters. +the input, poolingType and ksize, strides, paddings parameters. )DOC"); } }; diff --git a/paddle/operators/pool_op.h b/paddle/operators/pool_op.h index 16779cbb91..2e737f0a4b 100644 --- a/paddle/operators/pool_op.h +++ b/paddle/operators/pool_op.h @@ -28,17 +28,17 @@ template class PoolKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { - const Tensor* input = context.Input("Input"); - Tensor* output = context.Output("Output"); + const Tensor* in_X = context.Input("X"); + Tensor* out = context.Output("Out"); - int global_pooling = context.Attr("global_pooling"); - std::string pooling_type = context.Attr("pooling_type"); + int global_pooling = context.Attr("globalPooling"); + std::string pooling_type = context.Attr("poolingType"); std::vector ksize = context.Attr>("ksize"); std::vector strides = context.Attr>("strides"); std::vector paddings = context.Attr>("paddings"); if (global_pooling == 1) { for (size_t i = 0; i < ksize.size(); ++i) { - ksize[i] = input->dims()[i + 2]; + ksize[i] = in_X->dims()[i + 2]; } } @@ -49,16 +49,16 @@ class PoolKernel : public framework::OpKernel { Place, paddle::operators::math::pool::maxPool, T> pool2d_forward; paddle::operators::math::pool::maxPool pool_process; - pool2d_forward(*input, *output, ksize, strides, paddings, - pool_process, context.device_context()); + pool2d_forward(context.device_context(), *in_X, *out, ksize, strides, + paddings, pool_process); } else if (pooling_type == "ave") { paddle::operators::math::Pool2dForwardFunctor< Place, paddle::operators::math::pool::avePool, T> pool2d_forward; paddle::operators::math::pool::avePool pool_process; - pool2d_forward(*input, *output, ksize, strides, paddings, - pool_process, (context.device_context())); + pool2d_forward(context.device_context(), *in_X, *out, ksize, strides, + paddings, pool_process); } } break; case 3: { @@ -67,15 +67,15 @@ class PoolKernel : public framework::OpKernel { Place, paddle::operators::math::pool::maxPool, T> pool3d_forward; paddle::operators::math::pool::maxPool pool_process; - pool3d_forward(*input, *output, ksize, strides, paddings, - pool_process, context.device_context()); + pool3d_forward(context.device_context(), *in_X, *out, ksize, strides, + paddings, pool_process); } else if (pooling_type == "ave") { paddle::operators::math::Pool3dForwardFunctor< Place, paddle::operators::math::pool::avePool, T> pool3d_forward; paddle::operators::math::pool::avePool pool_process; - pool3d_forward(*input, *output, ksize, strides, paddings, - pool_process, context.device_context()); + pool3d_forward(context.device_context(), *in_X, *out, ksize, strides, + paddings, pool_process); } } break; } @@ -86,26 +86,26 @@ template class PoolGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { - const Tensor* input = context.Input("Input"); - const Tensor* output = context.Input("Output"); - const Tensor* output_grad = - context.Input(framework::GradVarName("Output")); - Tensor* input_grad = - context.Output(framework::GradVarName("Input")); - - int global_pooling = context.Attr("global_pooling"); - std::string pooling_type = context.Attr("pooling_type"); + const Tensor* in_X = context.Input("X"); + const Tensor* out = context.Input("Out"); + const Tensor* out_grad = + context.Input(framework::GradVarName("Out")); + Tensor* in_X_grad = + context.Output(framework::GradVarName("X")); + + int global_pooling = context.Attr("globalPooling"); + std::string pooling_type = context.Attr("poolingType"); std::vector ksize = context.Attr>("ksize"); std::vector strides = context.Attr>("strides"); std::vector paddings = context.Attr>("paddings"); if (global_pooling == 1) { - for (size_t i = 0; i < ksize.size(); ++i) ksize[i] = input->dims()[i + 2]; + for (size_t i = 0; i < ksize.size(); ++i) ksize[i] = in_X->dims()[i + 2]; } - if (input_grad) { - input_grad->mutable_data(context.GetPlace()); - auto temp = framework::EigenVector::Flatten(*input_grad); + if (in_X_grad) { + in_X_grad->mutable_data(context.GetPlace()); + auto temp = framework::EigenVector::Flatten(*in_X_grad); temp.device(context.GetEigenDevice()) = temp.constant(static_cast(0)); @@ -116,17 +116,15 @@ class PoolGradKernel : public framework::OpKernel { Place, paddle::operators::math::pool::maxPool, T> pool2d_backward; paddle::operators::math::pool::maxPool pool_process; - pool2d_backward(*input, *input_grad, *output, *output_grad, ksize, - strides, paddings, pool_process, - context.device_context()); + pool2d_backward(context.device_context(), *in_X, *in_X_grad, *out, + *out_grad, ksize, strides, paddings, pool_process); } else if (pooling_type == "ave") { paddle::operators::math::Pool2dBackwardFunctor< Place, paddle::operators::math::pool::avePool, T> pool2d_backward; paddle::operators::math::pool::avePool pool_process; - pool2d_backward(*input, *input_grad, *output, *output_grad, ksize, - strides, paddings, pool_process, - context.device_context()); + pool2d_backward(context.device_context(), *in_X, *in_X_grad, *out, + *out_grad, ksize, strides, paddings, pool_process); } } break; case 3: { @@ -135,17 +133,15 @@ class PoolGradKernel : public framework::OpKernel { Place, paddle::operators::math::pool::maxPool, T> pool3d_backward; paddle::operators::math::pool::maxPool pool_process; - pool3d_backward(*input, *input_grad, *output, *output_grad, ksize, - strides, paddings, pool_process, - context.device_context()); + pool3d_backward(context.device_context(), *in_X, *in_X_grad, *out, + *out_grad, ksize, strides, paddings, pool_process); } else if (pooling_type == "ave") { paddle::operators::math::Pool3dBackwardFunctor< Place, paddle::operators::math::pool::avePool, T> pool3d_backward; paddle::operators::math::pool::avePool pool_process; - pool3d_backward(*input, *input_grad, *output, *output_grad, ksize, - strides, paddings, pool_process, - context.device_context()); + pool3d_backward(context.device_context(), *in_X, *in_X_grad, *out, + *out_grad, ksize, strides, paddings, pool_process); } } break; } diff --git a/python/paddle/v2/framework/tests/test_pool2d_op.py b/python/paddle/v2/framework/tests/test_pool2d_op.py index cf327508f7..2a8fedc037 100644 --- a/python/paddle/v2/framework/tests/test_pool2d_op.py +++ b/python/paddle/v2/framework/tests/test_pool2d_op.py @@ -47,23 +47,23 @@ class TestPool2d_Op(OpTest): input = np.random.random(self.shape).astype("float32") output = self.pool2D_forward_naive(input, self.ksize, self.strides, self.paddings) - self.inputs = {'Input': input} + self.inputs = {'X': input} self.attrs = { 'strides': self.strides, 'paddings': self.paddings, 'ksize': self.ksize, - 'pooling_type': self.pool_type, + 'poolingType': self.pool_type, } - self.outputs = {'Output': output} + self.outputs = {'Out': output} def test_check_output(self): self.check_output() def test_check_grad(self): if self.pool_type != "max": - self.check_grad(set(['Input']), 'Output', max_relative_error=0.07) + self.check_grad(set(['X']), 'Out', max_relative_error=0.07) def initTestCase(self): self.pool_type = "ave" diff --git a/python/paddle/v2/framework/tests/test_pool3d_op.py b/python/paddle/v2/framework/tests/test_pool3d_op.py index cfd0ced150..907ee0c0fe 100644 --- a/python/paddle/v2/framework/tests/test_pool3d_op.py +++ b/python/paddle/v2/framework/tests/test_pool3d_op.py @@ -57,23 +57,23 @@ class TestPool3d_Op(OpTest): input = np.random.random(self.shape).astype("float32") output = self.pool3D_forward_naive(input, self.ksize, self.strides, self.paddings) - self.inputs = {'Input': input} + self.inputs = {'X': input} self.attrs = { 'strides': self.strides, 'paddings': self.paddings, 'ksize': self.ksize, - 'pooling_type': self.pool_type, + 'poolingType': self.pool_type, } - self.outputs = {'Output': output} + self.outputs = {'Out': output} def test_check_output(self): self.check_output() def test_check_grad(self): if self.pool_type != "max": - self.check_grad(set(['Input']), 'Output', max_relative_error=0.07) + self.check_grad(set(['X']), 'Out', max_relative_error=0.07) def initTestCase(self): self.pool_type = "ave" From 6f61b5df7dd55f3f4ee5b7f42c7e31e9f84ea1be Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Fri, 22 Sep 2017 10:49:59 +0800 Subject: [PATCH 021/342] fix unit test --- paddle/operators/pool_op.cc | 13 +++--- .../v2/framework/tests/test_pool2d_op.py | 40 +++++++++++++++++-- .../v2/framework/tests/test_pool3d_op.py | 40 ++++++++++++++++--- 3 files changed, 77 insertions(+), 16 deletions(-) diff --git a/paddle/operators/pool_op.cc b/paddle/operators/pool_op.cc index c23adf63bf..bcda072a2d 100644 --- a/paddle/operators/pool_op.cc +++ b/paddle/operators/pool_op.cc @@ -46,20 +46,19 @@ class PoolOp : public framework::OperatorWithKernel { PADDLE_ENFORCE(pooling_type == "max" || pooling_type == "ave", "pooling_type should be 'max' or 'ave'"); - PADDLE_ENFORCE(ksize.size() == 2 || ksize.size() == 3, - "Pooling ksize should be 2-D or 3-D"); + PADDLE_ENFORCE(in_X->dims().size() == 4 || in_X->dims().size() == 5, + "Pooling intput should be 4-D or 5-D"); if (global_pooling == 1) { - for (size_t i = 0; i < ksize.size(); ++i) ksize[i] = in_X->dims()[i + 2]; + ksize.resize(static_cast(in_X->dims().size()) - 2); + for (size_t i = 0; i < ksize.size(); ++i) + ksize[i] = static_cast(in_X->dims()[i + 2]); } + if (ksize.size() == 2) { - PADDLE_ENFORCE_EQ(in_X->dims().size(), 4, - "Pool2DOp intput should be 4-D."); PADDLE_ENFORCE_EQ(strides.size(), 2, "Pool2DOp strides should be 2-D."); PADDLE_ENFORCE_EQ(paddings.size(), 2, "Pool2DOp paddings should be 2-D."); } else { - PADDLE_ENFORCE_EQ(in_X->dims().size(), 5, - "Pool3DOp intput should be 5-D."); PADDLE_ENFORCE_EQ(strides.size(), 3, "Pool3DOp strides should be 3-D."); PADDLE_ENFORCE_EQ(paddings.size(), 3, "Pool3DOp paddings should be 3-D."); } diff --git a/python/paddle/v2/framework/tests/test_pool2d_op.py b/python/paddle/v2/framework/tests/test_pool2d_op.py index 2a8fedc037..0efddc2881 100644 --- a/python/paddle/v2/framework/tests/test_pool2d_op.py +++ b/python/paddle/v2/framework/tests/test_pool2d_op.py @@ -3,9 +3,11 @@ import numpy as np from op_test import OpTest -def max_pool2D_forward_naive(x, ksize, strides, paddings=[0, 0]): +def max_pool2D_forward_naive(x, ksize, strides, paddings=[0, 0], global_pool=0): N, C, H, W = x.shape + if global_pool == 1: + ksize = [H, W] H_out = (H - ksize[0] + 2 * paddings[0]) / strides[0] + 1 W_out = (W - ksize[1] + 2 * paddings[1]) / strides[1] + 1 out = np.zeros((N, C, H_out, W_out)) @@ -21,9 +23,11 @@ def max_pool2D_forward_naive(x, ksize, strides, paddings=[0, 0]): return out -def ave_pool2D_forward_naive(x, ksize, strides, paddings=[0, 0]): +def ave_pool2D_forward_naive(x, ksize, strides, paddings=[0, 0], global_pool=0): N, C, H, W = x.shape + if global_pool == 1: + ksize = [H, W] H_out = (H - ksize[0] + 2 * paddings[0]) / strides[0] + 1 W_out = (W - ksize[1] + 2 * paddings[1]) / strides[1] + 1 out = np.zeros((N, C, H_out, W_out)) @@ -46,7 +50,7 @@ class TestPool2d_Op(OpTest): self.op_type = "pool2d" input = np.random.random(self.shape).astype("float32") output = self.pool2D_forward_naive(input, self.ksize, self.strides, - self.paddings) + self.paddings, self.global_pool) self.inputs = {'X': input} self.attrs = { @@ -54,6 +58,7 @@ class TestPool2d_Op(OpTest): 'paddings': self.paddings, 'ksize': self.ksize, 'poolingType': self.pool_type, + 'globalPooling': self.global_pool, } self.outputs = {'Out': output} @@ -66,6 +71,7 @@ class TestPool2d_Op(OpTest): self.check_grad(set(['X']), 'Out', max_relative_error=0.07) def initTestCase(self): + self.global_pool = 0 self.pool_type = "ave" self.pool2D_forward_naive = ave_pool2D_forward_naive self.shape = [2, 3, 5, 5] @@ -74,8 +80,21 @@ class TestPool2d_Op(OpTest): self.paddings = [0, 0] +class TestCase1(TestPool2d_Op): + def initTestCase(self): + self.global_pool = 0 + self.op_type = "pool2d" + self.pool_type = "ave" + self.pool2D_forward_naive = ave_pool2D_forward_naive + self.shape = [2, 3, 5, 5] + self.ksize = [3, 3] + self.strides = [1, 1] + self.paddings = [1, 1] + + class TestCase2(TestPool2d_Op): def initTestCase(self): + self.global_pool = 1 self.op_type = "pool2d" self.pool_type = "ave" self.pool2D_forward_naive = ave_pool2D_forward_naive @@ -85,8 +104,21 @@ class TestCase2(TestPool2d_Op): self.paddings = [1, 1] -class TestCase1(TestPool2d_Op): +class TestCase3(TestPool2d_Op): + def initTestCase(self): + self.global_pool = 0 + self.op_type = "pool2d" + self.pool_type = "max" + self.pool2D_forward_naive = max_pool2D_forward_naive + self.shape = [2, 3, 5, 5] + self.ksize = [3, 3] + self.strides = [1, 1] + self.paddings = [1, 1] + + +class TestCase4(TestPool2d_Op): def initTestCase(self): + self.global_pool = 1 self.op_type = "pool2d" self.pool_type = "max" self.pool2D_forward_naive = max_pool2D_forward_naive diff --git a/python/paddle/v2/framework/tests/test_pool3d_op.py b/python/paddle/v2/framework/tests/test_pool3d_op.py index 907ee0c0fe..4ba3d754d5 100644 --- a/python/paddle/v2/framework/tests/test_pool3d_op.py +++ b/python/paddle/v2/framework/tests/test_pool3d_op.py @@ -3,9 +3,11 @@ import numpy as np from op_test import OpTest -def max_pool3D_forward_naive(x, ksize, strides, paddings=[0, 0]): +def max_pool3D_forward_naive(x, ksize, strides, paddings=[0, 0], global_pool=0): N, C, D, H, W = x.shape + if global_pool == 1: + ksize = [D, H, W] D_out = (D - ksize[0] + 2 * paddings[0]) / strides[0] + 1 H_out = (H - ksize[1] + 2 * paddings[1]) / strides[1] + 1 W_out = (W - ksize[2] + 2 * paddings[2]) / strides[2] + 1 @@ -19,16 +21,17 @@ def max_pool3D_forward_naive(x, ksize, strides, paddings=[0, 0]): for j in xrange(W_out): w_start = np.max((j * strides[1] - paddings[1], 0)) w_end = np.min((j * strides[1] + ksize[1] - paddings[1], W)) - x_masked = x[:, :, d_start:d_end, h_start:h_end, w_start:w_end] out[:, :, k, i, j] = np.max(x_masked, axis=(2, 3, 4)) return out -def ave_pool3D_forward_naive(x, ksize, strides, paddings=[0, 0]): +def ave_pool3D_forward_naive(x, ksize, strides, paddings=[0, 0], global_pool=0): N, C, D, H, W = x.shape + if global_pool == 1: + ksize = [D, H, W] D_out = (D - ksize[0] + 2 * paddings[0]) / strides[0] + 1 H_out = (H - ksize[1] + 2 * paddings[1]) / strides[1] + 1 W_out = (W - ksize[2] + 2 * paddings[2]) / strides[2] + 1 @@ -42,7 +45,6 @@ def ave_pool3D_forward_naive(x, ksize, strides, paddings=[0, 0]): for j in xrange(W_out): w_start = np.max((j * strides[1] - paddings[1], 0)) w_end = np.min((j * strides[1] + ksize[1] - paddings[1], W)) - x_masked = x[:, :, d_start:d_end, h_start:h_end, w_start:w_end] out[:, :, k, i, j] = np.sum(x_masked, axis=(2, 3, 4)) / ( @@ -56,7 +58,7 @@ class TestPool3d_Op(OpTest): self.op_type = "pool3d" input = np.random.random(self.shape).astype("float32") output = self.pool3D_forward_naive(input, self.ksize, self.strides, - self.paddings) + self.paddings, self.global_pool) self.inputs = {'X': input} self.attrs = { @@ -64,6 +66,7 @@ class TestPool3d_Op(OpTest): 'paddings': self.paddings, 'ksize': self.ksize, 'poolingType': self.pool_type, + 'globalPooling': self.global_pool, } self.outputs = {'Out': output} @@ -76,6 +79,7 @@ class TestPool3d_Op(OpTest): self.check_grad(set(['X']), 'Out', max_relative_error=0.07) def initTestCase(self): + self.global_pool = 0 self.pool_type = "ave" self.pool3D_forward_naive = ave_pool3D_forward_naive self.shape = [2, 3, 5, 5, 5] @@ -86,6 +90,7 @@ class TestPool3d_Op(OpTest): class TestCase1(TestPool3d_Op): def initTestCase(self): + self.global_pool = 0 self.op_type = "pool3d" self.pool_type = "ave" self.pool3D_forward_naive = ave_pool3D_forward_naive @@ -97,6 +102,31 @@ class TestCase1(TestPool3d_Op): class TestCase2(TestPool3d_Op): def initTestCase(self): + self.global_pool = 1 + self.op_type = "pool3d" + self.pool_type = "ave" + self.pool3D_forward_naive = ave_pool3D_forward_naive + self.shape = [2, 3, 7, 7, 7] + self.ksize = [3, 3, 3] + self.strides = [1, 1, 1] + self.paddings = [1, 1, 1] + + +class TestCase3(TestPool3d_Op): + def initTestCase(self): + self.global_pool = 0 + self.op_type = "pool3d" + self.pool_type = "max" + self.pool3D_forward_naive = max_pool3D_forward_naive + self.shape = [2, 3, 5, 5, 5] + self.ksize = [3, 3, 3] + self.strides = [1, 1, 1] + self.paddings = [1, 1, 1] + + +class TestCase4(TestPool3d_Op): + def initTestCase(self): + self.global_pool = 1 self.op_type = "pool3d" self.pool_type = "max" self.pool3D_forward_naive = max_pool3D_forward_naive From 0417e4e4bf205566ca882f3878c0a64417f5da96 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Fri, 22 Sep 2017 12:02:41 +0800 Subject: [PATCH 022/342] fix framework::LoDTensor => Tensor --- paddle/operators/math/pooling.h | 26 +++++++++++--------------- paddle/operators/pool_op.cc | 4 ++-- paddle/operators/pool_op.h | 3 +-- paddle/platform/hostdevice.h | 2 ++ 4 files changed, 16 insertions(+), 19 deletions(-) diff --git a/paddle/operators/math/pooling.h b/paddle/operators/math/pooling.h index 3f01c9dacb..627ece2ca4 100644 --- a/paddle/operators/math/pooling.h +++ b/paddle/operators/math/pooling.h @@ -16,17 +16,13 @@ limitations under the License. */ #include "paddle/framework/eigen.h" #include "paddle/framework/tensor.h" #include "paddle/platform/device_context.h" +#include "paddle/platform/hostdevice.h" namespace paddle { namespace operators { namespace math { ////////////////////// -#ifdef __NVCC__ -#define HL_DEVICE __device__ -#else -#define HL_DEVICE -#endif #define FLT_MAX __FLT_MAX__ ///////////////////// @@ -34,11 +30,11 @@ namespace pool { template class maxPool { public: - HL_DEVICE inline T initial() { return -(T)(FLT_MAX); } - HL_DEVICE inline void process(T& y, const T& x) { y = y > x ? y : x; } - HL_DEVICE inline void finalize(T& y, const T& poo_size) {} - HL_DEVICE inline void gradProcess(const T& x, const T& y, const T& dy, T& dx, - T scale) { + DEVICE inline T initial() { return static_cast(-FLT_MAX); } + DEVICE inline void process(T& y, const T& x) { y = y > x ? y : x; } + DEVICE inline void finalize(T& y, const T& poo_size) {} + DEVICE inline void gradProcess(const T& x, const T& y, const T& dy, T& dx, + T scale) { dx += dy * (x == y); } }; @@ -46,11 +42,11 @@ class maxPool { template class avePool { public: - HL_DEVICE inline T initial() { return 0; } - HL_DEVICE inline void process(T& y, const T& x) { y += x; } - HL_DEVICE inline void finalize(T& y, const T& poo_size) { y /= poo_size; } - HL_DEVICE inline void gradProcess(const T& x, const T& y, const T& dy, T& dx, - T scale) { + DEVICE inline T initial() { return static_cast(0); } + DEVICE inline void process(T& y, const T& x) { y += x; } + DEVICE inline void finalize(T& y, const T& poo_size) { y /= poo_size; } + DEVICE inline void gradProcess(const T& x, const T& y, const T& dy, T& dx, + T scale) { dx += (scale * dy); } }; diff --git a/paddle/operators/pool_op.cc b/paddle/operators/pool_op.cc index bcda072a2d..434aae811c 100644 --- a/paddle/operators/pool_op.cc +++ b/paddle/operators/pool_op.cc @@ -37,7 +37,7 @@ class PoolOp : public framework::OperatorWithKernel { // PADDLE_ENFORCE_NOT_NULL(Attr>("ksize"), "ksize should // not be null."); auto in_X = ctx.Input("X"); - auto out = ctx.Output("Out"); + auto out = ctx.Output("Out"); int global_pooling = Attr("globalPooling"); std::string pooling_type = Attr("poolingType"); std::vector ksize = Attr>("ksize"); @@ -78,7 +78,7 @@ class PoolOpGrad : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { auto in = ctx.Input("X"); - auto d_in = ctx.Output(framework::GradVarName("X")); + auto d_in = ctx.Output(framework::GradVarName("X")); if (d_in) d_in->Resize(in->dims()); } }; diff --git a/paddle/operators/pool_op.h b/paddle/operators/pool_op.h index 2e737f0a4b..5a40f76172 100644 --- a/paddle/operators/pool_op.h +++ b/paddle/operators/pool_op.h @@ -90,8 +90,7 @@ class PoolGradKernel : public framework::OpKernel { const Tensor* out = context.Input("Out"); const Tensor* out_grad = context.Input(framework::GradVarName("Out")); - Tensor* in_X_grad = - context.Output(framework::GradVarName("X")); + Tensor* in_X_grad = context.Output(framework::GradVarName("X")); int global_pooling = context.Attr("globalPooling"); std::string pooling_type = context.Attr("poolingType"); diff --git a/paddle/platform/hostdevice.h b/paddle/platform/hostdevice.h index e7de86b7b2..eb2df291cc 100644 --- a/paddle/platform/hostdevice.h +++ b/paddle/platform/hostdevice.h @@ -2,8 +2,10 @@ #ifdef __CUDACC__ #define HOSTDEVICE __host__ __device__ +#define DEVICE __device__ #define HOST __host__ #else #define HOSTDEVICE +#define DEVICE #define HOST #endif From f2ccc11f592203aebe73fd71fb5b4f538a22d128 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Fri, 22 Sep 2017 18:15:46 +0800 Subject: [PATCH 023/342] fix pool doc (pool_op.cc) --- paddle/operators/pool_op.cc | 115 +++++++++++++++++++----------------- 1 file changed, 62 insertions(+), 53 deletions(-) diff --git a/paddle/operators/pool_op.cc b/paddle/operators/pool_op.cc index 434aae811c..c57922373f 100644 --- a/paddle/operators/pool_op.cc +++ b/paddle/operators/pool_op.cc @@ -32,10 +32,7 @@ class PoolOp : public framework::OperatorWithKernel { "X(Input) of Pooling should not be null."); PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"), "Out(Output) of Pooling should not be null."); - // PADDLE_ENFORCE_NOT_NULL(Attr("poolingType"), - // "pooling_type should not be null."); - // PADDLE_ENFORCE_NOT_NULL(Attr>("ksize"), "ksize should - // not be null."); + auto in_X = ctx.Input("X"); auto out = ctx.Output("Out"); int global_pooling = Attr("globalPooling"); @@ -56,11 +53,15 @@ class PoolOp : public framework::OperatorWithKernel { } if (ksize.size() == 2) { - PADDLE_ENFORCE_EQ(strides.size(), 2, "Pool2DOp strides should be 2-D."); - PADDLE_ENFORCE_EQ(paddings.size(), 2, "Pool2DOp paddings should be 2-D."); + PADDLE_ENFORCE_EQ(strides.size(), 2, + "Pool2DOp strides size should be 2 elements."); + PADDLE_ENFORCE_EQ(paddings.size(), 2, + "Pool2DOp paddings size should be 2 elements"); } else { - PADDLE_ENFORCE_EQ(strides.size(), 3, "Pool3DOp strides should be 3-D."); - PADDLE_ENFORCE_EQ(paddings.size(), 3, "Pool3DOp paddings should be 3-D."); + PADDLE_ENFORCE_EQ(strides.size(), 3, + "Pool3DOp strides should be 3 elements."); + PADDLE_ENFORCE_EQ(paddings.size(), 3, + "Pool3DOp paddings should be 3 elements."); } std::vector output_shape({in_X->dims()[0], in_X->dims()[1]}); for (size_t i = 0; i < ksize.size(); ++i) { @@ -83,76 +84,84 @@ class PoolOpGrad : public framework::OperatorWithKernel { } }; -class Pool3dOpMaker : public framework::OpProtoAndCheckerMaker { +class Pool2dOpMaker : public framework::OpProtoAndCheckerMaker { public: - Pool3dOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) + Pool2dOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput( "X", "The input tensor of pooling operator. " - "The format of input tensor is NCDHW. Where N is batch size, C is the " - "number of channels, D, H and W is the depth, height and width of " - "image."); + "The format of input tensor is NCHW. Where N is batch size, C is the " + "number of channels, H and W is the height and width of image."); AddOutput("Out", "The output tensor of pooling operator." - "The format of output tensor is also NCDHW."); + "The format of output tensor is also NCHW."); AddAttr("poolingType", - "poolingType of pooling operator.['max' or 'ave']"); + "poolingType of pooling operator." + "str constant equal to 'max' or 'ave'"); AddAttr>( - "ksize", "pooling size(depth, height, width) of pooling operator."); - AddAttr("globalPooling", - "default 0" - "whether to use the globalPooling.") + "ksize", "pooling size(height, width) of pooling operator."); + AddAttr( + "globalPooling", + "whether to use the globalPooling." + "int constant equal to 0 or 1" + "default 0" + "If globalPooling = 1, ksize is ignored and need not be specified.") .SetDefault(0); - AddAttr>( - "strides", - "default {1,1,1}" - "strides(depth, height, width) of pooling operator.") - .SetDefault({1, 1, 1}); - AddAttr>( - "paddings", - "default {0,0,0}" - "paddings(depth, height, width) of pooling operator.") - .SetDefault({0, 0, 0}); + AddAttr>("strides", + "strides(height, width) of pooling operator." + "default {1,1}") + .SetDefault({1, 1}); + AddAttr>("paddings", + "paddings(height, width) of pooling operator." + "default {0,0}") + .SetDefault({0, 0}); + AddComment(R"DOC( -The pooling3d operation calculates the output based on +The pooling2d operation calculates the output based on the input, poolingType and ksize, strides, paddings parameters. )DOC"); } }; - -class Pool2dOpMaker : public framework::OpProtoAndCheckerMaker { +class Pool3dOpMaker : public framework::OpProtoAndCheckerMaker { public: - Pool2dOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) + Pool3dOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput( - "X", - "The input tensor of pooling operator. " - "The format of input tensor is NCHW. Where N is batch size, C is the " - "number of channels, H and W is the height and width of image."); + AddInput("X", + "The input tensor of pooling operator. " + "The format of input tensor is NCDHW. Where N is batch size, C is " + "the " + "number of channels, D, H and W is the depth, height and width of " + "image."); AddOutput("Out", "The output tensor of pooling operator." - "The format of output tensor is also NCHW."); + "The format of output tensor is also NCDHW."); AddAttr("poolingType", - "poolingType of pooling operator.['max' or 'ave']"); + "poolingType of pooling operator." + "str constant equal to 'max' or 'ave'"); AddAttr>( - "ksize", "pooling size(height, width) of pooling operator."); - AddAttr("globalPooling", - "default 0" - "whether to use the globalPooling.[0 or 1]") + "ksize", "pooling size(depth, height, width) of pooling operator."); + AddAttr( + "globalPooling", + "whether to use the globalPooling." + "int constant equal to 0 or 1" + "default 0" + "If globalPooling = 1, ksize is ignored and need not be specified.") .SetDefault(0); - AddAttr>("strides", - "default {1, 1}" - "strides(height, width) of pooling operator.") - .SetDefault({1, 1}); - AddAttr>("paddings", - "default {0, 0}" - "paddings(height, width) of pooling operator.") - .SetDefault({0, 0}); + AddAttr>( + "strides", + "strides(depth, height, width) of pooling operator." + "default {1,1,1}") + .SetDefault({1, 1, 1}); + AddAttr>( + "paddings", + "paddings(depth, height, width) of pooling operator." + "default {0,0,0}") + .SetDefault({0, 0, 0}); AddComment(R"DOC( -The pooling2d operation calculates the output based on +The pooling3d operation calculates the output based on the input, poolingType and ksize, strides, paddings parameters. )DOC"); } From b0a47b149d981fd3a4ad8d9bf03c9bd0d0cc96d1 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Sat, 23 Sep 2017 14:55:02 +0800 Subject: [PATCH 024/342] remove CHECK in *.cu --- paddle/operators/math/pooling.cu | 9 --------- 1 file changed, 9 deletions(-) diff --git a/paddle/operators/math/pooling.cu b/paddle/operators/math/pooling.cu index 124006942c..09e6bd9000 100644 --- a/paddle/operators/math/pooling.cu +++ b/paddle/operators/math/pooling.cu @@ -140,8 +140,6 @@ class Pool2dForwardFunctor { output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, pool_process); - - // CHECK_SYNC("Pool2dForwardKernel failed"); } }; @@ -186,8 +184,6 @@ class Pool2dBackwardFunctor { input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, pool_process); - - // CHECK_SYNC("KernelPool2dBackward failed"); } }; @@ -247,7 +243,6 @@ __global__ void KernelPool3DForward( } int pool_size = (dend - dstart) * (hend - hstart) * (wend - wstart); pool_process.finalize(ele, static_cast(pool_size)); - output_data[index] = ele; } } @@ -361,8 +356,6 @@ class Pool3dForwardFunctor { ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, pool_process); - - // CHECK_SYNC("Pool2dForwardKernel failed"); } }; @@ -415,8 +408,6 @@ class Pool3dBackwardFunctor { output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, pool_process); - - // CHECK_SYNC("KernelPool2dBackward failed"); } }; From 3994e91a678b8547af77b6b7f4629f122b0d9f07 Mon Sep 17 00:00:00 2001 From: guosheng Date: Fri, 8 Sep 2017 18:39:01 +0800 Subject: [PATCH 025/342] Add reduce_op --- paddle/operators/reduce_op.cc | 207 +++++++++++++++ paddle/operators/reduce_op.cu | 46 ++++ paddle/operators/reduce_op.h | 251 ++++++++++++++++++ .../v2/framework/tests/test_reduce_op.py | 92 +++++++ 4 files changed, 596 insertions(+) create mode 100644 paddle/operators/reduce_op.cc create mode 100644 paddle/operators/reduce_op.cu create mode 100644 paddle/operators/reduce_op.h create mode 100644 python/paddle/v2/framework/tests/test_reduce_op.py diff --git a/paddle/operators/reduce_op.cc b/paddle/operators/reduce_op.cc new file mode 100644 index 0000000000..ea4bfc50b2 --- /dev/null +++ b/paddle/operators/reduce_op.cc @@ -0,0 +1,207 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#include "paddle/operators/reduce_op.h" + +namespace paddle { +namespace operators { + +using framework::Tensor; +using framework::DDim; + +class ReduceOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(const framework::InferShapeContext &ctx) const override { + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) should not be null"); + auto x_dims = ctx.Input("X")->dims(); + auto x_rank = x_dims.size(); + PADDLE_ENFORCE_LE(x_rank, 6, "Tensors with rank at most 6 are supported"); + int dim = static_cast(ctx.Attr("dim")); + if (dim < 0) dim = x_rank + dim; + PADDLE_ENFORCE_LT( + dim, x_rank, + "The dim should be in the range [-rank(input), rank(input)]"); + bool keep_dim = true; // TODO; + auto dims_vector = vectorize(x_dims); + if (keep_dim || x_rank == 1) { + dims_vector[dim] = 1; + } else { + dims_vector.erase(dims_vector.begin() + dim); + } + auto out_dims = framework::make_ddim(dims_vector); + ctx.Output("Out")->Resize(out_dims); + } +}; + +class ReduceGradOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(const framework::InferShapeContext &ctx) const override { + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) should not be null"); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")), + "Input(Out@GRAD) should not be null"); + auto x_dims = ctx.Input("X")->dims(); + auto x_rank = x_dims.size(); + PADDLE_ENFORCE_LE(x_rank, 6, "Tensors with rank at most 6 are supported"); + int dim = static_cast(ctx.Attr("dim")); + if (dim < 0) dim = x_rank + dim; + PADDLE_ENFORCE_LT( + dim, x_rank, + "The dim should be in the range [-rank(input), rank(input)]"); + auto *x_grad = ctx.Output(framework::GradVarName("X")); + if (x_grad) x_grad->Resize(x_dims); + } +}; + +class ReduceSumOpMaker : public framework::OpProtoAndCheckerMaker { + public: + ReduceSumOpMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput( + "X", + "(Tensor) The input tensor. Tensors with rank at most 6 are supported"); + AddOutput("Out", "(Tensor) The result tensor."); + AddComment(R"DOC( +ReduceMean operator computes the sum of input tensor along the given dimension. +The result tensor has 1 fewer dimension than the input unless `keep_dim` is true. +)DOC"); + AddAttr("dim", + "(int, default 0) The dimension to reduce. " + "Must be in the range [-rank(input), rank(input)]") + .SetDefault(0); + AddAttr("keep_dim", + "(bool, default fasle) " + "If true, retain the reduced dimension with length 1.") + .SetDefault(false); + } +}; + +class ReduceMeanOpMaker : public framework::OpProtoAndCheckerMaker { + public: + ReduceMeanOpMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput( + "X", + "(Tensor) The input tensor. Tensors with rank at most 6 are supported"); + AddOutput("Out", "(Tensor) The result tensor."); + AddComment(R"DOC( +ReduceMean operator computes the mean of input tensor along the given dimension. +The result tensor has 1 fewer dimension than the input unless `keep_dim` is true. +)DOC"); + AddAttr("dim", + "(int, default 0) The dimension to reduce. " + "Must be in the range [-rank(input), rank(input)]") + .SetDefault(0); + AddAttr("keep_dim", + "(bool, default fasle) " + "If true, retain the reduced dimension with length 1.") + .SetDefault(false); + } +}; + +class ReduceMaxOpMaker : public framework::OpProtoAndCheckerMaker { + public: + ReduceMaxOpMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput( + "X", + "(Tensor) The input tensor. Tensors with rank at most 6 are supported"); + AddOutput("Out", "(Tensor) The result tensor."); + AddComment(R"DOC( +ReduceMax operator computes the maximum of input tensor along the given dimension. +The result tensor has 1 fewer dimension than the input unless `keep_dim` is true. +)DOC"); + AddAttr("dim", + "(int, default 0) The dimension to reduce. " + "Must be in the range [-rank(input), rank(input)]") + .SetDefault(0); + AddAttr("keep_dim", + "(bool, default fasle) " + "If true, retain the reduced dimension with length 1.") + .SetDefault(false); + } +}; + +class ReduceMinOpMaker : public framework::OpProtoAndCheckerMaker { + public: + ReduceMinOpMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput( + "X", + "(Tensor) The input tensor. Tensors with rank at most 6 are supported"); + AddOutput("Out", "(Tensor) The result tensor."); + AddComment(R"DOC( +ReduceMin operator computes the minimum of input tensor along the given dimension. +The result tensor has 1 fewer dimension than the input unless `keep_dim` is true. +)DOC"); + AddAttr("dim", + "(int, default 0) The dimension to reduce. " + "Must be in the range [-rank(input), rank(input)]") + .SetDefault(0); + AddAttr("keep_dim", + "(bool, default fasle) " + "If true, retain the reduced dimension with length 1.") + .SetDefault(false); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; + +REGISTER_OP(reduce_sum, ops::ReduceOp, ops::ReduceSumOpMaker, reduce_sum_grad, + ops::ReduceGradOp); +REGISTER_OP_CPU_KERNEL( + reduce_sum, + ops::ReduceKernel); +REGISTER_OP_CPU_KERNEL(reduce_sum_grad, + ops::ReduceGradKernel); + +REGISTER_OP(reduce_mean, ops::ReduceOp, ops::ReduceMeanOpMaker, + reduce_mean_grad, ops::ReduceGradOp); +REGISTER_OP_CPU_KERNEL( + reduce_mean, + ops::ReduceKernel); +REGISTER_OP_CPU_KERNEL(reduce_mean_grad, + ops::ReduceGradKernel); + +REGISTER_OP(reduce_max, ops::ReduceOp, ops::ReduceMaxOpMaker, reduce_max_grad, + ops::ReduceGradOp); +REGISTER_OP_CPU_KERNEL( + reduce_max, + ops::ReduceKernel); +REGISTER_OP_CPU_KERNEL(reduce_max_grad, + ops::ReduceGradKernel); + +REGISTER_OP(reduce_min, ops::ReduceOp, ops::ReduceMaxOpMaker, reduce_min_grad, + ops::ReduceGradOp); +REGISTER_OP_CPU_KERNEL( + reduce_min, + ops::ReduceKernel); +REGISTER_OP_CPU_KERNEL(reduce_min_grad, + ops::ReduceGradKernel); diff --git a/paddle/operators/reduce_op.cu b/paddle/operators/reduce_op.cu new file mode 100644 index 0000000000..9effc17ed3 --- /dev/null +++ b/paddle/operators/reduce_op.cu @@ -0,0 +1,46 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#define EIGEN_USE_GPU +#include "paddle/operators/reduce_op.h" + +namespace ops = paddle::operators; + +REGISTER_OP_GPU_KERNEL( + reduce_sum, + ops::ReduceKernel); +REGISTER_OP_GPU_KERNEL(reduce_sum_grad, + ops::ReduceGradEigenKernel); + +REGISTER_OP_GPU_KERNEL( + reduce_mean, + ops::ReduceKernel); +REGISTER_OP_GPU_KERNEL(reduce_mean_grad, + ops::ReduceGradKernel); + +REGISTER_OP_GPU_KERNEL( + reduce_max, + ops::ReduceKernel); +REGISTER_OP_GPU_KERNEL(reduce_max_grad, + ops::ReduceGradKernel); + +REGISTER_OP_GPU_KERNEL( + reduce_min, + ops::ReduceKernel); +REGISTER_OP_GPU_KERNEL(reduce_min_grad, + ops::ReduceGradKernel); \ No newline at end of file diff --git a/paddle/operators/reduce_op.h b/paddle/operators/reduce_op.h new file mode 100644 index 0000000000..9fd7d335ac --- /dev/null +++ b/paddle/operators/reduce_op.h @@ -0,0 +1,251 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#pragma once + +#include "paddle/operators/math/math_function.h" + +#include "paddle/framework/eigen.h" +#include "paddle/framework/op_registry.h" + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; +using DDim = framework::DDim; +template +using EigenTensor = framework::EigenTensor; + +struct SumFunctor { + template + void operator()(const Place& place, In& in, Out& out, const Dim& dim) { + out.device(place) = in.sum(dim); + } +}; + +struct SumGradFunctor { + template + void operator()(const Place& place, In_Const& in, In& in_grad, Out& out, + Out& out_grad, const Dim& dim, int size) { + in_grad.device(place) = out_grad.broadcast(dim); + } +}; + +struct MeanFunctor { + template + void operator()(const Place& place, In& in, Out& out, const Dim& dim) { + out.device(place) = in.mean(dim); + } +}; + +struct MeanGradFunctor { + template + void operator()(const Place& place, In_Const& in, In& in_grad, Out& out, + Out& out_grad, const Dim& dim, int size) { + in_grad.device(place) = out_grad.broadcast(dim) / in_grad.constant(size); + } +}; + +struct MaxFunctor { + template + void operator()(const Place& place, In& in, Out& out, const Dim& dim) { + out.device(place) = in.maximum(dim); + } +}; + +struct MinFunctor { + template + void operator()(const Place& place, In& in, Out& out, const Dim& dim) { + out.device(place) = in.minimum(dim); + } +}; + +struct MaxOrMinGradFunctor { + template + void operator()(const Place& place, In_Const& in, In& in_grad, Out& out, + Out& out_grad, const Dim& dim, int size) { + auto equals = in == out.broadcast(dim); + auto ones = in_grad.constant(1); + auto zeros = in_grad.constant(0); + in_grad.device(place) = + out_grad.broadcast(dim) * equals.select(ones, zeros); + } +}; + +template +class ReduceKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + int rank = context.Input("X")->dims().size(); + switch (rank) { + case 1: + ReduceCompute<1>(context); + break; + case 2: + ReduceCompute<2>(context); + break; + case 3: + ReduceCompute<3>(context); + break; + case 4: + ReduceCompute<4>(context); + break; + case 5: + ReduceCompute<5>(context); + break; + case 6: + ReduceCompute<6>(context); + break; + } + } + + private: + template + void ReduceCompute(const framework::ExecutionContext& context) const { + auto* input = context.Input("X"); + auto* output = context.Output("Out"); + output->mutable_data(context.GetPlace()); + + auto x = EigenTensor::From(*input); + auto x_rank = static_cast(x.dimensions().size()); + int dim = static_cast(context.Attr("dim")); + if (dim < 0) dim = x_rank + dim; + auto reduce_dim = Eigen::array({{dim}}); + // construct the squeezed output tensor + bool keep_dim = true; // static_cast(context.Attr("keep_dim")); + DDim dims = output->dims(); + auto dims_vector = vectorize(dims); + if (keep_dim && x_rank > 1) { + dims_vector.erase(dims_vector.begin() + dim); + dims = framework::make_ddim(dims_vector); + } + auto out = EigenTensor < T, D == 1 ? 1 : (D - 1) > ::From(*output, dims); + auto& place = context.GetEigenDevice(); + Functor functor; + functor(place, x, out, reduce_dim); + } +}; + +template +class ReduceGradKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + int rank = context.Input("X")->dims().size(); + switch (rank) { + case 1: + ReduceCompute<1>(context); + break; + case 2: + ReduceCompute<2>(context); + break; + case 3: + ReduceCompute<3>(context); + break; + case 4: + ReduceCompute<4>(context); + break; + case 5: + ReduceCompute<5>(context); + break; + case 6: + ReduceCompute<6>(context); + break; + } + } + + private: + template + void ReduceCompute(const framework::ExecutionContext& context) const { + auto* input0 = context.Input("X"); + auto* input1 = context.Input("Out"); + auto* input2 = context.Input(framework::GradVarName("Out")); + auto* output = context.Output(framework::GradVarName("X")); + + if (output != nullptr) { + output->mutable_data(context.GetPlace()); + auto x = EigenTensor::From(*input0); + auto x_grad = EigenTensor::From(*output); + auto x_rank = static_cast(x.dimensions().size()); + int dim = static_cast(context.Attr("dim")); + if (dim < 0) dim = x_rank + dim; + DDim dims = input0->dims(); + dims[dim] = 1; + auto x_reduce = EigenTensor::From(*input1, dims); + auto x_reduce_grad = EigenTensor::From(*input2, dims); + + Eigen::array braodcast_dim; + for (size_t i = 0; i < D; ++i) braodcast_dim[i] = 1; + braodcast_dim[dim] = input0->dims()[dim]; + auto& place = context.GetEigenDevice(); + Functor functor; + functor(place, x, x_grad, x_reduce, x_reduce_grad, braodcast_dim, + braodcast_dim[dim]); + } + } +}; + +// For EigenTensor unsupported reduce +template +class ReduceGradEigenFreeKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + auto* x = context.Input("X"); + auto* out = context.Input("Out"); + auto* x_grad = context.Output(framework::GradVarName("X")); + auto* out_grad = context.Input(framework::GradVarName("Out")); + if (x_grad != nullptr) { + DDim dims = x->dims(); + int rank = dims.size(); + int dim = static_cast(context.Attr("dim")); + if (dim < 0) dim = rank + dim; + + auto* x_data = x->data(); + auto* x_grad_data = x_grad->mutable_data(context.GetPlace()); + auto* out_data = out->data(); + auto* out_grad_data = out_grad->data(); + + int outer_count = 1; + int inner_count = 1; + int mid_count = dims[dim]; + for (int i = 0; i < dim; ++i) { + outer_count *= dims[i]; + } + for (int i = dim + 1; i < rank; ++i) { + inner_count *= dims[i]; + } + + int x_offset = 0; // offset on raw data + int out_offset = 0; // offset on reduced data + Functor functor; + for (int i = 0; i < outer_count; ++i) { + for (int j = 0; j < inner_count; ++j) { + out_offset = inner_count * i + j; + for (int k = 0; k < mid_count; ++k) { + x_offset = (inner_count * mid_count) * i + inner_count * k + j; + functor(x_data + x_offset, x_grad_data + x_offset, + out_data + out_offset, out_grad_data + out_offset, + mid_count); + } + } + } + } + } +}; + +} // namespace operators +} // namespace paddle diff --git a/python/paddle/v2/framework/tests/test_reduce_op.py b/python/paddle/v2/framework/tests/test_reduce_op.py new file mode 100644 index 0000000000..49ef8eabd2 --- /dev/null +++ b/python/paddle/v2/framework/tests/test_reduce_op.py @@ -0,0 +1,92 @@ +import unittest +import numpy as np +from gradient_checker import GradientChecker, create_op +from op_test_util import OpTestMeta +from paddle.v2.framework.op import Operator + + +class TestSumOp(unittest.TestCase): + __metaclass__ = OpTestMeta + + def setUp(self): + self.type = "reduce_sum" + self.inputs = {'X': np.random.random((5, 6, 10)).astype("float32")} + self.attrs = {'dim': -2} + out = self.inputs['X'].sum(axis=self.attrs['dim']) + self.outputs = {'Out': out} + + +class TestSumGradOp(GradientChecker): + def test_normal(self): + op = Operator("reduce_sum", X="X", Out="Out", dim=-2) + # use small size to decrease the error of numerical calculation + inputs = {'X': np.random.random((5, 6, 10)).astype("float32")} + self.check_grad(op, inputs, set(["X"]), "Out") + + def test_1d_tensor(self): + op = Operator("reduce_sum", X="X", Out="Out", dim=0) + # use small size to decrease the error of numerical calculation + inputs = {'X': np.random.random(10).astype("float32")} + self.check_grad(op, inputs, set(["X"]), "Out") + + +class TestKeepdimSumOp(unittest.TestCase): + __metaclass__ = OpTestMeta + + def setUp(self): + self.type = "reduce_sum" + self.inputs = {'X': np.random.random((5, 6, 10)).astype("float32")} + self.attrs = {'dim': -2} + out = self.inputs['X'].sum(axis=self.attrs['dim'], keepdims=True) + self.outputs = {'Out': out} + + +class TestMeanOp(unittest.TestCase): + __metaclass__ = OpTestMeta + + def setUp(self): + self.type = "reduce_mean" + self.inputs = {'X': np.random.random((5, 6, 10)).astype("float32")} + self.attrs = {'dim': -1} + out = self.inputs['X'].mean(axis=self.attrs['dim']) + self.outputs = {'Out': out} + + +class TestMeanGradOp(GradientChecker): + def test_normal(self): + op = Operator("reduce_mean", X="X", Out="Out", dim=-2) + # use small size to decrease the error of numerical calculation + inputs = {'X': np.random.random((5, 6, 10)).astype("float32")} + self.check_grad(op, inputs, set(["X"]), "Out") + + def test_1d_tensor(self): + op = Operator("reduce_mean", X="X", Out="Out", dim=0) + # use small size to decrease the error of numerical calculation + inputs = {'X': np.random.random(10).astype("float32")} + self.check_grad(op, inputs, set(["X"]), "Out") + + +class TestMaxOp(unittest.TestCase): + __metaclass__ = OpTestMeta + + def setUp(self): + self.type = "reduce_max" + self.inputs = {'X': np.random.random((5, 6, 10)).astype("float32")} + self.attrs = {'dim': -1} + out = self.inputs['X'].max(axis=self.attrs['dim']) + self.outputs = {'Out': out} + + +class TestMinOp(unittest.TestCase): + __metaclass__ = OpTestMeta + + def setUp(self): + self.type = "reduce_max" + self.inputs = {'X': np.random.random((5, 6, 10)).astype("float32")} + self.attrs = {'dim': -2} + out = self.inputs['X'].min(axis=self.attrs['dim']) + self.outputs = {'Out': out} + + +if __name__ == '__main__': + unittest.main() From c8d877195b9763ec2da9eb480bb6858cee834359 Mon Sep 17 00:00:00 2001 From: guosheng Date: Thu, 14 Sep 2017 01:11:31 +0800 Subject: [PATCH 026/342] Revise the reduce_op unit test accordingly --- paddle/operators/reduce_op.cc | 56 +++++---- paddle/operators/reduce_op.cu | 4 +- paddle/operators/reduce_op.h | 2 +- .../v2/framework/tests/test_reduce_op.py | 113 +++++++++--------- 4 files changed, 89 insertions(+), 86 deletions(-) diff --git a/paddle/operators/reduce_op.cc b/paddle/operators/reduce_op.cc index ea4bfc50b2..20e6319730 100644 --- a/paddle/operators/reduce_op.cc +++ b/paddle/operators/reduce_op.cc @@ -30,12 +30,14 @@ class ReduceOp : public framework::OperatorWithKernel { auto x_dims = ctx.Input("X")->dims(); auto x_rank = x_dims.size(); PADDLE_ENFORCE_LE(x_rank, 6, "Tensors with rank at most 6 are supported"); - int dim = static_cast(ctx.Attr("dim")); + int dim = ctx.Attr("dim"); if (dim < 0) dim = x_rank + dim; PADDLE_ENFORCE_LT( dim, x_rank, - "The dim should be in the range [-rank(input), rank(input)]"); - bool keep_dim = true; // TODO; + "The dim should be in the range [-rank(input), rank(input))"); + PADDLE_ENFORCE_GE(ctx.Attr("keep_dim"), 0, "keep_dim must be 0 or 1"); + PADDLE_ENFORCE_LE(ctx.Attr("keep_dim"), 1, "keep_dim must be 0 or 1"); + bool keep_dim = ctx.Attr("keep_dim") == 1; auto dims_vector = vectorize(x_dims); if (keep_dim || x_rank == 1) { dims_vector[dim] = 1; @@ -59,11 +61,11 @@ class ReduceGradOp : public framework::OperatorWithKernel { auto x_dims = ctx.Input("X")->dims(); auto x_rank = x_dims.size(); PADDLE_ENFORCE_LE(x_rank, 6, "Tensors with rank at most 6 are supported"); - int dim = static_cast(ctx.Attr("dim")); + int dim = ctx.Attr("dim"); if (dim < 0) dim = x_rank + dim; PADDLE_ENFORCE_LT( dim, x_rank, - "The dim should be in the range [-rank(input), rank(input)]"); + "The dim should be in the range [-rank(input), rank(input))"); auto *x_grad = ctx.Output(framework::GradVarName("X")); if (x_grad) x_grad->Resize(x_dims); } @@ -84,12 +86,13 @@ The result tensor has 1 fewer dimension than the input unless `keep_dim` is true )DOC"); AddAttr("dim", "(int, default 0) The dimension to reduce. " - "Must be in the range [-rank(input), rank(input)]") + "Must be in the range [-rank(input), rank(input))") + .SetDefault(0); + AddAttr( + "keep_dim", + "(int, default 0) " + "Must be 0 or 1. If 1, retain the reduced dimension with length 1.") .SetDefault(0); - AddAttr("keep_dim", - "(bool, default fasle) " - "If true, retain the reduced dimension with length 1.") - .SetDefault(false); } }; @@ -108,12 +111,13 @@ The result tensor has 1 fewer dimension than the input unless `keep_dim` is true )DOC"); AddAttr("dim", "(int, default 0) The dimension to reduce. " - "Must be in the range [-rank(input), rank(input)]") + "Must be in the range [-rank(input), rank(input))") + .SetDefault(0); + AddAttr( + "keep_dim", + "(int, default 0) " + "Must be 0 or 1. If 1, retain the reduced dimension with length 1.") .SetDefault(0); - AddAttr("keep_dim", - "(bool, default fasle) " - "If true, retain the reduced dimension with length 1.") - .SetDefault(false); } }; @@ -132,12 +136,13 @@ The result tensor has 1 fewer dimension than the input unless `keep_dim` is true )DOC"); AddAttr("dim", "(int, default 0) The dimension to reduce. " - "Must be in the range [-rank(input), rank(input)]") + "Must be in the range [-rank(input), rank(input))") + .SetDefault(0); + AddAttr( + "keep_dim", + "(int, default 0) " + "Must be 0 or 1. If 1, retain the reduced dimension with length 1.") .SetDefault(0); - AddAttr("keep_dim", - "(bool, default fasle) " - "If true, retain the reduced dimension with length 1.") - .SetDefault(false); } }; @@ -156,12 +161,13 @@ The result tensor has 1 fewer dimension than the input unless `keep_dim` is true )DOC"); AddAttr("dim", "(int, default 0) The dimension to reduce. " - "Must be in the range [-rank(input), rank(input)]") + "Must be in the range [-rank(input), rank(input))") + .SetDefault(0); + AddAttr( + "keep_dim", + "(int, default 0) " + "Must be 0 or 1. If 1, retain the reduced dimension with length 1.") .SetDefault(0); - AddAttr("keep_dim", - "(bool, default fasle) " - "If true, retain the reduced dimension with length 1.") - .SetDefault(false); } }; diff --git a/paddle/operators/reduce_op.cu b/paddle/operators/reduce_op.cu index 9effc17ed3..2dffea3a3a 100644 --- a/paddle/operators/reduce_op.cu +++ b/paddle/operators/reduce_op.cu @@ -21,8 +21,8 @@ REGISTER_OP_GPU_KERNEL( reduce_sum, ops::ReduceKernel); REGISTER_OP_GPU_KERNEL(reduce_sum_grad, - ops::ReduceGradEigenKernel); + ops::ReduceGradKernel); REGISTER_OP_GPU_KERNEL( reduce_mean, diff --git a/paddle/operators/reduce_op.h b/paddle/operators/reduce_op.h index 9fd7d335ac..0d62fa7d15 100644 --- a/paddle/operators/reduce_op.h +++ b/paddle/operators/reduce_op.h @@ -127,7 +127,7 @@ class ReduceKernel : public framework::OpKernel { if (dim < 0) dim = x_rank + dim; auto reduce_dim = Eigen::array({{dim}}); // construct the squeezed output tensor - bool keep_dim = true; // static_cast(context.Attr("keep_dim")); + bool keep_dim = context.Attr("keep_dim") == 1; DDim dims = output->dims(); auto dims_vector = vectorize(dims); if (keep_dim && x_rank > 1) { diff --git a/python/paddle/v2/framework/tests/test_reduce_op.py b/python/paddle/v2/framework/tests/test_reduce_op.py index 49ef8eabd2..58951f2902 100644 --- a/python/paddle/v2/framework/tests/test_reduce_op.py +++ b/python/paddle/v2/framework/tests/test_reduce_op.py @@ -1,91 +1,88 @@ import unittest import numpy as np -from gradient_checker import GradientChecker, create_op -from op_test_util import OpTestMeta -from paddle.v2.framework.op import Operator +from op_test import OpTest -class TestSumOp(unittest.TestCase): - __metaclass__ = OpTestMeta - +class TestSumOp(OpTest): def setUp(self): - self.type = "reduce_sum" + self.op_type = "reduce_sum" self.inputs = {'X': np.random.random((5, 6, 10)).astype("float32")} - self.attrs = {'dim': -2} - out = self.inputs['X'].sum(axis=self.attrs['dim']) - self.outputs = {'Out': out} + self.outputs = {'Out': self.inputs['X'].sum(axis=0)} + def test_check_output(self): + self.check_output() -class TestSumGradOp(GradientChecker): - def test_normal(self): - op = Operator("reduce_sum", X="X", Out="Out", dim=-2) - # use small size to decrease the error of numerical calculation - inputs = {'X': np.random.random((5, 6, 10)).astype("float32")} - self.check_grad(op, inputs, set(["X"]), "Out") + def test_check_grad(self): + self.check_grad(['X'], 'Out') - def test_1d_tensor(self): - op = Operator("reduce_sum", X="X", Out="Out", dim=0) - # use small size to decrease the error of numerical calculation - inputs = {'X': np.random.random(10).astype("float32")} - self.check_grad(op, inputs, set(["X"]), "Out") +class TestMeanOp(OpTest): + def setUp(self): + self.op_type = "reduce_mean" + self.inputs = {'X': np.random.random((5, 6, 2, 10)).astype("float32")} + self.attrs = {'dim': 1} + self.outputs = {'Out': self.inputs['X'].mean(axis=self.attrs['dim'])} -class TestKeepdimSumOp(unittest.TestCase): - __metaclass__ = OpTestMeta + def test_check_output(self): + self.check_output() - def setUp(self): - self.type = "reduce_sum" - self.inputs = {'X': np.random.random((5, 6, 10)).astype("float32")} - self.attrs = {'dim': -2} - out = self.inputs['X'].sum(axis=self.attrs['dim'], keepdims=True) - self.outputs = {'Out': out} + def test_check_grad(self): + self.check_grad(['X'], 'Out') -class TestMeanOp(unittest.TestCase): - __metaclass__ = OpTestMeta +class TestMaxOp(OpTest): + """Remove Max with subgradient from gradient check to confirm the success of CI.""" def setUp(self): - self.type = "reduce_mean" + self.op_type = "reduce_max" self.inputs = {'X': np.random.random((5, 6, 10)).astype("float32")} self.attrs = {'dim': -1} - out = self.inputs['X'].mean(axis=self.attrs['dim']) - self.outputs = {'Out': out} + self.outputs = {'Out': self.inputs['X'].max(axis=self.attrs['dim'])} + + def test_check_output(self): + self.check_output() -class TestMeanGradOp(GradientChecker): - def test_normal(self): - op = Operator("reduce_mean", X="X", Out="Out", dim=-2) - # use small size to decrease the error of numerical calculation - inputs = {'X': np.random.random((5, 6, 10)).astype("float32")} - self.check_grad(op, inputs, set(["X"]), "Out") +class TestMinOp(OpTest): + """Remove Min with subgradient from gradient check to confirm the success of CI.""" - def test_1d_tensor(self): - op = Operator("reduce_mean", X="X", Out="Out", dim=0) - # use small size to decrease the error of numerical calculation - inputs = {'X': np.random.random(10).astype("float32")} - self.check_grad(op, inputs, set(["X"]), "Out") + def setUp(self): + self.op_type = "reduce_min" + self.inputs = {'X': np.random.random((5, 6, 10)).astype("float32")} + self.attrs = {'dim': 2} + self.outputs = {'Out': self.inputs['X'].min(axis=self.attrs['dim'])} + def test_check_output(self): + self.check_output() -class TestMaxOp(unittest.TestCase): - __metaclass__ = OpTestMeta +class TestKeepDimReduce(OpTest): def setUp(self): - self.type = "reduce_max" + self.op_type = "reduce_sum" self.inputs = {'X': np.random.random((5, 6, 10)).astype("float32")} - self.attrs = {'dim': -1} - out = self.inputs['X'].max(axis=self.attrs['dim']) - self.outputs = {'Out': out} + self.attrs = {'dim': -2, 'keep_dim': 1} + self.outputs = { + 'Out': self.inputs['X'].sum(axis=self.attrs['dim'], keepdims=True) + } + + def test_check_output(self): + self.check_output() + def test_check_grad(self): + self.check_grad(['X'], 'Out') -class TestMinOp(unittest.TestCase): - __metaclass__ = OpTestMeta +class Test1DReduce(OpTest): def setUp(self): - self.type = "reduce_max" - self.inputs = {'X': np.random.random((5, 6, 10)).astype("float32")} - self.attrs = {'dim': -2} - out = self.inputs['X'].min(axis=self.attrs['dim']) - self.outputs = {'Out': out} + self.op_type = "reduce_sum" + self.inputs = {'X': np.random.random(20).astype("float32")} + self.outputs = {'Out': self.inputs['X'].sum(axis=0)} + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['X'], 'Out') if __name__ == '__main__': From 630273d45361c7832d1dabbd9e44c4ae6cdb3864 Mon Sep 17 00:00:00 2001 From: guosheng Date: Thu, 14 Sep 2017 15:21:29 +0800 Subject: [PATCH 027/342] Fix reduce_op according to CI log --- paddle/operators/reduce_op.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/paddle/operators/reduce_op.h b/paddle/operators/reduce_op.h index 0d62fa7d15..f0d4e1f95c 100644 --- a/paddle/operators/reduce_op.h +++ b/paddle/operators/reduce_op.h @@ -14,8 +14,6 @@ #pragma once -#include "paddle/operators/math/math_function.h" - #include "paddle/framework/eigen.h" #include "paddle/framework/op_registry.h" From 8b3bf28c6b5da73d919b0414361473bee638f414 Mon Sep 17 00:00:00 2001 From: guosheng Date: Thu, 21 Sep 2017 11:12:32 +0800 Subject: [PATCH 028/342] Refine reduce_op and follow comments --- paddle/operators/CMakeLists.txt | 7 ++ paddle/operators/reduce_op.cc | 147 ++++++++++++++------------------ paddle/operators/reduce_op.h | 63 +++++++------- 3 files changed, 103 insertions(+), 114 deletions(-) diff --git a/paddle/operators/CMakeLists.txt b/paddle/operators/CMakeLists.txt index f8b0bce681..eec0d0b595 100644 --- a/paddle/operators/CMakeLists.txt +++ b/paddle/operators/CMakeLists.txt @@ -61,6 +61,13 @@ function(op_library TARGET) # It's enough to just adding one operator to pybind file(APPEND ${pybind_file} "USE_OP(sigmoid);\n") endif() + + # reduce_op contains several operators + if ("${TARGET}" STREQUAL "reduce_op") + set(pybind_flag 1) + # It's enough to just adding one operator to pybind + file(APPEND ${pybind_file} "USE_OP(reduce_sum);\n") + endif() # pybind USE_NO_KERNEL_OP file(READ ${TARGET}.cc TARGET_CONTENT) diff --git a/paddle/operators/reduce_op.cc b/paddle/operators/reduce_op.cc index 20e6319730..89f54fe74b 100644 --- a/paddle/operators/reduce_op.cc +++ b/paddle/operators/reduce_op.cc @@ -18,7 +18,7 @@ namespace paddle { namespace operators { using framework::Tensor; -using framework::DDim; +using framework::LoDTensor; class ReduceOp : public framework::OperatorWithKernel { public: @@ -26,18 +26,19 @@ class ReduceOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { - PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) should not be null"); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), + "Input(X) of ReduceOp should not be null."); + PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"), + "Output(Out) of ReduceOp should not be null."); auto x_dims = ctx.Input("X")->dims(); auto x_rank = x_dims.size(); - PADDLE_ENFORCE_LE(x_rank, 6, "Tensors with rank at most 6 are supported"); + PADDLE_ENFORCE_LE(x_rank, 6, "Tensors with rank at most 6 are supported."); int dim = ctx.Attr("dim"); if (dim < 0) dim = x_rank + dim; PADDLE_ENFORCE_LT( dim, x_rank, - "The dim should be in the range [-rank(input), rank(input))"); - PADDLE_ENFORCE_GE(ctx.Attr("keep_dim"), 0, "keep_dim must be 0 or 1"); - PADDLE_ENFORCE_LE(ctx.Attr("keep_dim"), 1, "keep_dim must be 0 or 1"); - bool keep_dim = ctx.Attr("keep_dim") == 1; + "The dim should be in the range [-rank(input), rank(input))."); + bool keep_dim = ctx.Attr("keep_dim"); auto dims_vector = vectorize(x_dims); if (keep_dim || x_rank == 1) { dims_vector[dim] = 1; @@ -45,7 +46,7 @@ class ReduceOp : public framework::OperatorWithKernel { dims_vector.erase(dims_vector.begin() + dim); } auto out_dims = framework::make_ddim(dims_vector); - ctx.Output("Out")->Resize(out_dims); + ctx.Output("Out")->Resize(out_dims); } }; @@ -55,119 +56,101 @@ class ReduceGradOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { - PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) should not be null"); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) should not be null."); PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")), - "Input(Out@GRAD) should not be null"); + "Input(Out@GRAD) should not be null."); auto x_dims = ctx.Input("X")->dims(); auto x_rank = x_dims.size(); - PADDLE_ENFORCE_LE(x_rank, 6, "Tensors with rank at most 6 are supported"); + PADDLE_ENFORCE_LE(x_rank, 6, "Tensors with rank at most 6 are supported."); int dim = ctx.Attr("dim"); if (dim < 0) dim = x_rank + dim; PADDLE_ENFORCE_LT( dim, x_rank, - "The dim should be in the range [-rank(input), rank(input))"); - auto *x_grad = ctx.Output(framework::GradVarName("X")); + "The dim should be in the range [-rank(input), rank(input))."); + auto *x_grad = + ctx.Output(framework::GradVarName("X")); if (x_grad) x_grad->Resize(x_dims); } }; -class ReduceSumOpMaker : public framework::OpProtoAndCheckerMaker { +class ReduceOpMaker : public framework::OpProtoAndCheckerMaker { public: - ReduceSumOpMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + ReduceOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput( "X", "(Tensor) The input tensor. Tensors with rank at most 6 are supported"); AddOutput("Out", "(Tensor) The result tensor."); - AddComment(R"DOC( -ReduceMean operator computes the sum of input tensor along the given dimension. -The result tensor has 1 fewer dimension than the input unless `keep_dim` is true. -)DOC"); AddAttr("dim", "(int, default 0) The dimension to reduce. " "Must be in the range [-rank(input), rank(input))") .SetDefault(0); - AddAttr( - "keep_dim", - "(int, default 0) " - "Must be 0 or 1. If 1, retain the reduced dimension with length 1.") - .SetDefault(0); + AddAttr("keep_dim", + "(bool, default false) " + "If true, retain the reduced dimension with length 1.") + .SetDefault(false); + comment_ = R"DOC( +{ReduceOP} operator computes the {reduce} of input tensor along the given dimension. +The result tensor has 1 fewer dimension than the input unless `keep_dim` is true. +)DOC"; + AddComment(comment_); + } + + protected: + std::string comment_; + + void Replace(std::string &src, std::string from, std::string to) { + std::size_t len_from = std::strlen(from.c_str()); + std::size_t len_to = std::strlen(to.c_str()); + for (std::size_t pos = src.find(from); pos != std::string::npos; + pos = src.find(from, pos + len_to)) { + src.replace(pos, len_from, to); + } + } + + void SetComment(std::string name, std::string op) { + Replace(comment_, "{ReduceOP}", name); + Replace(comment_, "{reduce}", op); } }; -class ReduceMeanOpMaker : public framework::OpProtoAndCheckerMaker { +class ReduceSumOpMaker : public ReduceOpMaker { + public: + ReduceSumOpMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) + : ReduceOpMaker(proto, op_checker) { + SetComment("ReduceSum", "sum"); + AddComment(comment_); + } +}; + +class ReduceMeanOpMaker : public ReduceOpMaker { public: ReduceMeanOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput( - "X", - "(Tensor) The input tensor. Tensors with rank at most 6 are supported"); - AddOutput("Out", "(Tensor) The result tensor."); - AddComment(R"DOC( -ReduceMean operator computes the mean of input tensor along the given dimension. -The result tensor has 1 fewer dimension than the input unless `keep_dim` is true. -)DOC"); - AddAttr("dim", - "(int, default 0) The dimension to reduce. " - "Must be in the range [-rank(input), rank(input))") - .SetDefault(0); - AddAttr( - "keep_dim", - "(int, default 0) " - "Must be 0 or 1. If 1, retain the reduced dimension with length 1.") - .SetDefault(0); + : ReduceOpMaker(proto, op_checker) { + SetComment("ReduceMean", "mean"); + AddComment(comment_); } }; -class ReduceMaxOpMaker : public framework::OpProtoAndCheckerMaker { +class ReduceMaxOpMaker : public ReduceOpMaker { public: ReduceMaxOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput( - "X", - "(Tensor) The input tensor. Tensors with rank at most 6 are supported"); - AddOutput("Out", "(Tensor) The result tensor."); - AddComment(R"DOC( -ReduceMax operator computes the maximum of input tensor along the given dimension. -The result tensor has 1 fewer dimension than the input unless `keep_dim` is true. -)DOC"); - AddAttr("dim", - "(int, default 0) The dimension to reduce. " - "Must be in the range [-rank(input), rank(input))") - .SetDefault(0); - AddAttr( - "keep_dim", - "(int, default 0) " - "Must be 0 or 1. If 1, retain the reduced dimension with length 1.") - .SetDefault(0); + : ReduceOpMaker(proto, op_checker) { + SetComment("ReduceMax", "max"); + AddComment(comment_); } }; -class ReduceMinOpMaker : public framework::OpProtoAndCheckerMaker { +class ReduceMinOpMaker : public ReduceOpMaker { public: ReduceMinOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput( - "X", - "(Tensor) The input tensor. Tensors with rank at most 6 are supported"); - AddOutput("Out", "(Tensor) The result tensor."); - AddComment(R"DOC( -ReduceMin operator computes the minimum of input tensor along the given dimension. -The result tensor has 1 fewer dimension than the input unless `keep_dim` is true. -)DOC"); - AddAttr("dim", - "(int, default 0) The dimension to reduce. " - "Must be in the range [-rank(input), rank(input))") - .SetDefault(0); - AddAttr( - "keep_dim", - "(int, default 0) " - "Must be 0 or 1. If 1, retain the reduced dimension with length 1.") - .SetDefault(0); + : ReduceOpMaker(proto, op_checker) { + SetComment("ReduceMin", "min"); + AddComment(comment_); } }; diff --git a/paddle/operators/reduce_op.h b/paddle/operators/reduce_op.h index f0d4e1f95c..972bd7bd46 100644 --- a/paddle/operators/reduce_op.h +++ b/paddle/operators/reduce_op.h @@ -27,61 +27,60 @@ template ; struct SumFunctor { - template - void operator()(const Place& place, In& in, Out& out, const Dim& dim) { - out.device(place) = in.sum(dim); + template + void operator()(const Place& place, X& x, Y& y, const Dim& dim) { + y.device(place) = x.sum(dim); } }; struct SumGradFunctor { - template - void operator()(const Place& place, In_Const& in, In& in_grad, Out& out, - Out& out_grad, const Dim& dim, int size) { - in_grad.device(place) = out_grad.broadcast(dim); + void operator()(const Place& place, X& x, Y& y, DX& dx, DY& dy, + const Dim& dim, int size) { + dx.device(place) = dy.broadcast(dim); } }; struct MeanFunctor { - template - void operator()(const Place& place, In& in, Out& out, const Dim& dim) { - out.device(place) = in.mean(dim); + template + void operator()(const Place& place, X& x, Y& y, const Dim& dim) { + y.device(place) = x.mean(dim); } }; struct MeanGradFunctor { - template - void operator()(const Place& place, In_Const& in, In& in_grad, Out& out, - Out& out_grad, const Dim& dim, int size) { - in_grad.device(place) = out_grad.broadcast(dim) / in_grad.constant(size); + void operator()(const Place& place, X& x, Y& y, DX& dx, DY& dy, + const Dim& dim, int size) { + dx.device(place) = dy.broadcast(dim) / dx.constant(size); } }; struct MaxFunctor { - template - void operator()(const Place& place, In& in, Out& out, const Dim& dim) { - out.device(place) = in.maximum(dim); + template + void operator()(const Place& place, X& x, Y& y, const Dim& dim) { + y.device(place) = x.maximum(dim); } }; struct MinFunctor { - template - void operator()(const Place& place, In& in, Out& out, const Dim& dim) { - out.device(place) = in.minimum(dim); + template + void operator()(const Place& place, X& x, Y& y, const Dim& dim) { + y.device(place) = x.minimum(dim); } }; struct MaxOrMinGradFunctor { - template - void operator()(const Place& place, In_Const& in, In& in_grad, Out& out, - Out& out_grad, const Dim& dim, int size) { - auto equals = in == out.broadcast(dim); - auto ones = in_grad.constant(1); - auto zeros = in_grad.constant(0); - in_grad.device(place) = - out_grad.broadcast(dim) * equals.select(ones, zeros); + void operator()(const Place& place, X& x, Y& y, DX& dx, DY& dy, + const Dim& dim, int size) { + auto equals = x == y.broadcast(dim); + auto ones = dx.constant(1); + auto zeros = dx.constant(0); + dx.device(place) = dy.broadcast(dim) * equals.select(ones, zeros); } }; @@ -125,7 +124,7 @@ class ReduceKernel : public framework::OpKernel { if (dim < 0) dim = x_rank + dim; auto reduce_dim = Eigen::array({{dim}}); // construct the squeezed output tensor - bool keep_dim = context.Attr("keep_dim") == 1; + bool keep_dim = context.Attr("keep_dim"); DDim dims = output->dims(); auto dims_vector = vectorize(dims); if (keep_dim && x_rank > 1) { @@ -191,7 +190,7 @@ class ReduceGradKernel : public framework::OpKernel { braodcast_dim[dim] = input0->dims()[dim]; auto& place = context.GetEigenDevice(); Functor functor; - functor(place, x, x_grad, x_reduce, x_reduce_grad, braodcast_dim, + functor(place, x, x_reduce, x_grad, x_reduce_grad, braodcast_dim, braodcast_dim[dim]); } } @@ -235,8 +234,8 @@ class ReduceGradEigenFreeKernel : public framework::OpKernel { out_offset = inner_count * i + j; for (int k = 0; k < mid_count; ++k) { x_offset = (inner_count * mid_count) * i + inner_count * k + j; - functor(x_data + x_offset, x_grad_data + x_offset, - out_data + out_offset, out_grad_data + out_offset, + functor(x_data + x_offset, out_data + out_offset, + x_grad_data + x_offset, out_grad_data + out_offset, mid_count); } } From 1295e5ef5467a0a068179da243c20bc05e61f921 Mon Sep 17 00:00:00 2001 From: guosheng Date: Sun, 24 Sep 2017 16:07:14 +0800 Subject: [PATCH 029/342] Refine reduce_op unit test and add newline at end of file --- paddle/operators/reduce_op.cu | 2 +- python/paddle/v2/framework/tests/test_reduce_op.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/paddle/operators/reduce_op.cu b/paddle/operators/reduce_op.cu index 2dffea3a3a..595127b858 100644 --- a/paddle/operators/reduce_op.cu +++ b/paddle/operators/reduce_op.cu @@ -43,4 +43,4 @@ REGISTER_OP_GPU_KERNEL( ops::ReduceKernel); REGISTER_OP_GPU_KERNEL(reduce_min_grad, ops::ReduceGradKernel); \ No newline at end of file + ops::MaxOrMinGradFunctor>); diff --git a/python/paddle/v2/framework/tests/test_reduce_op.py b/python/paddle/v2/framework/tests/test_reduce_op.py index 58951f2902..70359d60cb 100644 --- a/python/paddle/v2/framework/tests/test_reduce_op.py +++ b/python/paddle/v2/framework/tests/test_reduce_op.py @@ -60,7 +60,7 @@ class TestKeepDimReduce(OpTest): def setUp(self): self.op_type = "reduce_sum" self.inputs = {'X': np.random.random((5, 6, 10)).astype("float32")} - self.attrs = {'dim': -2, 'keep_dim': 1} + self.attrs = {'dim': -2, 'keep_dim': True} self.outputs = { 'Out': self.inputs['X'].sum(axis=self.attrs['dim'], keepdims=True) } From 12f2b8eb07b034a24e3a0e0538a757e389fb4c45 Mon Sep 17 00:00:00 2001 From: Liu Yiqun Date: Mon, 25 Sep 2017 04:12:04 +0000 Subject: [PATCH 030/342] Correct the forward of sequence_softmax_op. --- paddle/operators/reshape_op.cc | 3 +-- paddle/operators/sequence_softmax_op.cc | 10 ++++++---- paddle/operators/sequence_softmax_op.h | 6 +++++- 3 files changed, 12 insertions(+), 7 deletions(-) diff --git a/paddle/operators/reshape_op.cc b/paddle/operators/reshape_op.cc index 0d05e34414..d5d04dac27 100644 --- a/paddle/operators/reshape_op.cc +++ b/paddle/operators/reshape_op.cc @@ -42,8 +42,7 @@ class ReshapeOp : public framework::OperatorWithKernel { int64_t capacity = std::accumulate(shape.begin(), shape.end(), 1, std::multiplies()); auto *in = ctx.Input("X"); - int64_t in_size = framework::product(in->dims()); - PADDLE_ENFORCE_EQ(capacity, in_size, + PADDLE_ENFORCE_EQ(capacity, in->numel(), "The size of Input(X) mismatches with Attr(shape)."); // resize output std::vector shape_int64(shape.size(), 0); diff --git a/paddle/operators/sequence_softmax_op.cc b/paddle/operators/sequence_softmax_op.cc index 0a99717440..58ef77b1a3 100644 --- a/paddle/operators/sequence_softmax_op.cc +++ b/paddle/operators/sequence_softmax_op.cc @@ -30,18 +30,20 @@ class SequenceSoftmaxOp : public framework::OperatorWithKernel { "Output(Out) of SequenceSoftmaxOp should not be null."); auto *x = ctx.Input("X"); - auto dims = x->dims(); auto lod = x->lod(); - PADDLE_ENFORCE_EQ(lod.size(), 1UL, "Only support one level sequence now."); + auto dims = x->dims(); PADDLE_ENFORCE_GE( dims[0], /* batch_size */ static_cast(lod[0].size() - 1), "The first dimension of Input(X) should be larger than batch size."); - PADDLE_ENFORCE_EQ(x->numel(), static_cast(lod[0].size() - 1), + + const size_t level = lod.size() - 1; + PADDLE_ENFORCE_EQ(x->numel(), static_cast(lod[level].back()), "The width of each timestep in Input(X) of " "SequenceSoftmaxOp should be 1."); - dims[0] = lod[0].size() - 1; + std::cout << DebugString() << std::endl; + ctx.Output("Out")->Resize({dims}); } }; diff --git a/paddle/operators/sequence_softmax_op.h b/paddle/operators/sequence_softmax_op.h index 54d8265271..f39c2ec6c3 100644 --- a/paddle/operators/sequence_softmax_op.h +++ b/paddle/operators/sequence_softmax_op.h @@ -38,7 +38,7 @@ class SequenceSoftmaxKernel : public framework::OpKernel { auto* out = ctx.Output("Out"); auto lod = x->lod(); - const size_t level = lod.size(); + const size_t level = lod.size() - 1; out->mutable_data(ctx.GetPlace()); for (int i = 0; i < static_cast(lod[level].size()) - 1; ++i) { @@ -47,6 +47,10 @@ class SequenceSoftmaxKernel : public framework::OpKernel { Tensor x_i = x->Slice(start_pos, end_pos); Tensor out_i = out->Slice(start_pos, end_pos); + // Reshape from (end_pos - start_pos) x 1UL to 1UL x (end_pos - start_pos) + framework::DDim dims = framework::make_ddim({1UL, end_pos - start_pos}); + x_i.Resize(dims); + out_i.Resize(dims); math::SoftmaxFunctor()(&x_i, &out_i, ctx); } } From 9b1431b8d932741ce9f976a860aabaefd176397e Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Fri, 22 Sep 2017 23:27:18 +0800 Subject: [PATCH 031/342] fix maxpool backward functor and add unit test --- paddle/operators/math/CMakeLists.txt | 2 + paddle/operators/math/pool_test_maxPool2d.cc | 150 ++++++++++++++ paddle/operators/math/pool_test_maxPool3d.cc | 154 +++++++++++++++ paddle/operators/math/pooling.cc | 142 ++++++++++++++ paddle/operators/math/pooling.cu | 196 +++++++++++++++++++ paddle/operators/math/pooling.h | 20 ++ 6 files changed, 664 insertions(+) create mode 100644 paddle/operators/math/pool_test_maxPool2d.cc create mode 100644 paddle/operators/math/pool_test_maxPool3d.cc diff --git a/paddle/operators/math/CMakeLists.txt b/paddle/operators/math/CMakeLists.txt index 185708cdaa..1233a9ea32 100644 --- a/paddle/operators/math/CMakeLists.txt +++ b/paddle/operators/math/CMakeLists.txt @@ -7,3 +7,5 @@ endif() nv_test(math_function_test SRCS math_function_test.cc DEPS math_function tensor) cc_test(im2col_test SRCS im2col_test.cc DEPS math_function tensor) +cc_test(pool_test_maxPool2d_test SRCS pool_test_maxPool2d.cc DEPS math_function tensor) +cc_test(pool_test_maxPool3d_test SRCS pool_test_maxPool3d.cc DEPS math_function tensor) diff --git a/paddle/operators/math/pool_test_maxPool2d.cc b/paddle/operators/math/pool_test_maxPool2d.cc new file mode 100644 index 0000000000..8ddf1d79c7 --- /dev/null +++ b/paddle/operators/math/pool_test_maxPool2d.cc @@ -0,0 +1,150 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ + +#include +#include "paddle/operators/math/pooling.h" + +#include "paddle/memory/memcpy.h" +#include "paddle/platform/enforce.h" + +#include +#include + +#ifndef PADDLE_ONLY_CPU + +template +void testPool2d(paddle::platform::DeviceContext& context, PooType pool_process, + paddle::framework::Tensor& input, + paddle::framework::Tensor& input_grad, + paddle::framework::Tensor& output, + paddle::framework::Tensor& output_grad, std::vector& ksize, + std::vector& strides, std::vector& paddings) { + paddle::operators::math::Pool2dForwardFunctor + pool2d_forward; + pool2d_forward(context, input, output, ksize, strides, paddings, + pool_process); + + int times = 50; + clock_t start, finish; + double totaltime; + + // Pool2dBackwardFunctor + start = clock(); + for (int i = 0; i < times; ++i) { + paddle::operators::math::Pool2dBackwardFunctor + pool2d_backward; + pool2d_backward(context, input, input_grad, output, output_grad, ksize, + strides, paddings, pool_process); + PADDLE_ENFORCE(cudaStreamSynchronize(0), + "cudaStreamSynchronize failed in pool2d_backward CopyFrom"); + } + finish = clock(); + totaltime = (double)(finish - start) / CLOCKS_PER_SEC; + totaltime /= times; + std::cout << "\nPool3dBackwardFunctor: " << totaltime << "s" << std::endl; + + // MaxPool3dBackwardFunctor + start = clock(); + for (int j = 0; j < times; ++j) { + paddle::operators::math::MaxPool2dBackwardFunctor< + paddle::platform::GPUPlace, float> + maxpool2d_backward; + maxpool2d_backward(context, input, input_grad, output, output_grad, ksize, + strides, paddings); + PADDLE_ENFORCE( + cudaStreamSynchronize(0), + "cudaStreamSynchronize failed in maxpool2d_backward CopyFrom"); + } + finish = clock(); + totaltime = (double)(finish - start) / CLOCKS_PER_SEC; + totaltime /= times; + std::cout << "\nMaxPool3dBackwardFunctor: " << totaltime << "s" << std::endl; +} + +void test2dPool() { + using paddle::platform::DeviceContext; + using paddle::platform::CUDADeviceContext; + using paddle::platform::GPUPlace; + + paddle::framework::Tensor input_tmp; + paddle::framework::Tensor output_tmp; + paddle::framework::Tensor input; + paddle::framework::Tensor input_grad; + paddle::framework::Tensor output; + paddle::framework::Tensor output_grad; + + int batch = 32; + int channel = 32; + int input_height = 128; + int input_width = 128; + int in_len = batch * channel * input_height * input_width; + std::vector ksize({3, 3}); + std::vector strides({1, 1}); + std::vector paddings({0, 0}); + + int output_height = + (input_height - ksize[0] + 2 * paddings[0]) / strides[0] + 1; + int output_width = + (input_width - ksize[1] + 2 * paddings[1]) / strides[1] + 1; + int output_len = output_height * output_width; + + input_tmp.mutable_data({batch, channel, input_height, input_width}, + paddle::platform::CPUPlace()); + output_tmp.mutable_data({batch, channel, output_height, output_width}, + paddle::platform::CPUPlace()); + + float* arr = new float[in_len]; + auto* place = new paddle::platform::GPUPlace(); + + float* input_ptr = input_tmp.data(); + for (int i = 0; i < in_len; ++i) arr[i] = i; // rand() / double(RAND_MAX/2); + memcpy(input_ptr, arr, in_len * sizeof(float)); + input.CopyFrom(input_tmp, *place); + + input_ptr = input_tmp.data(); + for (int i = 0; i < in_len; ++i) arr[i] = 0; + memcpy(input_ptr, arr, in_len * sizeof(float)); + input_grad.CopyFrom(input_tmp, *place); + + // output + input_ptr = output_tmp.data(); + for (int i = 0; i < output_len; ++i) + arr[i] = 0; // rand() / double(RAND_MAX/2); + memcpy(input_ptr, arr, output_len * sizeof(float)); + output.CopyFrom(input_tmp, *place); + + // output + input_ptr = output_tmp.data(); + for (int i = 0; i < output_len; ++i) + arr[i] = 1; // rand() / double(RAND_MAX/2); + memcpy(input_ptr, arr, output_len * sizeof(float)); + output_grad.CopyFrom(input_tmp, *place); + + paddle::platform::DeviceContext* context = + new paddle::platform::CUDADeviceContext(paddle::platform::GPUPlace()); + paddle::operators::math::pool::maxPool pool_process; + + testPool2d>( + *context, pool_process, input, input_grad, output, output_grad, ksize, + strides, paddings); +} + +int main() { + // testPool3d(); + test2dPool(); + // testPool3d(); +} +#endif \ No newline at end of file diff --git a/paddle/operators/math/pool_test_maxPool3d.cc b/paddle/operators/math/pool_test_maxPool3d.cc new file mode 100644 index 0000000000..000b006a12 --- /dev/null +++ b/paddle/operators/math/pool_test_maxPool3d.cc @@ -0,0 +1,154 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ + +#include +#include "paddle/operators/math/pooling.h" + +#include "paddle/memory/memcpy.h" +#include "paddle/platform/enforce.h" + +#include +#include + +#ifndef PADDLE_ONLY_CPU + +template +void testPool3d(paddle::platform::DeviceContext& context, PooType pool_process, + paddle::framework::Tensor& input, + paddle::framework::Tensor& input_grad, + paddle::framework::Tensor& output, + paddle::framework::Tensor& output_grad, std::vector& ksize, + std::vector& strides, std::vector& paddings) { + paddle::operators::math::Pool3dForwardFunctor + pool3d_forward; + pool3d_forward(context, input, output, ksize, strides, paddings, + pool_process); + + int times = 50; + clock_t start, finish; + double totaltime; + + // Pool3dBackwardFunctor + start = clock(); + for (int i = 0; i < times; ++i) { + paddle::operators::math::Pool3dBackwardFunctor + pool3d_backward; + pool3d_backward(context, input, input_grad, output, output_grad, ksize, + strides, paddings, pool_process); + PADDLE_ENFORCE(cudaStreamSynchronize(0), + "cudaStreamSynchronize failed in pool3d_backward CopyFrom"); + } + finish = clock(); + totaltime = (double)(finish - start) / CLOCKS_PER_SEC; + totaltime /= times; + std::cout << "\nPool3dBackwardFunctor: " << totaltime << "s" << std::endl; + + // MaxPool3dBackwardFunctor + start = clock(); + for (int j = 0; j < times; ++j) { + paddle::operators::math::MaxPool3dBackwardFunctor< + paddle::platform::GPUPlace, float> + maxpool3d_backward; + maxpool3d_backward(context, input, input_grad, output, output_grad, ksize, + strides, paddings); + PADDLE_ENFORCE( + cudaStreamSynchronize(0), + "cudaStreamSynchronize failed in maxpool3d_backward CopyFrom"); + } + finish = clock(); + totaltime = (double)(finish - start) / CLOCKS_PER_SEC; + totaltime /= times; + std::cout << "\nMaxPool3dBackwardFunctor: " << totaltime << "s" << std::endl; +} + +void test3dPool() { + using paddle::platform::DeviceContext; + using paddle::platform::CUDADeviceContext; + using paddle::platform::GPUPlace; + + paddle::framework::Tensor input_tmp; + paddle::framework::Tensor output_tmp; + paddle::framework::Tensor input; + paddle::framework::Tensor input_grad; + paddle::framework::Tensor output; + paddle::framework::Tensor output_grad; + + int batch = 32; + int channel = 4; + int input_depth = 4; + int input_height = 128; + int input_width = 128; + int in_len = batch * channel * input_depth * input_height * input_width; + std::vector ksize({3, 3, 3}); + std::vector strides({2, 2, 2}); + std::vector paddings({1, 1, 1}); + + int output_depth = + (input_depth - ksize[0] + 2 * paddings[0]) / strides[0] + 1; + int output_height = + (input_height - ksize[1] + 2 * paddings[1]) / strides[1] + 1; + int output_width = + (input_width - ksize[2] + 2 * paddings[2]) / strides[2] + 1; + + int output_len = output_depth * output_height * output_width; + + input_tmp.mutable_data( + {batch, channel, input_depth, input_height, input_width}, + paddle::platform::CPUPlace()); + output_tmp.mutable_data( + {batch, channel, output_depth, output_height, output_width}, + paddle::platform::CPUPlace()); + + float* arr = new float[in_len]; + auto* place = new paddle::platform::GPUPlace(); + + // input + float* input_ptr = input_tmp.data(); + for (int i = 0; i < in_len; ++i) arr[i] = i; // rand() / double(RAND_MAX/2); + memcpy(input_ptr, arr, in_len * sizeof(float)); + input.CopyFrom(input_tmp, *place); + + // input_grad + input_ptr = input_tmp.data(); + for (int i = 0; i < in_len; ++i) arr[i] = 0; + memcpy(input_ptr, arr, in_len * sizeof(float)); + input_grad.CopyFrom(input_tmp, *place); + + // output + input_ptr = output_tmp.data(); + for (int i = 0; i < output_len; ++i) + arr[i] = 0; // rand() / double(RAND_MAX/2); + memcpy(input_ptr, arr, output_len * sizeof(float)); + output.CopyFrom(input_tmp, *place); + + // output_grad + input_ptr = output_tmp.data(); + for (int i = 0; i < output_len; ++i) + arr[i] = 1; // rand() / double(RAND_MAX/2); + memcpy(input_ptr, arr, output_len * sizeof(float)); + output_grad.CopyFrom(input_tmp, *place); + + paddle::platform::DeviceContext* context = + new paddle::platform::CUDADeviceContext(paddle::platform::GPUPlace()); + paddle::operators::math::pool::maxPool pool_process; + + testPool3d>( + *context, pool_process, input, input_grad, output, output_grad, ksize, + strides, paddings); +} + +int main() { test3dPool(); } +#endif \ No newline at end of file diff --git a/paddle/operators/math/pooling.cc b/paddle/operators/math/pooling.cc index 5ce748ff08..7c616023ca 100644 --- a/paddle/operators/math/pooling.cc +++ b/paddle/operators/math/pooling.cc @@ -134,6 +134,70 @@ class Pool2dBackwardFunctor { } }; +template +class MaxPool2dBackwardFunctor { + public: + void operator()(const platform::DeviceContext& context, + const framework::Tensor& input, framework::Tensor& input_grad, + const framework::Tensor& output, + const framework::Tensor& output_grad, std::vector& ksize, + std::vector& strides, std::vector& paddings) { + const int batch_size = input.dims()[0]; + const int input_height = input.dims()[2]; + const int input_width = input.dims()[3]; + const int output_channels = output.dims()[1]; + const int output_height = output.dims()[2]; + const int output_width = output.dims()[3]; + const int ksize_height = ksize[0]; + const int ksize_width = ksize[1]; + const int stride_height = strides[0]; + const int stride_width = strides[1]; + const int padding_height = paddings[0]; + const int padding_width = paddings[1]; + const int input_stride = input_height * input_width; + const int output_stride = output_height * output_width; + + const T* input_data = input.data(); + const T* output_data = output.data(); + const T* output_grad_data = output_grad.data(); + T* input_grad_data = input_grad.mutable_data(context.GetPlace()); + + for (int i = 0; i < batch_size; i++) { + for (int c = 0; c < output_channels; ++c) { + for (int ph = 0; ph < output_height; ++ph) { + int hstart = ph * stride_height - padding_height; + int hend = std::min(hstart + ksize_height, input_height); + hstart = std::max(hstart, 0); + for (int pw = 0; pw < output_width; ++pw) { + int wstart = pw * stride_width - padding_width; + int wend = std::min(wstart + ksize_width, input_width); + wstart = std::max(wstart, 0); + + bool stop = false; + for (int h = hstart; h < hend && !stop; ++h) { + for (int w = wstart; w < wend && !stop; ++w) { + int input_idx = h * input_width + w; + int output_idx = ph * output_width + pw; + if (input_data[input_idx] == output_data[output_idx]) { + input_grad_data[input_idx] += output_grad_data[output_idx]; + stop = true; + } + } + } + } + } + input_data += input_stride; + output_data += output_stride; + input_grad_data += input_stride; + output_grad_data += output_stride; + } + } + } +}; + +template class MaxPool2dBackwardFunctor; +template class MaxPool2dBackwardFunctor; + template class Pool2dForwardFunctor< platform::CPUPlace, paddle::operators::math::pool::maxPool, float>; template class Pool2dForwardFunctor< @@ -299,6 +363,84 @@ class Pool3dBackwardFunctor { } }; +template +class MaxPool3dBackwardFunctor { + public: + void operator()(const platform::DeviceContext& context, + const framework::Tensor& input, framework::Tensor& input_grad, + const framework::Tensor& output, + const framework::Tensor& output_grad, std::vector& ksize, + std::vector& strides, std::vector& paddings) { + const int batch_size = input.dims()[0]; + const int input_depth = input.dims()[2]; + const int input_height = input.dims()[3]; + const int input_width = input.dims()[4]; + const int output_channels = output.dims()[1]; + const int output_depth = output.dims()[2]; + const int output_height = output.dims()[3]; + const int output_width = output.dims()[4]; + const int ksize_depth = ksize[0]; + const int ksize_height = ksize[1]; + const int ksize_width = ksize[2]; + const int stride_depth = strides[0]; + const int stride_height = strides[1]; + const int stride_width = strides[2]; + const int padding_depth = paddings[0]; + const int padding_height = paddings[1]; + const int padding_width = paddings[2]; + const int input_stride = input_depth * input_height * input_width; + const int output_stride = output_depth * output_height * output_width; + + const T* input_data = input.data(); + const T* output_data = output.data(); + const T* output_grad_data = output_grad.data(); + T* input_grad_data = input_grad.mutable_data(context.GetPlace()); + + for (int i = 0; i < batch_size; i++) { + for (int c = 0; c < output_channels; ++c) { + for (int pd = 0; pd < output_depth; ++pd) { + int dstart = pd * stride_depth - padding_depth; + int dend = std::min(dstart + ksize_depth, input_depth); + dstart = std::max(dstart, 0); + for (int ph = 0; ph < output_height; ++ph) { + int hstart = ph * stride_height - padding_height; + int hend = std::min(hstart + ksize_height, input_height); + hstart = std::max(hstart, 0); + for (int pw = 0; pw < output_width; ++pw) { + int wstart = pw * stride_width - padding_width; + int wend = std::min(wstart + ksize_width, input_width); + wstart = std::max(wstart, 0); + bool stop = false; + for (int d = dstart; d < dend && !stop; ++d) { + for (int h = hstart; h < hend && !stop; ++h) { + for (int w = wstart; w < wend && !stop; ++w) { + int input_idx = (d * input_height + h) * input_width + w; + int output_idx = + (pd * output_height + ph) * output_width + pw; + + if (input_data[input_idx] == output_data[output_idx]) { + input_grad_data[input_idx] += + output_grad_data[output_idx]; + stop = true; + } + } + } + } + } + } + } + input_data += input_stride; + output_data += output_stride; + input_grad_data += input_stride; + output_grad_data += output_stride; + } + } + } +}; + +template class MaxPool3dBackwardFunctor; +template class MaxPool3dBackwardFunctor; + template class Pool3dForwardFunctor< platform::CPUPlace, paddle::operators::math::pool::maxPool, float>; template class Pool3dForwardFunctor< diff --git a/paddle/operators/math/pooling.cu b/paddle/operators/math/pooling.cu index 09e6bd9000..347270abc3 100644 --- a/paddle/operators/math/pooling.cu +++ b/paddle/operators/math/pooling.cu @@ -102,6 +102,51 @@ __global__ void KernelPool2dBackward( } } +template +__global__ void KernelMaxPool2dBackward( + const int nthreads, const T* input_data, const T* output_data, + const T* output_grad, T* input_grad, const int channels, + const int input_height, const int input_width, const int output_height, + const int output_width, const int ksize_height, const int ksize_width, + const int stride_height, const int stride_width, const int padding_height, + const int padding_width) { + int index = blockIdx.x * blockDim.x + threadIdx.x; + if (index < nthreads) { + int pw = index % output_width; + int ph = (index / output_width) % output_height; + int c = (index / output_width / output_height) % channels; + int batch_idx = index / output_width / output_height / channels; + + int hstart = ph * stride_height - padding_height; + int hend = min(hstart + ksize_height, input_height); + hstart = max(hstart, 0); + + int wstart = pw * stride_width - padding_width; + int wend = min(wstart + ksize_width, input_width); + wstart = max(wstart, 0); + + input_data += (batch_idx * channels + c) * input_height * input_width; + input_grad += (batch_idx * channels + c) * input_height * input_width; + + T ele = output_data[index]; + int maxIndex = -1; + bool stop = false; + for (int h = hstart; h < hend && !stop; ++h) { + for (int w = wstart; w < wend && !stop; ++w) { + if (ele == input_data[h * input_width + w]) { + maxIndex = h * input_width + w; + stop = true; + } + } + } + + if (maxIndex != -1) { + // atomic add + atomicAdd(input_grad + maxIndex, output_grad[index]); + } + } +} + template class Pool2dForwardFunctor { public: @@ -187,6 +232,52 @@ class Pool2dBackwardFunctor { } }; +template +class MaxPool2dBackwardFunctor { + public: + void operator()(const platform::DeviceContext& context, + const framework::Tensor& input, framework::Tensor& input_grad, + const framework::Tensor& output, + const framework::Tensor& output_grad, std::vector& ksize, + std::vector& strides, std::vector& paddings) { + const int batch_size = input.dims()[0]; + const int input_channels = input.dims()[1]; + const int input_height = input.dims()[2]; + const int input_width = input.dims()[3]; + const int output_channels = output.dims()[1]; + const int output_height = output.dims()[2]; + const int output_width = output.dims()[3]; + const int ksize_height = ksize[0]; + const int ksize_width = ksize[1]; + const int stride_height = strides[0]; + const int stride_width = strides[1]; + const int padding_height = paddings[0]; + const int padding_width = paddings[1]; + + const T* input_data = input.data(); + const T* output_data = output.data(); + const T* output_grad_data = output_grad.data(); + T* input_grad_data = input_grad.mutable_data(context.GetPlace()); + + int nthreads = batch_size * output_channels * output_height * output_width; + int blocks = (nthreads + 1024 - 1) / 1024; + dim3 threads(1024, 1); + dim3 grid(blocks, 1); + + KernelMaxPool2dBackward< + T><<(context) + .stream()>>>( + nthreads, input_data, output_data, output_grad_data, input_grad_data, + input_channels, input_height, input_width, output_height, output_width, + ksize_height, ksize_width, stride_height, stride_width, padding_height, + padding_width); + } +}; + +template class MaxPool2dBackwardFunctor; +// template class MaxPool2dBackwardFunctor; + template class Pool2dForwardFunctor< platform::GPUPlace, paddle::operators::math::pool::maxPool, float>; template class Pool2dForwardFunctor< @@ -311,6 +402,58 @@ __global__ void KernelPool3DBackward( } } +template +__global__ void KernelMaxPool3DBackward( + const int nthreads, const T* input_data, const T* output_data, + const T* output_grad, T* input_grad, const int channels, + const int input_depth, const int input_height, const int input_width, + const int output_depth, const int output_height, const int output_width, + const int ksize_depth, const int ksize_height, const int ksize_width, + const int stride_depth, const int stride_height, const int stride_width, + const int padding_depth, const int padding_height, + const int padding_width) { + for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (nthreads); + index += blockDim.x * gridDim.x) { + int pw = index % output_width; + int ph = (index / output_width) % output_height; + int pd = (index / output_width / output_height) % output_depth; + int c = (index / output_width / output_height / output_depth) % channels; + int batch_idx = + index / output_width / output_height / output_depth / channels; + int dstart = pd * stride_depth - padding_depth; + int hstart = ph * stride_height - padding_height; + int wstart = pw * stride_width - padding_width; + int dend = min(dstart + ksize_depth, input_depth); + int hend = min(hstart + ksize_height, input_height); + int wend = min(wstart + ksize_width, input_width); + dstart = max(dstart, 0); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + T ele = output_data[index]; + bool stop = false; + int maxIdx = -1; + input_data += + (batch_idx * channels + c) * input_depth * input_height * input_width; + input_grad += + (batch_idx * channels + c) * input_depth * input_height * input_width; + + for (int d = dstart; d < dend && !stop; ++d) { + for (int h = hstart; h < hend && !stop; ++h) { + for (int w = wstart; w < wend && !stop; ++w) { + if (ele == input_data[(d * input_height + h) * input_width + w]) { + stop = true; + maxIdx = (d * input_height + h) * input_width + w; + } + } + } + } + if (maxIdx != -1) { + // atomic add + atomicAdd(input_grad + maxIdx, output_grad[index]); + } + } +} + template class Pool3dForwardFunctor { public: @@ -411,6 +554,59 @@ class Pool3dBackwardFunctor { } }; +template +class MaxPool3dBackwardFunctor { + public: + void operator()(const platform::DeviceContext& context, + const framework::Tensor& input, framework::Tensor& input_grad, + const framework::Tensor& output, + const framework::Tensor& output_grad, std::vector& ksize, + std::vector& strides, std::vector& paddings) { + const int batch_size = input.dims()[0]; + const int input_channels = input.dims()[1]; + const int input_depth = input.dims()[2]; + const int input_height = input.dims()[3]; + const int input_width = input.dims()[4]; + const int output_channels = output.dims()[1]; + const int output_depth = output.dims()[2]; + const int output_height = output.dims()[3]; + const int output_width = output.dims()[4]; + const int ksize_depth = ksize[0]; + const int ksize_height = ksize[1]; + const int ksize_width = ksize[2]; + const int stride_depth = strides[0]; + const int stride_height = strides[1]; + const int stride_width = strides[2]; + const int padding_depth = paddings[0]; + const int padding_height = paddings[1]; + const int padding_width = paddings[2]; + + const T* input_data = input.data(); + const T* output_data = output.data(); + const T* output_grad_data = output_grad.data(); + T* input_grad_data = input_grad.mutable_data(context.GetPlace()); + + int nthreads = batch_size * output_channels * output_depth * output_height * + output_width; + int blocks = (nthreads + 1024 - 1) / 1024; + dim3 threads(1024, 1); + dim3 grid(blocks, 1); + + KernelMaxPool3DBackward< + T><<(context) + .stream()>>>( + nthreads, input_data, output_data, output_grad_data, input_grad_data, + input_channels, input_depth, input_height, input_width, output_depth, + output_height, output_width, ksize_depth, ksize_height, ksize_width, + stride_depth, stride_height, stride_width, padding_depth, + padding_height, padding_width); + } +}; + +template class MaxPool3dBackwardFunctor; +// template class MaxPool3dBackwardFunctor; + template class Pool3dForwardFunctor< platform::GPUPlace, paddle::operators::math::pool::maxPool, float>; template class Pool3dForwardFunctor< diff --git a/paddle/operators/math/pooling.h b/paddle/operators/math/pooling.h index 627ece2ca4..4b8b02d374 100644 --- a/paddle/operators/math/pooling.h +++ b/paddle/operators/math/pooling.h @@ -72,6 +72,16 @@ class Pool2dBackwardFunctor { PoolProcess pool_process); }; +template +class MaxPool2dBackwardFunctor { + public: + void operator()(const platform::DeviceContext& context, + const framework::Tensor& input, framework::Tensor& input_grad, + const framework::Tensor& output, + const framework::Tensor& output_grad, std::vector& ksize, + std::vector& strides, std::vector& paddings); +}; + template class Pool3dForwardFunctor { public: @@ -92,6 +102,16 @@ class Pool3dBackwardFunctor { PoolProcess pool_process); }; +template +class MaxPool3dBackwardFunctor { + public: + void operator()(const platform::DeviceContext& context, + const framework::Tensor& input, framework::Tensor& input_grad, + const framework::Tensor& output, + const framework::Tensor& output_grad, std::vector& ksize, + std::vector& strides, std::vector& paddings); +}; + } // namespace math } // namespace operators } // namespace paddle From b72854389e3d7e24f24a82f2608ee470cd9b43ff Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Mon, 25 Sep 2017 20:14:12 +0800 Subject: [PATCH 032/342] Fix (According to the review) --- paddle/operators/math/pooling.cc | 32 ++++++++++++------- paddle/operators/math/pooling.cu | 32 ++++++++++++------- paddle/operators/math/pooling.h | 17 +++++++--- paddle/operators/pool_op.cc | 8 ++--- paddle/operators/pool_op.h | 32 +++++++++---------- .../v2/framework/tests/test_pool2d_op.py | 14 ++++---- .../v2/framework/tests/test_pool3d_op.py | 14 ++++---- 7 files changed, 87 insertions(+), 62 deletions(-) diff --git a/paddle/operators/math/pooling.cc b/paddle/operators/math/pooling.cc index 5ce748ff08..72b260e729 100644 --- a/paddle/operators/math/pooling.cc +++ b/paddle/operators/math/pooling.cc @@ -137,19 +137,23 @@ class Pool2dBackwardFunctor { template class Pool2dForwardFunctor< platform::CPUPlace, paddle::operators::math::pool::maxPool, float>; template class Pool2dForwardFunctor< - platform::CPUPlace, paddle::operators::math::pool::avePool, float>; + platform::CPUPlace, paddle::operators::math::pool::avgPool, float>; template class Pool2dBackwardFunctor< - platform::CPUPlace, paddle::operators::math::pool::maxPool, float>; + platform::CPUPlace, paddle::operators::math::pool::maxPoolGrad, + float>; template class Pool2dBackwardFunctor< - platform::CPUPlace, paddle::operators::math::pool::avePool, float>; + platform::CPUPlace, paddle::operators::math::pool::avgPoolGrad, + float>; template class Pool2dForwardFunctor< platform::CPUPlace, paddle::operators::math::pool::maxPool, double>; template class Pool2dForwardFunctor< - platform::CPUPlace, paddle::operators::math::pool::avePool, double>; + platform::CPUPlace, paddle::operators::math::pool::avgPool, double>; template class Pool2dBackwardFunctor< - platform::CPUPlace, paddle::operators::math::pool::maxPool, double>; + platform::CPUPlace, paddle::operators::math::pool::maxPoolGrad, + double>; template class Pool2dBackwardFunctor< - platform::CPUPlace, paddle::operators::math::pool::avePool, double>; + platform::CPUPlace, paddle::operators::math::pool::avgPoolGrad, + double>; template class Pool3dForwardFunctor { @@ -302,19 +306,23 @@ class Pool3dBackwardFunctor { template class Pool3dForwardFunctor< platform::CPUPlace, paddle::operators::math::pool::maxPool, float>; template class Pool3dForwardFunctor< - platform::CPUPlace, paddle::operators::math::pool::avePool, float>; + platform::CPUPlace, paddle::operators::math::pool::avgPool, float>; template class Pool3dBackwardFunctor< - platform::CPUPlace, paddle::operators::math::pool::maxPool, float>; + platform::CPUPlace, paddle::operators::math::pool::maxPoolGrad, + float>; template class Pool3dBackwardFunctor< - platform::CPUPlace, paddle::operators::math::pool::avePool, float>; + platform::CPUPlace, paddle::operators::math::pool::avgPoolGrad, + float>; template class Pool3dForwardFunctor< platform::CPUPlace, paddle::operators::math::pool::maxPool, double>; template class Pool3dForwardFunctor< - platform::CPUPlace, paddle::operators::math::pool::avePool, double>; + platform::CPUPlace, paddle::operators::math::pool::avgPool, double>; template class Pool3dBackwardFunctor< - platform::CPUPlace, paddle::operators::math::pool::maxPool, double>; + platform::CPUPlace, paddle::operators::math::pool::maxPoolGrad, + double>; template class Pool3dBackwardFunctor< - platform::CPUPlace, paddle::operators::math::pool::avePool, double>; + platform::CPUPlace, paddle::operators::math::pool::avgPoolGrad, + double>; } // namespace math } // namespace operators } // namespace paddle diff --git a/paddle/operators/math/pooling.cu b/paddle/operators/math/pooling.cu index 09e6bd9000..619fd64b97 100644 --- a/paddle/operators/math/pooling.cu +++ b/paddle/operators/math/pooling.cu @@ -190,19 +190,23 @@ class Pool2dBackwardFunctor { template class Pool2dForwardFunctor< platform::GPUPlace, paddle::operators::math::pool::maxPool, float>; template class Pool2dForwardFunctor< - platform::GPUPlace, paddle::operators::math::pool::avePool, float>; + platform::GPUPlace, paddle::operators::math::pool::avgPool, float>; template class Pool2dBackwardFunctor< - platform::GPUPlace, paddle::operators::math::pool::maxPool, float>; + platform::GPUPlace, paddle::operators::math::pool::maxPoolGrad, + float>; template class Pool2dBackwardFunctor< - platform::GPUPlace, paddle::operators::math::pool::avePool, float>; + platform::GPUPlace, paddle::operators::math::pool::avgPoolGrad, + float>; template class Pool2dForwardFunctor< platform::GPUPlace, paddle::operators::math::pool::maxPool, double>; template class Pool2dForwardFunctor< - platform::GPUPlace, paddle::operators::math::pool::avePool, double>; + platform::GPUPlace, paddle::operators::math::pool::avgPool, double>; template class Pool2dBackwardFunctor< - platform::GPUPlace, paddle::operators::math::pool::maxPool, double>; + platform::GPUPlace, paddle::operators::math::pool::maxPoolGrad, + double>; template class Pool2dBackwardFunctor< - platform::GPUPlace, paddle::operators::math::pool::avePool, double>; + platform::GPUPlace, paddle::operators::math::pool::avgPoolGrad, + double>; template __global__ void KernelPool3DForward( @@ -414,19 +418,23 @@ class Pool3dBackwardFunctor { template class Pool3dForwardFunctor< platform::GPUPlace, paddle::operators::math::pool::maxPool, float>; template class Pool3dForwardFunctor< - platform::GPUPlace, paddle::operators::math::pool::avePool, float>; + platform::GPUPlace, paddle::operators::math::pool::avgPool, float>; template class Pool3dBackwardFunctor< - platform::GPUPlace, paddle::operators::math::pool::maxPool, float>; + platform::GPUPlace, paddle::operators::math::pool::maxPoolGrad, + float>; template class Pool3dBackwardFunctor< - platform::GPUPlace, paddle::operators::math::pool::avePool, float>; + platform::GPUPlace, paddle::operators::math::pool::avgPoolGrad, + float>; template class Pool3dForwardFunctor< platform::GPUPlace, paddle::operators::math::pool::maxPool, double>; template class Pool3dForwardFunctor< - platform::GPUPlace, paddle::operators::math::pool::avePool, double>; + platform::GPUPlace, paddle::operators::math::pool::avgPool, double>; template class Pool3dBackwardFunctor< - platform::GPUPlace, paddle::operators::math::pool::maxPool, double>; + platform::GPUPlace, paddle::operators::math::pool::maxPoolGrad, + double>; template class Pool3dBackwardFunctor< - platform::GPUPlace, paddle::operators::math::pool::avePool, double>; + platform::GPUPlace, paddle::operators::math::pool::avgPoolGrad, + double>; } // namespace math } // namespace operators diff --git a/paddle/operators/math/pooling.h b/paddle/operators/math/pooling.h index 627ece2ca4..b975ad31af 100644 --- a/paddle/operators/math/pooling.h +++ b/paddle/operators/math/pooling.h @@ -33,6 +33,18 @@ class maxPool { DEVICE inline T initial() { return static_cast(-FLT_MAX); } DEVICE inline void process(T& y, const T& x) { y = y > x ? y : x; } DEVICE inline void finalize(T& y, const T& poo_size) {} +}; + +template +class avgPool { + public: + DEVICE inline T initial() { return static_cast(0); } + DEVICE inline void process(T& y, const T& x) { y += x; } + DEVICE inline void finalize(T& y, const T& poo_size) { y /= poo_size; } +}; +template +class maxPoolGrad { + public: DEVICE inline void gradProcess(const T& x, const T& y, const T& dy, T& dx, T scale) { dx += dy * (x == y); @@ -40,11 +52,8 @@ class maxPool { }; template -class avePool { +class avgPoolGrad { public: - DEVICE inline T initial() { return static_cast(0); } - DEVICE inline void process(T& y, const T& x) { y += x; } - DEVICE inline void finalize(T& y, const T& poo_size) { y /= poo_size; } DEVICE inline void gradProcess(const T& x, const T& y, const T& dy, T& dx, T scale) { dx += (scale * dy); diff --git a/paddle/operators/pool_op.cc b/paddle/operators/pool_op.cc index c57922373f..e51249beb9 100644 --- a/paddle/operators/pool_op.cc +++ b/paddle/operators/pool_op.cc @@ -41,8 +41,8 @@ class PoolOp : public framework::OperatorWithKernel { std::vector strides = Attr>("strides"); std::vector paddings = Attr>("paddings"); - PADDLE_ENFORCE(pooling_type == "max" || pooling_type == "ave", - "pooling_type should be 'max' or 'ave'"); + PADDLE_ENFORCE(pooling_type == "max" || pooling_type == "avg", + "pooling_type should be 'max' or 'avg'"); PADDLE_ENFORCE(in_X->dims().size() == 4 || in_X->dims().size() == 5, "Pooling intput should be 4-D or 5-D"); @@ -99,7 +99,7 @@ class Pool2dOpMaker : public framework::OpProtoAndCheckerMaker { AddAttr("poolingType", "poolingType of pooling operator." - "str constant equal to 'max' or 'ave'"); + "str constant equal to 'max' or 'avg'"); AddAttr>( "ksize", "pooling size(height, width) of pooling operator."); AddAttr( @@ -140,7 +140,7 @@ class Pool3dOpMaker : public framework::OpProtoAndCheckerMaker { AddAttr("poolingType", "poolingType of pooling operator." - "str constant equal to 'max' or 'ave'"); + "str constant equal to 'max' or 'avg'"); AddAttr>( "ksize", "pooling size(depth, height, width) of pooling operator."); AddAttr( diff --git a/paddle/operators/pool_op.h b/paddle/operators/pool_op.h index 5a40f76172..6f1d05272c 100644 --- a/paddle/operators/pool_op.h +++ b/paddle/operators/pool_op.h @@ -52,11 +52,11 @@ class PoolKernel : public framework::OpKernel { pool2d_forward(context.device_context(), *in_X, *out, ksize, strides, paddings, pool_process); - } else if (pooling_type == "ave") { + } else if (pooling_type == "avg") { paddle::operators::math::Pool2dForwardFunctor< - Place, paddle::operators::math::pool::avePool, T> + Place, paddle::operators::math::pool::avgPool, T> pool2d_forward; - paddle::operators::math::pool::avePool pool_process; + paddle::operators::math::pool::avgPool pool_process; pool2d_forward(context.device_context(), *in_X, *out, ksize, strides, paddings, pool_process); } @@ -69,11 +69,11 @@ class PoolKernel : public framework::OpKernel { paddle::operators::math::pool::maxPool pool_process; pool3d_forward(context.device_context(), *in_X, *out, ksize, strides, paddings, pool_process); - } else if (pooling_type == "ave") { + } else if (pooling_type == "avg") { paddle::operators::math::Pool3dForwardFunctor< - Place, paddle::operators::math::pool::avePool, T> + Place, paddle::operators::math::pool::avgPool, T> pool3d_forward; - paddle::operators::math::pool::avePool pool_process; + paddle::operators::math::pool::avgPool pool_process; pool3d_forward(context.device_context(), *in_X, *out, ksize, strides, paddings, pool_process); } @@ -112,16 +112,16 @@ class PoolGradKernel : public framework::OpKernel { case 2: { if (pooling_type == "max") { paddle::operators::math::Pool2dBackwardFunctor< - Place, paddle::operators::math::pool::maxPool, T> + Place, paddle::operators::math::pool::maxPoolGrad, T> pool2d_backward; - paddle::operators::math::pool::maxPool pool_process; + paddle::operators::math::pool::maxPoolGrad pool_process; pool2d_backward(context.device_context(), *in_X, *in_X_grad, *out, *out_grad, ksize, strides, paddings, pool_process); - } else if (pooling_type == "ave") { + } else if (pooling_type == "avg") { paddle::operators::math::Pool2dBackwardFunctor< - Place, paddle::operators::math::pool::avePool, T> + Place, paddle::operators::math::pool::avgPoolGrad, T> pool2d_backward; - paddle::operators::math::pool::avePool pool_process; + paddle::operators::math::pool::avgPoolGrad pool_process; pool2d_backward(context.device_context(), *in_X, *in_X_grad, *out, *out_grad, ksize, strides, paddings, pool_process); } @@ -129,16 +129,16 @@ class PoolGradKernel : public framework::OpKernel { case 3: { if (pooling_type == "max") { paddle::operators::math::Pool3dBackwardFunctor< - Place, paddle::operators::math::pool::maxPool, T> + Place, paddle::operators::math::pool::maxPoolGrad, T> pool3d_backward; - paddle::operators::math::pool::maxPool pool_process; + paddle::operators::math::pool::maxPoolGrad pool_process; pool3d_backward(context.device_context(), *in_X, *in_X_grad, *out, *out_grad, ksize, strides, paddings, pool_process); - } else if (pooling_type == "ave") { + } else if (pooling_type == "avg") { paddle::operators::math::Pool3dBackwardFunctor< - Place, paddle::operators::math::pool::avePool, T> + Place, paddle::operators::math::pool::avgPoolGrad, T> pool3d_backward; - paddle::operators::math::pool::avePool pool_process; + paddle::operators::math::pool::avgPoolGrad pool_process; pool3d_backward(context.device_context(), *in_X, *in_X_grad, *out, *out_grad, ksize, strides, paddings, pool_process); } diff --git a/python/paddle/v2/framework/tests/test_pool2d_op.py b/python/paddle/v2/framework/tests/test_pool2d_op.py index 0efddc2881..e8e5218073 100644 --- a/python/paddle/v2/framework/tests/test_pool2d_op.py +++ b/python/paddle/v2/framework/tests/test_pool2d_op.py @@ -23,7 +23,7 @@ def max_pool2D_forward_naive(x, ksize, strides, paddings=[0, 0], global_pool=0): return out -def ave_pool2D_forward_naive(x, ksize, strides, paddings=[0, 0], global_pool=0): +def avg_pool2D_forward_naive(x, ksize, strides, paddings=[0, 0], global_pool=0): N, C, H, W = x.shape if global_pool == 1: @@ -72,8 +72,8 @@ class TestPool2d_Op(OpTest): def initTestCase(self): self.global_pool = 0 - self.pool_type = "ave" - self.pool2D_forward_naive = ave_pool2D_forward_naive + self.pool_type = "avg" + self.pool2D_forward_naive = avg_pool2D_forward_naive self.shape = [2, 3, 5, 5] self.ksize = [3, 3] self.strides = [1, 1] @@ -84,8 +84,8 @@ class TestCase1(TestPool2d_Op): def initTestCase(self): self.global_pool = 0 self.op_type = "pool2d" - self.pool_type = "ave" - self.pool2D_forward_naive = ave_pool2D_forward_naive + self.pool_type = "avg" + self.pool2D_forward_naive = avg_pool2D_forward_naive self.shape = [2, 3, 5, 5] self.ksize = [3, 3] self.strides = [1, 1] @@ -96,8 +96,8 @@ class TestCase2(TestPool2d_Op): def initTestCase(self): self.global_pool = 1 self.op_type = "pool2d" - self.pool_type = "ave" - self.pool2D_forward_naive = ave_pool2D_forward_naive + self.pool_type = "avg" + self.pool2D_forward_naive = avg_pool2D_forward_naive self.shape = [2, 3, 5, 5] self.ksize = [3, 3] self.strides = [1, 1] diff --git a/python/paddle/v2/framework/tests/test_pool3d_op.py b/python/paddle/v2/framework/tests/test_pool3d_op.py index 4ba3d754d5..2a3de89a70 100644 --- a/python/paddle/v2/framework/tests/test_pool3d_op.py +++ b/python/paddle/v2/framework/tests/test_pool3d_op.py @@ -27,7 +27,7 @@ def max_pool3D_forward_naive(x, ksize, strides, paddings=[0, 0], global_pool=0): return out -def ave_pool3D_forward_naive(x, ksize, strides, paddings=[0, 0], global_pool=0): +def avg_pool3D_forward_naive(x, ksize, strides, paddings=[0, 0], global_pool=0): N, C, D, H, W = x.shape if global_pool == 1: @@ -80,8 +80,8 @@ class TestPool3d_Op(OpTest): def initTestCase(self): self.global_pool = 0 - self.pool_type = "ave" - self.pool3D_forward_naive = ave_pool3D_forward_naive + self.pool_type = "avg" + self.pool3D_forward_naive = avg_pool3D_forward_naive self.shape = [2, 3, 5, 5, 5] self.ksize = [3, 3, 3] self.strides = [1, 1, 1] @@ -92,8 +92,8 @@ class TestCase1(TestPool3d_Op): def initTestCase(self): self.global_pool = 0 self.op_type = "pool3d" - self.pool_type = "ave" - self.pool3D_forward_naive = ave_pool3D_forward_naive + self.pool_type = "avg" + self.pool3D_forward_naive = avg_pool3D_forward_naive self.shape = [2, 3, 7, 7, 7] self.ksize = [3, 3, 3] self.strides = [1, 1, 1] @@ -104,8 +104,8 @@ class TestCase2(TestPool3d_Op): def initTestCase(self): self.global_pool = 1 self.op_type = "pool3d" - self.pool_type = "ave" - self.pool3D_forward_naive = ave_pool3D_forward_naive + self.pool_type = "avg" + self.pool3D_forward_naive = avg_pool3D_forward_naive self.shape = [2, 3, 7, 7, 7] self.ksize = [3, 3, 3] self.strides = [1, 1, 1] From 8c478b36e2f1c723d48b5c0e96fd77b7d950c467 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Mon, 25 Sep 2017 21:21:52 +0800 Subject: [PATCH 033/342] fix Atrr check --- paddle/operators/pool_op.cc | 81 +++++++++++++++++++++++++++++++++---- 1 file changed, 74 insertions(+), 7 deletions(-) diff --git a/paddle/operators/pool_op.cc b/paddle/operators/pool_op.cc index e51249beb9..9c1b7ea41d 100644 --- a/paddle/operators/pool_op.cc +++ b/paddle/operators/pool_op.cc @@ -101,7 +101,8 @@ class Pool2dOpMaker : public framework::OpProtoAndCheckerMaker { "poolingType of pooling operator." "str constant equal to 'max' or 'avg'"); AddAttr>( - "ksize", "pooling size(height, width) of pooling operator."); + "ksize", "pooling size(height, width) of pooling operator.") + .AddCustomChecker(GreaterThanChecker_pool({0, 0})); AddAttr( "globalPooling", "whether to use the globalPooling." @@ -112,17 +113,49 @@ class Pool2dOpMaker : public framework::OpProtoAndCheckerMaker { AddAttr>("strides", "strides(height, width) of pooling operator." "default {1,1}") - .SetDefault({1, 1}); + .SetDefault({1, 1}) + .AddCustomChecker(GreaterThanChecker_pool({0, 0})); AddAttr>("paddings", "paddings(height, width) of pooling operator." "default {0,0}") - .SetDefault({0, 0}); - + .SetDefault({0, 0}) + .AddCustomChecker(EqualGreaterThanChecker_pool({0, 0})); AddComment(R"DOC( The pooling2d operation calculates the output based on the input, poolingType and ksize, strides, paddings parameters. )DOC"); } + + private: + struct GreaterThanChecker_pool { + public: + explicit GreaterThanChecker_pool(std::vector lower_bound) + : lower_bound_(lower_bound) {} + void operator()(std::vector &value) const { + PADDLE_ENFORCE(value.size() == lower_bound_.size(), "equal check fails."); + for (size_t i = 0; i < value.size(); ++i) { + PADDLE_ENFORCE(value[i] > lower_bound_[i], "larger_than check fails."); + } + } + + private: + std::vector lower_bound_; + }; + + struct EqualGreaterThanChecker_pool { + public: + explicit EqualGreaterThanChecker_pool(std::vector lower_bound) + : lower_bound_(lower_bound) {} + void operator()(std::vector &value) const { + PADDLE_ENFORCE(value.size() == lower_bound_.size(), "equal check fails."); + for (size_t i = 0; i < value.size(); ++i) { + PADDLE_ENFORCE(value[i] >= lower_bound_[i], "larger_than check fails."); + } + } + + private: + std::vector lower_bound_; + }; }; class Pool3dOpMaker : public framework::OpProtoAndCheckerMaker { public: @@ -142,7 +175,8 @@ class Pool3dOpMaker : public framework::OpProtoAndCheckerMaker { "poolingType of pooling operator." "str constant equal to 'max' or 'avg'"); AddAttr>( - "ksize", "pooling size(depth, height, width) of pooling operator."); + "ksize", "pooling size(depth, height, width) of pooling operator.") + .AddCustomChecker(GreaterThanChecker_pool({0, 0, 0})); AddAttr( "globalPooling", "whether to use the globalPooling." @@ -154,17 +188,50 @@ class Pool3dOpMaker : public framework::OpProtoAndCheckerMaker { "strides", "strides(depth, height, width) of pooling operator." "default {1,1,1}") - .SetDefault({1, 1, 1}); + .SetDefault({1, 1, 1}) + .AddCustomChecker(GreaterThanChecker_pool({0, 0, 0})); AddAttr>( "paddings", "paddings(depth, height, width) of pooling operator." "default {0,0,0}") - .SetDefault({0, 0, 0}); + .SetDefault({0, 0, 0}) + .AddCustomChecker(EqualGreaterThanChecker_pool({0, 0, 0})); AddComment(R"DOC( The pooling3d operation calculates the output based on the input, poolingType and ksize, strides, paddings parameters. )DOC"); } + + private: + struct GreaterThanChecker_pool { + public: + explicit GreaterThanChecker_pool(std::vector lower_bound) + : lower_bound_(lower_bound) {} + void operator()(std::vector &value) const { + PADDLE_ENFORCE(value.size() == lower_bound_.size(), "equal check fails."); + for (size_t i = 0; i < value.size(); ++i) { + PADDLE_ENFORCE(value[i] > lower_bound_[i], "larger_than check fails."); + } + } + + private: + std::vector lower_bound_; + }; + + struct EqualGreaterThanChecker_pool { + public: + explicit EqualGreaterThanChecker_pool(std::vector lower_bound) + : lower_bound_(lower_bound) {} + void operator()(std::vector &value) const { + PADDLE_ENFORCE(value.size() == lower_bound_.size(), "equal check fails."); + for (size_t i = 0; i < value.size(); ++i) { + PADDLE_ENFORCE(value[i] >= lower_bound_[i], "larger_than check fails."); + } + } + + private: + std::vector lower_bound_; + }; }; } // namespace operators } // namespace paddle From f6e69d74124995fd0dac30b6d7d3717ef2a9241a Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Mon, 25 Sep 2017 23:10:03 +0800 Subject: [PATCH 034/342] fix maxpool backward functor --- paddle/operators/math/pool_test_maxPool2d.cc | 22 ++++++++++++-------- paddle/operators/math/pool_test_maxPool3d.cc | 21 +++++++++++-------- paddle/operators/math/pooling.cc | 4 ++-- paddle/operators/pool_op.h | 12 ++++------- 4 files changed, 31 insertions(+), 28 deletions(-) diff --git a/paddle/operators/math/pool_test_maxPool2d.cc b/paddle/operators/math/pool_test_maxPool2d.cc index 8ddf1d79c7..63b6b5657b 100644 --- a/paddle/operators/math/pool_test_maxPool2d.cc +++ b/paddle/operators/math/pool_test_maxPool2d.cc @@ -23,16 +23,17 @@ #ifndef PADDLE_ONLY_CPU -template -void testPool2d(paddle::platform::DeviceContext& context, PooType pool_process, - paddle::framework::Tensor& input, +template +void testPool2d(paddle::platform::DeviceContext& context, PoolType pool_process, + PoolGradType poolGrad_process, paddle::framework::Tensor& input, paddle::framework::Tensor& input_grad, paddle::framework::Tensor& output, paddle::framework::Tensor& output_grad, std::vector& ksize, std::vector& strides, std::vector& paddings) { paddle::operators::math::Pool2dForwardFunctor + PoolType, float> pool2d_forward; + pool2d_forward(context, input, output, ksize, strides, paddings, pool_process); @@ -44,10 +45,11 @@ void testPool2d(paddle::platform::DeviceContext& context, PooType pool_process, start = clock(); for (int i = 0; i < times; ++i) { paddle::operators::math::Pool2dBackwardFunctor + PoolGradType, float> pool2d_backward; pool2d_backward(context, input, input_grad, output, output_grad, ksize, - strides, paddings, pool_process); + strides, paddings, poolGrad_process); + PADDLE_ENFORCE(cudaStreamSynchronize(0), "cudaStreamSynchronize failed in pool2d_backward CopyFrom"); } @@ -136,10 +138,12 @@ void test2dPool() { paddle::platform::DeviceContext* context = new paddle::platform::CUDADeviceContext(paddle::platform::GPUPlace()); paddle::operators::math::pool::maxPool pool_process; + paddle::operators::math::pool::maxPoolGrad poolGrad_process; - testPool2d>( - *context, pool_process, input, input_grad, output, output_grad, ksize, - strides, paddings); + testPool2d, + paddle::operators::math::pool::maxPoolGrad>( + *context, pool_process, poolGrad_process, input, input_grad, output, + output_grad, ksize, strides, paddings); } int main() { diff --git a/paddle/operators/math/pool_test_maxPool3d.cc b/paddle/operators/math/pool_test_maxPool3d.cc index 000b006a12..ce572164c4 100644 --- a/paddle/operators/math/pool_test_maxPool3d.cc +++ b/paddle/operators/math/pool_test_maxPool3d.cc @@ -23,15 +23,15 @@ #ifndef PADDLE_ONLY_CPU -template -void testPool3d(paddle::platform::DeviceContext& context, PooType pool_process, - paddle::framework::Tensor& input, +template +void testPool3d(paddle::platform::DeviceContext& context, PoolType pool_process, + PoolGradType poolGrad_process, paddle::framework::Tensor& input, paddle::framework::Tensor& input_grad, paddle::framework::Tensor& output, paddle::framework::Tensor& output_grad, std::vector& ksize, std::vector& strides, std::vector& paddings) { paddle::operators::math::Pool3dForwardFunctor + PoolType, float> pool3d_forward; pool3d_forward(context, input, output, ksize, strides, paddings, pool_process); @@ -44,10 +44,10 @@ void testPool3d(paddle::platform::DeviceContext& context, PooType pool_process, start = clock(); for (int i = 0; i < times; ++i) { paddle::operators::math::Pool3dBackwardFunctor + PoolGradType, float> pool3d_backward; pool3d_backward(context, input, input_grad, output, output_grad, ksize, - strides, paddings, pool_process); + strides, paddings, poolGrad_process); PADDLE_ENFORCE(cudaStreamSynchronize(0), "cudaStreamSynchronize failed in pool3d_backward CopyFrom"); } @@ -145,9 +145,12 @@ void test3dPool() { new paddle::platform::CUDADeviceContext(paddle::platform::GPUPlace()); paddle::operators::math::pool::maxPool pool_process; - testPool3d>( - *context, pool_process, input, input_grad, output, output_grad, ksize, - strides, paddings); + paddle::operators::math::pool::maxPoolGrad poolGrad_process; + + testPool3d, + paddle::operators::math::pool::maxPoolGrad>( + *context, pool_process, poolGrad_process, input, input_grad, output, + output_grad, ksize, strides, paddings); } int main() { test3dPool(); } diff --git a/paddle/operators/math/pooling.cc b/paddle/operators/math/pooling.cc index 6539b80678..cb1f9d7285 100644 --- a/paddle/operators/math/pooling.cc +++ b/paddle/operators/math/pooling.cc @@ -196,7 +196,7 @@ class MaxPool2dBackwardFunctor { }; template class MaxPool2dBackwardFunctor; -template class MaxPool2dBackwardFunctor; +// template class MaxPool2dBackwardFunctor; template class Pool2dForwardFunctor< platform::CPUPlace, paddle::operators::math::pool::maxPool, float>; @@ -443,7 +443,7 @@ class MaxPool3dBackwardFunctor { }; template class MaxPool3dBackwardFunctor; -template class MaxPool3dBackwardFunctor; +// template class MaxPool3dBackwardFunctor; template class Pool3dForwardFunctor< platform::CPUPlace, paddle::operators::math::pool::maxPool, float>; diff --git a/paddle/operators/pool_op.h b/paddle/operators/pool_op.h index 6f1d05272c..763103d884 100644 --- a/paddle/operators/pool_op.h +++ b/paddle/operators/pool_op.h @@ -111,12 +111,10 @@ class PoolGradKernel : public framework::OpKernel { switch (ksize.size()) { case 2: { if (pooling_type == "max") { - paddle::operators::math::Pool2dBackwardFunctor< - Place, paddle::operators::math::pool::maxPoolGrad, T> + paddle::operators::math::MaxPool2dBackwardFunctor pool2d_backward; - paddle::operators::math::pool::maxPoolGrad pool_process; pool2d_backward(context.device_context(), *in_X, *in_X_grad, *out, - *out_grad, ksize, strides, paddings, pool_process); + *out_grad, ksize, strides, paddings); } else if (pooling_type == "avg") { paddle::operators::math::Pool2dBackwardFunctor< Place, paddle::operators::math::pool::avgPoolGrad, T> @@ -128,12 +126,10 @@ class PoolGradKernel : public framework::OpKernel { } break; case 3: { if (pooling_type == "max") { - paddle::operators::math::Pool3dBackwardFunctor< - Place, paddle::operators::math::pool::maxPoolGrad, T> + paddle::operators::math::MaxPool3dBackwardFunctor pool3d_backward; - paddle::operators::math::pool::maxPoolGrad pool_process; pool3d_backward(context.device_context(), *in_X, *in_X_grad, *out, - *out_grad, ksize, strides, paddings, pool_process); + *out_grad, ksize, strides, paddings); } else if (pooling_type == "avg") { paddle::operators::math::Pool3dBackwardFunctor< Place, paddle::operators::math::pool::avgPoolGrad, T> From 477a6a0978063501051d171038d7993d3d27022a Mon Sep 17 00:00:00 2001 From: guosheng Date: Mon, 25 Sep 2017 16:07:25 +0800 Subject: [PATCH 035/342] Refine reduce_op, follow comments and remove ReduceGradEigenFreeKernel --- paddle/operators/reduce_op.cc | 16 ++++-- paddle/operators/reduce_op.h | 102 +++++++++------------------------- 2 files changed, 38 insertions(+), 80 deletions(-) diff --git a/paddle/operators/reduce_op.cc b/paddle/operators/reduce_op.cc index 89f54fe74b..61b33d4bbd 100644 --- a/paddle/operators/reduce_op.cc +++ b/paddle/operators/reduce_op.cc @@ -18,7 +18,6 @@ namespace paddle { namespace operators { using framework::Tensor; -using framework::LoDTensor; class ReduceOp : public framework::OperatorWithKernel { public: @@ -46,7 +45,11 @@ class ReduceOp : public framework::OperatorWithKernel { dims_vector.erase(dims_vector.begin() + dim); } auto out_dims = framework::make_ddim(dims_vector); - ctx.Output("Out")->Resize(out_dims); + ctx.Output("Out")->Resize(out_dims); + if (dim != 0) { + // Only pass LoD when not reducing on the first dim + ctx.ShareLoD("X", /*->*/ "Out"); + } } }; @@ -81,9 +84,12 @@ class ReduceOpMaker : public framework::OpProtoAndCheckerMaker { "X", "(Tensor) The input tensor. Tensors with rank at most 6 are supported"); AddOutput("Out", "(Tensor) The result tensor."); - AddAttr("dim", - "(int, default 0) The dimension to reduce. " - "Must be in the range [-rank(input), rank(input))") + AddAttr( + "dim", + "(int, default 1) The dimension to reduce. " + "Must be in the range [-rank(input), rank(input)). " + "If `dim < 0`, the dim to reduce is `rank + dim`. " + "Noting that reducing on the first dim will make the LoD info lost.") .SetDefault(0); AddAttr("keep_dim", "(bool, default false) " diff --git a/paddle/operators/reduce_op.h b/paddle/operators/reduce_op.h index 972bd7bd46..2fbf94e34f 100644 --- a/paddle/operators/reduce_op.h +++ b/paddle/operators/reduce_op.h @@ -80,6 +80,8 @@ struct MaxOrMinGradFunctor { auto equals = x == y.broadcast(dim); auto ones = dx.constant(1); auto zeros = dx.constant(0); + // If there are multiple minimum or maximum elements, the subgradient of + // each is the set [0, 1], and we pass gradient to all of them here. dx.device(place) = dy.broadcast(dim) * equals.select(ones, zeros); } }; @@ -145,102 +147,52 @@ class ReduceGradKernel : public framework::OpKernel { int rank = context.Input("X")->dims().size(); switch (rank) { case 1: - ReduceCompute<1>(context); + ReduceGradCompute<1>(context); break; case 2: - ReduceCompute<2>(context); + ReduceGradCompute<2>(context); break; case 3: - ReduceCompute<3>(context); + ReduceGradCompute<3>(context); break; case 4: - ReduceCompute<4>(context); + ReduceGradCompute<4>(context); break; case 5: - ReduceCompute<5>(context); + ReduceGradCompute<5>(context); break; case 6: - ReduceCompute<6>(context); + ReduceGradCompute<6>(context); break; } } private: template - void ReduceCompute(const framework::ExecutionContext& context) const { + void ReduceGradCompute(const framework::ExecutionContext& context) const { auto* input0 = context.Input("X"); auto* input1 = context.Input("Out"); auto* input2 = context.Input(framework::GradVarName("Out")); auto* output = context.Output(framework::GradVarName("X")); - if (output != nullptr) { - output->mutable_data(context.GetPlace()); - auto x = EigenTensor::From(*input0); - auto x_grad = EigenTensor::From(*output); - auto x_rank = static_cast(x.dimensions().size()); - int dim = static_cast(context.Attr("dim")); - if (dim < 0) dim = x_rank + dim; - DDim dims = input0->dims(); - dims[dim] = 1; - auto x_reduce = EigenTensor::From(*input1, dims); - auto x_reduce_grad = EigenTensor::From(*input2, dims); - - Eigen::array braodcast_dim; - for (size_t i = 0; i < D; ++i) braodcast_dim[i] = 1; - braodcast_dim[dim] = input0->dims()[dim]; - auto& place = context.GetEigenDevice(); - Functor functor; - functor(place, x, x_reduce, x_grad, x_reduce_grad, braodcast_dim, - braodcast_dim[dim]); - } - } -}; - -// For EigenTensor unsupported reduce -template -class ReduceGradEigenFreeKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext& context) const override { - auto* x = context.Input("X"); - auto* out = context.Input("Out"); - auto* x_grad = context.Output(framework::GradVarName("X")); - auto* out_grad = context.Input(framework::GradVarName("Out")); - if (x_grad != nullptr) { - DDim dims = x->dims(); - int rank = dims.size(); - int dim = static_cast(context.Attr("dim")); - if (dim < 0) dim = rank + dim; - - auto* x_data = x->data(); - auto* x_grad_data = x_grad->mutable_data(context.GetPlace()); - auto* out_data = out->data(); - auto* out_grad_data = out_grad->data(); - - int outer_count = 1; - int inner_count = 1; - int mid_count = dims[dim]; - for (int i = 0; i < dim; ++i) { - outer_count *= dims[i]; - } - for (int i = dim + 1; i < rank; ++i) { - inner_count *= dims[i]; - } - - int x_offset = 0; // offset on raw data - int out_offset = 0; // offset on reduced data - Functor functor; - for (int i = 0; i < outer_count; ++i) { - for (int j = 0; j < inner_count; ++j) { - out_offset = inner_count * i + j; - for (int k = 0; k < mid_count; ++k) { - x_offset = (inner_count * mid_count) * i + inner_count * k + j; - functor(x_data + x_offset, out_data + out_offset, - x_grad_data + x_offset, out_grad_data + out_offset, - mid_count); - } - } - } - } + output->mutable_data(context.GetPlace()); + auto x = EigenTensor::From(*input0); + auto x_grad = EigenTensor::From(*output); + auto x_rank = static_cast(x.dimensions().size()); + int dim = static_cast(context.Attr("dim")); + if (dim < 0) dim = x_rank + dim; + DDim dims = input0->dims(); + dims[dim] = 1; + auto x_reduce = EigenTensor::From(*input1, dims); + auto x_reduce_grad = EigenTensor::From(*input2, dims); + + Eigen::array braodcast_dim; + for (size_t i = 0; i < D; ++i) braodcast_dim[i] = 1; + braodcast_dim[dim] = input0->dims()[dim]; + auto& place = context.GetEigenDevice(); + Functor functor; + functor(place, x, x_reduce, x_grad, x_reduce_grad, braodcast_dim, + braodcast_dim[dim]); } }; From dfc8d3c1c172362afe4cf8fd7ad317cec435ee13 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Tue, 26 Sep 2017 14:25:35 +0800 Subject: [PATCH 036/342] Fix (According to the review) --- paddle/operators/math/CMakeLists.txt | 2 - paddle/operators/math/pool_test_maxPool2d.cc | 154 ------------------ paddle/operators/math/pool_test_maxPool3d.cc | 157 ------------------- paddle/operators/math/pooling.cc | 129 +++++++-------- paddle/operators/math/pooling.cu | 144 ++++++++--------- paddle/operators/math/pooling.h | 35 ++--- paddle/operators/pool_op.cc | 46 +++--- paddle/operators/pool_op.cu | 21 +-- paddle/operators/pool_op.h | 73 ++++----- 9 files changed, 216 insertions(+), 545 deletions(-) delete mode 100644 paddle/operators/math/pool_test_maxPool2d.cc delete mode 100644 paddle/operators/math/pool_test_maxPool3d.cc diff --git a/paddle/operators/math/CMakeLists.txt b/paddle/operators/math/CMakeLists.txt index 1233a9ea32..185708cdaa 100644 --- a/paddle/operators/math/CMakeLists.txt +++ b/paddle/operators/math/CMakeLists.txt @@ -7,5 +7,3 @@ endif() nv_test(math_function_test SRCS math_function_test.cc DEPS math_function tensor) cc_test(im2col_test SRCS im2col_test.cc DEPS math_function tensor) -cc_test(pool_test_maxPool2d_test SRCS pool_test_maxPool2d.cc DEPS math_function tensor) -cc_test(pool_test_maxPool3d_test SRCS pool_test_maxPool3d.cc DEPS math_function tensor) diff --git a/paddle/operators/math/pool_test_maxPool2d.cc b/paddle/operators/math/pool_test_maxPool2d.cc deleted file mode 100644 index 63b6b5657b..0000000000 --- a/paddle/operators/math/pool_test_maxPool2d.cc +++ /dev/null @@ -1,154 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. */ - -#include -#include "paddle/operators/math/pooling.h" - -#include "paddle/memory/memcpy.h" -#include "paddle/platform/enforce.h" - -#include -#include - -#ifndef PADDLE_ONLY_CPU - -template -void testPool2d(paddle::platform::DeviceContext& context, PoolType pool_process, - PoolGradType poolGrad_process, paddle::framework::Tensor& input, - paddle::framework::Tensor& input_grad, - paddle::framework::Tensor& output, - paddle::framework::Tensor& output_grad, std::vector& ksize, - std::vector& strides, std::vector& paddings) { - paddle::operators::math::Pool2dForwardFunctor - pool2d_forward; - - pool2d_forward(context, input, output, ksize, strides, paddings, - pool_process); - - int times = 50; - clock_t start, finish; - double totaltime; - - // Pool2dBackwardFunctor - start = clock(); - for (int i = 0; i < times; ++i) { - paddle::operators::math::Pool2dBackwardFunctor - pool2d_backward; - pool2d_backward(context, input, input_grad, output, output_grad, ksize, - strides, paddings, poolGrad_process); - - PADDLE_ENFORCE(cudaStreamSynchronize(0), - "cudaStreamSynchronize failed in pool2d_backward CopyFrom"); - } - finish = clock(); - totaltime = (double)(finish - start) / CLOCKS_PER_SEC; - totaltime /= times; - std::cout << "\nPool3dBackwardFunctor: " << totaltime << "s" << std::endl; - - // MaxPool3dBackwardFunctor - start = clock(); - for (int j = 0; j < times; ++j) { - paddle::operators::math::MaxPool2dBackwardFunctor< - paddle::platform::GPUPlace, float> - maxpool2d_backward; - maxpool2d_backward(context, input, input_grad, output, output_grad, ksize, - strides, paddings); - PADDLE_ENFORCE( - cudaStreamSynchronize(0), - "cudaStreamSynchronize failed in maxpool2d_backward CopyFrom"); - } - finish = clock(); - totaltime = (double)(finish - start) / CLOCKS_PER_SEC; - totaltime /= times; - std::cout << "\nMaxPool3dBackwardFunctor: " << totaltime << "s" << std::endl; -} - -void test2dPool() { - using paddle::platform::DeviceContext; - using paddle::platform::CUDADeviceContext; - using paddle::platform::GPUPlace; - - paddle::framework::Tensor input_tmp; - paddle::framework::Tensor output_tmp; - paddle::framework::Tensor input; - paddle::framework::Tensor input_grad; - paddle::framework::Tensor output; - paddle::framework::Tensor output_grad; - - int batch = 32; - int channel = 32; - int input_height = 128; - int input_width = 128; - int in_len = batch * channel * input_height * input_width; - std::vector ksize({3, 3}); - std::vector strides({1, 1}); - std::vector paddings({0, 0}); - - int output_height = - (input_height - ksize[0] + 2 * paddings[0]) / strides[0] + 1; - int output_width = - (input_width - ksize[1] + 2 * paddings[1]) / strides[1] + 1; - int output_len = output_height * output_width; - - input_tmp.mutable_data({batch, channel, input_height, input_width}, - paddle::platform::CPUPlace()); - output_tmp.mutable_data({batch, channel, output_height, output_width}, - paddle::platform::CPUPlace()); - - float* arr = new float[in_len]; - auto* place = new paddle::platform::GPUPlace(); - - float* input_ptr = input_tmp.data(); - for (int i = 0; i < in_len; ++i) arr[i] = i; // rand() / double(RAND_MAX/2); - memcpy(input_ptr, arr, in_len * sizeof(float)); - input.CopyFrom(input_tmp, *place); - - input_ptr = input_tmp.data(); - for (int i = 0; i < in_len; ++i) arr[i] = 0; - memcpy(input_ptr, arr, in_len * sizeof(float)); - input_grad.CopyFrom(input_tmp, *place); - - // output - input_ptr = output_tmp.data(); - for (int i = 0; i < output_len; ++i) - arr[i] = 0; // rand() / double(RAND_MAX/2); - memcpy(input_ptr, arr, output_len * sizeof(float)); - output.CopyFrom(input_tmp, *place); - - // output - input_ptr = output_tmp.data(); - for (int i = 0; i < output_len; ++i) - arr[i] = 1; // rand() / double(RAND_MAX/2); - memcpy(input_ptr, arr, output_len * sizeof(float)); - output_grad.CopyFrom(input_tmp, *place); - - paddle::platform::DeviceContext* context = - new paddle::platform::CUDADeviceContext(paddle::platform::GPUPlace()); - paddle::operators::math::pool::maxPool pool_process; - paddle::operators::math::pool::maxPoolGrad poolGrad_process; - - testPool2d, - paddle::operators::math::pool::maxPoolGrad>( - *context, pool_process, poolGrad_process, input, input_grad, output, - output_grad, ksize, strides, paddings); -} - -int main() { - // testPool3d(); - test2dPool(); - // testPool3d(); -} -#endif \ No newline at end of file diff --git a/paddle/operators/math/pool_test_maxPool3d.cc b/paddle/operators/math/pool_test_maxPool3d.cc deleted file mode 100644 index ce572164c4..0000000000 --- a/paddle/operators/math/pool_test_maxPool3d.cc +++ /dev/null @@ -1,157 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. */ - -#include -#include "paddle/operators/math/pooling.h" - -#include "paddle/memory/memcpy.h" -#include "paddle/platform/enforce.h" - -#include -#include - -#ifndef PADDLE_ONLY_CPU - -template -void testPool3d(paddle::platform::DeviceContext& context, PoolType pool_process, - PoolGradType poolGrad_process, paddle::framework::Tensor& input, - paddle::framework::Tensor& input_grad, - paddle::framework::Tensor& output, - paddle::framework::Tensor& output_grad, std::vector& ksize, - std::vector& strides, std::vector& paddings) { - paddle::operators::math::Pool3dForwardFunctor - pool3d_forward; - pool3d_forward(context, input, output, ksize, strides, paddings, - pool_process); - - int times = 50; - clock_t start, finish; - double totaltime; - - // Pool3dBackwardFunctor - start = clock(); - for (int i = 0; i < times; ++i) { - paddle::operators::math::Pool3dBackwardFunctor - pool3d_backward; - pool3d_backward(context, input, input_grad, output, output_grad, ksize, - strides, paddings, poolGrad_process); - PADDLE_ENFORCE(cudaStreamSynchronize(0), - "cudaStreamSynchronize failed in pool3d_backward CopyFrom"); - } - finish = clock(); - totaltime = (double)(finish - start) / CLOCKS_PER_SEC; - totaltime /= times; - std::cout << "\nPool3dBackwardFunctor: " << totaltime << "s" << std::endl; - - // MaxPool3dBackwardFunctor - start = clock(); - for (int j = 0; j < times; ++j) { - paddle::operators::math::MaxPool3dBackwardFunctor< - paddle::platform::GPUPlace, float> - maxpool3d_backward; - maxpool3d_backward(context, input, input_grad, output, output_grad, ksize, - strides, paddings); - PADDLE_ENFORCE( - cudaStreamSynchronize(0), - "cudaStreamSynchronize failed in maxpool3d_backward CopyFrom"); - } - finish = clock(); - totaltime = (double)(finish - start) / CLOCKS_PER_SEC; - totaltime /= times; - std::cout << "\nMaxPool3dBackwardFunctor: " << totaltime << "s" << std::endl; -} - -void test3dPool() { - using paddle::platform::DeviceContext; - using paddle::platform::CUDADeviceContext; - using paddle::platform::GPUPlace; - - paddle::framework::Tensor input_tmp; - paddle::framework::Tensor output_tmp; - paddle::framework::Tensor input; - paddle::framework::Tensor input_grad; - paddle::framework::Tensor output; - paddle::framework::Tensor output_grad; - - int batch = 32; - int channel = 4; - int input_depth = 4; - int input_height = 128; - int input_width = 128; - int in_len = batch * channel * input_depth * input_height * input_width; - std::vector ksize({3, 3, 3}); - std::vector strides({2, 2, 2}); - std::vector paddings({1, 1, 1}); - - int output_depth = - (input_depth - ksize[0] + 2 * paddings[0]) / strides[0] + 1; - int output_height = - (input_height - ksize[1] + 2 * paddings[1]) / strides[1] + 1; - int output_width = - (input_width - ksize[2] + 2 * paddings[2]) / strides[2] + 1; - - int output_len = output_depth * output_height * output_width; - - input_tmp.mutable_data( - {batch, channel, input_depth, input_height, input_width}, - paddle::platform::CPUPlace()); - output_tmp.mutable_data( - {batch, channel, output_depth, output_height, output_width}, - paddle::platform::CPUPlace()); - - float* arr = new float[in_len]; - auto* place = new paddle::platform::GPUPlace(); - - // input - float* input_ptr = input_tmp.data(); - for (int i = 0; i < in_len; ++i) arr[i] = i; // rand() / double(RAND_MAX/2); - memcpy(input_ptr, arr, in_len * sizeof(float)); - input.CopyFrom(input_tmp, *place); - - // input_grad - input_ptr = input_tmp.data(); - for (int i = 0; i < in_len; ++i) arr[i] = 0; - memcpy(input_ptr, arr, in_len * sizeof(float)); - input_grad.CopyFrom(input_tmp, *place); - - // output - input_ptr = output_tmp.data(); - for (int i = 0; i < output_len; ++i) - arr[i] = 0; // rand() / double(RAND_MAX/2); - memcpy(input_ptr, arr, output_len * sizeof(float)); - output.CopyFrom(input_tmp, *place); - - // output_grad - input_ptr = output_tmp.data(); - for (int i = 0; i < output_len; ++i) - arr[i] = 1; // rand() / double(RAND_MAX/2); - memcpy(input_ptr, arr, output_len * sizeof(float)); - output_grad.CopyFrom(input_tmp, *place); - - paddle::platform::DeviceContext* context = - new paddle::platform::CUDADeviceContext(paddle::platform::GPUPlace()); - paddle::operators::math::pool::maxPool pool_process; - - paddle::operators::math::pool::maxPoolGrad poolGrad_process; - - testPool3d, - paddle::operators::math::pool::maxPoolGrad>( - *context, pool_process, poolGrad_process, input, input_grad, output, - output_grad, ksize, strides, paddings); -} - -int main() { test3dPool(); } -#endif \ No newline at end of file diff --git a/paddle/operators/math/pooling.cc b/paddle/operators/math/pooling.cc index cb1f9d7285..1918c8b169 100644 --- a/paddle/operators/math/pooling.cc +++ b/paddle/operators/math/pooling.cc @@ -19,12 +19,12 @@ namespace operators { namespace math { template -class Pool2dForwardFunctor { +class Pool2dFunctor { public: void operator()(const platform::DeviceContext& context, const framework::Tensor& input, framework::Tensor& output, std::vector& ksize, std::vector& strides, - std::vector& paddings, PoolProcess pool_process) { + std::vector& paddings, PoolProcess pool_compute) { const int batch_size = input.dims()[0]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; @@ -54,14 +54,14 @@ class Pool2dForwardFunctor { int wstart = pw * stride_width - padding_width; int wend = std::min(wstart + ksize_width, input_width); wstart = std::max(wstart, 0); - T ele = pool_process.initial(); + T ele = pool_compute.initial(); for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { - pool_process.process(ele, input_data[h * input_width + w]); + pool_compute.compute(ele, input_data[h * input_width + w]); } } int pool_size = (hend - hstart) * (wend - wstart); - pool_process.finalize(ele, (static_cast(pool_size))); + pool_compute.finalize(ele, (static_cast(pool_size))); output_data[ph * output_width + pw] = ele; } } @@ -73,14 +73,14 @@ class Pool2dForwardFunctor { }; template -class Pool2dBackwardFunctor { +class Pool2dGradFunctor { public: void operator()(const platform::DeviceContext& context, const framework::Tensor& input, framework::Tensor& input_grad, const framework::Tensor& output, const framework::Tensor& output_grad, std::vector& ksize, std::vector& strides, std::vector& paddings, - PoolProcess pool_process) { + PoolProcess pool_compute) { const int batch_size = input.dims()[0]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; @@ -115,12 +115,11 @@ class Pool2dBackwardFunctor { float scale = 1.0 / pool_size; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { - pool_process.gradProcess( - input_data[h * input_width + w], - output_data[ph * output_width + pw], - output_grad_data[ph * output_width + pw], - input_grad_data[h * input_width + w], - static_cast(scale)); + pool_compute.compute(input_data[h * input_width + w], + output_data[ph * output_width + pw], + output_grad_data[ph * output_width + pw], + input_grad_data[h * input_width + w], + static_cast(scale)); } } } @@ -135,7 +134,7 @@ class Pool2dBackwardFunctor { }; template -class MaxPool2dBackwardFunctor { +class MaxPool2dGradFunctor { public: void operator()(const platform::DeviceContext& context, const framework::Tensor& input, framework::Tensor& input_grad, @@ -195,37 +194,33 @@ class MaxPool2dBackwardFunctor { } }; -template class MaxPool2dBackwardFunctor; -// template class MaxPool2dBackwardFunctor; - -template class Pool2dForwardFunctor< - platform::CPUPlace, paddle::operators::math::pool::maxPool, float>; -template class Pool2dForwardFunctor< - platform::CPUPlace, paddle::operators::math::pool::avgPool, float>; -template class Pool2dBackwardFunctor< - platform::CPUPlace, paddle::operators::math::pool::maxPoolGrad, - float>; -template class Pool2dBackwardFunctor< - platform::CPUPlace, paddle::operators::math::pool::avgPoolGrad, - float>; -template class Pool2dForwardFunctor< - platform::CPUPlace, paddle::operators::math::pool::maxPool, double>; -template class Pool2dForwardFunctor< - platform::CPUPlace, paddle::operators::math::pool::avgPool, double>; -template class Pool2dBackwardFunctor< - platform::CPUPlace, paddle::operators::math::pool::maxPoolGrad, - double>; -template class Pool2dBackwardFunctor< - platform::CPUPlace, paddle::operators::math::pool::avgPoolGrad, - double>; +template class MaxPool2dGradFunctor; +// template class MaxPool2dGradFunctor; + +template class Pool2dFunctor, float>; +template class Pool2dFunctor, float>; +template class Pool2dGradFunctor< + platform::CPUPlace, paddle::operators::math::maxPoolGrad, float>; +template class Pool2dGradFunctor< + platform::CPUPlace, paddle::operators::math::avgPoolGrad, float>; +template class Pool2dFunctor, double>; +template class Pool2dFunctor, double>; +template class Pool2dGradFunctor< + platform::CPUPlace, paddle::operators::math::maxPoolGrad, double>; +template class Pool2dGradFunctor< + platform::CPUPlace, paddle::operators::math::avgPoolGrad, double>; template -class Pool3dForwardFunctor { +class Pool3dFunctor { public: void operator()(const platform::DeviceContext& context, const framework::Tensor& input, framework::Tensor& output, std::vector& ksize, std::vector& strides, - std::vector& paddings, PoolProcess pool_process) { + std::vector& paddings, PoolProcess pool_compute) { const int batch_size = input.dims()[0]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; @@ -265,11 +260,11 @@ class Pool3dForwardFunctor { int wend = std::min(wstart + ksize_width, input_width); wstart = std::max(wstart, 0); int output_idx = (pd * output_height + ph) * output_width + pw; - T ele = pool_process.initial(); + T ele = pool_compute.initial(); for (int d = dstart; d < dend; ++d) { for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { - pool_process.process( + pool_compute.compute( ele, input_data[(d * input_height + h) * input_width + w]); } @@ -277,7 +272,7 @@ class Pool3dForwardFunctor { } int pool_size = (dend - dstart) * (hend - hstart) * (wend - wstart); - pool_process.finalize(ele, static_cast(pool_size)); + pool_compute.finalize(ele, static_cast(pool_size)); output_data[output_idx] = ele; } } @@ -290,14 +285,14 @@ class Pool3dForwardFunctor { }; template -class Pool3dBackwardFunctor { +class Pool3dGradFunctor { public: void operator()(const platform::DeviceContext& context, const framework::Tensor& input, framework::Tensor& input_grad, const framework::Tensor& output, const framework::Tensor& output_grad, std::vector& ksize, std::vector& strides, std::vector& paddings, - PoolProcess pool_process) { + PoolProcess pool_compute) { const int batch_size = input.dims()[0]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; @@ -348,7 +343,7 @@ class Pool3dBackwardFunctor { int input_idx = (d * input_height + h) * input_width + w; int output_idx = (pd * output_height + ph) * output_width + pw; - pool_process.gradProcess( + pool_compute.compute( input_data[input_idx], output_data[output_idx], output_grad_data[output_idx], input_grad_data[input_idx], static_cast(scale)); @@ -368,7 +363,7 @@ class Pool3dBackwardFunctor { }; template -class MaxPool3dBackwardFunctor { +class MaxPool3dGradFunctor { public: void operator()(const platform::DeviceContext& context, const framework::Tensor& input, framework::Tensor& input_grad, @@ -442,29 +437,25 @@ class MaxPool3dBackwardFunctor { } }; -template class MaxPool3dBackwardFunctor; -// template class MaxPool3dBackwardFunctor; - -template class Pool3dForwardFunctor< - platform::CPUPlace, paddle::operators::math::pool::maxPool, float>; -template class Pool3dForwardFunctor< - platform::CPUPlace, paddle::operators::math::pool::avgPool, float>; -template class Pool3dBackwardFunctor< - platform::CPUPlace, paddle::operators::math::pool::maxPoolGrad, - float>; -template class Pool3dBackwardFunctor< - platform::CPUPlace, paddle::operators::math::pool::avgPoolGrad, - float>; -template class Pool3dForwardFunctor< - platform::CPUPlace, paddle::operators::math::pool::maxPool, double>; -template class Pool3dForwardFunctor< - platform::CPUPlace, paddle::operators::math::pool::avgPool, double>; -template class Pool3dBackwardFunctor< - platform::CPUPlace, paddle::operators::math::pool::maxPoolGrad, - double>; -template class Pool3dBackwardFunctor< - platform::CPUPlace, paddle::operators::math::pool::avgPoolGrad, - double>; +template class MaxPool3dGradFunctor; +// template class MaxPool3dGradFunctor; + +template class Pool3dFunctor, float>; +template class Pool3dFunctor, float>; +template class Pool3dGradFunctor< + platform::CPUPlace, paddle::operators::math::maxPoolGrad, float>; +template class Pool3dGradFunctor< + platform::CPUPlace, paddle::operators::math::avgPoolGrad, float>; +template class Pool3dFunctor, double>; +template class Pool3dFunctor, double>; +template class Pool3dGradFunctor< + platform::CPUPlace, paddle::operators::math::maxPoolGrad, double>; +template class Pool3dGradFunctor< + platform::CPUPlace, paddle::operators::math::avgPoolGrad, double>; } // namespace math } // namespace operators } // namespace paddle diff --git a/paddle/operators/math/pooling.cu b/paddle/operators/math/pooling.cu index f41d80f77f..4164920035 100644 --- a/paddle/operators/math/pooling.cu +++ b/paddle/operators/math/pooling.cu @@ -25,7 +25,7 @@ __global__ void KernelPool2dForward( const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, - const int padding_width, PoolProcess pool_process) { + const int padding_width, PoolProcess pool_compute) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < nthreads) { int pw = index % output_width; @@ -42,14 +42,14 @@ __global__ void KernelPool2dForward( wstart = max(wstart, 0); input_data += (batch_idx * channels + c) * input_height * input_width; - T ele = pool_process.initial(); + T ele = pool_compute.initial(); for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { - pool_process.process(ele, input_data[h * input_width + w]); + pool_compute.compute(ele, input_data[h * input_width + w]); } } int pool_size = (hend - hstart) * (wend - wstart); - pool_process.finalize(ele, (static_cast(pool_size))); + pool_compute.finalize(ele, (static_cast(pool_size))); output_data[index] = ele; } } @@ -61,7 +61,7 @@ __global__ void KernelPool2dBackward( const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, - const int padding_width, PoolProcess pool_process) { + const int padding_width, PoolProcess pool_compute) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < nthreads) { int offsetW = index % input_width + padding_width; @@ -93,9 +93,9 @@ __global__ void KernelPool2dBackward( wstart = max(wstart, 0); int pool_size = (hend - hstart) * (wend - wstart); int output_sub_idx = ph * output_width + pw; - pool_process.gradProcess(input, output_data[output_sub_idx], - output_grad[output_sub_idx], gradient, - static_cast(1.0 / pool_size)); + pool_compute.compute(input, output_data[output_sub_idx], + output_grad[output_sub_idx], gradient, + static_cast(1.0 / pool_size)); } } input_grad[index] = gradient; @@ -148,12 +148,12 @@ __global__ void KernelMaxPool2dBackward( } template -class Pool2dForwardFunctor { +class Pool2dFunctor { public: void operator()(const platform::DeviceContext& context, const framework::Tensor& input, framework::Tensor& output, std::vector& ksize, std::vector& strides, - std::vector& paddings, PoolProcess pool_process) { + std::vector& paddings, PoolProcess pool_compute) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; @@ -184,19 +184,19 @@ class Pool2dForwardFunctor { input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, - padding_width, pool_process); + padding_width, pool_compute); } }; template -class Pool2dBackwardFunctor { +class Pool2dGradFunctor { public: void operator()(const platform::DeviceContext& context, const framework::Tensor& input, framework::Tensor& input_grad, const framework::Tensor& output, const framework::Tensor& output_grad, std::vector& ksize, std::vector& strides, std::vector& paddings, - PoolProcess pool_process) { + PoolProcess pool_compute) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; @@ -228,12 +228,12 @@ class Pool2dBackwardFunctor { nthreads, input_data, output_data, output_grad_data, input_grad_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, - padding_width, pool_process); + padding_width, pool_compute); } }; template -class MaxPool2dBackwardFunctor { +class MaxPool2dGradFunctor { public: void operator()(const platform::DeviceContext& context, const framework::Tensor& input, framework::Tensor& input_grad, @@ -275,29 +275,25 @@ class MaxPool2dBackwardFunctor { } }; -template class MaxPool2dBackwardFunctor; -// template class MaxPool2dBackwardFunctor; - -template class Pool2dForwardFunctor< - platform::GPUPlace, paddle::operators::math::pool::maxPool, float>; -template class Pool2dForwardFunctor< - platform::GPUPlace, paddle::operators::math::pool::avgPool, float>; -template class Pool2dBackwardFunctor< - platform::GPUPlace, paddle::operators::math::pool::maxPoolGrad, - float>; -template class Pool2dBackwardFunctor< - platform::GPUPlace, paddle::operators::math::pool::avgPoolGrad, - float>; -template class Pool2dForwardFunctor< - platform::GPUPlace, paddle::operators::math::pool::maxPool, double>; -template class Pool2dForwardFunctor< - platform::GPUPlace, paddle::operators::math::pool::avgPool, double>; -template class Pool2dBackwardFunctor< - platform::GPUPlace, paddle::operators::math::pool::maxPoolGrad, - double>; -template class Pool2dBackwardFunctor< - platform::GPUPlace, paddle::operators::math::pool::avgPoolGrad, - double>; +template class MaxPool2dGradFunctor; +// template class MaxPool2dGradFunctor; + +template class Pool2dFunctor, float>; +template class Pool2dFunctor, float>; +template class Pool2dGradFunctor< + platform::GPUPlace, paddle::operators::math::maxPoolGrad, float>; +template class Pool2dGradFunctor< + platform::GPUPlace, paddle::operators::math::avgPoolGrad, float>; +template class Pool2dFunctor, double>; +template class Pool2dFunctor, double>; +template class Pool2dGradFunctor< + platform::GPUPlace, paddle::operators::math::maxPoolGrad, double>; +template class Pool2dGradFunctor< + platform::GPUPlace, paddle::operators::math::avgPoolGrad, double>; template __global__ void KernelPool3DForward( @@ -307,7 +303,7 @@ __global__ void KernelPool3DForward( const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, - PoolProcess pool_process) { + PoolProcess pool_compute) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (nthreads); index += blockDim.x * gridDim.x) { int pw = index % output_width; @@ -325,19 +321,19 @@ __global__ void KernelPool3DForward( dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); - T ele = pool_process.initial(); + T ele = pool_compute.initial(); input_data += (batch_idx * channels + c) * input_depth * input_height * input_width; for (int d = dstart; d < dend; ++d) { for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { - pool_process.process( + pool_compute.compute( ele, input_data[(d * input_height + h) * input_width + w]); } } } int pool_size = (dend - dstart) * (hend - hstart) * (wend - wstart); - pool_process.finalize(ele, static_cast(pool_size)); + pool_compute.finalize(ele, static_cast(pool_size)); output_data[index] = ele; } } @@ -351,7 +347,7 @@ __global__ void KernelPool3DBackward( const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, - PoolProcess pool_process) { + PoolProcess pool_compute) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (nthreads); index += blockDim.x * gridDim.x) { int offsetW = index % input_width + padding_width; @@ -396,9 +392,9 @@ __global__ void KernelPool3DBackward( wstart = max(wstart, 0); int pool_size = (dend - dstart) * (hend - hstart) * (wend - wstart); int output_sub_idx = (pd * output_height + ph) * output_width + pw; - pool_process.gradProcess(input, output_data[output_sub_idx], - output_grad[output_sub_idx], gradient, - static_cast(1.0 / pool_size)); + pool_compute.compute(input, output_data[output_sub_idx], + output_grad[output_sub_idx], gradient, + static_cast(1.0 / pool_size)); } } } @@ -459,12 +455,12 @@ __global__ void KernelMaxPool3DBackward( } template -class Pool3dForwardFunctor { +class Pool3dFunctor { public: void operator()(const platform::DeviceContext& context, const framework::Tensor& input, framework::Tensor& output, std::vector& ksize, std::vector& strides, - std::vector& paddings, PoolProcess pool_process) { + std::vector& paddings, PoolProcess pool_compute) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; @@ -502,19 +498,19 @@ class Pool3dForwardFunctor { input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, - pool_process); + pool_compute); } }; template -class Pool3dBackwardFunctor { +class Pool3dGradFunctor { public: void operator()(const platform::DeviceContext& context, const framework::Tensor& input, framework::Tensor& input_grad, const framework::Tensor& output, const framework::Tensor& output_grad, std::vector& ksize, std::vector& strides, std::vector& paddings, - PoolProcess pool_process) { + PoolProcess pool_compute) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; @@ -554,12 +550,12 @@ class Pool3dBackwardFunctor { input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, - padding_height, padding_width, pool_process); + padding_height, padding_width, pool_compute); } }; template -class MaxPool3dBackwardFunctor { +class MaxPool3dGradFunctor { public: void operator()(const platform::DeviceContext& context, const framework::Tensor& input, framework::Tensor& input_grad, @@ -608,29 +604,25 @@ class MaxPool3dBackwardFunctor { } }; -template class MaxPool3dBackwardFunctor; -// template class MaxPool3dBackwardFunctor; - -template class Pool3dForwardFunctor< - platform::GPUPlace, paddle::operators::math::pool::maxPool, float>; -template class Pool3dForwardFunctor< - platform::GPUPlace, paddle::operators::math::pool::avgPool, float>; -template class Pool3dBackwardFunctor< - platform::GPUPlace, paddle::operators::math::pool::maxPoolGrad, - float>; -template class Pool3dBackwardFunctor< - platform::GPUPlace, paddle::operators::math::pool::avgPoolGrad, - float>; -template class Pool3dForwardFunctor< - platform::GPUPlace, paddle::operators::math::pool::maxPool, double>; -template class Pool3dForwardFunctor< - platform::GPUPlace, paddle::operators::math::pool::avgPool, double>; -template class Pool3dBackwardFunctor< - platform::GPUPlace, paddle::operators::math::pool::maxPoolGrad, - double>; -template class Pool3dBackwardFunctor< - platform::GPUPlace, paddle::operators::math::pool::avgPoolGrad, - double>; +template class MaxPool3dGradFunctor; +// template class MaxPool3dGradFunctor; + +template class Pool3dFunctor, float>; +template class Pool3dFunctor, float>; +template class Pool3dGradFunctor< + platform::GPUPlace, paddle::operators::math::maxPoolGrad, float>; +template class Pool3dGradFunctor< + platform::GPUPlace, paddle::operators::math::avgPoolGrad, float>; +template class Pool3dFunctor, double>; +template class Pool3dFunctor, double>; +template class Pool3dGradFunctor< + platform::GPUPlace, paddle::operators::math::maxPoolGrad, double>; +template class Pool3dGradFunctor< + platform::GPUPlace, paddle::operators::math::avgPoolGrad, double>; } // namespace math } // namespace operators diff --git a/paddle/operators/math/pooling.h b/paddle/operators/math/pooling.h index 2e05a5968a..cf0ab7ecae 100644 --- a/paddle/operators/math/pooling.h +++ b/paddle/operators/math/pooling.h @@ -21,17 +21,15 @@ limitations under the License. */ namespace paddle { namespace operators { namespace math { - ////////////////////// #define FLT_MAX __FLT_MAX__ ///////////////////// -namespace pool { template class maxPool { public: DEVICE inline T initial() { return static_cast(-FLT_MAX); } - DEVICE inline void process(T& y, const T& x) { y = y > x ? y : x; } + DEVICE inline void compute(T& y, const T& x) { y = y > x ? y : x; } DEVICE inline void finalize(T& y, const T& poo_size) {} }; @@ -39,14 +37,14 @@ template class avgPool { public: DEVICE inline T initial() { return static_cast(0); } - DEVICE inline void process(T& y, const T& x) { y += x; } + DEVICE inline void compute(T& y, const T& x) { y += x; } DEVICE inline void finalize(T& y, const T& poo_size) { y /= poo_size; } }; template class maxPoolGrad { public: - DEVICE inline void gradProcess(const T& x, const T& y, const T& dy, T& dx, - T scale) { + DEVICE inline void compute(const T& x, const T& y, const T& dy, T& dx, + T scale) { dx += dy * (x == y); } }; @@ -54,35 +52,34 @@ class maxPoolGrad { template class avgPoolGrad { public: - DEVICE inline void gradProcess(const T& x, const T& y, const T& dy, T& dx, - T scale) { + DEVICE inline void compute(const T& x, const T& y, const T& dy, T& dx, + T scale) { dx += (scale * dy); } }; -} // namespace pool template -class Pool2dForwardFunctor { +class Pool2dFunctor { public: void operator()(const platform::DeviceContext& context, const framework::Tensor& input, framework::Tensor& output, std::vector& ksize, std::vector& strides, - std::vector& paddings, PoolProcess pool_process); + std::vector& paddings, PoolProcess pool_compute); }; template -class Pool2dBackwardFunctor { +class Pool2dGradFunctor { public: void operator()(const platform::DeviceContext& context, const framework::Tensor& input, framework::Tensor& input_grad, const framework::Tensor& output, const framework::Tensor& output_grad, std::vector& ksize, std::vector& strides, std::vector& paddings, - PoolProcess pool_process); + PoolProcess pool_compute); }; template -class MaxPool2dBackwardFunctor { +class MaxPool2dGradFunctor { public: void operator()(const platform::DeviceContext& context, const framework::Tensor& input, framework::Tensor& input_grad, @@ -92,27 +89,27 @@ class MaxPool2dBackwardFunctor { }; template -class Pool3dForwardFunctor { +class Pool3dFunctor { public: void operator()(const platform::DeviceContext& context, const framework::Tensor& input, framework::Tensor& output, std::vector& ksize, std::vector& strides, - std::vector& paddings, PoolProcess pool_process); + std::vector& paddings, PoolProcess pool_compute); }; template -class Pool3dBackwardFunctor { +class Pool3dGradFunctor { public: void operator()(const platform::DeviceContext& context, const framework::Tensor& input, framework::Tensor& input_grad, const framework::Tensor& output, const framework::Tensor& output_grad, std::vector& ksize, std::vector& strides, std::vector& paddings, - PoolProcess pool_process); + PoolProcess pool_compute); }; template -class MaxPool3dBackwardFunctor { +class MaxPool3dGradFunctor { public: void operator()(const platform::DeviceContext& context, const framework::Tensor& input, framework::Tensor& input_grad, diff --git a/paddle/operators/pool_op.cc b/paddle/operators/pool_op.cc index 9c1b7ea41d..a5e731cc66 100644 --- a/paddle/operators/pool_op.cc +++ b/paddle/operators/pool_op.cc @@ -17,7 +17,7 @@ limitations under the License. */ namespace paddle { namespace operators { -int outputSize_pool(int input_size, int filter_size, int padding, int stride) { +int OutputSizePool(int input_size, int filter_size, int padding, int stride) { int output_size = (input_size - filter_size + 2 * padding) / stride + 1; return output_size; } @@ -33,7 +33,7 @@ class PoolOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"), "Out(Output) of Pooling should not be null."); - auto in_X = ctx.Input("X"); + auto in_x = ctx.Input("X"); auto out = ctx.Output("Out"); int global_pooling = Attr("globalPooling"); std::string pooling_type = Attr("poolingType"); @@ -43,30 +43,25 @@ class PoolOp : public framework::OperatorWithKernel { PADDLE_ENFORCE(pooling_type == "max" || pooling_type == "avg", "pooling_type should be 'max' or 'avg'"); - PADDLE_ENFORCE(in_X->dims().size() == 4 || in_X->dims().size() == 5, + PADDLE_ENFORCE(in_x->dims().size() == 4 || in_x->dims().size() == 5, "Pooling intput should be 4-D or 5-D"); + PADDLE_ENFORCE(ksize.size() == 2 || ksize.size() == 3, + "Pooling size should be 2 elements. or 3 elements."); + PADDLE_ENFORCE_EQ(ksize.size(), strides.size(), + "strides size and pooling size should be the same."); + PADDLE_ENFORCE_EQ(ksize.size(), paddings.size(), + "paddings size and pooling size should be the same."); if (global_pooling == 1) { - ksize.resize(static_cast(in_X->dims().size()) - 2); + ksize.resize(static_cast(in_x->dims().size()) - 2); for (size_t i = 0; i < ksize.size(); ++i) - ksize[i] = static_cast(in_X->dims()[i + 2]); + ksize[i] = static_cast(in_x->dims()[i + 2]); } - if (ksize.size() == 2) { - PADDLE_ENFORCE_EQ(strides.size(), 2, - "Pool2DOp strides size should be 2 elements."); - PADDLE_ENFORCE_EQ(paddings.size(), 2, - "Pool2DOp paddings size should be 2 elements"); - } else { - PADDLE_ENFORCE_EQ(strides.size(), 3, - "Pool3DOp strides should be 3 elements."); - PADDLE_ENFORCE_EQ(paddings.size(), 3, - "Pool3DOp paddings should be 3 elements."); - } - std::vector output_shape({in_X->dims()[0], in_X->dims()[1]}); + std::vector output_shape({in_x->dims()[0], in_x->dims()[1]}); for (size_t i = 0; i < ksize.size(); ++i) { - output_shape.push_back(outputSize_pool(in_X->dims()[i + 2], ksize[i], - paddings[i], strides[i])); + output_shape.push_back(OutputSizePool(in_x->dims()[i + 2], ksize[i], + paddings[i], strides[i])); } out->Resize(framework::make_ddim(output_shape)); } @@ -78,9 +73,16 @@ class PoolOpGrad : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), + "X(Input) of Pooling should not be null."); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Out"), + "Out(Output) of Pooling should not be null."); + PADDLE_ENFORCE_NOT_NULL(ctx.Output(framework::GradVarName("X")), + "Input@Grad of Pooling should not be null."); + auto in = ctx.Input("X"); auto d_in = ctx.Output(framework::GradVarName("X")); - if (d_in) d_in->Resize(in->dims()); + d_in->Resize(in->dims()); } }; @@ -92,7 +94,7 @@ class Pool2dOpMaker : public framework::OpProtoAndCheckerMaker { "X", "The input tensor of pooling operator. " "The format of input tensor is NCHW. Where N is batch size, C is the " - "number of channels, H and W is the height and width of image."); + "number of channels, H and W is the height and width of feature."); AddOutput("Out", "The output tensor of pooling operator." "The format of output tensor is also NCHW."); @@ -166,7 +168,7 @@ class Pool3dOpMaker : public framework::OpProtoAndCheckerMaker { "The format of input tensor is NCDHW. Where N is batch size, C is " "the " "number of channels, D, H and W is the depth, height and width of " - "image."); + "feature."); AddOutput("Out", "The output tensor of pooling operator." "The format of output tensor is also NCDHW."); diff --git a/paddle/operators/pool_op.cu b/paddle/operators/pool_op.cu index b011d20da5..0e3b80868f 100644 --- a/paddle/operators/pool_op.cu +++ b/paddle/operators/pool_op.cu @@ -1,15 +1,16 @@ -/* Copyright (c) 2016 PaddlePaddle Authors All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - http://www.apache.org/licenses/LICENSE-2.0 +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/pool_op.h" diff --git a/paddle/operators/pool_op.h b/paddle/operators/pool_op.h index 763103d884..9471282205 100644 --- a/paddle/operators/pool_op.h +++ b/paddle/operators/pool_op.h @@ -28,7 +28,7 @@ template class PoolKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { - const Tensor* in_X = context.Input("X"); + const Tensor* in_x = context.Input("X"); Tensor* out = context.Output("Out"); int global_pooling = context.Attr("globalPooling"); @@ -38,43 +38,43 @@ class PoolKernel : public framework::OpKernel { std::vector paddings = context.Attr>("paddings"); if (global_pooling == 1) { for (size_t i = 0; i < ksize.size(); ++i) { - ksize[i] = in_X->dims()[i + 2]; + ksize[i] = static_cast(in_x->dims()[i + 2]); } } switch (ksize.size()) { case 2: { if (pooling_type == "max") { - paddle::operators::math::Pool2dForwardFunctor< - Place, paddle::operators::math::pool::maxPool, T> + paddle::operators::math::Pool2dFunctor< + Place, paddle::operators::math::maxPool, T> pool2d_forward; - paddle::operators::math::pool::maxPool pool_process; - pool2d_forward(context.device_context(), *in_X, *out, ksize, strides, + paddle::operators::math::maxPool pool_process; + pool2d_forward(context.device_context(), *in_x, *out, ksize, strides, paddings, pool_process); } else if (pooling_type == "avg") { - paddle::operators::math::Pool2dForwardFunctor< - Place, paddle::operators::math::pool::avgPool, T> + paddle::operators::math::Pool2dFunctor< + Place, paddle::operators::math::avgPool, T> pool2d_forward; - paddle::operators::math::pool::avgPool pool_process; - pool2d_forward(context.device_context(), *in_X, *out, ksize, strides, + paddle::operators::math::avgPool pool_process; + pool2d_forward(context.device_context(), *in_x, *out, ksize, strides, paddings, pool_process); } } break; case 3: { if (pooling_type == "max") { - paddle::operators::math::Pool3dForwardFunctor< - Place, paddle::operators::math::pool::maxPool, T> + paddle::operators::math::Pool3dFunctor< + Place, paddle::operators::math::maxPool, T> pool3d_forward; - paddle::operators::math::pool::maxPool pool_process; - pool3d_forward(context.device_context(), *in_X, *out, ksize, strides, + paddle::operators::math::maxPool pool_process; + pool3d_forward(context.device_context(), *in_x, *out, ksize, strides, paddings, pool_process); } else if (pooling_type == "avg") { - paddle::operators::math::Pool3dForwardFunctor< - Place, paddle::operators::math::pool::avgPool, T> + paddle::operators::math::Pool3dFunctor< + Place, paddle::operators::math::avgPool, T> pool3d_forward; - paddle::operators::math::pool::avgPool pool_process; - pool3d_forward(context.device_context(), *in_X, *out, ksize, strides, + paddle::operators::math::avgPool pool_process; + pool3d_forward(context.device_context(), *in_x, *out, ksize, strides, paddings, pool_process); } } break; @@ -86,11 +86,11 @@ template class PoolGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { - const Tensor* in_X = context.Input("X"); + const Tensor* in_x = context.Input("X"); const Tensor* out = context.Input("Out"); const Tensor* out_grad = context.Input(framework::GradVarName("Out")); - Tensor* in_X_grad = context.Output(framework::GradVarName("X")); + Tensor* in_x_grad = context.Output(framework::GradVarName("X")); int global_pooling = context.Attr("globalPooling"); std::string pooling_type = context.Attr("poolingType"); @@ -99,43 +99,44 @@ class PoolGradKernel : public framework::OpKernel { std::vector paddings = context.Attr>("paddings"); if (global_pooling == 1) { - for (size_t i = 0; i < ksize.size(); ++i) ksize[i] = in_X->dims()[i + 2]; + for (size_t i = 0; i < ksize.size(); ++i) + ksize[i] = static_cast(in_x->dims()[i + 2]); } - if (in_X_grad) { - in_X_grad->mutable_data(context.GetPlace()); - auto temp = framework::EigenVector::Flatten(*in_X_grad); + if (in_x_grad) { + in_x_grad->mutable_data(context.GetPlace()); + auto temp = framework::EigenVector::Flatten(*in_x_grad); temp.device(context.GetEigenDevice()) = temp.constant(static_cast(0)); switch (ksize.size()) { case 2: { if (pooling_type == "max") { - paddle::operators::math::MaxPool2dBackwardFunctor + paddle::operators::math::MaxPool2dGradFunctor pool2d_backward; - pool2d_backward(context.device_context(), *in_X, *in_X_grad, *out, + pool2d_backward(context.device_context(), *in_x, *in_x_grad, *out, *out_grad, ksize, strides, paddings); } else if (pooling_type == "avg") { - paddle::operators::math::Pool2dBackwardFunctor< - Place, paddle::operators::math::pool::avgPoolGrad, T> + paddle::operators::math::Pool2dGradFunctor< + Place, paddle::operators::math::avgPoolGrad, T> pool2d_backward; - paddle::operators::math::pool::avgPoolGrad pool_process; - pool2d_backward(context.device_context(), *in_X, *in_X_grad, *out, + paddle::operators::math::avgPoolGrad pool_process; + pool2d_backward(context.device_context(), *in_x, *in_x_grad, *out, *out_grad, ksize, strides, paddings, pool_process); } } break; case 3: { if (pooling_type == "max") { - paddle::operators::math::MaxPool3dBackwardFunctor + paddle::operators::math::MaxPool3dGradFunctor pool3d_backward; - pool3d_backward(context.device_context(), *in_X, *in_X_grad, *out, + pool3d_backward(context.device_context(), *in_x, *in_x_grad, *out, *out_grad, ksize, strides, paddings); } else if (pooling_type == "avg") { - paddle::operators::math::Pool3dBackwardFunctor< - Place, paddle::operators::math::pool::avgPoolGrad, T> + paddle::operators::math::Pool3dGradFunctor< + Place, paddle::operators::math::avgPoolGrad, T> pool3d_backward; - paddle::operators::math::pool::avgPoolGrad pool_process; - pool3d_backward(context.device_context(), *in_X, *in_X_grad, *out, + paddle::operators::math::avgPoolGrad pool_process; + pool3d_backward(context.device_context(), *in_x, *in_x_grad, *out, *out_grad, ksize, strides, paddings, pool_process); } } break; From 4b06d8db9179c74a35582c85f782e8c268d361a6 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Tue, 26 Sep 2017 20:06:12 +0800 Subject: [PATCH 037/342] fix globalPooling type (int => bool) --- paddle/operators/pool_op.cc | 47 ++++++++++++++++++++----------------- paddle/operators/pool_op.h | 8 +++---- 2 files changed, 30 insertions(+), 25 deletions(-) diff --git a/paddle/operators/pool_op.cc b/paddle/operators/pool_op.cc index a5e731cc66..9959b3ec07 100644 --- a/paddle/operators/pool_op.cc +++ b/paddle/operators/pool_op.cc @@ -35,7 +35,7 @@ class PoolOp : public framework::OperatorWithKernel { auto in_x = ctx.Input("X"); auto out = ctx.Output("Out"); - int global_pooling = Attr("globalPooling"); + bool global_pooling = Attr("globalPooling"); std::string pooling_type = Attr("poolingType"); std::vector ksize = Attr>("ksize"); std::vector strides = Attr>("strides"); @@ -45,6 +45,15 @@ class PoolOp : public framework::OperatorWithKernel { "pooling_type should be 'max' or 'avg'"); PADDLE_ENFORCE(in_x->dims().size() == 4 || in_x->dims().size() == 5, "Pooling intput should be 4-D or 5-D"); + + if (global_pooling) { + ksize.resize(static_cast(in_x->dims().size()) - 2); + for (size_t i = 0; i < ksize.size(); ++i) + ksize[i] = static_cast(in_x->dims()[i + 2]); + } + + PADDLE_ENFORCE(in_x->dims().size() == static_cast(ksize.size() + 2), + "Input size and Pooling size should be consistent."); PADDLE_ENFORCE(ksize.size() == 2 || ksize.size() == 3, "Pooling size should be 2 elements. or 3 elements."); PADDLE_ENFORCE_EQ(ksize.size(), strides.size(), @@ -52,12 +61,6 @@ class PoolOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_EQ(ksize.size(), paddings.size(), "paddings size and pooling size should be the same."); - if (global_pooling == 1) { - ksize.resize(static_cast(in_x->dims().size()) - 2); - for (size_t i = 0; i < ksize.size(); ++i) - ksize[i] = static_cast(in_x->dims()[i + 2]); - } - std::vector output_shape({in_x->dims()[0], in_x->dims()[1]}); for (size_t i = 0; i < ksize.size(); ++i) { output_shape.push_back(OutputSizePool(in_x->dims()[i + 2], ksize[i], @@ -103,15 +106,16 @@ class Pool2dOpMaker : public framework::OpProtoAndCheckerMaker { "poolingType of pooling operator." "str constant equal to 'max' or 'avg'"); AddAttr>( - "ksize", "pooling size(height, width) of pooling operator.") - .AddCustomChecker(GreaterThanChecker_pool({0, 0})); - AddAttr( + "ksize", + "Pooling size(depth, height, width) of pooling operator." + "If globalPooling = true, ksize is ignored and need not be specified."); + AddAttr( "globalPooling", "whether to use the globalPooling." - "int constant equal to 0 or 1" - "default 0" - "If globalPooling = 1, ksize is ignored and need not be specified.") - .SetDefault(0); + "int constant equal to false or true" + "default false" + "If globalPooling = true, ksize is ignored and need not be specified.") + .SetDefault(false); AddAttr>("strides", "strides(height, width) of pooling operator." "default {1,1}") @@ -177,15 +181,16 @@ class Pool3dOpMaker : public framework::OpProtoAndCheckerMaker { "poolingType of pooling operator." "str constant equal to 'max' or 'avg'"); AddAttr>( - "ksize", "pooling size(depth, height, width) of pooling operator.") - .AddCustomChecker(GreaterThanChecker_pool({0, 0, 0})); - AddAttr( + "ksize", + "pooling size(depth, height, width) of pooling operator." + "If globalPooling = true, ksize is ignored and need not be specified."); + AddAttr( "globalPooling", "whether to use the globalPooling." - "int constant equal to 0 or 1" - "default 0" - "If globalPooling = 1, ksize is ignored and need not be specified.") - .SetDefault(0); + "int constant equal to false or true" + "default false" + "If globalPooling = true, ksize is ignored and need not be specified.") + .SetDefault(false); AddAttr>( "strides", "strides(depth, height, width) of pooling operator." diff --git a/paddle/operators/pool_op.h b/paddle/operators/pool_op.h index 9471282205..73c9721624 100644 --- a/paddle/operators/pool_op.h +++ b/paddle/operators/pool_op.h @@ -31,12 +31,12 @@ class PoolKernel : public framework::OpKernel { const Tensor* in_x = context.Input("X"); Tensor* out = context.Output("Out"); - int global_pooling = context.Attr("globalPooling"); + bool global_pooling = context.Attr("globalPooling"); std::string pooling_type = context.Attr("poolingType"); std::vector ksize = context.Attr>("ksize"); std::vector strides = context.Attr>("strides"); std::vector paddings = context.Attr>("paddings"); - if (global_pooling == 1) { + if (global_pooling) { for (size_t i = 0; i < ksize.size(); ++i) { ksize[i] = static_cast(in_x->dims()[i + 2]); } @@ -92,13 +92,13 @@ class PoolGradKernel : public framework::OpKernel { context.Input(framework::GradVarName("Out")); Tensor* in_x_grad = context.Output(framework::GradVarName("X")); - int global_pooling = context.Attr("globalPooling"); + bool global_pooling = context.Attr("globalPooling"); std::string pooling_type = context.Attr("poolingType"); std::vector ksize = context.Attr>("ksize"); std::vector strides = context.Attr>("strides"); std::vector paddings = context.Attr>("paddings"); - if (global_pooling == 1) { + if (global_pooling) { for (size_t i = 0; i < ksize.size(); ++i) ksize[i] = static_cast(in_x->dims()[i + 2]); } From 3a5693e0a8689cb9490e4310c0c46e28dcad0514 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 27 Sep 2017 16:16:00 -0700 Subject: [PATCH 038/342] Add Skeleton of Double support --- paddle/framework/data_type.h | 36 +++++++++ paddle/framework/op_registry.h | 5 +- paddle/framework/operator.h | 77 +++++++++++++++---- paddle/framework/tensor.h | 12 +-- paddle/operators/accuracy_op.cu | 2 +- paddle/operators/accuracy_op.h | 2 +- paddle/operators/activation_op.h | 20 ++--- paddle/operators/add_op.h | 2 +- paddle/operators/clip_op.h | 4 +- paddle/operators/concat_op.h | 2 +- paddle/operators/cos_sim_op.h | 4 +- paddle/operators/crop_op.h | 4 +- paddle/operators/cross_entropy_op.cc | 12 +++ paddle/operators/cross_entropy_op.cu | 4 +- paddle/operators/cross_entropy_op.h | 4 +- paddle/operators/dropout_op.cu | 2 +- paddle/operators/dropout_op.h | 4 +- paddle/operators/elementwise_add_op.h | 4 +- paddle/operators/elementwise_div_op.h | 4 +- paddle/operators/elementwise_mul_op.h | 4 +- paddle/operators/elementwise_sub_op.h | 4 +- paddle/operators/fill_zeros_like_op.h | 2 +- paddle/operators/gather_op.h | 4 +- paddle/operators/gaussian_random_op.cc | 2 +- paddle/operators/gaussian_random_op.cu | 2 +- paddle/operators/gemm_conv2d_op.h | 4 +- paddle/operators/lookup_table_op.cu | 4 +- paddle/operators/lookup_table_op.h | 4 +- paddle/operators/lstm_unit_op.cu | 4 +- paddle/operators/lstm_unit_op.h | 4 +- paddle/operators/mean_op.h | 4 +- paddle/operators/minus_op.h | 2 +- paddle/operators/modified_huber_loss_op.cu | 2 +- paddle/operators/modified_huber_loss_op.h | 4 +- paddle/operators/mul_op.h | 4 +- paddle/operators/multiplex_op.cu | 4 +- paddle/operators/multiplex_op.h | 4 +- paddle/operators/pad_op.h | 4 +- paddle/operators/prelu_op.h | 4 +- paddle/operators/rank_loss_op.h | 4 +- paddle/operators/reshape_op.h | 4 +- paddle/operators/rowwise_add_op.h | 4 +- paddle/operators/scale_op.h | 2 +- paddle/operators/scatter_op.h | 4 +- paddle/operators/sequence_pool_op.h | 4 +- paddle/operators/sgd_op.h | 2 +- paddle/operators/smooth_l1_loss_op.h | 4 +- paddle/operators/softmax_op.h | 4 +- .../softmax_with_cross_entropy_op.cu | 4 +- .../operators/softmax_with_cross_entropy_op.h | 4 +- paddle/operators/split_op.h | 2 +- paddle/operators/squared_l2_distance_op.h | 4 +- paddle/operators/sum_op.h | 4 +- paddle/operators/top_k_op.cu | 2 +- paddle/operators/top_k_op.h | 2 +- paddle/operators/transpose_op.h | 4 +- paddle/operators/uniform_random_op.cc | 2 +- paddle/operators/uniform_random_op.cu | 2 +- paddle/platform/place.cc | 2 +- paddle/pybind/tensor_py.h | 8 +- 60 files changed, 217 insertions(+), 129 deletions(-) create mode 100644 paddle/framework/data_type.h diff --git a/paddle/framework/data_type.h b/paddle/framework/data_type.h new file mode 100644 index 0000000000..55e3931f87 --- /dev/null +++ b/paddle/framework/data_type.h @@ -0,0 +1,36 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#pragma once +#include +#include "paddle/framework/framework.pb.h" + +namespace paddle { +namespace framework { + +inline DataType ToDataType(std::type_index type) { + if (typeid(float).hash_code() == type.hash_code()) { + return DataType::FP32; + } else if (typeid(double).hash_code() == type.hash_code()) { + return DataType::FP64; + } else if (typeid(int).hash_code() == type.hash_code()) { + return DataType::INT32; + } else { + PADDLE_THROW("Not supported"); + return static_cast(-1); + } +} + +} // namespace framework +} // namespace paddle diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h index 90077d0192..0db67e4c67 100644 --- a/paddle/framework/op_registry.h +++ b/paddle/framework/op_registry.h @@ -104,8 +104,9 @@ template class OpKernelRegistrar : public Registrar { public: explicit OpKernelRegistrar(const char* op_type) { - OperatorWithKernel::OpKernelKey key; - key.place_ = PlaceType(); + using T = typename KernelType::ELEMENT_TYPE; + OperatorWithKernel::OpKernelKey key(ToDataType(std::type_index(typeid(T))), + PlaceType()); OperatorWithKernel::AllOpKernels()[op_type][key].reset(new KernelType); } }; diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h index 77c7c855c0..4e81d1eaa9 100644 --- a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -21,6 +21,7 @@ limitations under the License. */ #include "op_info.h" #include "paddle/framework/attribute.h" +#include "paddle/framework/data_type.h" #include "paddle/framework/framework.pb.h" #include "paddle/framework/lod_tensor.h" #include "paddle/framework/scope.h" @@ -407,7 +408,7 @@ class RuntimeInferShapeContext : public InferShapeContextBase { const Scope& scope_; }; -class OpKernel { +class OpKernelBase { public: /** * ExecutionContext is the only parameter of Kernel Run function. @@ -418,33 +419,47 @@ class OpKernel { virtual void Compute(const ExecutionContext& context) const = 0; - virtual ~OpKernel() {} + virtual ~OpKernelBase() = default; +}; + +template +class OpKernel : public OpKernelBase { + public: + using ELEMENT_TYPE = T; }; class OperatorWithKernel : public OperatorBase { public: struct OpKernelKey { platform::Place place_; + DataType data_type_; - OpKernelKey() = default; - explicit OpKernelKey(const platform::DeviceContext& dev_ctx) { - place_ = dev_ctx.GetPlace(); - } + OpKernelKey(DataType data_type, platform::Place place) + : place_(place), data_type_(data_type) {} + + OpKernelKey(DataType data_type, const platform::DeviceContext& dev_ctx) + : place_(dev_ctx.GetPlace()), data_type_(data_type) {} bool operator==(const OpKernelKey& o) const { - return platform::places_are_same_class(place_, o.place_); + return platform::places_are_same_class(place_, o.place_) && + data_type_ == o.data_type_; } }; struct OpKernelHash { - std::hash hash_; + std::hash hash_; size_t operator()(const OpKernelKey& key) const { - return hash_(platform::is_gpu_place(key.place_)); + int place = key.place_.which(); + int data_type = static_cast(key.data_type_); + // NOTE: Number of places limit to 16. + int pre_hash = data_type << 4 | (place & 0x0F); + return hash_(pre_hash); } }; using OpKernelMap = - std::unordered_map, OpKernelHash>; + std::unordered_map, + OpKernelHash>; OperatorWithKernel(const std::string& type, const VariableNameMap& inputs, const VariableNameMap& outputs, const AttributeMap& attrs) @@ -458,8 +473,10 @@ class OperatorWithKernel : public OperatorBase { void Run(const Scope& scope, const platform::DeviceContext& dev_ctx) const final { - auto& opKernel = AllOpKernels().at(type_).at(OpKernelKey(dev_ctx)); - opKernel->Compute(ExecutionContext(*this, scope, dev_ctx)); + ExecutionContext ctx(*this, scope, dev_ctx); + auto& opKernel = AllOpKernels().at(type_).at( + OpKernelKey(IndicateDataType(ctx), dev_ctx)); + opKernel->Compute(ctx); } static std::unordered_map& @@ -469,13 +486,43 @@ class OperatorWithKernel : public OperatorBase { } bool SupportGPU() const override { - OperatorWithKernel::OpKernelKey key; - key.place_ = platform::GPUPlace(); - return OperatorWithKernel::AllOpKernels().at(type_).count(key) != 0; + auto& op_kernels = OperatorWithKernel::AllOpKernels().at(type_); + return std::any_of(op_kernels.begin(), op_kernels.end(), + [](OpKernelMap::const_reference kern_pair) { + return platform::is_gpu_place(kern_pair.first.place_); + }); } protected: virtual void InferShape(InferShapeContextBase* ctx) const = 0; + + // indicate kernel DataType by input data. Defaultly all input data must be + // same. + virtual DataType IndicateDataType(const ExecutionContext& ctx) const { + auto& scope = ctx.scope(); + int data_type = -1; + for (auto& input : this->inputs_) { + for (auto& ipt_name : input.second) { + auto* var = scope.FindVar(ipt_name); + if (var != nullptr) { + const Tensor* t = nullptr; + if (var->IsType()) { + t = &var->Get(); + } else if (var->IsType()) { + t = &var->Get(); + } + if (t != nullptr) { + int tmp = static_cast(ToDataType(t->type())); + PADDLE_ENFORCE(tmp == data_type || data_type == -1, + "DataType of Paddle Op must be same."); + data_type = tmp; + } + } + } + } + PADDLE_ENFORCE(data_type != -1, "DataType should be indicated by input"); + return static_cast(data_type); + } }; } // namespace framework diff --git a/paddle/framework/tensor.h b/paddle/framework/tensor.h index f040c09c08..80a3f0a393 100644 --- a/paddle/framework/tensor.h +++ b/paddle/framework/tensor.h @@ -29,20 +29,10 @@ limitations under the License. */ namespace paddle { -namespace pybind { -namespace details { -template -struct CastToPyBufferImpl; -} -} // namespace pybind - namespace framework { class Tensor { public: - template - friend struct pybind::details::CastToPyBufferImpl; - template friend struct EigenTensor; @@ -119,6 +109,8 @@ class Tensor { return holder_->place(); } + std::type_index type() const { return holder_->type(); } + private: template inline void check_memory_size() const; diff --git a/paddle/operators/accuracy_op.cu b/paddle/operators/accuracy_op.cu index 75e8a98903..0ca9ef941d 100644 --- a/paddle/operators/accuracy_op.cu +++ b/paddle/operators/accuracy_op.cu @@ -47,7 +47,7 @@ __global__ void AccuracyCudaKernel(const int N, const int D, const int* Xdata, } template -class AccuracyOpCUDAKernel : public framework::OpKernel { +class AccuracyOpCUDAKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), diff --git a/paddle/operators/accuracy_op.h b/paddle/operators/accuracy_op.h index fe704efe1c..12c6b9aac8 100644 --- a/paddle/operators/accuracy_op.h +++ b/paddle/operators/accuracy_op.h @@ -35,7 +35,7 @@ template ; template -class AccuracyKernel : public framework::OpKernel { +class AccuracyKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* inference = ctx.Input("Inference"); diff --git a/paddle/operators/activation_op.h b/paddle/operators/activation_op.h index 15f8afb4ba..e400992ae2 100644 --- a/paddle/operators/activation_op.h +++ b/paddle/operators/activation_op.h @@ -20,7 +20,7 @@ namespace paddle { namespace operators { template -class ActivationKernel : public framework::OpKernel { +class ActivationKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto* X = context.Input("X"); @@ -36,7 +36,7 @@ class ActivationKernel : public framework::OpKernel { }; template -class ActivationGradKernel : public framework::OpKernel { +class ActivationGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto* X = context.Input("X"); @@ -202,7 +202,7 @@ struct SquareGradFunctor { }; template -class BReluKernel : public framework::OpKernel { +class BReluKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto* X = context.Input("X"); @@ -219,7 +219,7 @@ class BReluKernel : public framework::OpKernel { }; template -class BReluGradKernel : public framework::OpKernel { +class BReluGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto* X = context.Input("X"); @@ -239,7 +239,7 @@ class BReluGradKernel : public framework::OpKernel { }; template -class SoftReluKernel : public framework::OpKernel { +class SoftReluKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto* X = context.Input("X"); @@ -256,7 +256,7 @@ class SoftReluKernel : public framework::OpKernel { }; template -class SoftReluGradKernel : public framework::OpKernel { +class SoftReluGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto* X = context.Input("X"); @@ -277,7 +277,7 @@ class SoftReluGradKernel : public framework::OpKernel { }; template -class PowKernel : public framework::OpKernel { +class PowKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto* X = context.Input("X"); @@ -293,7 +293,7 @@ class PowKernel : public framework::OpKernel { }; template -class PowGradKernel : public framework::OpKernel { +class PowGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto* X = context.Input("X"); @@ -312,7 +312,7 @@ class PowGradKernel : public framework::OpKernel { }; template -class STanhKernel : public framework::OpKernel { +class STanhKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto* X = context.Input("X"); @@ -329,7 +329,7 @@ class STanhKernel : public framework::OpKernel { }; template -class STanhGradKernel : public framework::OpKernel { +class STanhGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto* X = context.Input("X"); diff --git a/paddle/operators/add_op.h b/paddle/operators/add_op.h index a7307b6818..75163032a1 100644 --- a/paddle/operators/add_op.h +++ b/paddle/operators/add_op.h @@ -25,7 +25,7 @@ template ; template -class AddKernel : public framework::OpKernel { +class AddKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto* input0 = context.Input("X"); diff --git a/paddle/operators/clip_op.h b/paddle/operators/clip_op.h index ce1d4e1f46..ac702e9935 100644 --- a/paddle/operators/clip_op.h +++ b/paddle/operators/clip_op.h @@ -56,7 +56,7 @@ class ClipGradFunctor { }; template -class ClipKernel : public framework::OpKernel { +class ClipKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto max = context.Attr("max"); @@ -73,7 +73,7 @@ class ClipKernel : public framework::OpKernel { }; template -class ClipGradKernel : public framework::OpKernel { +class ClipGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto max = context.Attr("max"); diff --git a/paddle/operators/concat_op.h b/paddle/operators/concat_op.h index f977054fdf..b0801ab062 100644 --- a/paddle/operators/concat_op.h +++ b/paddle/operators/concat_op.h @@ -21,7 +21,7 @@ namespace paddle { namespace operators { template -class ConcatKernel : public framework::OpKernel { +class ConcatKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { auto ins = ctx.MultiInput("X"); diff --git a/paddle/operators/cos_sim_op.h b/paddle/operators/cos_sim_op.h index bcf6f758ca..68c56f531f 100644 --- a/paddle/operators/cos_sim_op.h +++ b/paddle/operators/cos_sim_op.h @@ -28,7 +28,7 @@ template ; template -class CosSimKernel : public framework::OpKernel { +class CosSimKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { // get Tensor @@ -67,7 +67,7 @@ class CosSimKernel : public framework::OpKernel { }; template -class CosSimGradKernel : public framework::OpKernel { +class CosSimGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { // get Tensor diff --git a/paddle/operators/crop_op.h b/paddle/operators/crop_op.h index ac3aeaf41e..2e72583d68 100644 --- a/paddle/operators/crop_op.h +++ b/paddle/operators/crop_op.h @@ -27,7 +27,7 @@ using EigenTensor = framework::EigenTensor; using framework::Tensor; template -class CropKernel : public framework::OpKernel { +class CropKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto* x = context.Input("X"); @@ -69,7 +69,7 @@ void CropGradFunction(const framework::ExecutionContext& context) { } template -class CropGradKernel : public framework::OpKernel { +class CropGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { size_t rank = diff --git a/paddle/operators/cross_entropy_op.cc b/paddle/operators/cross_entropy_op.cc index 26fc9b51c4..4b67887f36 100644 --- a/paddle/operators/cross_entropy_op.cc +++ b/paddle/operators/cross_entropy_op.cc @@ -47,6 +47,12 @@ class CrossEntropyOp : public framework::OperatorWithKernel { ctx->SetOutputDim("Y", {x_dims[0], 1}); ctx->ShareLoD("X", /*->*/ "Y"); } + + // CrossEntropy's data type just determined by "X" + framework::DataType IndicateDataType( + const framework::ExecutionContext& ctx) const override { + return framework::ToDataType(ctx.Input("X")->type()); + } }; class CrossEntropyGradientOp : public framework::OperatorWithKernel { @@ -87,6 +93,12 @@ class CrossEntropyGradientOp : public framework::OperatorWithKernel { } ctx->SetOutputDim(framework::GradVarName("X"), x_dims); } + + // CrossEntropy's data type just determined by "X" + framework::DataType IndicateDataType( + const framework::ExecutionContext& ctx) const override { + return framework::ToDataType(ctx.Input("X")->type()); + } }; class CrossEntropyOpMaker : public framework::OpProtoAndCheckerMaker { diff --git a/paddle/operators/cross_entropy_op.cu b/paddle/operators/cross_entropy_op.cu index 1cfeb7a53b..76d63f77ad 100644 --- a/paddle/operators/cross_entropy_op.cu +++ b/paddle/operators/cross_entropy_op.cu @@ -53,7 +53,7 @@ __global__ void SoftCrossEntropyGradientKernel(T* dX, const T* dY, const T* X, } // namespace template -class CrossEntropyOpCUDAKernel : public framework::OpKernel { +class CrossEntropyOpCUDAKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), @@ -69,7 +69,7 @@ class CrossEntropyOpCUDAKernel : public framework::OpKernel { }; template -class CrossEntropyGradientOpCUDAKernel : public framework::OpKernel { +class CrossEntropyGradientOpCUDAKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), diff --git a/paddle/operators/cross_entropy_op.h b/paddle/operators/cross_entropy_op.h index 1f67461d3f..fa81d3b431 100644 --- a/paddle/operators/cross_entropy_op.h +++ b/paddle/operators/cross_entropy_op.h @@ -26,7 +26,7 @@ template ; template -class CrossEntropyOpKernel : public framework::OpKernel { +class CrossEntropyOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()), @@ -42,7 +42,7 @@ class CrossEntropyOpKernel : public framework::OpKernel { }; template -class CrossEntropyGradientOpKernel : public framework::OpKernel { +class CrossEntropyGradientOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()), diff --git a/paddle/operators/dropout_op.cu b/paddle/operators/dropout_op.cu index a04e4a22cc..30c769000f 100644 --- a/paddle/operators/dropout_op.cu +++ b/paddle/operators/dropout_op.cu @@ -47,7 +47,7 @@ struct MaskGenerator { // Use std::random and thrust::random(thrust is a std library in CUDA) to // implement uniform random. template -class GPUDropoutKernel : public framework::OpKernel { +class GPUDropoutKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto* x = context.Input("X"); diff --git a/paddle/operators/dropout_op.h b/paddle/operators/dropout_op.h index d57f64afcb..745525fe81 100644 --- a/paddle/operators/dropout_op.h +++ b/paddle/operators/dropout_op.h @@ -26,7 +26,7 @@ template ; template -class CPUDropoutKernel : public framework::OpKernel { +class CPUDropoutKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto* x = context.Input("X"); @@ -62,7 +62,7 @@ class CPUDropoutKernel : public framework::OpKernel { }; template -class DropoutGradKernel : public framework::OpKernel { +class DropoutGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { PADDLE_ENFORCE(context.Attr("is_training"), diff --git a/paddle/operators/elementwise_add_op.h b/paddle/operators/elementwise_add_op.h index e9f78ef26e..f04fe3ec60 100644 --- a/paddle/operators/elementwise_add_op.h +++ b/paddle/operators/elementwise_add_op.h @@ -20,7 +20,7 @@ namespace paddle { namespace operators { template -class ElementwiseAddKernel : public framework::OpKernel { +class ElementwiseAddKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { ElementwiseCompute(ctx); @@ -101,7 +101,7 @@ struct ElementwiseAddBroadCast2GradFunctor { }; template -class ElementwiseAddGradKernel : public framework::OpKernel { +class ElementwiseAddGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { ElementwiseGradCompute, diff --git a/paddle/operators/elementwise_div_op.h b/paddle/operators/elementwise_div_op.h index 99b6d9c199..8946ff3d25 100644 --- a/paddle/operators/elementwise_div_op.h +++ b/paddle/operators/elementwise_div_op.h @@ -20,7 +20,7 @@ namespace paddle { namespace operators { template -class ElementwiseDivKernel : public framework::OpKernel { +class ElementwiseDivKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { ElementwiseCompute(ctx); @@ -103,7 +103,7 @@ struct ElementwiseDivBroadCast2GradFunctor { }; template -class ElementwiseDivGradKernel : public framework::OpKernel { +class ElementwiseDivGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { ElementwiseGradCompute, diff --git a/paddle/operators/elementwise_mul_op.h b/paddle/operators/elementwise_mul_op.h index 6ab642378b..4469b07eaa 100644 --- a/paddle/operators/elementwise_mul_op.h +++ b/paddle/operators/elementwise_mul_op.h @@ -19,7 +19,7 @@ namespace paddle { namespace operators { template -class ElementwiseMulKernel : public framework::OpKernel { +class ElementwiseMulKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { ElementwiseCompute(ctx); @@ -102,7 +102,7 @@ struct ElementwiseMulBroadCast2GradFunctor { }; template -class ElementwiseMulGradKernel : public framework::OpKernel { +class ElementwiseMulGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { ElementwiseGradCompute, diff --git a/paddle/operators/elementwise_sub_op.h b/paddle/operators/elementwise_sub_op.h index 3ca1376c73..3f40c1c5bc 100644 --- a/paddle/operators/elementwise_sub_op.h +++ b/paddle/operators/elementwise_sub_op.h @@ -19,7 +19,7 @@ namespace paddle { namespace operators { template -class ElementwiseSubKernel : public framework::OpKernel { +class ElementwiseSubKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { ElementwiseCompute(ctx); @@ -102,7 +102,7 @@ struct ElementwiseSubBroadCast2GradFunctor { }; template -class ElementwiseSubGradKernel : public framework::OpKernel { +class ElementwiseSubGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { ElementwiseGradCompute, diff --git a/paddle/operators/fill_zeros_like_op.h b/paddle/operators/fill_zeros_like_op.h index 4474581784..cdf56a723b 100644 --- a/paddle/operators/fill_zeros_like_op.h +++ b/paddle/operators/fill_zeros_like_op.h @@ -20,7 +20,7 @@ namespace paddle { namespace operators { template -class FillZerosLikeKernel : public framework::OpKernel { +class FillZerosLikeKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto* output = context.Output("Y"); diff --git a/paddle/operators/gather_op.h b/paddle/operators/gather_op.h index 381854f301..073e566e8f 100644 --- a/paddle/operators/gather_op.h +++ b/paddle/operators/gather_op.h @@ -24,7 +24,7 @@ namespace operators { using Tensor = framework::Tensor; template -class GatherOpKernel : public framework::OpKernel { +class GatherOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &ctx) const override { auto *X = ctx.Input("X"); @@ -37,7 +37,7 @@ class GatherOpKernel : public framework::OpKernel { }; template -class GatherGradientOpKernel : public framework::OpKernel { +class GatherGradientOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &ctx) const override { auto *Index = ctx.Input("Index"); diff --git a/paddle/operators/gaussian_random_op.cc b/paddle/operators/gaussian_random_op.cc index 05120a6e7b..fc340c181c 100644 --- a/paddle/operators/gaussian_random_op.cc +++ b/paddle/operators/gaussian_random_op.cc @@ -16,7 +16,7 @@ namespace paddle { namespace operators { template -class CPUGaussianRandomKernel : public framework::OpKernel { +class CPUGaussianRandomKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { float mean = context.Attr("mean"); diff --git a/paddle/operators/gaussian_random_op.cu b/paddle/operators/gaussian_random_op.cu index 2d63b30499..315560bf1b 100644 --- a/paddle/operators/gaussian_random_op.cu +++ b/paddle/operators/gaussian_random_op.cu @@ -37,7 +37,7 @@ struct GaussianGenerator { }; template -class GPUGaussianRandomKernel : public framework::OpKernel { +class GPUGaussianRandomKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto* tensor = context.Output("Out"); diff --git a/paddle/operators/gemm_conv2d_op.h b/paddle/operators/gemm_conv2d_op.h index 5c9e81732a..323e3f7c3b 100644 --- a/paddle/operators/gemm_conv2d_op.h +++ b/paddle/operators/gemm_conv2d_op.h @@ -25,7 +25,7 @@ namespace operators { using Tensor = framework::Tensor; template -class GemmConv2DKernel : public framework::OpKernel { +class GemmConv2DKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { const Tensor* input = context.Input("Input"); @@ -98,7 +98,7 @@ class GemmConv2DKernel : public framework::OpKernel { }; template -class GemmConvGrad2DKernel : public framework::OpKernel { +class GemmConvGrad2DKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { const Tensor* input = context.Input("Input"); diff --git a/paddle/operators/lookup_table_op.cu b/paddle/operators/lookup_table_op.cu index 62f63b4f3c..c3808fa9a8 100644 --- a/paddle/operators/lookup_table_op.cu +++ b/paddle/operators/lookup_table_op.cu @@ -61,7 +61,7 @@ __global__ void LookupTableGrad(T* table, const T* output, const int32_t* ids, } template -class LookupTableCUDAKernel : public framework::OpKernel { +class LookupTableCUDAKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto table_t = context.Input("W"); @@ -85,7 +85,7 @@ class LookupTableCUDAKernel : public framework::OpKernel { }; template -class LookupTableGradCUDAKernel : public framework::OpKernel { +class LookupTableGradCUDAKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto ids_t = context.Input("Ids"); diff --git a/paddle/operators/lookup_table_op.h b/paddle/operators/lookup_table_op.h index a1298906dd..dfead2fc5b 100644 --- a/paddle/operators/lookup_table_op.h +++ b/paddle/operators/lookup_table_op.h @@ -23,7 +23,7 @@ namespace operators { using Tensor = framework::Tensor; template -class LookupTableKernel : public framework::OpKernel { +class LookupTableKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto table_t = context.Input("W"); // float tensor @@ -44,7 +44,7 @@ class LookupTableKernel : public framework::OpKernel { }; template -class LookupTableGradKernel : public framework::OpKernel { +class LookupTableGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto ids_t = context.Input("Ids"); diff --git a/paddle/operators/lstm_unit_op.cu b/paddle/operators/lstm_unit_op.cu index 6e5e497899..b1db0d5322 100644 --- a/paddle/operators/lstm_unit_op.cu +++ b/paddle/operators/lstm_unit_op.cu @@ -90,7 +90,7 @@ __global__ void LSTMUnitGradientKernel(const int nthreads, const int dim, } template -class LstmUnitOpCUDAKernel : public framework::OpKernel { +class LstmUnitOpCUDAKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), @@ -121,7 +121,7 @@ class LstmUnitOpCUDAKernel : public framework::OpKernel { }; template -class LstmUnitGradOpCUDAKernel : public framework::OpKernel { +class LstmUnitGradOpCUDAKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), diff --git a/paddle/operators/lstm_unit_op.h b/paddle/operators/lstm_unit_op.h index 683034fe15..0dc9a7d9a7 100644 --- a/paddle/operators/lstm_unit_op.h +++ b/paddle/operators/lstm_unit_op.h @@ -33,7 +33,7 @@ inline T tanh(T x) { } template -class LstmUnitKernel : public framework::OpKernel { +class LstmUnitKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()), @@ -76,7 +76,7 @@ class LstmUnitKernel : public framework::OpKernel { }; template -class LstmUnitGradKernel : public framework::OpKernel { +class LstmUnitGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()), diff --git a/paddle/operators/mean_op.h b/paddle/operators/mean_op.h index ce31e178d8..c99286a5b9 100644 --- a/paddle/operators/mean_op.h +++ b/paddle/operators/mean_op.h @@ -28,7 +28,7 @@ template ; template -class MeanKernel : public framework::OpKernel { +class MeanKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto* input = context.Input("X"); @@ -45,7 +45,7 @@ class MeanKernel : public framework::OpKernel { }; template -class MeanGradKernel : public framework::OpKernel { +class MeanGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto OG = context.Input(framework::GradVarName("Out")); diff --git a/paddle/operators/minus_op.h b/paddle/operators/minus_op.h index 6310a4fd51..bd9a2790aa 100644 --- a/paddle/operators/minus_op.h +++ b/paddle/operators/minus_op.h @@ -20,7 +20,7 @@ namespace paddle { namespace operators { template -class MinusKernel : public framework::OpKernel { +class MinusKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto* left_tensor = context.Input("X"); diff --git a/paddle/operators/modified_huber_loss_op.cu b/paddle/operators/modified_huber_loss_op.cu index bce760f95e..8854e166cd 100644 --- a/paddle/operators/modified_huber_loss_op.cu +++ b/paddle/operators/modified_huber_loss_op.cu @@ -39,7 +39,7 @@ struct ModifiedHuberLossBackward { }; template -class ModifiedHuberLossGradGPUKernel : public framework::OpKernel { +class ModifiedHuberLossGradGPUKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto* in0 = context.Input("Y"); diff --git a/paddle/operators/modified_huber_loss_op.h b/paddle/operators/modified_huber_loss_op.h index cb51007749..aba75efad9 100644 --- a/paddle/operators/modified_huber_loss_op.h +++ b/paddle/operators/modified_huber_loss_op.h @@ -47,7 +47,7 @@ struct ModifiedHuberLossForward { }; template -class ModifiedHuberLossKernel : public framework::OpKernel { +class ModifiedHuberLossKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto* in0 = context.Input("X"); @@ -73,7 +73,7 @@ class ModifiedHuberLossKernel : public framework::OpKernel { // CPU backward kernel template -class ModifiedHuberLossGradCPUKernel : public framework::OpKernel { +class ModifiedHuberLossGradCPUKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto* in0 = context.Input("Y"); diff --git a/paddle/operators/mul_op.h b/paddle/operators/mul_op.h index ac7136a769..684b1ea0c0 100644 --- a/paddle/operators/mul_op.h +++ b/paddle/operators/mul_op.h @@ -28,7 +28,7 @@ template ; template -class MulKernel : public framework::OpKernel { +class MulKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { const Tensor* x = context.Input("X"); @@ -52,7 +52,7 @@ class MulKernel : public framework::OpKernel { }; template -class MulGradKernel : public framework::OpKernel { +class MulGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { int x_num_col_dims = ctx.template Attr("x_num_col_dims"); diff --git a/paddle/operators/multiplex_op.cu b/paddle/operators/multiplex_op.cu index 505776612e..72b1f96eaf 100644 --- a/paddle/operators/multiplex_op.cu +++ b/paddle/operators/multiplex_op.cu @@ -21,7 +21,7 @@ namespace operators { using Tensor = framework::Tensor; template -class MultiplexGPUKernel : public framework::OpKernel { +class MultiplexGPUKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const { auto ins = ctx.MultiInput("X"); @@ -51,7 +51,7 @@ class MultiplexGPUKernel : public framework::OpKernel { }; template -class MultiplexGradGPUKernel : public framework::OpKernel { +class MultiplexGradGPUKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const { auto* d_out = ctx.Input(framework::GradVarName("Out")); diff --git a/paddle/operators/multiplex_op.h b/paddle/operators/multiplex_op.h index 637c63a34a..ab3cafaa32 100644 --- a/paddle/operators/multiplex_op.h +++ b/paddle/operators/multiplex_op.h @@ -23,7 +23,7 @@ namespace paddle { namespace operators { template -class MultiplexCPUKernel : public framework::OpKernel { +class MultiplexCPUKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const { auto ins = ctx.MultiInput("X"); @@ -48,7 +48,7 @@ class MultiplexCPUKernel : public framework::OpKernel { }; template -class MultiplexGradCPUKernel : public framework::OpKernel { +class MultiplexGradCPUKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const { auto* d_out = ctx.Input(framework::GradVarName("Out")); diff --git a/paddle/operators/pad_op.h b/paddle/operators/pad_op.h index 2cc3b945ae..9534dbf545 100644 --- a/paddle/operators/pad_op.h +++ b/paddle/operators/pad_op.h @@ -47,7 +47,7 @@ void PadFunction(const framework::ExecutionContext& context) { } template -class PadKernel : public framework::OpKernel { +class PadKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { int rank = context.Input("X")->dims().size(); @@ -97,7 +97,7 @@ void PadGradFunction(const framework::ExecutionContext& context) { } template -class PadGradKernel : public framework::OpKernel { +class PadGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { size_t rank = diff --git a/paddle/operators/prelu_op.h b/paddle/operators/prelu_op.h index 6b78ed295c..5ad31c2203 100644 --- a/paddle/operators/prelu_op.h +++ b/paddle/operators/prelu_op.h @@ -40,7 +40,7 @@ class PReluFunctor { }; template -class PReluKernel : public framework::OpKernel { +class PReluKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto* x = context.Input("X"); @@ -77,7 +77,7 @@ class PReluGradFunctor { }; template -class PReluGradKernel : public framework::OpKernel { +class PReluGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto* dx = context.Output(framework::GradVarName("X")); diff --git a/paddle/operators/rank_loss_op.h b/paddle/operators/rank_loss_op.h index 7df195ff47..f184d6efcb 100644 --- a/paddle/operators/rank_loss_op.h +++ b/paddle/operators/rank_loss_op.h @@ -21,7 +21,7 @@ namespace paddle { namespace operators { template -class RankLossKernel : public framework::OpKernel { +class RankLossKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const { auto* out_t = ctx.Output("Out"); @@ -42,7 +42,7 @@ class RankLossKernel : public framework::OpKernel { }; template -class RankLossGradKernel : public framework::OpKernel { +class RankLossGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const { auto* d_left_t = diff --git a/paddle/operators/reshape_op.h b/paddle/operators/reshape_op.h index 873acf3078..628dfe4c0f 100644 --- a/paddle/operators/reshape_op.h +++ b/paddle/operators/reshape_op.h @@ -21,7 +21,7 @@ namespace paddle { namespace operators { template -class ReshapeKernel : public framework::OpKernel { +class ReshapeKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const { auto* out = ctx.Output("Out"); @@ -39,7 +39,7 @@ class ReshapeKernel : public framework::OpKernel { }; template -class ReshapeGradKernel : public framework::OpKernel { +class ReshapeGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const { auto* d_out = ctx.Input(framework::GradVarName("Out")); diff --git a/paddle/operators/rowwise_add_op.h b/paddle/operators/rowwise_add_op.h index 35774b9409..b43e5d868b 100644 --- a/paddle/operators/rowwise_add_op.h +++ b/paddle/operators/rowwise_add_op.h @@ -28,7 +28,7 @@ template ; template -class RowwiseAddKernel : public framework::OpKernel { +class RowwiseAddKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto out = context.Output("Out"); @@ -50,7 +50,7 @@ class RowwiseAddKernel : public framework::OpKernel { }; template -class RowwiseAddGradKernel : public framework::OpKernel { +class RowwiseAddGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto* dout = context.Input(framework::GradVarName("Out")); diff --git a/paddle/operators/scale_op.h b/paddle/operators/scale_op.h index 02fbdc52bb..dc6bc76899 100644 --- a/paddle/operators/scale_op.h +++ b/paddle/operators/scale_op.h @@ -20,7 +20,7 @@ namespace paddle { namespace operators { template -class ScaleKernel : public framework::OpKernel { +class ScaleKernel : public framework::OpKernel { public: virtual void Compute(const framework::ExecutionContext& context) const { auto* tensor = context.Output("Out"); diff --git a/paddle/operators/scatter_op.h b/paddle/operators/scatter_op.h index e9595638a8..a8eb54399a 100644 --- a/paddle/operators/scatter_op.h +++ b/paddle/operators/scatter_op.h @@ -24,7 +24,7 @@ namespace operators { using Tensor = framework::Tensor; template -class ScatterOpKernel : public framework::OpKernel { +class ScatterOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &ctx) const override { auto *Ref = ctx.Input("Ref"); @@ -40,7 +40,7 @@ class ScatterOpKernel : public framework::OpKernel { }; template -class ScatterGradientOpKernel : public framework::OpKernel { +class ScatterGradientOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &ctx) const override { auto *dRef = ctx.Output(framework::GradVarName("Ref")); diff --git a/paddle/operators/sequence_pool_op.h b/paddle/operators/sequence_pool_op.h index cb80586e88..752d714125 100644 --- a/paddle/operators/sequence_pool_op.h +++ b/paddle/operators/sequence_pool_op.h @@ -38,7 +38,7 @@ enum SeqPoolType { }; template -class SequencePoolKernel : public framework::OpKernel { +class SequencePoolKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto* in = context.Input("X"); @@ -85,7 +85,7 @@ class SequencePoolKernel : public framework::OpKernel { }; template -class SequencePoolGradKernel : public framework::OpKernel { +class SequencePoolGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto* in = context.Input("X"); diff --git a/paddle/operators/sgd_op.h b/paddle/operators/sgd_op.h index f8888f9c36..a3fe330894 100644 --- a/paddle/operators/sgd_op.h +++ b/paddle/operators/sgd_op.h @@ -25,7 +25,7 @@ template ; template -class SGDOpKernel : public framework::OpKernel { +class SGDOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { auto param = ctx.Input("param"); diff --git a/paddle/operators/smooth_l1_loss_op.h b/paddle/operators/smooth_l1_loss_op.h index 0604fb5e1c..39d0070b6c 100644 --- a/paddle/operators/smooth_l1_loss_op.h +++ b/paddle/operators/smooth_l1_loss_op.h @@ -45,7 +45,7 @@ struct SmoothL1LossForward { }; template -class SmoothL1LossKernel : public framework::OpKernel { +class SmoothL1LossKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto* in0 = context.Input("X"); @@ -115,7 +115,7 @@ struct SmoothL1LossBackward { }; template -class SmoothL1LossGradKernel : public framework::OpKernel { +class SmoothL1LossGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto* in0 = context.Input("InsideWeight"); diff --git a/paddle/operators/softmax_op.h b/paddle/operators/softmax_op.h index 7220f486be..9996536454 100644 --- a/paddle/operators/softmax_op.h +++ b/paddle/operators/softmax_op.h @@ -26,7 +26,7 @@ template ; template -class SoftmaxKernel : public framework::OpKernel { +class SoftmaxKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto X = context.Input("X"); @@ -40,7 +40,7 @@ class SoftmaxKernel : public framework::OpKernel { }; template -class SoftmaxGradKernel : public framework::OpKernel { +class SoftmaxGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto Y = context.Input("Y"); diff --git a/paddle/operators/softmax_with_cross_entropy_op.cu b/paddle/operators/softmax_with_cross_entropy_op.cu index 1cf4296dcc..c3086e729e 100644 --- a/paddle/operators/softmax_with_cross_entropy_op.cu +++ b/paddle/operators/softmax_with_cross_entropy_op.cu @@ -53,7 +53,7 @@ __global__ void SoftCrossEntropyGradientKernel(T* logit_grad, } // namespace template -class SoftmaxWithCrossEntropyCUDAKernel : public framework::OpKernel { +class SoftmaxWithCrossEntropyCUDAKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { PADDLE_ENFORCE(platform::is_gpu_place(context.GetPlace()), @@ -73,7 +73,7 @@ class SoftmaxWithCrossEntropyCUDAKernel : public framework::OpKernel { }; template -class SoftmaxWithCrossEntropyGradCUDAKernel : public framework::OpKernel { +class SoftmaxWithCrossEntropyGradCUDAKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { PADDLE_ENFORCE(platform::is_gpu_place(context.GetPlace()), diff --git a/paddle/operators/softmax_with_cross_entropy_op.h b/paddle/operators/softmax_with_cross_entropy_op.h index bf792c1f59..a8b18504e1 100644 --- a/paddle/operators/softmax_with_cross_entropy_op.h +++ b/paddle/operators/softmax_with_cross_entropy_op.h @@ -27,7 +27,7 @@ template ; template -class SoftmaxWithCrossEntropyKernel : public framework::OpKernel { +class SoftmaxWithCrossEntropyKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { PADDLE_ENFORCE(platform::is_cpu_place(context.GetPlace()), @@ -47,7 +47,7 @@ class SoftmaxWithCrossEntropyKernel : public framework::OpKernel { }; template -class SoftmaxWithCrossEntropyGradKernel : public framework::OpKernel { +class SoftmaxWithCrossEntropyGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { const Tensor* out_grad = diff --git a/paddle/operators/split_op.h b/paddle/operators/split_op.h index 860690ee89..bc1b12279e 100644 --- a/paddle/operators/split_op.h +++ b/paddle/operators/split_op.h @@ -21,7 +21,7 @@ namespace paddle { namespace operators { template -class SplitKernel : public framework::OpKernel { +class SplitKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* in = ctx.Input("X"); diff --git a/paddle/operators/squared_l2_distance_op.h b/paddle/operators/squared_l2_distance_op.h index 097ac04fc0..259ef40296 100644 --- a/paddle/operators/squared_l2_distance_op.h +++ b/paddle/operators/squared_l2_distance_op.h @@ -28,7 +28,7 @@ template ; template -class SquaredL2DistanceKernel : public framework::OpKernel { +class SquaredL2DistanceKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto* in0 = context.Input("X"); @@ -68,7 +68,7 @@ class SquaredL2DistanceKernel : public framework::OpKernel { }; template -class SquaredL2DistanceGradKernel : public framework::OpKernel { +class SquaredL2DistanceGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto* in0 = context.Input("sub_result"); diff --git a/paddle/operators/sum_op.h b/paddle/operators/sum_op.h index 0b1e9ebaa3..7e8fbb9e41 100644 --- a/paddle/operators/sum_op.h +++ b/paddle/operators/sum_op.h @@ -22,7 +22,7 @@ template ; template -class SumKernel : public framework::OpKernel { +class SumKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto ins = context.MultiInput("X"); @@ -43,7 +43,7 @@ class SumKernel : public framework::OpKernel { }; template -class SumGradKernel : public framework::OpKernel { +class SumGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto* input = context.Input(framework::GradVarName("Out")); diff --git a/paddle/operators/top_k_op.cu b/paddle/operators/top_k_op.cu index 53fe505b77..7be6932f1e 100644 --- a/paddle/operators/top_k_op.cu +++ b/paddle/operators/top_k_op.cu @@ -279,7 +279,7 @@ __global__ void KeMatrixTopK(T* output, int output_stride, int* indices, } template -class TopkOpCUDAKernel : public framework::OpKernel { +class TopkOpCUDAKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), diff --git a/paddle/operators/top_k_op.h b/paddle/operators/top_k_op.h index ef66acc1d5..4b248faa12 100644 --- a/paddle/operators/top_k_op.h +++ b/paddle/operators/top_k_op.h @@ -28,7 +28,7 @@ template ; template -class TopkKernel : public framework::OpKernel { +class TopkKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { // Get the top k elements of each row of input tensor diff --git a/paddle/operators/transpose_op.h b/paddle/operators/transpose_op.h index ea299dce72..aaa3f47ab5 100644 --- a/paddle/operators/transpose_op.h +++ b/paddle/operators/transpose_op.h @@ -38,7 +38,7 @@ void EigenTranspose(const framework::ExecutionContext& context, } template -class TransposeKernel : public framework::OpKernel { +class TransposeKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto* x = context.Input("X"); @@ -73,7 +73,7 @@ class TransposeKernel : public framework::OpKernel { }; template -class TransposeGradKernel : public framework::OpKernel { +class TransposeGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto* out_grad = diff --git a/paddle/operators/uniform_random_op.cc b/paddle/operators/uniform_random_op.cc index 2771df5608..878d71802a 100644 --- a/paddle/operators/uniform_random_op.cc +++ b/paddle/operators/uniform_random_op.cc @@ -21,7 +21,7 @@ namespace operators { // Use std::random and thrust::random(thrust is a std library in CUDA) to // implement uniform random. template -class CPUUniformRandomKernel : public framework::OpKernel { +class CPUUniformRandomKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* tensor = ctx.Output("Out"); diff --git a/paddle/operators/uniform_random_op.cu b/paddle/operators/uniform_random_op.cu index 6614b53b3f..5612ce9eb1 100644 --- a/paddle/operators/uniform_random_op.cu +++ b/paddle/operators/uniform_random_op.cu @@ -40,7 +40,7 @@ struct UniformGenerator { // Use std::random and thrust::random(thrust is a std library in CUDA) to // implement uniform random. template -class GPUUniformRandomKernel : public framework::OpKernel { +class GPUUniformRandomKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto* tensor = context.Output("Out"); diff --git a/paddle/platform/place.cc b/paddle/platform/place.cc index b31515e1f0..856e54df89 100644 --- a/paddle/platform/place.cc +++ b/paddle/platform/place.cc @@ -47,7 +47,7 @@ bool is_cpu_place(const Place &p) { } bool places_are_same_class(const Place &p1, const Place &p2) { - return is_gpu_place(p1) == is_gpu_place(p2); + return p1.which() == p2.which(); } std::ostream &operator<<(std::ostream &os, const Place &p) { diff --git a/paddle/pybind/tensor_py.h b/paddle/pybind/tensor_py.h index f0d5a6f9ff..10621e90ee 100644 --- a/paddle/pybind/tensor_py.h +++ b/paddle/pybind/tensor_py.h @@ -42,7 +42,7 @@ template struct CastToPyBufferImpl { using CUR_TYPE = typename std::tuple_element>::type; py::buffer_info operator()(framework::Tensor &tensor) { - if (std::type_index(typeid(CUR_TYPE)) == tensor.holder_->type()) { + if (std::type_index(typeid(CUR_TYPE)) == tensor.type()) { auto dim_vec = framework::vectorize(tensor.dims()); std::vector dims_outside; std::vector strides; @@ -56,13 +56,13 @@ struct CastToPyBufferImpl { prod *= dims_outside[i - 1]; } framework::Tensor dst_tensor; - if (paddle::platform::is_gpu_place(tensor.holder_->place())) { + if (paddle::platform::is_gpu_place(tensor.place())) { dst_tensor.CopyFrom(tensor, platform::CPUPlace()); - } else if (paddle::platform::is_cpu_place(tensor.holder_->place())) { + } else if (paddle::platform::is_cpu_place(tensor.place())) { dst_tensor = tensor; } return py::buffer_info( - dst_tensor.mutable_data(dst_tensor.holder_->place()), + dst_tensor.mutable_data(dst_tensor.place()), sizeof(CUR_TYPE), py::format_descriptor::format(), (size_t)framework::arity(dst_tensor.dims()), dims_outside, strides); } else { From 2c05465d2f0f1134610364508fa73281fd44f1ad Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 27 Sep 2017 17:10:07 -0700 Subject: [PATCH 039/342] Fix unit-tests --- paddle/framework/operator_test.cc | 7 +++++-- paddle/operators/gather_op.cc | 10 ++++++++++ paddle/operators/gaussian_random_op.cc | 7 +++++++ paddle/operators/lookup_table_op.cc | 10 ++++++++++ paddle/operators/multiplex_op.cc | 10 ++++++++++ paddle/operators/scatter_op.cc | 10 ++++++++++ paddle/operators/softmax_with_cross_entropy_op.cc | 11 +++++++++++ paddle/operators/uniform_random_op.cc | 7 +++++++ 8 files changed, 70 insertions(+), 2 deletions(-) diff --git a/paddle/framework/operator_test.cc b/paddle/framework/operator_test.cc index 8b4bb01a7b..7f0ec90ade 100644 --- a/paddle/framework/operator_test.cc +++ b/paddle/framework/operator_test.cc @@ -116,10 +116,13 @@ class OpWithKernelTest : public OperatorWithKernel { protected: void InferShape(framework::InferShapeContextBase* ctx) const override {} + DataType IndicateDataType(const ExecutionContext& ctx) const override { + return DataType::FP32; + } }; template -class CPUKernelTest : public OpKernel { +class CPUKernelTest : public OpKernel { public: void Compute(const ExecutionContext& ctx) const { std::cout << "this is cpu kernel" << std::endl; @@ -146,7 +149,7 @@ class OpKernelTestMultiInputsProtoAndCheckerMaker } }; -class CPUKernalMultiInputsTest : public OpKernel { +class CPUKernalMultiInputsTest : public OpKernel { public: void Compute(const ExecutionContext& ctx) const { auto xs = ctx.op().Inputs("xs"); diff --git a/paddle/operators/gather_op.cc b/paddle/operators/gather_op.cc index 0e3cd174ad..da22bd0c52 100644 --- a/paddle/operators/gather_op.cc +++ b/paddle/operators/gather_op.cc @@ -37,6 +37,11 @@ class GatherOp : public framework::OperatorWithKernel { output_dims[0] = batch_size; ctx->SetOutputDim("Out", output_dims); } + + framework::DataType IndicateDataType( + const framework::ExecutionContext& ctx) const override { + return framework::ToDataType(ctx.Input("X")->type()); + } }; class GatherGradOp : public framework::OperatorWithKernel { @@ -47,6 +52,11 @@ class GatherGradOp : public framework::OperatorWithKernel { void InferShape(framework::InferShapeContextBase* ctx) const override { ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X")); } + + framework::DataType IndicateDataType( + const framework::ExecutionContext& ctx) const override { + return framework::ToDataType(ctx.Input("X")->type()); + } }; class GatherOpMaker : public framework::OpProtoAndCheckerMaker { diff --git a/paddle/operators/gaussian_random_op.cc b/paddle/operators/gaussian_random_op.cc index fc340c181c..5cd2c7d2c0 100644 --- a/paddle/operators/gaussian_random_op.cc +++ b/paddle/operators/gaussian_random_op.cc @@ -56,6 +56,11 @@ class GaussianRandomOp : public framework::OperatorWithKernel { "dims can be one int or array. dims must be set."); ctx->SetOutputDim("Out", framework::make_ddim(temp)); } + + framework::DataType IndicateDataType( + const framework::ExecutionContext& ctx) const override { + return static_cast(Attr("data_type")); + } }; class GaussianRandomOpMaker : public framework::OpProtoAndCheckerMaker { @@ -76,6 +81,8 @@ Use to initialize tensor with gaussian random generator. "Random seed of generator." "0 means use system wide seed") .SetDefault(0); + AddAttr("data_type", "output data type") + .SetDefault(framework::DataType::FP32); } }; diff --git a/paddle/operators/lookup_table_op.cc b/paddle/operators/lookup_table_op.cc index 9b1314bfba..929008fbcb 100644 --- a/paddle/operators/lookup_table_op.cc +++ b/paddle/operators/lookup_table_op.cc @@ -36,6 +36,11 @@ class LookupTableOp : public framework::OperatorWithKernel { ctx->SetOutputDim("Out", {ids_dims[0], table_dims[1]}); ctx->ShareLoD("Ids", /*->*/ "Out"); } + + framework::DataType IndicateDataType( + const framework::ExecutionContext& ctx) const override { + return framework::ToDataType(ctx.Input("W")->type()); + } }; class LookupTableOpMaker : public framework::OpProtoAndCheckerMaker { @@ -69,6 +74,11 @@ class LookupTableOpGrad : public framework::OperatorWithKernel { auto table_dims = ctx->GetInputDim("W"); ctx->SetOutputDim(framework::GradVarName("W"), table_dims); } + + framework::DataType IndicateDataType( + const framework::ExecutionContext& ctx) const override { + return framework::ToDataType(ctx.Input("W")->type()); + } }; } // namespace operators diff --git a/paddle/operators/multiplex_op.cc b/paddle/operators/multiplex_op.cc index 9896d269cc..a069127a19 100644 --- a/paddle/operators/multiplex_op.cc +++ b/paddle/operators/multiplex_op.cc @@ -50,6 +50,11 @@ class MultiplexOp : public framework::OperatorWithKernel { } ctx->SetOutputDim("Out", in_dim); } + + framework::DataType IndicateDataType( + const framework::ExecutionContext& ctx) const override { + return framework::ToDataType(ctx.MultiInput("X")[0]->type()); + } }; class MultiplexOpMaker : public framework::OpProtoAndCheckerMaker { @@ -99,6 +104,11 @@ class MultiplexGradOp : public framework::OperatorWithKernel { } ctx->SetOutputsDim(framework::GradVarName("X"), d_ins); } + + framework::DataType IndicateDataType( + const framework::ExecutionContext& ctx) const override { + return framework::ToDataType(ctx.MultiInput("X")[0]->type()); + } }; } // namespace operators diff --git a/paddle/operators/scatter_op.cc b/paddle/operators/scatter_op.cc index 3fc4a39ebc..619acfc8b6 100644 --- a/paddle/operators/scatter_op.cc +++ b/paddle/operators/scatter_op.cc @@ -48,6 +48,11 @@ class ScatterOp : public framework::OperatorWithKernel { } ctx->SetOutputDim("Out", ref_dims); } + + framework::DataType IndicateDataType( + const framework::ExecutionContext& ctx) const override { + return framework::ToDataType(ctx.Input("X")->type()); + } }; class ScatterGradOp : public framework::OperatorWithKernel { @@ -60,6 +65,11 @@ class ScatterGradOp : public framework::OperatorWithKernel { ctx->GetInputDim("Updates")); ctx->SetOutputDim(framework::GradVarName("Ref"), ctx->GetInputDim("Ref")); } + + framework::DataType IndicateDataType( + const framework::ExecutionContext& ctx) const override { + return framework::ToDataType(ctx.Input("X")->type()); + } }; class ScatterOpMaker : public framework::OpProtoAndCheckerMaker { diff --git a/paddle/operators/softmax_with_cross_entropy_op.cc b/paddle/operators/softmax_with_cross_entropy_op.cc index e2299b2544..de7c532421 100644 --- a/paddle/operators/softmax_with_cross_entropy_op.cc +++ b/paddle/operators/softmax_with_cross_entropy_op.cc @@ -13,6 +13,7 @@ limitations under the License. */ #include "paddle/operators/softmax_with_cross_entropy_op.h" +#include namespace paddle { namespace operators { @@ -115,6 +116,11 @@ class SoftmaxWithCrossEntropyOp : public framework::OperatorWithKernel { ctx->ShareLoD("Logits", /*->*/ "Softmax"); ctx->ShareLoD("Logits", /*->*/ "Loss"); } + + framework::DataType IndicateDataType( + const framework::ExecutionContext& ctx) const override { + return framework::ToDataType(ctx.Input("Logits")->type()); + } }; class SoftmaxWithCrossEntropyOpGrad : public framework::OperatorWithKernel { @@ -149,6 +155,11 @@ class SoftmaxWithCrossEntropyOpGrad : public framework::OperatorWithKernel { ctx->SetOutputDim(framework::GradVarName("Logits"), ctx->GetInputDim("Softmax")); } + + framework::DataType IndicateDataType( + const framework::ExecutionContext& ctx) const override { + return framework::ToDataType(ctx.Input("Logits")->type()); + } }; } // namespace operators diff --git a/paddle/operators/uniform_random_op.cc b/paddle/operators/uniform_random_op.cc index 878d71802a..97b1d0bed4 100644 --- a/paddle/operators/uniform_random_op.cc +++ b/paddle/operators/uniform_random_op.cc @@ -62,6 +62,11 @@ class UniformRandomOp : public framework::OperatorWithKernel { } ctx->SetOutputDim("Out", framework::make_ddim(temp)); } + + framework::DataType IndicateDataType( + const framework::ExecutionContext& ctx) const override { + return static_cast(Attr("data_type")); + } }; class UniformRandomOpMaker : public framework::OpProtoAndCheckerMaker { @@ -80,6 +85,8 @@ Used to initialize tensor with uniform random generator. "Random seed of uniform random. " "0 means generate a seed by system") .SetDefault(0); + AddAttr("data_type", "output tensor data type") + .SetDefault(framework::DataType::FP32); } }; } // namespace operators From f1913d46972b11d852f42072eedd5485c721d2c5 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 27 Sep 2017 17:28:12 -0700 Subject: [PATCH 040/342] Change registry, test register double kernel --- paddle/framework/op_registry.h | 34 ++++++++++++++++++++++---- paddle/operators/elementwise_mul_op.cc | 6 +++-- paddle/operators/elementwise_mul_op.cu | 6 +++-- 3 files changed, 37 insertions(+), 9 deletions(-) diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h index 0db67e4c67..804f901dfa 100644 --- a/paddle/framework/op_registry.h +++ b/paddle/framework/op_registry.h @@ -100,14 +100,38 @@ class OpRegistrar : public Registrar { } }; -template +template +struct OpKernelRegistrarFunctor; + +template +struct OpKernelRegistrarFunctor { + using KT = typename std::tuple_element>::type; + + void operator()(const char* op_type) const { + using T = typename KT::ELEMENT_TYPE; + OperatorWithKernel::OpKernelKey key(ToDataType(std::type_index(typeid(T))), + PlaceType()); + OperatorWithKernel::AllOpKernels()[op_type][key].reset(new KT); + + constexpr auto size = std::tuple_size>::value; + OpKernelRegistrarFunctor + func; + func(op_type); + } +}; + +template +struct OpKernelRegistrarFunctor { + void operator()(const char* op_type) const {} +}; + +// User can register many kernel in one place. The data type could be different. +template class OpKernelRegistrar : public Registrar { public: explicit OpKernelRegistrar(const char* op_type) { - using T = typename KernelType::ELEMENT_TYPE; - OperatorWithKernel::OpKernelKey key(ToDataType(std::type_index(typeid(T))), - PlaceType()); - OperatorWithKernel::AllOpKernels()[op_type][key].reset(new KernelType); + OpKernelRegistrarFunctor func; + func(op_type); } }; diff --git a/paddle/operators/elementwise_mul_op.cc b/paddle/operators/elementwise_mul_op.cc index bda5dfe03e..da7765aa6a 100644 --- a/paddle/operators/elementwise_mul_op.cc +++ b/paddle/operators/elementwise_mul_op.cc @@ -36,7 +36,9 @@ REGISTER_OP(elementwise_mul, ops::ElementwiseOp, ops::ElementwiseMulOpMaker, elementwise_mul_grad, ops::ElementwiseOpGrad); REGISTER_OP_CPU_KERNEL( elementwise_mul, - ops::ElementwiseMulKernel); + ops::ElementwiseMulKernel, + ops::ElementwiseMulKernel); REGISTER_OP_CPU_KERNEL( elementwise_mul_grad, - ops::ElementwiseMulGradKernel); + ops::ElementwiseMulGradKernel, + ops::ElementwiseMulGradKernel); diff --git a/paddle/operators/elementwise_mul_op.cu b/paddle/operators/elementwise_mul_op.cu index da08a75596..056f081d3e 100644 --- a/paddle/operators/elementwise_mul_op.cu +++ b/paddle/operators/elementwise_mul_op.cu @@ -19,7 +19,9 @@ namespace ops = paddle::operators; REGISTER_OP_GPU_KERNEL( elementwise_mul, - ops::ElementwiseMulKernel); + ops::ElementwiseMulKernel, + ops::ElementwiseMulKernel); REGISTER_OP_GPU_KERNEL( elementwise_mul_grad, - ops::ElementwiseMulGradKernel); + ops::ElementwiseMulGradKernel, + ops::ElementwiseMulGradKernel); From 1c710053292394154d41db6c44ea808a8feaf65c Mon Sep 17 00:00:00 2001 From: Helin Wang Date: Sun, 10 Sep 2017 15:03:58 -0700 Subject: [PATCH 041/342] Design Doc: Session --- doc/design/session.md | 62 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 62 insertions(+) create mode 100644 doc/design/session.md diff --git a/doc/design/session.md b/doc/design/session.md new file mode 100644 index 0000000000..2e8c0ece7a --- /dev/null +++ b/doc/design/session.md @@ -0,0 +1,62 @@ +# Design Doc: Session + +## Abstract + +This design doc proposes to have an object called *Session* which +encapsulates the environment in which the computation graph is +executed. + +## Background + +A computation graph is executed in an environment which contains the +[scope](./scope.md) and other states. PaddlePaddle used to only have +an implicit global session on which `paddle.eval()` is executed. + +This has the limitation that the user can not create two independent +environments. For example, in reinforcement learning, the user may +want to have a stale model for inference and a fresh model for +training, and only replace the stale model with the fresh model +periodically. Also, we have no concept that can encapsulate a remote +environment that could execute a computation graph. + +## Session + +Session is an object that owns all runtime states such as scope, +reader OP's file handles, connection to a remote PaddlePaddle cluster, +etc. + +Session has two methods: `eval` and `close`. `eval` executes the +target OP in a given graph, and `close` closes the session and +releases all related resources: + +```Python +a = paddle.constant(1.0) +b = paddle.constant(2.0) +c = a + b +sess = paddle.session() +sess.eval(c) +sess.close() +``` + +### Remote Session + +Paddle Cloud will support user creating a remote session pointing to +the Paddle Cloud cluster. The user can send the computation graph to +be executed on the Paddle Cloud. In this way, the user can control a +cluster from her local computer: + +```Python +reader = paddle.reader.recordio("/pfs/home/peter/mnist-train-*") # data stored on Paddle Cloud +image = reader.column(0) +label = reader.column(1) +fc1 = paddle.op.fc(image, size=256, act="sigmoid") +fc2 = paddle.op.fc(fc1, size=10, act="softmax") +cost = paddle.op.cross_entropy(fc2) +opt = paddle.optimizer.sgd(cost) + +remote_config = ... # remote configuration such as endpoint, number of nodes and authentication. +sess = paddle.remoteSession(remote_config) +for i in range(1000): + sess.eval(opt) +sess.close() +``` From 94dfd8649e06108bc0c03e6f53eb43ab13f30332 Mon Sep 17 00:00:00 2001 From: Helin Wang Date: Mon, 25 Sep 2017 16:02:58 -0700 Subject: [PATCH 042/342] fix according to comments --- doc/design/session.md | 34 ++++++++++++++++++++++------------ 1 file changed, 22 insertions(+), 12 deletions(-) diff --git a/doc/design/session.md b/doc/design/session.md index 2e8c0ece7a..dc034c3906 100644 --- a/doc/design/session.md +++ b/doc/design/session.md @@ -6,26 +6,36 @@ This design doc proposes to have an object called *Session* which encapsulates the environment in which the computation graph is executed. +The session is able to distinguish running a graph locally or +remotely, using CPU only or using one or more GPUs. Different sessions +have different runtime environments such as [scopes](./scope.md) and +device contexts. + + ## Background -A computation graph is executed in an environment which contains the -[scope](./scope.md) and other states. PaddlePaddle used to only have -an implicit global session on which `paddle.eval()` is executed. +A computation graph runs in an environment which contains states such +as the scope and device contexts. The current design has an implicit +global session on which `paddle.eval()` is executed. + +Since the user is not able to explicitly switch between runtime +environments such as the scope and the device contexts, the user +cannot run a topology in two independent environments. For example, in +reinforcement learning, the user may want to have a stale model for +inference and a fresh model for training, and only replace the stale +model with the fresh model periodically. Also, we have no concept that +can encapsulate a remote environment that could execute a computation +graph. -This has the limitation that the user can not create two independent -environments. For example, in reinforcement learning, the user may -want to have a stale model for inference and a fresh model for -training, and only replace the stale model with the fresh model -periodically. Also, we have no concept that can encapsulate a remote -environment that could execute a computation graph. +We need a session concept to address above issues. ## Session -Session is an object that owns all runtime states such as scope, +A session is an object that owns all runtime states such as scope, reader OP's file handles, connection to a remote PaddlePaddle cluster, etc. -Session has two methods: `eval` and `close`. `eval` executes the +The session has two methods: `eval` and `close`. `eval` executes the target OP in a given graph, and `close` closes the session and releases all related resources: @@ -51,7 +61,7 @@ image = reader.column(0) label = reader.column(1) fc1 = paddle.op.fc(image, size=256, act="sigmoid") fc2 = paddle.op.fc(fc1, size=10, act="softmax") -cost = paddle.op.cross_entropy(fc2) +cost = paddle.op.cross_entropy(fc2, label) opt = paddle.optimizer.sgd(cost) remote_config = ... # remote configuration such as endpoint, number of nodes and authentication. From f24b5dffc42063ddfe28229fdb242c3df4ec1aa7 Mon Sep 17 00:00:00 2001 From: Helin Wang Date: Tue, 26 Sep 2017 18:03:37 -0700 Subject: [PATCH 043/342] Update Session design doc --- doc/design/refactor/session.md | 160 +++++++++++++++++++++++++++++++++ doc/design/session.md | 72 --------------- 2 files changed, 160 insertions(+), 72 deletions(-) create mode 100644 doc/design/refactor/session.md delete mode 100644 doc/design/session.md diff --git a/doc/design/refactor/session.md b/doc/design/refactor/session.md new file mode 100644 index 0000000000..5f58148f01 --- /dev/null +++ b/doc/design/refactor/session.md @@ -0,0 +1,160 @@ +# Design Doc: Session + +## Abstract + +The *session* object encapsulates the environment in which the +computation graph is executed. + +We will have *local* session and *remote* session, they offer the +same [interface](#interface). The local session encapsulates the local +runtime environment and the remote session encapsulates the cluster +runtime envrionment. + +The local runtime envrionment contains: + +1. computation devices (i.e., CPU, GPU) handles, and +1. the [scope](../scope.md) which holds all variables. + +The remote runtime envrionment contains: + +1. computation devices (i.e., CPU and GPU on node 0, 1) in a cluster, + and +1. the distributed [scope](../scope.md) in a cluster which holds all + variables. + +The user can create a remote session on Paddle Cloud and evaluate the +computation graph with it. In this way, the user can control the +remote computation resource in a cluster from his local computer. + + +## Background + +The current design has an implicit global session on which +`paddle.eval()` is executed. The pain point is: + +Since the user is not able to explicitly switch between runtime +environments such as the scope and the device contexts, the user +cannot run a topology in two independent environments. + +For example, in reinforcement learning, the user may want to have a +stale model for inference and a fresh model for training, and only +replace the stale model with the fresh model periodically. + +Furthermore, we have no concept that encapsulates a remote environment +that executes a computation graph. + +We need the session object to address above issues. + + +## Session + +A session is an object that owns the runtime environment. All +computations are executed through `session.eval`. + + +### Interface + +``` +eval( + targets, + feed_dict=None, +) +``` + +Evaluates the target Operations or Variables in `targets`. + +- *targets*: the evaluation targets. Can be a single Operation or + Variable, or a list with the Operations or Variables as elements. + + The value returned by `eval()` has the same shape as the `target` + argument. + + The computation graph is implicitly inferred from the targets. + +- *feed_dict*: a dictionary that contains the tensors which overrides + the edges of the computation graph. + +``` +close() +``` + +Closes the session. Calling this method releases the scope. + + +### Create a Local Session + +``` +session( + gpu_ids=None +) +``` + +Creates a new session. One session owns one scope, so creating +multiple sessions will create different scopes. + +- *gpu_ids*: a single `int` or a list of `int` of the GPU IDs to be + used as the computation devices. If not specified, all avaiable GPUs + will be used. + + +#### Example + +```Python +a = paddle.constant(1.0) +b = paddle.constant(2.0) +c = a + b +sess = paddle.session(gpu_ids=[0,1]) +sess.eval(c) +sess.close() +``` + +### Create a Remote Session + +``` +create_cloud_job( + name, + num_trainer, + mem_per_trainer, + gpu_per_trainer, + cpu_per_trainer, + num_ps, + mem_per_ps, + cpu_per_ps, +) +``` + +Creates a Paddle Cloud job. Fails if the job name exists. + +``` +get_cloud_job( + name +) +``` + +Gets a Paddle Cloud job. + +``` +remote_session( + job +) +``` + +- *job*: the Paddle Cloud job. + +#### Example + +```Python +reader = paddle.reader.recordio("/pfs/home/peter/mnist-train-*") # data stored on Paddle Cloud +image = reader.column(0) +label = reader.column(1) +fc1 = paddle.op.fc(image, size=256, act="sigmoid") +fc2 = paddle.op.fc(fc1, size=10, act="softmax") +cost = paddle.op.cross_entropy(fc2, label) +opt = paddle.optimizer.sgd(cost) + +job = paddle.create_cloud_job("test", 3, "1G", 1, 1, 2, "1G", 1) +sess = paddle.remote_ession(job) +for i in range(1000): + sess.eval(opt) +sess.close() +``` diff --git a/doc/design/session.md b/doc/design/session.md deleted file mode 100644 index dc034c3906..0000000000 --- a/doc/design/session.md +++ /dev/null @@ -1,72 +0,0 @@ -# Design Doc: Session - -## Abstract - -This design doc proposes to have an object called *Session* which -encapsulates the environment in which the computation graph is -executed. - -The session is able to distinguish running a graph locally or -remotely, using CPU only or using one or more GPUs. Different sessions -have different runtime environments such as [scopes](./scope.md) and -device contexts. - - -## Background - -A computation graph runs in an environment which contains states such -as the scope and device contexts. The current design has an implicit -global session on which `paddle.eval()` is executed. - -Since the user is not able to explicitly switch between runtime -environments such as the scope and the device contexts, the user -cannot run a topology in two independent environments. For example, in -reinforcement learning, the user may want to have a stale model for -inference and a fresh model for training, and only replace the stale -model with the fresh model periodically. Also, we have no concept that -can encapsulate a remote environment that could execute a computation -graph. - -We need a session concept to address above issues. - -## Session - -A session is an object that owns all runtime states such as scope, -reader OP's file handles, connection to a remote PaddlePaddle cluster, -etc. - -The session has two methods: `eval` and `close`. `eval` executes the -target OP in a given graph, and `close` closes the session and -releases all related resources: - -```Python -a = paddle.constant(1.0) -b = paddle.constant(2.0) -c = a + b -sess = paddle.session() -sess.eval(c) -sess.close() -``` - -### Remote Session - -Paddle Cloud will support user creating a remote session pointing to -the Paddle Cloud cluster. The user can send the computation graph to -be executed on the Paddle Cloud. In this way, the user can control a -cluster from her local computer: - -```Python -reader = paddle.reader.recordio("/pfs/home/peter/mnist-train-*") # data stored on Paddle Cloud -image = reader.column(0) -label = reader.column(1) -fc1 = paddle.op.fc(image, size=256, act="sigmoid") -fc2 = paddle.op.fc(fc1, size=10, act="softmax") -cost = paddle.op.cross_entropy(fc2, label) -opt = paddle.optimizer.sgd(cost) - -remote_config = ... # remote configuration such as endpoint, number of nodes and authentication. -sess = paddle.remoteSession(remote_config) -for i in range(1000): - sess.eval(opt) -sess.close() -``` From ae3dca770c34d14ec81587eef9798c6a7070ec13 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 27 Sep 2017 19:15:51 -0700 Subject: [PATCH 044/342] Fix CI --- paddle/operators/scatter_op.cc | 4 ++-- paddle/operators/softmax_with_cross_entropy_op.cc | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/paddle/operators/scatter_op.cc b/paddle/operators/scatter_op.cc index 619acfc8b6..cadd8841b6 100644 --- a/paddle/operators/scatter_op.cc +++ b/paddle/operators/scatter_op.cc @@ -51,7 +51,7 @@ class ScatterOp : public framework::OperatorWithKernel { framework::DataType IndicateDataType( const framework::ExecutionContext& ctx) const override { - return framework::ToDataType(ctx.Input("X")->type()); + return framework::ToDataType(ctx.Input("Ref")->type()); } }; @@ -68,7 +68,7 @@ class ScatterGradOp : public framework::OperatorWithKernel { framework::DataType IndicateDataType( const framework::ExecutionContext& ctx) const override { - return framework::ToDataType(ctx.Input("X")->type()); + return framework::ToDataType(ctx.Input("Ref")->type()); } }; diff --git a/paddle/operators/softmax_with_cross_entropy_op.cc b/paddle/operators/softmax_with_cross_entropy_op.cc index de7c532421..a76489871f 100644 --- a/paddle/operators/softmax_with_cross_entropy_op.cc +++ b/paddle/operators/softmax_with_cross_entropy_op.cc @@ -158,7 +158,8 @@ class SoftmaxWithCrossEntropyOpGrad : public framework::OperatorWithKernel { framework::DataType IndicateDataType( const framework::ExecutionContext& ctx) const override { - return framework::ToDataType(ctx.Input("Logits")->type()); + return framework::ToDataType( + ctx.Input(framework::GradVarName("Loss"))->type()); } }; From e33b411221577e053ad461350189198284f0628f Mon Sep 17 00:00:00 2001 From: guosheng Date: Thu, 28 Sep 2017 10:53:20 +0800 Subject: [PATCH 045/342] Adapt reduce_op according to up-to-date dev --- paddle/operators/reduce_op.cc | 41 ++++++++++++++++++----------------- 1 file changed, 21 insertions(+), 20 deletions(-) diff --git a/paddle/operators/reduce_op.cc b/paddle/operators/reduce_op.cc index 61b33d4bbd..3ef443d1c7 100644 --- a/paddle/operators/reduce_op.cc +++ b/paddle/operators/reduce_op.cc @@ -24,20 +24,20 @@ class ReduceOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(const framework::InferShapeContext &ctx) const override { - PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), - "Input(X) of ReduceOp should not be null."); - PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"), - "Output(Out) of ReduceOp should not be null."); - auto x_dims = ctx.Input("X")->dims(); + void InferShape(framework::InferShapeContextBase *ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("X"), + "Input(X) of ReduceOp should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("Out"), + "Output(Out) of ReduceOp should not be null."); + auto x_dims = ctx->GetInputDim("X"); auto x_rank = x_dims.size(); PADDLE_ENFORCE_LE(x_rank, 6, "Tensors with rank at most 6 are supported."); - int dim = ctx.Attr("dim"); + int dim = ctx->Attrs().Get("dim"); if (dim < 0) dim = x_rank + dim; PADDLE_ENFORCE_LT( dim, x_rank, "The dim should be in the range [-rank(input), rank(input))."); - bool keep_dim = ctx.Attr("keep_dim"); + bool keep_dim = ctx->Attrs().Get("keep_dim"); auto dims_vector = vectorize(x_dims); if (keep_dim || x_rank == 1) { dims_vector[dim] = 1; @@ -45,10 +45,10 @@ class ReduceOp : public framework::OperatorWithKernel { dims_vector.erase(dims_vector.begin() + dim); } auto out_dims = framework::make_ddim(dims_vector); - ctx.Output("Out")->Resize(out_dims); + ctx->SetOutputDim("Out", out_dims); if (dim != 0) { - // Only pass LoD when not reducing on the first dim - ctx.ShareLoD("X", /*->*/ "Out"); + // Only pass LoD when not reducing on the first dim. + ctx->ShareLoD("X", /*->*/ "Out"); } } }; @@ -58,21 +58,22 @@ class ReduceGradOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(const framework::InferShapeContext &ctx) const override { - PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) should not be null."); - PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")), - "Input(Out@GRAD) should not be null."); - auto x_dims = ctx.Input("X")->dims(); + void InferShape(framework::InferShapeContextBase *ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null."); + PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), + "Input(Out@GRAD) should not be null."); + auto x_dims = ctx->GetInputDim("X"); auto x_rank = x_dims.size(); PADDLE_ENFORCE_LE(x_rank, 6, "Tensors with rank at most 6 are supported."); - int dim = ctx.Attr("dim"); + int dim = ctx->Attrs().Get("dim"); if (dim < 0) dim = x_rank + dim; PADDLE_ENFORCE_LT( dim, x_rank, "The dim should be in the range [-rank(input), rank(input))."); - auto *x_grad = - ctx.Output(framework::GradVarName("X")); - if (x_grad) x_grad->Resize(x_dims); + auto x_grad_name = framework::GradVarName("X"); + if (ctx->HasOutput(x_grad_name)) { + ctx->SetOutputDim(x_grad_name, x_dims); + } } }; From 05ed8ee8ab35c5861a187deeca076322a2f9de34 Mon Sep 17 00:00:00 2001 From: Liu Yiqun Date: Thu, 28 Sep 2017 06:30:34 +0000 Subject: [PATCH 046/342] Add SoftmaxGradFunctor, and use SoftmaxGradFunctor in softmax_op instead. --- paddle/operators/CMakeLists.txt | 4 +-- paddle/operators/math/CMakeLists.txt | 9 +++-- paddle/operators/math/softmax.cc | 19 ++++++----- paddle/operators/math/softmax.cu | 19 ++++++----- paddle/operators/math/softmax.h | 49 +++++++++++++++++++++++----- paddle/operators/softmax_op.h | 31 +++++------------- 6 files changed, 74 insertions(+), 57 deletions(-) diff --git a/paddle/operators/CMakeLists.txt b/paddle/operators/CMakeLists.txt index e56895c63a..da39c2cb55 100644 --- a/paddle/operators/CMakeLists.txt +++ b/paddle/operators/CMakeLists.txt @@ -94,8 +94,8 @@ set(DEPS_OPS op_library(recurrent_op SRCS recurrent_op.cc rnn/recurrent_op_utils.cc DEPS framework_proto tensor net_op) op_library(cond_op SRCS cond_op.cc DEPS framework_proto tensor operator net_op) -op_library(cross_entropy_op DEPS cross_entropy_function) -op_library(softmax_with_cross_entropy_op DEPS cross_entropy_function softmax_function) +op_library(cross_entropy_op DEPS cross_entropy) +op_library(softmax_with_cross_entropy_op DEPS cross_entropy softmax) list(REMOVE_ITEM GENERAL_OPS ${DEPS_OPS}) foreach(src ${GENERAL_OPS}) diff --git a/paddle/operators/math/CMakeLists.txt b/paddle/operators/math/CMakeLists.txt index 91ae3d49f1..b60e945aa8 100644 --- a/paddle/operators/math/CMakeLists.txt +++ b/paddle/operators/math/CMakeLists.txt @@ -1,15 +1,14 @@ if(WITH_GPU) nv_library(math_function SRCS math_function.cc math_function.cu im2col.cc im2col.cu DEPS cblas device_context operator) - nv_library(softmax_function SRCS softmax.cc softmax.cu - DEPS operator) - nv_library(cross_entropy_function SRCS cross_entropy.cc cross_entropy.cu + nv_library(softmax SRCS softmax.cc softmax.cu DEPS operator) + nv_library(cross_entropy SRCS cross_entropy.cc cross_entropy.cu DEPS operator) else() cc_library(math_function SRCS math_function.cc im2col.cc DEPS cblas device_context operator) - cc_library(softmax_function SRCS softmax.cc DEPS operator) - cc_library(cross_entropy_function SRCS cross_entropy.cc DEPS operator) + cc_library(softmax SRCS softmax.cc DEPS operator) + cc_library(cross_entropy SRCS cross_entropy.cc DEPS operator) endif() nv_test(math_function_test SRCS math_function_test.cc DEPS math_function tensor) diff --git a/paddle/operators/math/softmax.cc b/paddle/operators/math/softmax.cc index ac9f3c4bf6..0ba8197ab8 100644 --- a/paddle/operators/math/softmax.cc +++ b/paddle/operators/math/softmax.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/math/softmax.h" @@ -19,6 +19,7 @@ namespace operators { namespace math { template class SoftmaxFunctor; +template class SoftmaxGradFunctor; } // namespace math } // namespace operators diff --git a/paddle/operators/math/softmax.cu b/paddle/operators/math/softmax.cu index 4c3df0550e..99f988d51e 100644 --- a/paddle/operators/math/softmax.cu +++ b/paddle/operators/math/softmax.cu @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #define EIGEN_USE_GPU @@ -21,6 +21,7 @@ namespace operators { namespace math { template class SoftmaxFunctor; +template class SoftmaxGradFunctor; } // namespace math } // namespace operators diff --git a/paddle/operators/math/softmax.h b/paddle/operators/math/softmax.h index 3d2f0d0aec..3c05a86bce 100644 --- a/paddle/operators/math/softmax.h +++ b/paddle/operators/math/softmax.h @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once #include "paddle/framework/eigen.h" @@ -68,6 +68,37 @@ class SoftmaxFunctor { .broadcast(one_by_class)); } }; + +template +class SoftmaxGradFunctor { + public: + void operator()(const framework::ExecutionContext& context, + const framework::Tensor* y, const framework::Tensor* y_grad, + framework::Tensor* x_grad) { + auto softmax = EigenMatrix::From(*y); + auto softmax_grad = EigenMatrix::From(*y_grad); + auto logits_grad = EigenMatrix::From(*x_grad); + + const int kBatchDim = 0; + const int kClassDim = 1; + + const int batch_size = softmax.dimension(kBatchDim); + const int num_classes = softmax.dimension(kClassDim); + + Eigen::DSizes along_class(kClassDim); + Eigen::DSizes batch_by_one(batch_size, 1); + Eigen::DSizes one_by_class(1, num_classes); + + auto dot = (softmax * softmax_grad) + .sum(along_class) + .eval() + .reshape(batch_by_one) + .broadcast(one_by_class); + logits_grad.device(context.GetEigenDevice()) = + (softmax_grad - dot) * softmax; + } +}; + } // namespace math } // namespace operators } // namespace paddle diff --git a/paddle/operators/softmax_op.h b/paddle/operators/softmax_op.h index 7220f486be..3d35507a9a 100644 --- a/paddle/operators/softmax_op.h +++ b/paddle/operators/softmax_op.h @@ -29,8 +29,8 @@ template class SoftmaxKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { - auto X = context.Input("X"); - auto Y = context.Output("Y"); + auto* X = context.Input("X"); + auto* Y = context.Output("Y"); // allocate memory on device. Y->mutable_data(context.GetPlace()); @@ -43,29 +43,14 @@ template class SoftmaxGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { - auto Y = context.Input("Y"); - auto dY = context.Input(framework::GradVarName("Y")); - auto dX = context.Output(framework::GradVarName("X")); - dX->mutable_data(context.GetPlace()); - - const int batch_size = Y->dims()[0]; - const int class_num = Y->dims()[1]; - - Eigen::DSizes along_class(1); - Eigen::DSizes batch_by_one(batch_size, 1); - Eigen::DSizes one_by_class(1, class_num); + auto* Y = context.Input("Y"); + auto* dY = context.Input(framework::GradVarName("Y")); + auto* dX = context.Output(framework::GradVarName("X")); - auto Y_eigen = EigenMatrix::From(*Y); - auto dY_eigen = EigenMatrix::From(*dY); - auto dX_eigen = EigenMatrix::From(*dX); - auto place = context.GetEigenDevice(); + // allocate memory on device. + dX->mutable_data(context.GetPlace()); - auto dot = (Y_eigen * dY_eigen) - .sum(along_class) - .eval() - .reshape(batch_by_one) - .broadcast(one_by_class); - dX_eigen.device(place) = (dY_eigen - dot) * Y_eigen; + math::SoftmaxGradFunctor()(context, Y, dY, dX); } }; From 03897f251dc40ae3ded98a84caa3b40fed164de9 Mon Sep 17 00:00:00 2001 From: Liu Yiqun Date: Thu, 28 Sep 2017 06:39:23 +0000 Subject: [PATCH 047/342] Finish the SequenceSoftmaxGradKernel, using SoftmaxGradFunctor. --- paddle/operators/mul_op.cc | 32 ++++---- paddle/operators/sequence_softmax_op.cc | 79 ++++++++++++------- paddle/operators/sequence_softmax_op.h | 53 ++++++++++--- .../tests/test_sequence_softmax_op.py | 5 +- 4 files changed, 111 insertions(+), 58 deletions(-) diff --git a/paddle/operators/mul_op.cc b/paddle/operators/mul_op.cc index 9858c4d9c2..3c8fe04d2e 100644 --- a/paddle/operators/mul_op.cc +++ b/paddle/operators/mul_op.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include "paddle/operators/mul_op.h" @@ -35,12 +35,14 @@ class MulOp : public framework::OperatorWithKernel { int x_num_col_dims = ctx->Attrs().Get("x_num_col_dims"); int y_num_col_dims = ctx->Attrs().Get("y_num_col_dims"); - PADDLE_ENFORCE(x_dims.size() > x_num_col_dims, - "The rank of input tensor X should be larger than " - "`mul_op`'s `x_num_col_dims`."); - PADDLE_ENFORCE(y_dims.size() > y_num_col_dims, - "The rank of input tensor Y should be larger than " - "`mul_op`'s `y_num_col_dims`."); + PADDLE_ENFORCE_GT( + x_dims.size(), x_num_col_dims, + "The input tensor X's rank of MulOp should be larger than " + "x_num_col_dims."); + PADDLE_ENFORCE_GT( + y_dims.size(), y_num_col_dims, + "The input tensor Y's rank of MulOp should be larger than " + "y_num_col_dims."); auto x_mat_dims = framework::flatten_to_2d(x_dims, x_num_col_dims); auto y_mat_dims = framework::flatten_to_2d(y_dims, y_num_col_dims); diff --git a/paddle/operators/sequence_softmax_op.cc b/paddle/operators/sequence_softmax_op.cc index 58ef77b1a3..e85b587a94 100644 --- a/paddle/operators/sequence_softmax_op.cc +++ b/paddle/operators/sequence_softmax_op.cc @@ -22,41 +22,42 @@ class SequenceSoftmaxOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(const framework::InferShapeContext &ctx) const override { - PADDLE_ENFORCE_NOT_NULL( - ctx.InputVar("X"), "Input(X) of SequenceSoftmaxOp should not be null."); - PADDLE_ENFORCE_NOT_NULL( - ctx.OutputVar("Out"), - "Output(Out) of SequenceSoftmaxOp should not be null."); - - auto *x = ctx.Input("X"); - auto lod = x->lod(); - auto dims = x->dims(); - PADDLE_ENFORCE_GE( - dims[0], - /* batch_size */ static_cast(lod[0].size() - 1), - "The first dimension of Input(X) should be larger than batch size."); - - const size_t level = lod.size() - 1; - PADDLE_ENFORCE_EQ(x->numel(), static_cast(lod[level].back()), - "The width of each timestep in Input(X) of " - "SequenceSoftmaxOp should be 1."); - - std::cout << DebugString() << std::endl; - - ctx.Output("Out")->Resize({dims}); + void InferShape(framework::InferShapeContextBase* ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("X"), + "Input(X) of SequenceSoftmaxOp should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("Out"), + "Output(Out) of SequenceSoftmaxOp should not be null."); + ctx->SetOutputDim("Out", ctx->GetInputDim("X")); + ctx->ShareLoD("X", /*->*/ "Out"); } }; class SequenceSoftmaxOpMaker : public framework::OpProtoAndCheckerMaker { public: - SequenceSoftmaxOpMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + SequenceSoftmaxOpMaker(framework::OpProto* proto, + framework::OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "(LoDTensor)"); - AddOutput("Out", "(LoDTensor)"); + AddInput("X", + "(LoDTensor) 1-D or 2-D input LoDTensor with the 2-nd dimension " + "of length 1."); + AddOutput("Out", + "(LoDTensor) 1-D or 2-D output LoDTensor with the 2-nd dimension " + "of length 1."); AddComment(R"DOC( -Softmax of Sequence. +SequenceSoftmaxOp computes softmax activation among all time-steps for each +sequences. The dimension of each time-step should be 1. Thus, the shape of +input Tensor can be either [N, 1] or [N], where N is the sum of all sequences' +length. + +Equation: + for i-th sequence in mini-batch: + Out(X[lod[i]:lod[i+1]], :) = + exp(X[lod[i]:lod[i+1], :]) / sum(exp(X[lod[i]:lod[i+1], :])) + +For example, for a mini-batch of 3 sequences with variable-length, +each containing 2, 3, 2 time-steps, the lod of which is [0, 2, 5, 7], +then softmax will be computed among X[0:2, :], X[2:5, :], X[2:7, :] +and N turns out to be 7. )DOC"); } }; @@ -66,7 +67,25 @@ class SequenceSoftmaxGradOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(const framework::InferShapeContext &ctx) const override {} + void InferShape(framework::InferShapeContextBase* ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("Out"), + "Input(Out) of SequenceSoftmaxGradOp should not be null."); + PADDLE_ENFORCE( + ctx->HasInput(framework::GradVarName("Out")), + "Input(Out@GRAD) of SequenceSoftmaxGradOp should not be null."); + PADDLE_ENFORCE(ctx->HasInput("X"), + "Input(X) of SequenceSoftmaxOp should not be null."); + PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")), + "Output(X@GRAD) of SequenceSoftmaxOp should not be null."); + + PADDLE_ENFORCE_EQ( + ctx->GetInputDim("Out"), + ctx->GetInputDim(framework::GradVarName("Out")), + "Input(Out) and Input(Out@GRAD) of SequenceSoftmaxGradOp should be of " + "the same shape."); + + ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X")); + } }; } // namespace operators @@ -81,4 +100,4 @@ REGISTER_OP_CPU_KERNEL( ops::SequenceSoftmaxKernel); REGISTER_OP_CPU_KERNEL( sequence_softmax_grad, - ops::SequenceSoftmaxGradKernel); + ops::SequenceSoftmaxGradKernel); diff --git a/paddle/operators/sequence_softmax_op.h b/paddle/operators/sequence_softmax_op.h index f39c2ec6c3..ca5cef4fc6 100644 --- a/paddle/operators/sequence_softmax_op.h +++ b/paddle/operators/sequence_softmax_op.h @@ -16,19 +16,13 @@ limitations under the License. */ #include "paddle/framework/eigen.h" #include "paddle/framework/op_registry.h" -#include "paddle/operators/math/softmax_function.h" +#include "paddle/operators/math/softmax.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; using LoDTensor = framework::LoDTensor; -template -using EigenVector = framework::EigenVector; -template -using EigenMatrix = framework::EigenMatrix; template class SequenceSoftmaxKernel : public framework::OpKernel { @@ -38,7 +32,17 @@ class SequenceSoftmaxKernel : public framework::OpKernel { auto* out = ctx.Output("Out"); auto lod = x->lod(); + auto dims = x->dims(); + + PADDLE_ENFORCE_GE( + dims[0], + /* batch_size */ static_cast(lod[0].size() - 1), + "The first dimension of Input(X) should be larger than batch size."); + const size_t level = lod.size() - 1; + PADDLE_ENFORCE_EQ(x->numel(), static_cast(lod[level].back()), + "The width of each timestep in Input(X) of " + "SequenceSoftmaxOp should be 1."); out->mutable_data(ctx.GetPlace()); for (int i = 0; i < static_cast(lod[level].size()) - 1; ++i) { @@ -48,10 +52,10 @@ class SequenceSoftmaxKernel : public framework::OpKernel { Tensor out_i = out->Slice(start_pos, end_pos); // Reshape from (end_pos - start_pos) x 1UL to 1UL x (end_pos - start_pos) - framework::DDim dims = framework::make_ddim({1UL, end_pos - start_pos}); - x_i.Resize(dims); - out_i.Resize(dims); - math::SoftmaxFunctor()(&x_i, &out_i, ctx); + framework::DDim dims_i = framework::make_ddim({1UL, end_pos - start_pos}); + x_i.Resize(dims_i); + out_i.Resize(dims_i); + math::SoftmaxFunctor()(ctx, &x_i, &out_i); } } }; @@ -59,7 +63,32 @@ class SequenceSoftmaxKernel : public framework::OpKernel { template class SequenceSoftmaxGradKernel : public framework::OpKernel { public: - void Compute(const framework::ExecutionContext& ctx) const override {} + void Compute(const framework::ExecutionContext& ctx) const override { + auto* out = ctx.Input("Out"); + auto* out_grad = ctx.Input(framework::GradVarName("Out")); + auto* x = ctx.Input("X"); + auto* x_grad = ctx.Output(framework::GradVarName("X")); + + auto lod = x->lod(); + const size_t level = lod.size() - 1; + + x_grad->mutable_data(ctx.GetPlace()); + for (int i = 0; i < static_cast(lod[level].size()) - 1; ++i) { + int start_pos = static_cast(lod[level][i]); + int end_pos = static_cast(lod[level][i + 1]); + + Tensor out_i = out->Slice(start_pos, end_pos); + Tensor out_grad_i = out_grad->Slice(start_pos, end_pos); + Tensor x_grad_i = x_grad->Slice(start_pos, end_pos); + + // Reshape from (end_pos - start_pos) x 1UL to 1UL x (end_pos - start_pos) + framework::DDim dims_i = framework::make_ddim({1UL, end_pos - start_pos}); + out_i.Resize(dims_i); + out_grad_i.Resize(dims_i); + x_grad_i.Resize(dims_i); + math::SoftmaxGradFunctor()(ctx, &out_i, &out_grad_i, &x_grad_i); + } + } }; } // namespace operators diff --git a/python/paddle/v2/framework/tests/test_sequence_softmax_op.py b/python/paddle/v2/framework/tests/test_sequence_softmax_op.py index d0667c1308..b54a56aa6d 100644 --- a/python/paddle/v2/framework/tests/test_sequence_softmax_op.py +++ b/python/paddle/v2/framework/tests/test_sequence_softmax_op.py @@ -5,7 +5,7 @@ from op_test import OpTest def stable_softmax(x): """Compute the softmax of vector x in a numerically stable way.""" - shiftx = x - np.max(x) + shiftx = x - np.max(x).clip(-64.) exps = np.exp(shiftx) return exps / np.sum(exps) @@ -30,6 +30,9 @@ class TestSequenceSoftmaxOp(OpTest): def test_check_output(self): self.check_output() + def test_check_grad(self): + self.check_grad(["X"], "Out", max_relative_error=0.01) + if __name__ == "__main__": unittest.main() From 6326c40d2709da9839edbf3fb2a280ca92804a23 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Thu, 28 Sep 2017 09:49:00 +0800 Subject: [PATCH 048/342] Add max pool with index --- paddle/operators/CMakeLists.txt | 6 + paddle/operators/math/CMakeLists.txt | 4 +- paddle/operators/math/pooling.cc | 255 ++++++++++++ paddle/operators/math/pooling.cu | 387 ++++++++++++++++++ paddle/operators/math/pooling.h | 68 +++ paddle/operators/pool_with_index_op.cc | 198 +++++++++ paddle/operators/pool_with_index_op.cu | 31 ++ paddle/operators/pool_with_index_op.h | 99 +++++ .../v2/framework/tests/test_pool_max_op.py | 125 ++++++ 9 files changed, 1171 insertions(+), 2 deletions(-) create mode 100644 paddle/operators/math/pooling.cc create mode 100644 paddle/operators/math/pooling.cu create mode 100644 paddle/operators/math/pooling.h create mode 100644 paddle/operators/pool_with_index_op.cc create mode 100644 paddle/operators/pool_with_index_op.cu create mode 100644 paddle/operators/pool_with_index_op.h create mode 100644 python/paddle/v2/framework/tests/test_pool_max_op.py diff --git a/paddle/operators/CMakeLists.txt b/paddle/operators/CMakeLists.txt index e56895c63a..0feb969c62 100644 --- a/paddle/operators/CMakeLists.txt +++ b/paddle/operators/CMakeLists.txt @@ -62,6 +62,12 @@ function(op_library TARGET) file(APPEND ${pybind_file} "USE_OP(sigmoid);\n") endif() + if ("${TARGET}" STREQUAL "pool_with_index_op") + set(pybind_flag 1) + # It's enough to just adding one operator to pybind + file(APPEND ${pybind_file} "USE_OP(maxPool2dWithIndex);\n") + endif() + # pybind USE_NO_KERNEL_OP file(READ ${TARGET}.cc TARGET_CONTENT) string(REGEX MATCH "OperatorWithKernel" regex_result "${TARGET_CONTENT}") diff --git a/paddle/operators/math/CMakeLists.txt b/paddle/operators/math/CMakeLists.txt index 91ae3d49f1..811deb4c2c 100644 --- a/paddle/operators/math/CMakeLists.txt +++ b/paddle/operators/math/CMakeLists.txt @@ -1,12 +1,12 @@ if(WITH_GPU) nv_library(math_function SRCS math_function.cc math_function.cu im2col.cc - im2col.cu DEPS cblas device_context operator) + im2col.cu pooling.cc pooling.cu DEPS cblas device_context operator) nv_library(softmax_function SRCS softmax.cc softmax.cu DEPS operator) nv_library(cross_entropy_function SRCS cross_entropy.cc cross_entropy.cu DEPS operator) else() - cc_library(math_function SRCS math_function.cc im2col.cc + cc_library(math_function SRCS math_function.cc im2col.cc pooling.cc DEPS cblas device_context operator) cc_library(softmax_function SRCS softmax.cc DEPS operator) cc_library(cross_entropy_function SRCS cross_entropy.cc DEPS operator) diff --git a/paddle/operators/math/pooling.cc b/paddle/operators/math/pooling.cc new file mode 100644 index 0000000000..0e4d9007a6 --- /dev/null +++ b/paddle/operators/math/pooling.cc @@ -0,0 +1,255 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/math/pooling.h" + +namespace paddle { +namespace operators { +namespace math { + +template +class MaxPool2dWithIndexFunctor { + public: + void operator()(const platform::DeviceContext& context, + const framework::Tensor& input, framework::Tensor& output, + framework::Tensor& mask, std::vector& ksize, + std::vector& strides, std::vector& paddings) { + const int batch_size = input.dims()[0]; + + const int input_height = input.dims()[2]; + const int input_width = input.dims()[3]; + const int output_channels = output.dims()[1]; + const int output_height = output.dims()[2]; + const int output_width = output.dims()[3]; + const int ksize_height = ksize[0]; + const int ksize_width = ksize[1]; + const int stride_height = strides[0]; + const int stride_width = strides[1]; + const int padding_height = paddings[0]; + const int padding_width = paddings[1]; + + const int input_stride = input_height * input_width; + const int output_stride = output_height * output_width; + + const T* input_data = input.data(); + T* output_data = output.mutable_data(context.GetPlace()); + + T* mask_data = mask.mutable_data(context.GetPlace()); + + for (int i = 0; i < batch_size; i++) { + for (int c = 0; c < output_channels; ++c) { + for (int ph = 0; ph < output_height; ++ph) { + int hstart = ph * stride_height - padding_height; + int hend = std::min(hstart + ksize_height, input_height); + hstart = std::max(hstart, 0); + for (int pw = 0; pw < output_width; ++pw) { + int wstart = pw * stride_width - padding_width; + int wend = std::min(wstart + ksize_width, input_width); + wstart = std::max(wstart, 0); + + T ele = static_cast(-FLT_MAX); + int index = -1; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + if (ele < input_data[h * input_width + w]) { + ele = input_data[h * input_width + w]; + index = h * input_width + w; + } + } + } + output_data[ph * output_width + pw] = ele; + mask_data[ph * output_width + pw] = index; + } + } + // offset + input_data += input_stride; + output_data += output_stride; + mask_data += output_stride; + } + } + } +}; + +template +class MaxPool2dWithIndexGradFunctor { + public: + void operator()(const platform::DeviceContext& context, + framework::Tensor& input_grad, + const framework::Tensor& output_grad, + const framework::Tensor& mask, std::vector& ksize, + std::vector& strides, std::vector& paddings) { + const int batch_size = input_grad.dims()[0]; + const int input_height = input_grad.dims()[2]; + const int input_width = input_grad.dims()[3]; + const int output_channels = output_grad.dims()[1]; + const int output_height = output_grad.dims()[2]; + const int output_width = output_grad.dims()[3]; + const int input_stride = input_height * input_width; + const int output_stride = output_height * output_width; + + const T* mask_data = mask.data(); + const T* output_grad_data = output_grad.data(); + T* input_grad_data = input_grad.mutable_data(context.GetPlace()); + + for (size_t n = 0; n < batch_size; ++n) { + for (size_t c = 0; c < output_channels; ++c) { + for (size_t ph = 0; ph < output_height; ++ph) { + for (size_t pw = 0; pw < output_width; ++pw) { + const size_t output_idx = ph * output_width + pw; + const size_t input_idx = static_cast(mask_data[output_idx]); + + input_grad_data[input_idx] += output_grad_data[output_idx]; + } + } + } + // offset + input_grad_data += input_stride; + output_grad_data += output_stride; + mask_data += output_stride; + } + } +}; + +template class MaxPool2dWithIndexFunctor; +template class MaxPool2dWithIndexGradFunctor; +template class MaxPool2dWithIndexFunctor; +template class MaxPool2dWithIndexGradFunctor; + +template +class MaxPool3dWithIndexFunctor { + public: + void operator()(const platform::DeviceContext& context, + const framework::Tensor& input, framework::Tensor& output, + framework::Tensor& mask, std::vector& ksize, + std::vector& strides, std::vector& paddings) { + const int batch_size = input.dims()[0]; + const int input_depth = input.dims()[2]; + const int input_height = input.dims()[3]; + const int input_width = input.dims()[4]; + const int output_channels = output.dims()[1]; + const int output_depth = output.dims()[2]; + const int output_height = output.dims()[3]; + const int output_width = output.dims()[4]; + const int ksize_depth = ksize[0]; + const int ksize_height = ksize[1]; + const int ksize_width = ksize[2]; + const int stride_depth = strides[0]; + const int stride_height = strides[1]; + const int stride_width = strides[2]; + const int padding_depth = paddings[0]; + const int padding_height = paddings[1]; + const int padding_width = paddings[2]; + const int input_stride = input_depth * input_height * input_width; + const int output_stride = output_depth * output_height * output_width; + const T* input_data = input.data(); + T* output_data = output.mutable_data(context.GetPlace()); + T* mask_data = mask.mutable_data(context.GetPlace()); + + for (int i = 0; i < batch_size; i++) { + for (int c = 0; c < output_channels; ++c) { + for (int pd = 0; pd < output_depth; ++pd) { + int dstart = pd * stride_depth - padding_depth; + int dend = std::min(dstart + ksize_depth, input_depth); + dstart = std::max(dstart, 0); + for (int ph = 0; ph < output_height; ++ph) { + int hstart = ph * stride_height - padding_height; + int hend = std::min(hstart + ksize_height, input_height); + hstart = std::max(hstart, 0); + for (int pw = 0; pw < output_width; ++pw) { + int wstart = pw * stride_width - padding_width; + int wend = std::min(wstart + ksize_width, input_width); + wstart = std::max(wstart, 0); + int output_idx = (pd * output_height + ph) * output_width + pw; + T ele = static_cast(-FLT_MAX); + int index = -1; + for (int d = dstart; d < dend; ++d) { + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + if (ele < + input_data[(d * input_height + h) * input_width + w]) { + index = (d * input_height + h) * input_width + w; + ele = + input_data[(d * input_height + h) * input_width + w]; + } + } + } + } + output_data[output_idx] = ele; + mask_data[output_idx] = index; + } + } + } + // offset + input_data += input_stride; + output_data += output_stride; + mask_data += output_stride; + } + } + } +}; + +template +class MaxPool3dWithIndexGradFunctor { + public: + void operator()(const platform::DeviceContext& context, + framework::Tensor& input_grad, + const framework::Tensor& output_grad, + const framework::Tensor& mask, std::vector& ksize, + std::vector& strides, std::vector& paddings) { + const int batch_size = input_grad.dims()[0]; + const int input_depth = input_grad.dims()[2]; + const int input_height = input_grad.dims()[3]; + const int input_width = input_grad.dims()[4]; + const int output_channels = output_grad.dims()[1]; + const int output_depth = output_grad.dims()[2]; + const int output_height = output_grad.dims()[3]; + const int output_width = output_grad.dims()[4]; + const int input_stride = input_depth * input_height * input_width; + const int output_stride = output_depth * output_height * output_width; + + const T* mask_data = mask.data(); + const T* output_grad_data = output_grad.data(); + T* input_grad_data = input_grad.mutable_data(context.GetPlace()); + + for (size_t n = 0; n < batch_size; ++n) { + for (size_t c = 0; c < output_channels; ++c) { + for (size_t pd = 0; pd < output_depth; ++pd) { + for (size_t ph = 0; ph < output_height; ++ph) { + for (size_t pw = 0; pw < output_width; ++pw) { + const size_t output_idx = + (pd * output_height + ph) * output_width + pw; + const size_t input_idx = + static_cast(mask_data[output_idx]); + + input_grad_data[input_idx] += output_grad_data[output_idx]; + } + } + } + // offset + input_grad_data += input_stride; + output_grad_data += output_stride; + mask_data += output_stride; + } + } + } +}; + +template class MaxPool3dWithIndexFunctor; +template class MaxPool3dWithIndexGradFunctor; +template class MaxPool3dWithIndexFunctor; +template class MaxPool3dWithIndexGradFunctor; + +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/math/pooling.cu b/paddle/operators/math/pooling.cu new file mode 100644 index 0000000000..f32e6a26d0 --- /dev/null +++ b/paddle/operators/math/pooling.cu @@ -0,0 +1,387 @@ +/* Copyright (c) 2016 paddlepaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/math/pooling.h" +#include "paddle/platform/cuda_helper.h" + +namespace paddle { +namespace operators { +namespace math { + +template +__global__ void KernelMaxPool2dWithIdxForward( + const int nthreads, const T* input_data, T* output_data, T* mask_data, + const int channels, const int input_height, const int input_width, + const int output_height, const int output_width, const int ksize_height, + const int ksize_width, const int stride_height, const int stride_width, + const int padding_height, const int padding_width) { + int index = blockIdx.x * blockDim.x + threadIdx.x; + if (index < nthreads) { + int pw = index % output_width; + int ph = (index / output_width) % output_height; + int c = (index / output_width / output_height) % channels; + int batch_idx = index / output_width / output_height / channels; + + int hstart = ph * stride_height - padding_height; + int hend = min(hstart + ksize_height, input_height); + hstart = max(hstart, 0); + + int wstart = pw * stride_width - padding_width; + int wend = min(wstart + ksize_width, input_width); + wstart = max(wstart, 0); + + input_data += (batch_idx * channels + c) * input_height * input_width; + T ele = -FLT_MAX; + int index = -1; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + if (ele < input_data[h * input_width + w]) { + index = h * input_width + w; + ele = input_data[h * input_width + w]; + } + } + } + output_data[index] = ele; + mask_data[index] = index; + } +} + +template +__global__ void KernelMaxPool2DWithIdxBackward( + const int nthreads, T* input_grad, const T* output_grad, const T* mask_data, + const int channels, const int input_height, const int input_width, + const int output_height, const int output_width, const int ksize_height, + const int ksize_width, const int stride_height, const int stride_width, + const int padding_height, const int padding_width) { + int index = blockIdx.x * blockDim.x + threadIdx.x; + if (index < nthreads) { + int offsetW = index % input_width + padding_width; + int offsetH = (index / input_width) % input_height + padding_height; + int offsetC = (index / input_width / input_height) % channels; + int batch_idx = index / input_width / input_height / channels; + + int phstart = (offsetH < ksize_height) + ? 0 + : (offsetH - ksize_height) / stride_height + 1; + int pwstart = (offsetW < ksize_width) + ? 0 + : (offsetW - ksize_width) / stride_width + 1; + int phend = min(offsetH / stride_height + 1, output_height); + int pwend = min(offsetW / stride_width + 1, output_width); + T gradient = 0; + int output_idx = + (batch_idx * channels + offsetC) * output_height * output_width; + mask_data += output_idx; + output_grad += output_idx; + for (int ph = phstart; ph < phend; ++ph) { + for (int pw = pwstart; pw < pwend; ++pw) { + if ((offsetH * input_width + offsetW) == + mask_data[ph * output_width + pw]) + gradient += output_grad[ph * output_width + pw]; + } + } + input_grad[index] = gradient; + } +} + +template +class MaxPool2dWithIndexFunctor { + public: + void operator()(const platform::DeviceContext& context, + const framework::Tensor& input, framework::Tensor& output, + framework::Tensor& mask, std::vector& ksize, + std::vector& strides, std::vector& paddings) { + const int batch_size = input.dims()[0]; + const int input_channels = input.dims()[1]; + const int input_height = input.dims()[2]; + const int input_width = input.dims()[3]; + const int output_channels = output.dims()[1]; + const int output_height = output.dims()[2]; + const int output_width = output.dims()[3]; + const int ksize_height = ksize[0]; + const int ksize_width = ksize[1]; + const int stride_height = strides[0]; + const int stride_width = strides[1]; + const int padding_height = paddings[0]; + const int padding_width = paddings[1]; + + const T* input_data = input.data(); + T* output_data = output.mutable_data(context.GetPlace()); + T* mask_data = mask.mutable_data(context.GetPlace()); + + int nthreads = batch_size * output_channels * output_height * output_width; + int blocks = (nthreads + 1024 - 1) / 1024; + dim3 threads(1024, 1); + dim3 grid(blocks, 1); + + KernelMaxPool2dWithIdxForward< + T><<(context) + .stream()>>>(nthreads, input_data, output_data, mask_data, + input_channels, input_height, input_width, + output_height, output_width, ksize_height, + ksize_width, stride_height, stride_width, + padding_height, padding_width); + } +}; + +template +class MaxPool2dWithIndexGradFunctor { + public: + void operator()(const platform::DeviceContext& context, + framework::Tensor& input_grad, + const framework::Tensor& output_grad, + const framework::Tensor& mask, std::vector& ksize, + std::vector& strides, std::vector& paddings) { + const int batch_size = input_grad.dims()[0]; + const int input_channels = input_grad.dims()[1]; + const int input_height = input_grad.dims()[2]; + const int input_width = input_grad.dims()[3]; + const int output_channels = output_grad.dims()[1]; + const int output_height = output_grad.dims()[2]; + const int output_width = output_grad.dims()[3]; + const int ksize_height = ksize[0]; + const int ksize_width = ksize[1]; + const int stride_height = strides[0]; + const int stride_width = strides[1]; + const int padding_height = paddings[0]; + const int padding_width = paddings[1]; + + const T* mask_data = mask.data(); + const T* output_grad_data = output_grad.data(); + T* input_grad_data = input_grad.mutable_data(context.GetPlace()); + + int nthreads = batch_size * input_channels * input_height * input_width; + int blocks = (nthreads + 1024 - 1) / 1024; + dim3 threads(1024, 1); + dim3 grid(blocks, 1); + + KernelMaxPool2DWithIdxBackward< + T><<(context) + .stream()>>>(nthreads, input_grad_data, output_grad_data, + mask_data, input_channels, input_height, + input_width, output_height, output_width, + ksize_height, ksize_width, stride_height, + stride_width, padding_height, padding_width); + } +}; + +template class MaxPool2dWithIndexFunctor; +template class MaxPool2dWithIndexGradFunctor; +template class MaxPool2dWithIndexFunctor; +template class MaxPool2dWithIndexGradFunctor; + +template +__global__ void KernelMaxPool3DWithIdxForward( + const int nthreads, const T* input_data, T* output_data, T* mask_data, + const int channels, const int input_depth, const int input_height, + const int input_width, const int output_depth, const int output_height, + const int output_width, const int ksize_depth, const int ksize_height, + const int ksize_width, const int stride_depth, const int stride_height, + const int stride_width, const int padding_depth, const int padding_height, + const int padding_width) { + for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (nthreads); + index += blockDim.x * gridDim.x) { + int pw = index % output_width; + int ph = (index / output_width) % output_height; + int pd = (index / output_width / output_height) % output_depth; + int c = (index / output_width / output_height / output_depth) % channels; + int batch_idx = + index / output_width / output_height / output_depth / channels; + int dstart = pd * stride_depth - padding_depth; + int hstart = ph * stride_height - padding_height; + int wstart = pw * stride_width - padding_width; + int dend = min(dstart + ksize_depth, input_depth); + int hend = min(hstart + ksize_height, input_height); + int wend = min(wstart + ksize_width, input_width); + dstart = max(dstart, 0); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + T ele = -FLT_MAX; + int index = -1; + input_data += + (batch_idx * channels + c) * input_depth * input_height * input_width; + + for (int d = dstart; d < dend; ++d) { + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + if (ele < input_data[(d * input_height + h) * input_width + w]) { + index = (d * input_height + h) * input_width + w; + ele = input_data[(d * input_height + h) * input_width + w]; + } + } + } + } + output_data[index] = ele; + mask_data[index] = index; + } +} + +template +__global__ void KernelMaxPool3DWithIdxBackward( + const int nthreads, T* input_grad, const T* output_grad, const T* mask, + const int channels, const int input_depth, const int input_height, + const int input_width, const int output_depth, const int output_height, + const int output_width, const int ksize_depth, const int ksize_height, + const int ksize_width, const int stride_depth, const int stride_height, + const int stride_width, const int padding_depth, const int padding_height, + const int padding_width) { + for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (nthreads); + index += blockDim.x * gridDim.x) { + int offsetW = index % input_width + padding_width; + int offsetH = (index / input_width) % input_height + padding_height; + int offsetD = + (index / input_width / input_height) % input_depth + padding_depth; + int offsetC = (index / input_width / input_height / input_depth) % channels; + int batch_idx = index / input_width / input_height / input_depth / channels; + + int pdstart = (offsetD < ksize_depth) + ? 0 + : (offsetD - ksize_depth) / stride_depth + 1; + int phstart = (offsetH < ksize_height) + ? 0 + : (offsetH - ksize_height) / stride_height + 1; + int pwstart = (offsetW < ksize_width) + ? 0 + : (offsetW - ksize_width) / stride_width + 1; + int pdend = min((offsetD) / stride_depth + 1, output_depth); + int phend = min((offsetH) / stride_height + 1, output_height); + int pwend = min((offsetW) / stride_width + 1, output_width); + + T gradient = 0; + int output_idx = (batch_idx * channels + offsetC) * output_depth * + output_height * output_width; + mask += output_idx; + output_grad += output_idx; + + for (int pd = pdstart; pd < pdend; ++pd) { + for (int ph = phstart; ph < phend; ++ph) { + for (int pw = pwstart; pw < pwend; ++pw) { + if (((offsetD * input_height + offsetH) * input_width + offsetW) == + mask[(pd * output_height + ph) * output_width + pw]) + gradient += + output_grad[(pd * output_height + ph) * output_width + pw]; + } + } + } + input_grad[index] = gradient; + } +} + +template +class MaxPool3dWithIndexFunctor { + public: + void operator()(const platform::DeviceContext& context, + const framework::Tensor& input, framework::Tensor& output, + framework::Tensor& mask, std::vector& ksize, + std::vector& strides, std::vector& paddings) { + const int batch_size = input.dims()[0]; + const int input_channels = input.dims()[1]; + const int input_depth = input.dims()[2]; + const int input_height = input.dims()[3]; + const int input_width = input.dims()[4]; + const int output_channels = output.dims()[1]; + const int output_depth = output.dims()[2]; + const int output_height = output.dims()[3]; + const int output_width = output.dims()[4]; + const int ksize_depth = ksize[0]; + const int ksize_height = ksize[1]; + const int ksize_width = ksize[2]; + const int stride_depth = strides[0]; + const int stride_height = strides[1]; + const int stride_width = strides[2]; + const int padding_depth = paddings[0]; + const int padding_height = paddings[1]; + const int padding_width = paddings[2]; + + const T* input_data = input.data(); + T* output_data = output.mutable_data(context.GetPlace()); + T* mask_data = output.mutable_data(context.GetPlace()); + + int nthreads = batch_size * output_channels * output_depth * output_height * + output_width; + int blocks = (nthreads + 1024 - 1) / 1024; + dim3 threads(1024, 1); + dim3 grid(blocks, 1); + + KernelMaxPool3DWithIdxForward< + T><<(context) + .stream()>>>( + nthreads, input_data, output_data, mask_data, input_channels, + input_depth, input_height, input_width, output_depth, output_height, + output_width, ksize_depth, ksize_height, ksize_width, stride_depth, + stride_height, stride_width, padding_depth, padding_height, + padding_width); + } +}; + +template +class MaxPool3dWithIndexGradFunctor { + public: + void operator()(const platform::DeviceContext& context, + framework::Tensor& input_grad, + const framework::Tensor& output_grad, + const framework::Tensor& mask, std::vector& ksize, + std::vector& strides, std::vector& paddings) { + const int batch_size = input_grad.dims()[0]; + const int input_channels = input_grad.dims()[1]; + const int input_depth = input_grad.dims()[2]; + const int input_height = input_grad.dims()[3]; + const int input_width = input_grad.dims()[4]; + const int output_channels = input_grad.dims()[1]; + const int output_depth = input_grad.dims()[2]; + const int output_height = input_grad.dims()[3]; + const int output_width = input_grad.dims()[4]; + const int ksize_depth = ksize[0]; + const int ksize_height = ksize[1]; + const int ksize_width = ksize[2]; + const int stride_depth = strides[0]; + const int stride_height = strides[1]; + const int stride_width = strides[2]; + const int padding_depth = paddings[0]; + const int padding_height = paddings[1]; + const int padding_width = paddings[2]; + + const T* output_grad_data = output_grad.data(); + const T* mask_data = mask.data(); + T* input_grad_data = input_grad.mutable_data(context.GetPlace()); + + int nthreads = + batch_size * input_channels * input_depth * input_height * input_width; + int blocks = (nthreads + 1024 - 1) / 1024; + dim3 threads(1024, 1); + dim3 grid(blocks, 1); + + KernelMaxPool3DWithIdxBackward< + T><<(context) + .stream()>>>( + nthreads, input_grad_data, output_grad_data, mask_data, input_channels, + input_depth, input_height, input_width, output_depth, output_height, + output_width, ksize_depth, ksize_height, ksize_width, stride_depth, + stride_height, stride_width, padding_depth, padding_height, + padding_width); + } +}; + +template class MaxPool3dWithIndexFunctor; +template class MaxPool3dWithIndexGradFunctor; +template class MaxPool3dWithIndexFunctor; +template class MaxPool3dWithIndexGradFunctor; + +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/math/pooling.h b/paddle/operators/math/pooling.h new file mode 100644 index 0000000000..3a05cd98fe --- /dev/null +++ b/paddle/operators/math/pooling.h @@ -0,0 +1,68 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once +#include "paddle/framework/eigen.h" +#include "paddle/framework/tensor.h" +#include "paddle/platform/device_context.h" +#include "paddle/platform/hostdevice.h" + +namespace paddle { +namespace operators { +namespace math { +////////////////////// +#define FLT_MAX __FLT_MAX__ +///////////////////// + +template +class MaxPool2dWithIndexFunctor { + public: + void operator()(const platform::DeviceContext& context, + const framework::Tensor& input, framework::Tensor& output, + framework::Tensor& mask, std::vector& ksize, + std::vector& strides, std::vector& paddings); +}; + +template +class MaxPool2dWithIndexGradFunctor { + public: + void operator()(const platform::DeviceContext& context, + framework::Tensor& input_grad, + const framework::Tensor& output_grad, + const framework::Tensor& mask, std::vector& ksize, + std::vector& strides, std::vector& paddings); +}; + +template +class MaxPool3dWithIndexFunctor { + public: + void operator()(const platform::DeviceContext& context, + const framework::Tensor& input, framework::Tensor& output, + framework::Tensor& mask, std::vector& ksize, + std::vector& strides, std::vector& paddings); +}; + +template +class MaxPool3dWithIndexGradFunctor { + public: + void operator()(const platform::DeviceContext& context, + framework::Tensor& input_grad, + const framework::Tensor& output_grad, + const framework::Tensor& mask, std::vector& ksize, + std::vector& strides, std::vector& paddings); +}; + +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/pool_with_index_op.cc b/paddle/operators/pool_with_index_op.cc new file mode 100644 index 0000000000..d7a07a403d --- /dev/null +++ b/paddle/operators/pool_with_index_op.cc @@ -0,0 +1,198 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/pool_with_index_op.h" + +namespace paddle { +namespace operators { + +int OutputSizeMaxPool(int input_size, int filter_size, int padding, + int stride) { + int output_size = (input_size - filter_size + 2 * padding) / stride + 1; + return output_size; +} + +class MaxPoolWithIndexOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(framework::InferShapeContextBase *ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("X"), + "X(Input) of Pooling should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("Out"), + "Out(Output) of Pooling should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("Mask"), + "Out(Output) of Pooling should not be null."); + + auto in_x_dims = ctx->GetInputDim("X"); + + std::vector ksize = ctx->Attrs().Get>("ksize"); + std::vector strides = ctx->Attrs().Get>("strides"); + std::vector paddings = ctx->Attrs().Get>("paddings"); + + PADDLE_ENFORCE(in_x_dims.size() == 4 || in_x_dims.size() == 5, + "Pooling intput should be 4-D or 5-D"); + + if (ctx->Attrs().Get("globalPooling")) { + ksize.resize(static_cast(in_x_dims.size()) - 2); + for (size_t i = 0; i < ksize.size(); ++i) + ksize[i] = static_cast(in_x_dims[i + 2]); + } + + PADDLE_ENFORCE(in_x_dims.size() - ksize.size() == 2U, + "Pooling intput size and pooling size should be consistent"); + PADDLE_ENFORCE(ksize.size() == 2 || ksize.size() == 3, + "Pooling size size should be 2 elements. or 3 elements."); + PADDLE_ENFORCE_EQ(ksize.size(), strides.size(), + "strides size and pooling size should be the same."); + PADDLE_ENFORCE_EQ(ksize.size(), paddings.size(), + "paddings size and pooling size should be the same."); + + std::vector output_shape({in_x_dims[0], in_x_dims[1]}); + for (size_t i = 0; i < ksize.size(); ++i) { + output_shape.push_back(OutputSizeMaxPool(in_x_dims[i + 2], ksize[i], + paddings[i], strides[i])); + } + ctx->SetOutputDim("Out", framework::make_ddim(output_shape)); + ctx->SetOutputDim("Mask", framework::make_ddim(output_shape)); + } +}; + +class MaxPoolWithIndexOpGrad : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(framework::InferShapeContextBase *ctx) const override { + PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("X")), + "X(Input) of MaxPoolWithIndexOpGrad should not be null."); + PADDLE_ENFORCE( + ctx->HasOutput(framework::GradVarName("X")), + "X@GRAD(Input@GRAD) of MaxPoolWithIndexOpGrad should not be null."); + ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X")); + } +}; + +class MaxPool2dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker { + public: + MaxPool2dWithIndexOpMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput( + "X", + "The input tensor of pooling operator. " + "The format of input tensor is NCHW. Where N is batch size, C is the " + "number of channels, H and W is the height and width of image."); + AddOutput("Out", + "The output tensor of pooling operator." + "The format of output tensor is also NCHW."); + AddOutput("Mask", + "The Mask tensor of pooling operator." + "The format of output tensor is also NCHW."); + + AddAttr>( + "ksize", "pooling size(height, width) of pooling operator."); + AddAttr( + "globalPooling", + "whether to use the globalPooling." + "int constant equal to false or true" + "default false" + "If globalPooling = true, ksize is ignored and need not be specified.") + .SetDefault(false); + AddAttr>("strides", + "strides(height, width) of pooling operator." + "default {1,1}") + .SetDefault({1, 1}); + AddAttr>("paddings", + "paddings(height, width) of pooling operator." + "default {0,0}") + .SetDefault({0, 0}); + + AddComment(R"DOC( +The maxPooling2d with index operation calculates the output and the mask based on +the input and ksize, strides, paddings parameters. +)DOC"); + } +}; + +class MaxPool3dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker { + public: + MaxPool3dWithIndexOpMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput( + "X", + "The input tensor of pooling operator. " + "The format of input tensor is NCDHW. Where N is batch size, C is " + "the number of channels, D, H and W is the depth, height and width of " + "image."); + AddOutput("Out", + "The output tensor of pooling operator." + "The format of output tensor is also NCDHW."); + AddOutput("Mask", + "The Mask tensor of pooling operator." + "The format of output tensor is also NCDHW."); + + AddAttr>( + "ksize", "pooling size(depth, height, width) of pooling operator."); + AddAttr( + "globalPooling", + "whether to use the globalPooling." + "int constant equal to false or true" + "default false" + "If globalPooling = true, ksize is ignored and need not be specified.") + .SetDefault(false); + AddAttr>( + "strides", + "strides(depth, height, width) of pooling operator." + "default {1,1,1}") + .SetDefault({1, 1, 1}); + AddAttr>( + "paddings", + "paddings(depth, height, width) of pooling operator." + "default {0,0,0}") + .SetDefault({0, 0, 0}); + AddComment(R"DOC( +The maxpooling3d with index operation calculates the output and the mask based on +the input and ksize, strides, paddings parameters. +)DOC"); + } +}; +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; + +REGISTER_OP(maxPool2dWithIndex, ops::MaxPoolWithIndexOp, + ops::MaxPool2dWithIndexOpMaker, maxPool2dWithIndex_grad, + ops::MaxPoolWithIndexOpGrad); + +REGISTER_OP_CPU_KERNEL( + maxPool2dWithIndex, + ops::MaxPoolWithIndexKernel); +REGISTER_OP_CPU_KERNEL( + maxPool2dWithIndex_grad, + ops::MaxPoolWithIndexGradKernel) + +REGISTER_OP(maxPool3dWithIndex, ops::MaxPoolWithIndexOp, + ops::MaxPool3dWithIndexOpMaker, maxPool3dWithIndex_grad, + ops::MaxPoolWithIndexOpGrad); + +REGISTER_OP_CPU_KERNEL( + maxPool3dWithIndex, + ops::MaxPoolWithIndexKernel); +REGISTER_OP_CPU_KERNEL( + maxPool3dWithIndex_grad, + ops::MaxPoolWithIndexGradKernel) diff --git a/paddle/operators/pool_with_index_op.cu b/paddle/operators/pool_with_index_op.cu new file mode 100644 index 0000000000..8007fc7ccf --- /dev/null +++ b/paddle/operators/pool_with_index_op.cu @@ -0,0 +1,31 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/pool_with_index_op.h" + +namespace ops = paddle::operators; + +REGISTER_OP_GPU_KERNEL( + maxPool2dWithIndex, + ops::MaxPoolWithIndexKernel); +REGISTER_OP_GPU_KERNEL( + maxPool2dWithIndex_grad, + ops::MaxPoolWithIndexGradKernel) + +REGISTER_OP_GPU_KERNEL( + maxPool3dWithIndex, + ops::MaxPoolWithIndexKernel); +REGISTER_OP_GPU_KERNEL( + maxPool3dWithIndex_grad, + ops::MaxPoolWithIndexGradKernel) diff --git a/paddle/operators/pool_with_index_op.h b/paddle/operators/pool_with_index_op.h new file mode 100644 index 0000000000..91abeed016 --- /dev/null +++ b/paddle/operators/pool_with_index_op.h @@ -0,0 +1,99 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include "paddle/framework/eigen.h" +#include "paddle/framework/op_registry.h" +#include "paddle/operators/math/math_function.h" +#include "paddle/operators/math/pooling.h" + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; + +template +class MaxPoolWithIndexKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + const Tensor* in_x = context.Input("X"); + Tensor* out = context.Output("Out"); + Tensor* mask = context.Output("Mask"); + + bool global_pooling = context.Attr("globalPooling"); + std::vector ksize = context.Attr>("ksize"); + std::vector strides = context.Attr>("strides"); + std::vector paddings = context.Attr>("paddings"); + if (global_pooling) { + for (size_t i = 0; i < ksize.size(); ++i) { + ksize[i] = static_cast(in_x->dims()[i + 2]); + } + } + + switch (ksize.size()) { + case 2: { + paddle::operators::math::MaxPool2dWithIndexFunctor + pool2d_forward; + pool2d_forward(context.device_context(), *in_x, *out, *mask, ksize, + strides, paddings); + } break; + case 3: { + paddle::operators::math::MaxPool3dWithIndexFunctor + pool3d_forward; + pool3d_forward(context.device_context(), *in_x, *out, *mask, ksize, + strides, paddings); + } break; + } + } +}; + +template +class MaxPoolWithIndexGradKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + const Tensor* mask = context.Input("Maks"); + const Tensor* out_grad = + context.Input(framework::GradVarName("Out")); + Tensor* in_x_grad = context.Output(framework::GradVarName("X")); + + std::vector ksize = context.Attr>("ksize"); + std::vector strides = context.Attr>("strides"); + std::vector paddings = context.Attr>("paddings"); + + if (in_x_grad) { + in_x_grad->mutable_data(context.GetPlace()); + auto temp = framework::EigenVector::Flatten(*in_x_grad); + temp.device(context.GetEigenDevice()) = + temp.constant(static_cast(0)); + + switch (ksize.size()) { + case 2: { + paddle::operators::math::MaxPool2dWithIndexGradFunctor + pool2d_backward; + pool2d_backward(context.device_context(), *in_x_grad, *out_grad, + *mask, ksize, strides, paddings); + } break; + case 3: { + paddle::operators::math::MaxPool3dWithIndexGradFunctor + pool3d_backward; + pool3d_backward(context.device_context(), *in_x_grad, *out_grad, + *mask, ksize, strides, paddings); + } break; + } + } + } +}; +} // namespace operators +} // namespace paddle diff --git a/python/paddle/v2/framework/tests/test_pool_max_op.py b/python/paddle/v2/framework/tests/test_pool_max_op.py new file mode 100644 index 0000000000..2945c8b7a4 --- /dev/null +++ b/python/paddle/v2/framework/tests/test_pool_max_op.py @@ -0,0 +1,125 @@ +import unittest +import numpy as np +from op_test import OpTest + + +def max_pool3D_forward_naive(x, ksize, strides, paddings=[0, 0], global_pool=0): + + N, C, D, H, W = x.shape + if global_pool == 1: + ksize = [D, H, W] + D_out = (D - ksize[0] + 2 * paddings[0]) / strides[0] + 1 + H_out = (H - ksize[1] + 2 * paddings[1]) / strides[1] + 1 + W_out = (W - ksize[2] + 2 * paddings[2]) / strides[2] + 1 + out = np.zeros((N, C, D_out, H_out, W_out)) + mask = np.zeros((N, C, D_out, H_out, W_out)) + for k in xrange(D_out): + d_start = np.max((k * strides[0] - paddings[0], 0)) + d_end = np.min((k * strides[0] + ksize[0] - paddings[0], D)) + for i in xrange(H_out): + h_start = np.max((i * strides[0] - paddings[0], 0)) + h_end = np.min((i * strides[0] + ksize[0] - paddings[0], H)) + for j in xrange(W_out): + w_start = np.max((j * strides[1] - paddings[1], 0)) + w_end = np.min((j * strides[1] + ksize[1] - paddings[1], W)) + x_masked = x[:, :, d_start:d_end, h_start:h_end, w_start:w_end] + + out[:, :, k, i, j] = np.max(x_masked, axis=(2, 3, 4)) + # mask[:,:, k, i, j] = np.argmax(x_masked, axis=(2, 3, 4)) + return out + + +def max_pool2D_forward_naive(x, ksize, strides, paddings=[0, 0], global_pool=0): + + N, C, H, W = x.shape + if global_pool == 1: + ksize = [H, W] + H_out = (H - ksize[0] + 2 * paddings[0]) / strides[0] + 1 + W_out = (W - ksize[1] + 2 * paddings[1]) / strides[1] + 1 + out = np.zeros((N, C, H_out, W_out)) + mask = np.zeros((N, C, H_out, W_out)) + for i in xrange(H_out): + for j in xrange(W_out): + r_start = np.max((i * strides[0] - paddings[0], 0)) + r_end = np.min((i * strides[0] + ksize[0] - paddings[0], H)) + c_start = np.max((j * strides[1] - paddings[1], 0)) + c_end = np.min((j * strides[1] + ksize[1] - paddings[1], W)) + x_masked = x[:, :, r_start:r_end, c_start:c_end] + + out[:, :, i, j] = np.max(x_masked, axis=(2, 3)) + # mask[:,:, i, j] = np.argmax(x_masked, axis=(2, 3)) + + return out + + +class TestMaxPoolWithIndex_Op(OpTest): + def setUp(self): + self.initTestCase() + self.op_type = "maxPool3dWithIndex" + input = np.random.random(self.shape).astype("float32") + output = self.pool_forward_naive(input, self.ksize, self.strides, + self.paddings, self.global_pool) + # mask = np.zeros(output.shape) + + self.attrs = { + 'strides': self.strides, + 'paddings': self.paddings, + 'ksize': self.ksize, + 'globalPooling': self.global_pool, + } + + self.inputs = {'X': input} + self.outputs = {'Out': output} + + def test_check_output(self): + self.check_output() + + # def test_check_grad(self): + # self.check_grad(set(['X']), ['Out'], max_relative_error=0.07) + + def initTestCase(self): + self.global_pool = 0 + self.pool_forward_naive = max_pool3D_forward_naive + self.shape = [2, 3, 7, 7, 7] + self.ksize = [3, 3, 3] + self.strides = [1, 1, 1] + self.paddings = [1, 1, 1] + + +"""" +class TestCase1(TestMaxPoolWithIndex_Op): + def initTestCase(self): + self.global_pool = 1 + self.op_type = "maxPool3dWithIndex" + self.pool_forward_naive = max_pool3D_forward_naive + self.shape = [2, 3, 5, 5, 5] + self.ksize = [3, 3, 3] + self.strides = [1, 1, 1] + self.paddings = [0, 0, 0] + + +class TestCase2(TestMaxPoolWithIndex_Op): + def initTestCase(self): + self.global_pool = 0 + self.op_type = "maxPool2dWithIndex" + self.pool_forward_naive = max_pool2D_forward_naive + self.shape = [2, 3, 7, 7] + self.ksize = [3, 3] + self.strides = [1, 1] + self.paddings = [1, 1] + + +class TestCase3(TestMaxPoolWithIndex_Op): + def initTestCase(self): + self.global_pool = 1 + self.op_type = "maxPool2dWithIndex" + self.pool_forward_naive = max_pool2D_forward_naive + self.shape = [2, 3, 5, 5] + self.ksize = [3, 3] + self.strides = [1, 1] + self.paddings = [0, 0] + + +if __name__ == '__main__': + unittest.main() +""" From f2feb333843d83b74a9d29f0c73ffe8409795df0 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 28 Sep 2017 10:50:14 -0700 Subject: [PATCH 049/342] Follow comments --- paddle/framework/operator.h | 4 ++-- paddle/platform/place.h | 11 +++++++++++ paddle/platform/variant.h | 2 ++ 3 files changed, 15 insertions(+), 2 deletions(-) diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h index 4e81d1eaa9..7d563a3c05 100644 --- a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -451,8 +451,8 @@ class OperatorWithKernel : public OperatorBase { size_t operator()(const OpKernelKey& key) const { int place = key.place_.which(); int data_type = static_cast(key.data_type_); - // NOTE: Number of places limit to 16. - int pre_hash = data_type << 4 | (place & 0x0F); + int pre_hash = data_type << NUM_PLACE_TYPE_LIMIT_IN_BIT | + (place & ((1 << NUM_PLACE_TYPE_LIMIT_IN_BIT) - 1)); return hash_(pre_hash); } }; diff --git a/paddle/platform/place.h b/paddle/platform/place.h index 1117476bb3..0efc693234 100644 --- a/paddle/platform/place.h +++ b/paddle/platform/place.h @@ -15,6 +15,7 @@ limitations under the License. */ #pragma once #include + #include "paddle/platform/variant.h" namespace paddle { @@ -46,8 +47,18 @@ struct IsGPUPlace : public boost::static_visitor { bool operator()(const GPUPlace &gpu) const { return true; } }; +// Define the max number of Place in bit length. i.e., the max number of places +// should be less equal than 2^(NUM_PLACE_TYPE_LIMIT_IN_BIT) +#define NUM_PLACE_TYPE_LIMIT_IN_BIT 4 + typedef boost::variant Place; +// static check number of place types is less equal than +// 2^(NUM_PLACE_TYPE_LIMIT_IN_BIT) +BOOST_MPL_ASSERT((boost::mpl::less_equal< + Place::types::size, + boost::mpl::long_<1 << NUM_PLACE_TYPE_LIMIT_IN_BIT>>)); + void set_place(const Place &); const Place &get_place(); diff --git a/paddle/platform/variant.h b/paddle/platform/variant.h index c2257af1b5..16ee00efe7 100644 --- a/paddle/platform/variant.h +++ b/paddle/platform/variant.h @@ -29,4 +29,6 @@ #endif #endif +#include +#include #include From b9336e6f8ce3397c3aca8433165331ad572bbb6f Mon Sep 17 00:00:00 2001 From: Abhinav Arora Date: Thu, 28 Sep 2017 11:01:57 -0700 Subject: [PATCH 050/342] Adding support for the sigmoid_cross_entropy_with_logits operator (#4448) * Adding support for the sigmoid_cross_entropy_with_logits operator * Fixing a typo in the cuda file * Adding Python documentation for sigmoid_cross_entropy_with_logits_op * Correcting typos in documentation * Adding unit tests for sigmoid_cross_entropy_with_logits_op * Addressing code review feedback --- .../sigmoid_cross_entropy_with_logits_op.cc | 150 ++++++++++++++++++ .../sigmoid_cross_entropy_with_logits_op.cu | 24 +++ .../sigmoid_cross_entropy_with_logits_op.h | 75 +++++++++ ...st_sigmoid_cross_entropy_with_logits_op.py | 66 ++++++++ 4 files changed, 315 insertions(+) create mode 100644 paddle/operators/sigmoid_cross_entropy_with_logits_op.cc create mode 100644 paddle/operators/sigmoid_cross_entropy_with_logits_op.cu create mode 100644 paddle/operators/sigmoid_cross_entropy_with_logits_op.h create mode 100644 python/paddle/v2/framework/tests/test_sigmoid_cross_entropy_with_logits_op.py diff --git a/paddle/operators/sigmoid_cross_entropy_with_logits_op.cc b/paddle/operators/sigmoid_cross_entropy_with_logits_op.cc new file mode 100644 index 0000000000..ede458e011 --- /dev/null +++ b/paddle/operators/sigmoid_cross_entropy_with_logits_op.cc @@ -0,0 +1,150 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#include "paddle/operators/sigmoid_cross_entropy_with_logits_op.h" + +namespace paddle { +namespace operators { + +using framework::Tensor; + +class SigmoidCrossEntropyWithLogitsOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(framework::InferShapeContextBase* ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should be not null."); + PADDLE_ENFORCE(ctx->HasInput("Labels"), + "Input(Labels) should be not null."); + PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) should be not null."); + + auto x_dims = ctx->GetInputDim("X"); + auto labels_dims = ctx->GetInputDim("Labels"); + PADDLE_ENFORCE_EQ(x_dims.size(), 2, "Input(X)'s rank should be 2."); + PADDLE_ENFORCE_EQ(labels_dims.size(), 2, + "Input(Labels)'s rank should be 2."); + PADDLE_ENFORCE_EQ(x_dims[0], labels_dims[0], + "The 1st dimension of Input(X) and Input(Labels) should " + "be equal."); + PADDLE_ENFORCE_EQ(x_dims[1], labels_dims[1], + "The 2nd dimension of Input(X) and Input(Labels) should " + "be equal."); + + ctx->SetOutputDim("Out", x_dims); + ctx->ShareLoD("X", /*->*/ "Out"); + } +}; + +class SigmoidCrossEntropyWithLogitsGradOp + : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(framework::InferShapeContextBase* ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should be not null."); + PADDLE_ENFORCE(ctx->HasInput("Labels"), + "Input(Labels) should be not null."); + PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), + "Input(Out@GRAD) shoudl be not null."); + PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")), + "Output(X@GRAD) should be not null."); + + auto x_dims = ctx->GetInputDim("X"); + auto labels_dims = ctx->GetInputDim("Labels"); + auto dout_dims = ctx->GetInputDim(framework::GradVarName("Out")); + PADDLE_ENFORCE_EQ(x_dims.size(), 2, "Input(X)'s rank should be 2."); + PADDLE_ENFORCE_EQ(labels_dims.size(), 2, + "Input(Labels)'s rank should be 2."); + PADDLE_ENFORCE_EQ(dout_dims.size(), 2, + "Input(Out@Grad)'s rank should be 2."); + PADDLE_ENFORCE_EQ(x_dims[0], labels_dims[0], + "The 1st dimension of Input(X) and Input(Labels) should " + "be equal."); + PADDLE_ENFORCE_EQ(x_dims[1], labels_dims[1], + "The 2nd dimension of Input(X) and Input(Labels) should " + "be equal."); + PADDLE_ENFORCE_EQ(x_dims[0], dout_dims[0], + "The 1st dimension of Input(X) and Input(Out@Grad) " + "should be equal."); + PADDLE_ENFORCE_EQ(x_dims[1], dout_dims[1], + "The 2nd dimension of Input(X) and Input(Out@Grad) " + "should be equal."); + + ctx->SetOutputDim(framework::GradVarName("X"), x_dims); + } +}; + +class SigmoidCrossEntropyWithLogitsOpMaker + : public framework::OpProtoAndCheckerMaker { + public: + SigmoidCrossEntropyWithLogitsOpMaker(framework::OpProto* proto, + framework::OpAttrChecker* op_checker) + : framework::OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", + "(Tensor, default Tensor), a 2-D tensor with shape N x D, " + "where N is the batch size and D is the number of classes. " + "This input is a tensor of logits computed by the previous " + " operator. Logits are unscaled log probabilities given as " + "log(p/(1-p))."); + AddInput("Labels", + "(Tensor, default Tensor), a 2-D tensor of the same type " + "and shape as X. This input is a tensor of probabalistic labels " + "for each logit"); + AddOutput("Out", + "(Tensor, default Tensor), a 2-D tensor with shape N x D " + " of elementwise logistic losses."); + AddComment(R"DOC( +SigmoidCrossEntropyWithLogits Operator. + +This measures the elementwise probability error in discrete classification tasks +in which each class is independent. This can be thought of as predicting labels +for a data-point that are not mutually exclusive. For example, a news article +can be about politics, technology or sports at the same time or none of these. + +The logistic loss is given as follows: + + loss = -Labels * log(sigmoid(X)) - (1 - Labels) * log(1 - sigmoid(X)) + +We know that sigmoid(X) = (1 / (1 + exp(-X))). By substituting this we get + + loss = X - X * Labels + log(1 + exp(-X)) + +For stability and to prevent overflow of exp(-X) when X < 0, +we can reformulate the loss as follows: + + loss = max(X, 0) - X * Labels + log(1 + exp(-abs(X))) + +Both the input `X` and `Labels` can carry the LoD (Level of Details) information. +However the output only shares the LoD with input `X`. +)DOC"); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP(sigmoid_cross_entropy_with_logits, + ops::SigmoidCrossEntropyWithLogitsOp, + ops::SigmoidCrossEntropyWithLogitsOpMaker, + sigmoid_cross_entropy_with_logits_grad, + ops::SigmoidCrossEntropyWithLogitsGradOp); +REGISTER_OP_CPU_KERNEL(sigmoid_cross_entropy_with_logits, + ops::SigmoidCrossEntropyWithLogitsKernel< + paddle::platform::CPUPlace, float>); +REGISTER_OP_CPU_KERNEL(sigmoid_cross_entropy_with_logits_grad, + ops::SigmoidCrossEntropyWithLogitsGradKernel< + paddle::platform::CPUPlace, float>); diff --git a/paddle/operators/sigmoid_cross_entropy_with_logits_op.cu b/paddle/operators/sigmoid_cross_entropy_with_logits_op.cu new file mode 100644 index 0000000000..32a39956a1 --- /dev/null +++ b/paddle/operators/sigmoid_cross_entropy_with_logits_op.cu @@ -0,0 +1,24 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#define EIGEN_USE_GPU +#include "paddle/operators/sigmoid_cross_entropy_with_logits_op.h" + +namespace ops = paddle::operators; +REGISTER_OP_GPU_KERNEL(sigmoid_cross_entropy_with_logits, + ops::SigmoidCrossEntropyWithLogitsKernel< + paddle::platform::GPUPlace, float>); +REGISTER_OP_GPU_KERNEL(sigmoid_cross_entropy_with_logits_grad, + ops::SigmoidCrossEntropyWithLogitsGradKernel< + paddle::platform::GPUPlace, float>); diff --git a/paddle/operators/sigmoid_cross_entropy_with_logits_op.h b/paddle/operators/sigmoid_cross_entropy_with_logits_op.h new file mode 100644 index 0000000000..a6de9043fd --- /dev/null +++ b/paddle/operators/sigmoid_cross_entropy_with_logits_op.h @@ -0,0 +1,75 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#pragma once +#include "paddle/framework/eigen.h" +#include "paddle/framework/op_registry.h" + +namespace paddle { +namespace operators { + +// Out = max(X, 0) - X * Labels + log(1 + exp(-abs(X))) +template +class SigmoidCrossEntropyWithLogitsKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext &context) const override { + const framework::Tensor *X = context.Input("X"); + const framework::Tensor *Labels = + context.Input("Labels"); + framework::Tensor *Out = context.Output("Out"); + Out->mutable_data(context.GetPlace()); + + auto x = framework::EigenVector::Flatten(*X); + auto labels = framework::EigenVector::Flatten(*Labels); + auto out = framework::EigenVector::Flatten(*Out); + auto place = context.GetEigenDevice(); + + // term1 = max(x, 0) + auto term1 = x.cwiseMax(static_cast(0)); + // term2 = x * labels + auto term2 = x * labels; + // term3 = log(1 + exp(-abs(x))) + auto term3 = (static_cast(1) + (-(x.abs())).exp()).log(); + + out.device(place) = term1 - term2 + term3; + } +}; + +// dX = sigmoid(X) - labels +template +class SigmoidCrossEntropyWithLogitsGradKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext &context) const override { + const framework::Tensor *X = context.Input("X"); + const framework::Tensor *Labels = + context.Input("Labels"); + const framework::Tensor *dOut = + context.Input(framework::GradVarName("Out")); + framework::Tensor *dX = + context.Output(framework::GradVarName("X")); + dX->mutable_data(context.GetPlace()); + + auto x = framework::EigenVector::Flatten(*X); + auto labels = framework::EigenVector::Flatten(*Labels); + auto dout = framework::EigenVector::Flatten(*dOut); + auto dx = framework::EigenVector::Flatten(*dX); + auto place = context.GetEigenDevice(); + + auto sigmoid_x = static_cast(1) / (static_cast(1) + (-x).exp()); + dx.device(place) = dout * (sigmoid_x - labels); + } +}; + +} // namespace operators +} // namespace paddle diff --git a/python/paddle/v2/framework/tests/test_sigmoid_cross_entropy_with_logits_op.py b/python/paddle/v2/framework/tests/test_sigmoid_cross_entropy_with_logits_op.py new file mode 100644 index 0000000000..e53856b38a --- /dev/null +++ b/python/paddle/v2/framework/tests/test_sigmoid_cross_entropy_with_logits_op.py @@ -0,0 +1,66 @@ +import numpy as np +from op_test import OpTest +from scipy.special import logit +from scipy.special import expit + + +class TestSigmoidCrossEntropyWithLogitsOp1(OpTest): + '''Test sigmoid_cross_entropy_with_logit_op with binary labels + ''' + + def setUp(self): + self.op_type = "sigmoid_cross_entropy_with_logits" + batch_size = 64 + num_classes = 20 + self.inputs = { + 'X': logit( + np.random.uniform(0, 1, (batch_size, num_classes)) + .astype("float32")), + 'Labels': np.random.randint(0, 2, (batch_size, num_classes)) + .astype("float32") + } + + # Fw Pass is implemented as elementwise sigmoid followed by + # elementwise logistic loss + # Labels * -log(sigmoid(X)) + (1 - labels) * -log(1 - sigmoid(X)) + sigmoid_X = expit(self.inputs['X']) + term1 = self.inputs['Labels'] * np.log(sigmoid_X) + term2 = (1 - self.inputs['Labels']) * np.log(1 - sigmoid_X) + self.outputs = {'Out': -term1 - term2} + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['X'], 'Out') + + +class TestSigmoidCrossEntropyWithLogitsOp2(OpTest): + '''Test sigmoid_cross_entropy_with_logit_op with probabalistic labels + ''' + + def setUp(self): + self.op_type = "sigmoid_cross_entropy_with_logits" + batch_size = 64 + num_classes = 20 + self.inputs = { + 'X': logit( + np.random.uniform(0, 1, (batch_size, num_classes)) + .astype("float32")), + 'Labels': np.random.uniform(0, 1, (batch_size, num_classes)) + .astype("float32") + } + + # Fw Pass is implemented as elementwise sigmoid followed by + # elementwise logistic loss + # Labels * -log(sigmoid(X)) + (1 - labels) * -log(1 - sigmoid(X)) + sigmoid_X = expit(self.inputs['X']) + term1 = self.inputs['Labels'] * np.log(sigmoid_X) + term2 = (1 - self.inputs['Labels']) * np.log(1 - sigmoid_X) + self.outputs = {'Out': -term1 - term2} + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['X'], 'Out') From b9c863723870ecec231ca0ff0b84e7beab8fe5ce Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 28 Sep 2017 11:09:42 -0700 Subject: [PATCH 051/342] Fix compile --- paddle/operators/concat_op.h | 2 +- paddle/operators/reduce_op.h | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/paddle/operators/concat_op.h b/paddle/operators/concat_op.h index bff453971a..c113f19fb5 100644 --- a/paddle/operators/concat_op.h +++ b/paddle/operators/concat_op.h @@ -44,7 +44,7 @@ class ConcatKernel : public framework::OpKernel { }; template -class ConcatGradKernel : public framework::OpKernel { +class ConcatGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const { auto* in = ctx.Input(framework::GradVarName("Out")); diff --git a/paddle/operators/reduce_op.h b/paddle/operators/reduce_op.h index 2fbf94e34f..ba3f3db81d 100644 --- a/paddle/operators/reduce_op.h +++ b/paddle/operators/reduce_op.h @@ -87,7 +87,7 @@ struct MaxOrMinGradFunctor { }; template -class ReduceKernel : public framework::OpKernel { +class ReduceKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { int rank = context.Input("X")->dims().size(); @@ -141,7 +141,7 @@ class ReduceKernel : public framework::OpKernel { }; template -class ReduceGradKernel : public framework::OpKernel { +class ReduceGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { int rank = context.Input("X")->dims().size(); From fd479631e16063f0bfb8fcd2dedf7067b39f1e56 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 28 Sep 2017 11:52:03 -0700 Subject: [PATCH 052/342] Stablize elementwise_mul by using double precision --- paddle/pybind/pybind.cc | 16 +++-- paddle/pybind/tensor_py.h | 15 ++++- python/paddle/v2/framework/tests/op_test.py | 60 +++++++++++++------ .../tests/test_elementwise_mul_op.py | 32 +++++----- 4 files changed, 78 insertions(+), 45 deletions(-) diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc index d85bf6c7fa..f4121e9d71 100644 --- a/paddle/pybind/pybind.cc +++ b/paddle/pybind/pybind.cc @@ -77,20 +77,18 @@ PYBIND11_PLUGIN(core) { }) .def("set", PyCPUTensorSetFromArray) .def("set", PyCPUTensorSetFromArray) + .def("set", PyCPUTensorSetFromArray) #ifndef PADDLE_ONLY_CPU .def("set", PyCUDATensorSetFromArray) .def("set", PyCUDATensorSetFromArray) + .def("set", PyCUDATensorSetFromArray) #endif .def("shape", [](Tensor &self) { return vectorize(self.dims()); }) - .def("set_float_element", - [](Tensor &self, size_t offset, float f) { - // TODO(yuyang18): Only support GPU now. - self.data()[offset] = f; - }) - .def("get_float_element", [](Tensor &self, size_t offset) -> float { - // TODO(yuyang18): Only support GPU now. - return self.data()[offset]; - }); + .def("set_float_element", TensorSetElement) + .def("get_float_element", TensorGetElement) + .def("set_double_element", TensorSetElement) + .def("get_double_element", TensorGetElement) + .def("dtype", [](Tensor &self) { return ToDataType(self.type()); }); py::class_(m, "LoDTensor") .def_buffer( diff --git a/paddle/pybind/tensor_py.h b/paddle/pybind/tensor_py.h index 10621e90ee..3e3e6bc031 100644 --- a/paddle/pybind/tensor_py.h +++ b/paddle/pybind/tensor_py.h @@ -73,10 +73,23 @@ struct CastToPyBufferImpl { }; } // namespace details inline py::buffer_info CastToPyBuffer(framework::Tensor &tensor) { - auto buffer_info = details::CastToPyBufferImpl()(tensor); + auto buffer_info = + details::CastToPyBufferImpl()(tensor); return buffer_info; } +template +T TensorGetElement(framework::Tensor &self, size_t offset) { + PADDLE_ENFORCE(platform::is_cpu_place(self.place())); + return self.data()[offset]; +} + +template +void TensorSetElement(framework::Tensor &self, size_t offset, T elem) { + PADDLE_ENFORCE(platform::is_cpu_place(self.place())); + self.data()[offset] = elem; +} + template void PyCPUTensorSetFromArray( framework::Tensor &self, diff --git a/python/paddle/v2/framework/tests/op_test.py b/python/paddle/v2/framework/tests/op_test.py index 89979044f2..70ae50d401 100644 --- a/python/paddle/v2/framework/tests/op_test.py +++ b/python/paddle/v2/framework/tests/op_test.py @@ -69,24 +69,27 @@ def set_input(scope, op, inputs, place): def set_output_grad(scope, op, outputs, place): + def __set_tensor__(name): + out_tensor = scope.find_var(name).get_tensor() + grad_tensor = scope.new_var(grad_var_name(name)).get_tensor() + out_dtype = out_tensor.dtype() + if out_dtype == core.DataType.FP64: + data = np.ones(out_tensor.shape(), dtype=np.float64) + elif out_dtype == core.DataType.FP32: + data = np.ones(out_tensor.shape(), dtype=np.float32) + else: + raise ValueError("Not supported data type " + str(out_dtype)) + + grad_tensor.set(data, place) + for out_name, out_dup in Operator.get_op_outputs(op.type()): if out_name in outputs: if out_dup: sub_out = outputs[out_name] for sub_out_name, _ in sub_out: - out_tensor = scope.find_var(sub_out_name).get_tensor() - grad_tensor = scope.new_var(grad_var_name( - sub_out_name)).get_tensor() - grad_tensor.set_dims(out_tensor.shape()) - data = np.ones(out_tensor.shape(), dtype=np.float32) - grad_tensor.set(data, place) + __set_tensor__(sub_out_name) else: - out_tensor = scope.find_var(out_name).get_tensor() - grad_tensor = scope.new_var(grad_var_name(out_name)).get_tensor( - ) - grad_tensor.set_dims(out_tensor.shape()) - data = np.ones(out_tensor.shape(), dtype=np.float32) - grad_tensor.set(data, place) + __set_tensor__(out_name) def get_numeric_gradient(scope, @@ -96,7 +99,6 @@ def get_numeric_gradient(scope, output_names, delta=0.005, in_place=False): - set_input(scope, op, inputs, core.CPUPlace()) tensor_to_check = scope.find_var(input_to_check).get_tensor() @@ -115,7 +117,29 @@ def get_numeric_gradient(scope, tensor_to_check = scope.find_var(input_to_check).get_tensor() tensor_size = product(tensor_to_check.get_dims()) - gradient_flat = np.zeros(shape=(tensor_size, ), dtype='float32') + tensor_to_check_dtype = tensor_to_check.dtype() + if tensor_to_check_dtype == core.DataType.FP32: + tensor_to_check_dtype = np.float32 + elif tensor_to_check_dtype == core.DataType.FP64: + tensor_to_check_dtype = np.float64 + else: + raise ValueError("Not supported data type " + str( + tensor_to_check_dtype)) + + gradient_flat = np.zeros(shape=(tensor_size, ), dtype=tensor_to_check_dtype) + + def __get_elem__(tensor, i): + if tensor_to_check_dtype == np.float32: + return tensor.get_float_element(i) + else: + return tensor.get_double_element(i) + + def __set_elem__(tensor, i, e): + if tensor_to_check_dtype == np.float32: + tensor.set_float_element(i, e) + else: + tensor.set_double_element(i, e) + # we only compute gradient of one element each time. # we use a for loop to compute the gradient of every element. for i in xrange(tensor_size): @@ -123,20 +147,20 @@ def get_numeric_gradient(scope, set_input(scope, op, inputs, core.CPUPlace()) # get one input element throw it's index i. - origin = tensor_to_check.get_float_element(i) + origin = __get_elem__(tensor_to_check, i) # add delta to it, run op and then get the sum of the result tensor. x_pos = origin + delta - tensor_to_check.set_float_element(i, x_pos) + __set_elem__(tensor_to_check, i, x_pos) y_pos = get_output() if in_place: set_input(scope, op, inputs, core.CPUPlace()) x_neg = origin - delta - tensor_to_check.set_float_element(i, x_neg) + __set_elem__(tensor_to_check, i, x_neg) y_neg = get_output() - tensor_to_check.set_float_element(i, origin) + __set_elem__(tensor_to_check, i, origin) gradient_flat[i] = (y_pos - y_neg) / delta / 2 return gradient_flat.reshape(tensor_to_check.get_dims()) diff --git a/python/paddle/v2/framework/tests/test_elementwise_mul_op.py b/python/paddle/v2/framework/tests/test_elementwise_mul_op.py index cee4385a81..261ca9cb3d 100644 --- a/python/paddle/v2/framework/tests/test_elementwise_mul_op.py +++ b/python/paddle/v2/framework/tests/test_elementwise_mul_op.py @@ -7,8 +7,8 @@ class ElementwiseMulOp(OpTest): def setUp(self): self.op_type = "elementwise_mul" self.inputs = { - 'X': np.random.uniform(0.1, 1, [13, 17]).astype("float32"), - 'Y': np.random.uniform(0.1, 1, [13, 17]).astype("float32") + 'X': np.random.uniform(0.1, 1, [13, 17]).astype("float64"), + 'Y': np.random.uniform(0.1, 1, [13, 17]).astype("float64") } self.outputs = {'Out': np.multiply(self.inputs['X'], self.inputs['Y'])} @@ -16,23 +16,21 @@ class ElementwiseMulOp(OpTest): self.check_output() def test_check_grad_normal(self): - self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.1) + self.check_grad(['X', 'Y'], 'Out') def test_check_grad_ingore_x(self): - self.check_grad( - ['Y'], 'Out', max_relative_error=0.1, no_grad_set=set("X")) + self.check_grad(['Y'], 'Out', no_grad_set=set("X")) def test_check_grad_ingore_y(self): - self.check_grad( - ['X'], 'Out', max_relative_error=0.1, no_grad_set=set('Y')) + self.check_grad(['X'], 'Out', no_grad_set=set('Y')) class TestElementwiseMulOp_Vector(ElementwiseMulOp): def setUp(self): self.op_type = "elementwise_mul" self.inputs = { - 'X': np.random.random((32, )).astype("float32"), - 'Y': np.random.random((32, )).astype("float32") + 'X': np.random.random((32, )).astype("float64"), + 'Y': np.random.random((32, )).astype("float64") } self.outputs = {'Out': np.multiply(self.inputs['X'], self.inputs['Y'])} @@ -41,8 +39,8 @@ class TestElementwiseMulOp_broadcast_0(ElementwiseMulOp): def setUp(self): self.op_type = "elementwise_mul" self.inputs = { - 'X': np.random.rand(2, 3, 4).astype(np.float32), - 'Y': np.random.rand(2).astype(np.float32) + 'X': np.random.rand(2, 3, 4).astype(np.float64), + 'Y': np.random.rand(2).astype(np.float64) } self.attrs = {'axis': 0} @@ -55,8 +53,8 @@ class TestElementwiseMulOp_broadcast_1(ElementwiseMulOp): def setUp(self): self.op_type = "elementwise_mul" self.inputs = { - 'X': np.random.rand(2, 3, 4).astype(np.float32), - 'Y': np.random.rand(3).astype(np.float32) + 'X': np.random.rand(2, 3, 4).astype(np.float64), + 'Y': np.random.rand(3).astype(np.float64) } self.attrs = {'axis': 1} @@ -69,8 +67,8 @@ class TestElementwiseMulOp_broadcast_2(ElementwiseMulOp): def setUp(self): self.op_type = "elementwise_mul" self.inputs = { - 'X': np.random.rand(2, 3, 4).astype(np.float32), - 'Y': np.random.rand(4).astype(np.float32) + 'X': np.random.rand(2, 3, 4).astype(np.float64), + 'Y': np.random.rand(4).astype(np.float64) } self.outputs = { @@ -82,8 +80,8 @@ class TestElementwiseMulOp_broadcast_3(ElementwiseMulOp): def setUp(self): self.op_type = "elementwise_mul" self.inputs = { - 'X': np.random.rand(2, 3, 4, 5).astype(np.float32), - 'Y': np.random.rand(3, 4).astype(np.float32) + 'X': np.random.rand(2, 3, 4, 5).astype(np.float64), + 'Y': np.random.rand(3, 4).astype(np.float64) } self.attrs = {'axis': 1} From 6ed78729b2b63981e3521cb79b4b53137d327b2a Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 28 Sep 2017 11:59:17 -0700 Subject: [PATCH 053/342] Simplify op_test --- python/paddle/v2/framework/tests/op_test.py | 42 +++++++++------------ 1 file changed, 18 insertions(+), 24 deletions(-) diff --git a/python/paddle/v2/framework/tests/op_test.py b/python/paddle/v2/framework/tests/op_test.py index 70ae50d401..23794151bd 100644 --- a/python/paddle/v2/framework/tests/op_test.py +++ b/python/paddle/v2/framework/tests/op_test.py @@ -12,17 +12,19 @@ def grad_var_name(var_name): def create_op(scope, op_type, inputs, outputs, attrs): kwargs = dict() + def __create_var__(name, var_name): + scope.new_var(var_name) + kwargs[name].append(var_name) + for in_name, in_dup in Operator.get_op_inputs(op_type): if in_name in inputs: kwargs[in_name] = [] if in_dup: sub_in = inputs[in_name] for sub_in_name, _ in sub_in: - var = scope.new_var(sub_in_name) - kwargs[in_name].append(sub_in_name) + __create_var__(in_name, sub_in_name) else: - var = scope.new_var(in_name) - kwargs[in_name].append(in_name) + __create_var__(in_name, in_name) for out_name, out_dup in Operator.get_op_outputs(op_type): if out_name in outputs: @@ -30,11 +32,9 @@ def create_op(scope, op_type, inputs, outputs, attrs): if out_dup: sub_out = outputs[out_name] for sub_out_name, _ in sub_out: - var = scope.new_var(sub_out_name) - kwargs[out_name].append(sub_out_name) + __create_var__(out_name, sub_out_name) else: - var = scope.new_var(out_name) - kwargs[out_name].append(out_name) + __create_var__(out_name, out_name) for attr_name in Operator.get_op_attr_names(op_type): if attr_name in attrs: @@ -44,28 +44,22 @@ def create_op(scope, op_type, inputs, outputs, attrs): def set_input(scope, op, inputs, place): + def __set_input__(var_name, var): + tensor = scope.find_var(var_name).get_tensor() + if isinstance(var, tuple): + tensor.set_lod(var[1]) + var = var[0] + tensor.set_dims(var.shape) + tensor.set(var, place) + for in_name, in_dup in Operator.get_op_inputs(op.type()): if in_name in inputs: if in_dup: sub_in = inputs[in_name] for sub_in_name, sub_in_val in sub_in: - var = scope.find_var(sub_in_name) - tensor = var.get_tensor() - sub_in_array = sub_in_val[0] \ - if isinstance(sub_in_val, tuple) else sub_in_val - tensor.set_dims(sub_in_array.shape) - tensor.set(sub_in_array, place) - if isinstance(sub_in_val, tuple): - tensor.set_lod(sub_in_val[1]) + __set_input__(sub_in_name, sub_in_val) else: - var = scope.find_var(in_name) - tensor = var.get_tensor() - in_val = inputs[in_name] - in_array = in_val[0] if isinstance(in_val, tuple) else in_val - tensor.set_dims(in_array.shape) - tensor.set(in_array, place) - if isinstance(in_val, tuple): - tensor.set_lod(in_val[1]) + __set_input__(in_name, inputs[in_name]) def set_output_grad(scope, op, outputs, place): From 87da1542260bfe6d8002c8da05008d5dde426b7c Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 28 Sep 2017 12:04:09 -0700 Subject: [PATCH 054/342] FIx sigmoid_xe_with_logits_op compile --- paddle/operators/sigmoid_cross_entropy_with_logits_op.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/paddle/operators/sigmoid_cross_entropy_with_logits_op.h b/paddle/operators/sigmoid_cross_entropy_with_logits_op.h index a6de9043fd..41c619f181 100644 --- a/paddle/operators/sigmoid_cross_entropy_with_logits_op.h +++ b/paddle/operators/sigmoid_cross_entropy_with_logits_op.h @@ -21,7 +21,7 @@ namespace operators { // Out = max(X, 0) - X * Labels + log(1 + exp(-abs(X))) template -class SigmoidCrossEntropyWithLogitsKernel : public framework::OpKernel { +class SigmoidCrossEntropyWithLogitsKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &context) const override { const framework::Tensor *X = context.Input("X"); @@ -48,7 +48,7 @@ class SigmoidCrossEntropyWithLogitsKernel : public framework::OpKernel { // dX = sigmoid(X) - labels template -class SigmoidCrossEntropyWithLogitsGradKernel : public framework::OpKernel { +class SigmoidCrossEntropyWithLogitsGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &context) const override { const framework::Tensor *X = context.Input("X"); From d53b38e340b5f56f9547b53449fe6cdceefd3b97 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 28 Sep 2017 14:32:25 -0700 Subject: [PATCH 055/342] Follow comments, change KT to KERNEL_TYPE --- paddle/framework/op_registry.h | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h index 804f901dfa..4db38badae 100644 --- a/paddle/framework/op_registry.h +++ b/paddle/framework/op_registry.h @@ -103,18 +103,19 @@ class OpRegistrar : public Registrar { template struct OpKernelRegistrarFunctor; -template -struct OpKernelRegistrarFunctor { - using KT = typename std::tuple_element>::type; +template +struct OpKernelRegistrarFunctor { + using KERNEL_TYPE = + typename std::tuple_element>::type; void operator()(const char* op_type) const { - using T = typename KT::ELEMENT_TYPE; + using T = typename KERNEL_TYPE::ELEMENT_TYPE; OperatorWithKernel::OpKernelKey key(ToDataType(std::type_index(typeid(T))), PlaceType()); - OperatorWithKernel::AllOpKernels()[op_type][key].reset(new KT); + OperatorWithKernel::AllOpKernels()[op_type][key].reset(new KERNEL_TYPE); - constexpr auto size = std::tuple_size>::value; - OpKernelRegistrarFunctor + constexpr auto size = std::tuple_size>::value; + OpKernelRegistrarFunctor func; func(op_type); } From 5d6d2bc1b991a1a46bac407618f9c490af9d27e9 Mon Sep 17 00:00:00 2001 From: Abhinav Arora Date: Thu, 28 Sep 2017 14:52:03 -0700 Subject: [PATCH 056/342] Fixing typos and grammatical mistakes in Refactorization documents (#4479) --- doc/design/refactorization.md | 183 +++++++++++++++++----------------- 1 file changed, 92 insertions(+), 91 deletions(-) diff --git a/doc/design/refactorization.md b/doc/design/refactorization.md index ad801ca421..a07675b3e0 100644 --- a/doc/design/refactorization.md +++ b/doc/design/refactorization.md @@ -1,40 +1,40 @@ # Design Doc: Refactorization Overview -The goal of refactorizaiton include: +The goals of refactoring include: -1. Make it easy for external contributors to write new elementory computaiton operations. -1. Make the codebase clean and readable. -1. Introduce a new design of computation representation -- a computation graph of operators and variables. -1. The graph representation helps implementing auto-scalable and auto fault recoverable distributed computing. +1. Making it easy for external contributors to write new elementary computation operations. +1. Making the codebase clean and readable. +1. Designing a new computation representation -- a computation graph of operators and variables. +1. Implementing auto-scalability and auto fault recoverable distributed computing with the help of computation graphs. ## Computation Graphs -1. PaddlePaddle represent the computation, training and inference of DL models, by computation graphs. +1. PaddlePaddle represents the computation, training and inference of Deep Learning models, by computation graphs. - 1. Please dig into [computation graphs](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/graph.md) for a solid example. + 1. Please refer to [computation graphs](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/graph.md) for a concrete example. -1. Users write Python programs to describe the graphs and run it (locally or remotely). +1. Users write Python programs to describe the graphs and run them (locally or remotely). 1. A graph is composed of *variables* and *operators*. -1. The description of graphs must be able to be serialized/deserialized, so it +1. The description of graphs must be capable of being serialized/deserialized, so that - 1. could to be sent to the cloud for distributed execution, and - 1. be sent to clients for mobile or enterprise deployment. + 1. It can to be sent to the cloud for distributed execution, and + 1. It can be sent to clients for mobile or enterprise deployment. -1. The Python program do +1. The Python program does the following steps - 1. *compilation*: runs a Python program to generate a protobuf message representation of the graph and send it to + 1. *compilation*: run a Python program to generate a protobuf message representation of the graph and send it to 1. the C++ library `libpaddle.so` for local execution, 1. the master process of a distributed training job for training, or 1. the server process of a Kubernetes serving job for distributed serving. - 1. *execution*: according to the protobuf message, constructs instances of class `Variable` and `OperatorBase`, and run them. + 1. *execution*: execute the graph by constructing instances of class [`Variable`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/variable.h#L24) and [`OperatorBase`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/operator.h#L70), according to the protobuf message. -## Description and Realization +## Description and Realization of Computation Graph -At compile time, the Python program generates protobuf message representation of the graph, or the description of the graph. +At compile time, the Python program generates a protobuf message representation of the graph, or the description of the graph. -At runtime, the C++ program realizes the graph and run it. +At runtime, the C++ program realizes the graph and runs it. | | Representation (protobuf messages) | Realization (C++ class objects) | |---|---|---| @@ -42,30 +42,31 @@ At runtime, the C++ program realizes the graph and run it. |Operation|[OpDesc](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/framework.proto#L35)|[Operator](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/operator.h#L64)| |Block|BlockDesc|Block| -The word *graph* is exchangable with *block* in this document. A graph represent computation steps and local variables as a C++/Java program block, or a pair of { and }. +The word *graph* is interchangeable with *block* in this document. A graph represents computation steps and local variables similar to a C++/Java program block, or a pair of parentheses(`{` and `}`). ## Compilation and Execution -1. Run an applicaton Python program to describe the graph. In particular, +1. Run an application Python program to describe the graph. In particular, the Python application program does the following: - 1. create VarDesc to represent local/intermediate variables, - 1. create operators and set attributes, - 1. validate attribute values, - 1. inference the type and the shape of variables, - 1. plan for memory-reuse for variables, - 1. generate backward and optimization part of the Graph. - 1. possiblly split the graph for distributed training. + 1. Create `VarDesc` to represent local/intermediate variables, + 1. Create operators and set attributes, + 1. Validate attribute values, + 1. Infer the type and the shape of variables, + 1. Plan memory-reuse for variables, + 1. Generate the backward graph + 1. Optimize the computation graph. + 1. Potentially, split the graph for distributed training. -1. The invocation of `train` or `infer` in the application Python program: +1. The invocation of `train` or [`infer`](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/inference.py#L108) methods in the application Python program does the following: - 1. create a new Scope instance in the [scope hierarchy](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/scope.md) for each run of a block, + 1. Create a new Scope instance in the [scope hierarchy](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/scope.md) for each run of a block, 1. realize local variables defined in the BlockDesc message in the new scope, 1. a scope is similar to the stack frame in programming languages, - 1. create an instance of class `Block`, in which, + 1. Create an instance of class `Block`, in which, 1. realize operators in the BlockDesc message, - 1. run the Block by calling + 1. Run the Block by calling 1. `Block::Eval(vector* targets)` for forward and backward computations, or 1. `Block::Eval(vector* targets)` for optimization. @@ -76,14 +77,14 @@ The word *graph* is exchangable with *block* in this document. A graph represen Compile Time -> IR -> Runtime ``` -### Benefit +### Benefits of IR - Optimization ```text Compile Time -> IR -> Optimized IR -> Runtime ``` -- Send automatically partitioned IR to different nodes. - - Automatic data parallel +- Automatically send partitioned IR to different nodes. + - Automatic Data Parallelism ```text Compile Time |-> Single GPU IR @@ -92,7 +93,7 @@ Compile Time -> IR -> Runtime |-> Node-1 (runs trainer-IR-1) |-> Node-2 (runs pserver-IR) ``` - - Automatic model parallel (planned for future) + - Automatic Model Parallelism (planned for future) --- @@ -105,10 +106,10 @@ Compile Time -> IR -> Runtime # Operator ![class_diagram](http://api.paddlepaddle.org/graphviz?dot=https://gist.githubusercontent.com/reyoung/53df507f6749762675dff3e7ce53372f/raw/dd598e8f1976f5759f58af5e5ef94738a6b2e661/op.dot) -* `Operator` is the fundamental building block as the user interface. - * Operator stores input/output variable name, and attributes. - * The `InferShape` interface is used to infer output variable shapes by its input shapes. - * Use `Run` to compute `input variables` to `output variables`. +* `Operator` is the fundamental building block of the user interface. + * Operator stores input/output variable names, and attributes. + * The `InferShape` interface is used to infer the shape of the output variable shapes based on the shapes of the input variables. + * Use `Run` to compute the `output` variables from the `input` variables. --- @@ -126,30 +127,30 @@ Compile Time -> IR -> Runtime # Why separate Kernel and Operator * Separate GPU and CPU code. - * Make Paddle can run without GPU. -* Make one operator (which is user interface) can contain many implementations. - * Same mul op, different FP16, FP32 Kernel. different MKL, eigen kernel. + * Make Paddle capable of running without GPU. +* Make one operator (which is a user interface) and create many implementations. + * For example, same multiplication op can have different implementations kernels such as FP16 kernel, FP32 kernel, MKL, eigen kernel. --- # Libraries for Kernel development * `Eigen::Tensor` contains basic math and element-wise functions. * Note that `Eigen::Tensor` has broadcast implementation. - * Limit number of `tensor.device(dev) = ` in your code. + * Limit the number of `tensor.device(dev) = ` in your code. * `thrust::tranform` and `std::transform`. - * `thrust` has the same API as C++ standard library. Using `transform` can quickly implement a customized elementwise kernel. - * `thrust` has more complex API, like `scan`, `reduce`, `reduce_by_key`. + * `thrust` has the same API as C++ standard library. Using `transform`, one can quickly implement customized elementwise kernels. + * `thrust` also has more complex APIs, like `scan`, `reduce`, `reduce_by_key`. * Hand-writing `GPUKernel` and `CPU` code - * Do not write `.h`. CPU Kernel should be in `.cc`. GPU kernel should be in `.cu`. (`GCC` cannot compile GPU code.) + * Do not write in header (`.h`) files. CPU Kernel should be in cpp source (`.cc`) and GPU kernels should be in cuda (`.cu`) files. (GCC cannot compile GPU code.) --- -# Operator Register +# Operator Registration -## Why register is necessary? +## Why registration is necessary? We need a method to build mappings between Op type names and Op classes. -## How to do the register? +## How is registration implemented? -Maintain a map, whose key is the type name and value is corresponding Op constructor. +Maintaining a map, whose key is the type name and the value is the corresponding Op constructor. --- # The Registry Map @@ -177,34 +178,34 @@ REGISTER_OP(op_type, op_class, op_maker_class, grad_op_type, grad_op_class) REGISTER_OP_WITHOUT_GRADIENT(op_type, op_class, op_maker_class) ``` -### `USE` Macros -make sure the registration process is executed and linked. +### USE Macros +Make sure the registration process is executed and linked. --- -# Register Process -1. Write Op class, as well as its gradient Op class if there is. -2. Write Op maker class. In the constructor, describe its inputs, outputs, and attributes. -3. Invoke macro `REGISTER_OP`. The macro will - 1. call maker class to complete `proto` and `checker` - 2. with the completed `proto` and `checker`, build a new key-value pair in the `OpInfoMap` +# Registration Process +1. Write an Op class and its gradient Op class, if required. +2. Write an Op maker class. In the constructor of this class, describe the inputs, outputs and attributes of the operator. +3. Invoke the macro `REGISTER_OP`. This macro will + 1. Call maker class to complete the `proto` and the `checker` + 2. Using the completed `proto` and `checker`, it will add a new key-value pair to the `OpInfoMap` -4. Invoke `USE` macro in where the Op is used to make sure it is linked. +4. Invoke the `USE` macro in which the Op is used, to make sure that it is linked. --- # Backward Module (1/2) ### Create Backward Operator -- Mapping from forwarding Op to backward Op +- Mapping from forward Op to backward Op ![backward](https://gist.githubusercontent.com/dzhwinter/a6fbd4623ee76c459f7f94591fd1abf0/raw/61026ab6e518e66bde66a889bc42557a1fccff33/backward.png) --- # Backward Module (2/2) ### Build Backward Network -- **Input** graph of forwarding operators -- **Output** graph of backward operators -- **corner case in construction** - - shared variable => insert `Add` operator - - no gradient => insert `fill_zero_grad` operator - - recursive netOp => call `Backward` recursively +- **Input**: graph of forwarding operators +- **Output**: graph of backward operators +- **Corner cases in construction** + - Shared Variables => insert an `Add` operator to combine gradients + - No Gradient => insert a `fill_zero_grad` operator + - Recursive NetOp => call `Backward` recursively - RNN Op => recursively call `Backward` on stepnet @@ -213,41 +214,41 @@ make sure the registration process is executed and linked. * `Tensor` is an n-dimension array with type. * Only dims and data pointers are stored in `Tensor`. - * All operators on `Tensor` is written in `Operator` or global functions. - * variable length Tensor design [LoDTensor](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/lod_tensor.md) -* `Variable` is the inputs and outputs of an operator. Not just `Tensor`. - * step_scopes in RNN is a variable and not a tensor. -* `Scope` is where variables store at. - * map - * `Scope` has a hierarchical structure. The local scope can get variable from its parent scope. + * All operations on `Tensor` are written in `Operator` or global functions. + * Variable length Tensor design [LoDTensor](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/lod_tensor.md) +* `Variable` instances are the inputs and the outputs of an operator. Not just `Tensor`. + * `step_scopes` in RNN is a variable and not a tensor. +* `Scope` is where variables are stores. + * map + * `Scope` has a hierarchical structure. The local scope can get variables from its parent scope. --- # Block (in design) ## the difference with original RNNOp -- as an operator is more intuitive than `RNNOp`, -- offers new interface `Eval(targets)` to deduce the minimal block to `Run`, -- fits the compile-time/ runtime separation design. - - during the compilation, `SymbolTable` stores `VarDesc`s and `OpDesc`s and serialize to a `BlockDesc` - - when graph executes, a Block with `BlockDesc` passed in creates `Op` and `Var` then `Run` +- As an operator is more intuitive than `RNNOp`, +- Offers a new interface `Eval(targets)` to deduce the minimal block to `Run`, +- Fits the compile-time/ runtime separation design paradigm. + - During the compilation, `SymbolTable` stores `VarDesc`s and `OpDesc`s and serialize to a `BlockDesc` + - When graph executes, a Block with `BlockDesc` is passed. It then creates `Op` and `Var` instances and then invokes `Run`. --- # Milestone -- take Paddle/books as the main line, the requirement of the models motivates framework refactoring, -- model migration - - framework development gives **priority support** to model migration, for example, +- Take Paddle/books as the main line, the requirement of the models motivates framework refactoring, +- Model migration + - Framework development gives **priority support** to model migration, for example, - the MNIST demo needs a Python interface, - the RNN models require the framework to support `LoDTensor`. - - determine some timelines, - - heavily-relied Ops need to be migrated first, - - different models can be migrated parallelly. -- improve the framework at the same time -- accept imperfection, concentrated on solving the specific problem at the right price. + - Determine some timelines, + - Frequently used Ops need to be migrated first, + - Different models can be migrated in parallel. +- Improve the framework at the same time +- Accept imperfection, concentrate on solving the specific problem at the right price. --- # Control the migration quality -- compare the performance of migrated models with old ones. -- follow google C style -- build the automatic workflow of generating Python/C++ documentations - - the documentation of layers and ops should be written inside the code - - take the documentation quality into account when doing PR - - preview the documentations, read and improve them from users' perspective +- Compare the performance of migrated models with old ones. +- Follow the google C++ style +- Build the automatic workflow of generating Python/C++ documentations. + - The documentation of layers and ops should be written inside the code. + - Take the documentation quality into account when submitting pull requests. + - Preview the documentations, read and improve them from a user's perspective. From 88a8eedda17dead5471f4d9a64e291e49b522775 Mon Sep 17 00:00:00 2001 From: zchen0211 Date: Thu, 28 Sep 2017 14:36:38 -0700 Subject: [PATCH 057/342] scatter gather gpu gather scatter gpu --- paddle/operators/cond_op.cc | 6 +- paddle/operators/gather.cu.h | 84 ++++++++++++++++++ paddle/operators/gather.h | 35 ++++---- paddle/operators/gather_op.cc | 9 +- paddle/operators/gather_op.cu | 70 +++++++++++++++ paddle/operators/gather_op.h | 27 ++++-- paddle/operators/gather_test.cc | 2 +- paddle/operators/scatter.cu.h | 86 +++++++++++++++++++ paddle/operators/scatter.h | 45 ++++------ paddle/operators/scatter_op.cc | 7 +- paddle/operators/scatter_op.cu | 63 ++++++++++++++ paddle/operators/scatter_op.h | 12 ++- paddle/operators/scatter_test.cc | 2 +- .../v2/framework/tests/test_scatter_op.py | 4 +- 14 files changed, 375 insertions(+), 77 deletions(-) create mode 100644 paddle/operators/gather.cu.h create mode 100644 paddle/operators/gather_op.cu create mode 100644 paddle/operators/scatter.cu.h create mode 100644 paddle/operators/scatter_op.cu diff --git a/paddle/operators/cond_op.cc b/paddle/operators/cond_op.cc index aaffa6661f..157656786a 100644 --- a/paddle/operators/cond_op.cc +++ b/paddle/operators/cond_op.cc @@ -169,8 +169,8 @@ void CondOp::Run(const Scope& scope, tensor_child->Resize(dim); tensor_child->mutable_data(dim, platform::CPUPlace()); - Gather(dev_ctx.GetPlace(), tensor_parent, &index_tensors[i], - tensor_child); + CPUTGather(dev_ctx.GetPlace(), tensor_parent, &index_tensors[i], + tensor_child); } } @@ -194,7 +194,7 @@ void CondOp::Run(const Scope& scope, PADDLE_ENFORCE_NOT_NULL(v); LoDTensor* tensor_child = v->GetMutable(); - ScatterUpdate(dev_ctx.GetPlace(), tensor_child, &index_tensors[i], + ScatterAssign(dev_ctx.GetPlace(), tensor_child, &index_tensors[i], tensor_parent); } } diff --git a/paddle/operators/gather.cu.h b/paddle/operators/gather.cu.h new file mode 100644 index 0000000000..c96071e295 --- /dev/null +++ b/paddle/operators/gather.cu.h @@ -0,0 +1,84 @@ +/* Copyright (c) 2016 PaddlePaddle Authors All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#pragma once +#include "paddle/framework/tensor.h" +#include "paddle/platform/place.h" + +namespace paddle { +namespace operators { + +using framework::Tensor; +using platform::Place; + +#define CUDA_1D_KERNEL_LOOP(i, n) \ + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ + i += blockDim.x * gridDim.x) + +template +__global__ void GatherCUDAKernel(const T* params, const int* indices, T* output, + size_t index_size, size_t slice_size) { + CUDA_1D_KERNEL_LOOP(i, index_size * slice_size) { + int indices_i = i / slice_size; + int slice_i = i - indices_i * slice_size; // offset inside the slice + int gather_i = indices[indices_i]; + int params_i = gather_i * slice_size + slice_i; + *(output + i) = *(params + params_i); + } +} + +// Implementation of GPU copy: +template +struct GPUGather { + void operator()(const T* src, const int* index, const int slice_size, + const int index_size, T* output) { + int block = 512; + int n = slice_size * index_size; + int grid = (n + block - 1) / block; + GatherCUDAKernel<<>>(src, index, output, index_size, + slice_size); + } +}; + +/** + * A thin wrapper on gpu tensor + * Return a new tensor from source tensor, gathered according to index + * input[src]: type-T source Tensor + * input[index]: type-int index Tensor (1-D) + * return: output tensor + */ +template +void GPUTGather(const Place& place, const Tensor* src, const Tensor* index, + Tensor* output) { + PADDLE_ENFORCE(platform::is_gpu_place(place)); + // check index of shape 1-D + PADDLE_ENFORCE(index->dims().size() == 1); + int index_size = index->dims()[0]; + + auto src_dims = src->dims(); + framework::DDim output_dims(src_dims); + output_dims[0] = index_size; + + // slice size + int slice_size = 1; + for (int i = 1; i < src_dims.size(); ++i) slice_size *= src_dims[i]; + + // Gathering + GPUGather gather_functor; + gather_functor(src->data(), index->data(), slice_size, index_size, + output->data()); +} + +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/gather.h b/paddle/operators/gather.h index 92fb51ec17..a3db17bd3d 100644 --- a/paddle/operators/gather.h +++ b/paddle/operators/gather.h @@ -26,31 +26,31 @@ namespace operators { // Implementation of CPU copy template -void CPUGather(const T* src, const int* indices, const int slice_size, - const int index_size, T* output) { - const size_t slice_bytes = slice_size * sizeof(T); +struct CPUGather { + void operator()(const T* src, const int* indices, const int slice_size, + const int index_size, T* output) { + const size_t slice_bytes = slice_size * sizeof(T); - for (int i = 0; i < index_size; ++i) { - int index_ = indices[i]; - memcpy(output + i * slice_size, src + index_ * slice_size, slice_bytes); + for (int i = 0; i < index_size; ++i) { + int index_ = indices[i]; + memcpy(output + i * slice_size, src + index_ * slice_size, slice_bytes); + } } -} - -// Implementation of GPU copy: -template -void GPUGather(const T* src, const int* index, const int slice_size, - const int index_size, T* output); +}; /** + * A thin wrapper on cpu tensor * Return a new tensor from source tensor, gathered according to index * input[src]: type-T source Tensor * input[index]: type-int index Tensor (1-D) * return: output tensor */ template -void Gather(const platform::Place& place, const paddle::framework::Tensor* src, - const paddle::framework::Tensor* index, - paddle::framework::Tensor* output) { +void CPUTGather(const platform::Place& place, + const paddle::framework::Tensor* src, + const paddle::framework::Tensor* index, + paddle::framework::Tensor* output) { + PADDLE_ENFORCE(platform::is_cpu_place(place)); // check index of shape 1-D PADDLE_ENFORCE(index->dims().size() == 1); int index_size = index->dims()[0]; @@ -64,10 +64,9 @@ void Gather(const platform::Place& place, const paddle::framework::Tensor* src, for (int i = 1; i < src_dims.size(); ++i) slice_size *= src_dims[i]; // Gathering - if (platform::is_cpu_place(place)) { - CPUGather(src->data(), index->data(), slice_size, index_size, + CPUGather gather_functor; + gather_functor(src->data(), index->data(), slice_size, index_size, output->data()); - } } } // namespace operators diff --git a/paddle/operators/gather_op.cc b/paddle/operators/gather_op.cc index da22bd0c52..fe305337cb 100644 --- a/paddle/operators/gather_op.cc +++ b/paddle/operators/gather_op.cc @@ -31,6 +31,8 @@ class GatherOp : public framework::OperatorWithKernel { PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) of GatherOp should not be null."); + auto index_dims = ctx->GetInputDim("Index"); + PADDLE_ENFORCE(index_dims.size() == 1); int batch_size = ctx->GetInputDim("Index")[0]; PADDLE_ENFORCE_GE(batch_size, 0, "Batch size must be >0"); framework::DDim output_dims(ctx->GetInputDim("X")); @@ -79,8 +81,5 @@ Out = X[Index] namespace ops = paddle::operators; REGISTER_OP(gather, ops::GatherOp, ops::GatherOpMaker, gather_grad, ops::GatherGradOp); -REGISTER_OP_CPU_KERNEL(gather, - ops::GatherOpKernel); -REGISTER_OP_CPU_KERNEL( - gather_grad, - ops::GatherGradientOpKernel); +REGISTER_OP_CPU_KERNEL(gather, ops::GatherOpKernel); +REGISTER_OP_CPU_KERNEL(gather_grad, ops::GatherGradientOpKernel); diff --git a/paddle/operators/gather_op.cu b/paddle/operators/gather_op.cu new file mode 100644 index 0000000000..f3ed692666 --- /dev/null +++ b/paddle/operators/gather_op.cu @@ -0,0 +1,70 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#include "gather.cu.h" +#include "paddle/framework/eigen.h" +#include "paddle/operators/gather_op.h" +#include "scatter.cu.h" + +namespace paddle { +namespace operators { + +// template +__global__ void print_arr(const float *params, const int N) { + CUDA_1D_KERNEL_LOOP(i, N) { printf("device: %d, %f\n", i, params[i]); } +} + +template +class GatherOpCUDAKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext &ctx) const override { + PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), + "This kernel only runs on GPU device."); + auto *x = ctx.Input("X"); + auto *index = ctx.Input("Index"); + auto *output = ctx.Output("Out"); + + output->mutable_data(ctx.GetPlace()); + + GPUTGather(ctx.GetPlace(), x, index, output); + } +}; + +template +class GatherGradOpCUDAKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext &ctx) const override { + PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), + "This kernel only runs on GPU device."); + LOG(INFO) << "Gather grad here"; + auto *Index = ctx.Input("Index"); + auto *dX = ctx.Output(framework::GradVarName("X")); + auto *dO = ctx.Input(framework::GradVarName("Out")); + auto *x = ctx.Input("X"); + + dX->mutable_data(ctx.GetPlace()); + auto dxt = framework::EigenVector::Flatten(*dX); + auto place = ctx.GetEigenDevice(); + dxt.device(place) = dxt.constant(static_cast(0)); + + GPUTScatter(ctx.GetPlace(), dO, Index, dX); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP_GPU_KERNEL(gather, ops::GatherOpCUDAKernel); +REGISTER_OP_GPU_KERNEL(gather_grad, ops::GatherGradOpCUDAKernel); diff --git a/paddle/operators/gather_op.h b/paddle/operators/gather_op.h index 073e566e8f..b80a4ab370 100644 --- a/paddle/operators/gather_op.h +++ b/paddle/operators/gather_op.h @@ -23,29 +23,40 @@ namespace operators { using Tensor = framework::Tensor; -template +template class GatherOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &ctx) const override { - auto *X = ctx.Input("X"); - auto *Index = ctx.Input("Index"); - auto *Y = ctx.Output("Out"); + PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()), + "This kernel only runs on CPU."); + + auto *x = ctx.Input("X"); + auto *index = ctx.Input("Index"); + auto *output = ctx.Output("Out"); + + output->mutable_data(ctx.GetPlace()); - Y->mutable_data(ctx.GetPlace()); - Gather(ctx.GetPlace(), X, Index, Y); + CPUTGather(ctx.GetPlace(), x, index, output); } }; -template +template class GatherGradientOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &ctx) const override { + PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()), + "This kernel only runs on CPU."); + auto *Index = ctx.Input("Index"); auto *dX = ctx.Output(framework::GradVarName("X")); auto *dO = ctx.Input(framework::GradVarName("Out")); dX->mutable_data(ctx.GetPlace()); - ScatterUpdate(ctx.GetPlace(), dO, Index, dX); + auto dxt = framework::EigenVector::Flatten(*dX); + auto place = ctx.GetEigenDevice(); + dxt.device(place) = dxt.constant(static_cast(0)); + + ScatterAssign(ctx.GetPlace(), dO, Index, dX); } }; diff --git a/paddle/operators/gather_test.cc b/paddle/operators/gather_test.cc index 0ae1e99452..ea06ae2847 100644 --- a/paddle/operators/gather_test.cc +++ b/paddle/operators/gather_test.cc @@ -41,7 +41,7 @@ TEST(Gather, GatherData) { int* p_output = output->mutable_data(make_ddim({2, 4}), CPUPlace()); - Gather(CPUPlace(), src, index, output); + CPUTGather(CPUPlace(), src, index, output); for (int i = 0; i < 4; ++i) EXPECT_EQ(p_output[i], i + 4); for (int i = 4; i < 8; ++i) EXPECT_EQ(p_output[i], i - 4); diff --git a/paddle/operators/scatter.cu.h b/paddle/operators/scatter.cu.h new file mode 100644 index 0000000000..82e5040305 --- /dev/null +++ b/paddle/operators/scatter.cu.h @@ -0,0 +1,86 @@ +/* Copyright (c) 2016 PaddlePaddle Authors All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#pragma once +#include "paddle/framework/tensor.h" +#include "paddle/platform/place.h" + +namespace paddle { +namespace operators { + +#define CUDA_1D_KERNEL_LOOP(i, n) \ + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ + i += blockDim.x * gridDim.x) + +template +__global__ void ScatterCUDAKernel(const T* params, const int* indices, + T* output, size_t index_size, + size_t slice_size) { + CUDA_1D_KERNEL_LOOP(i, index_size * slice_size) { + int indices_i = i / slice_size; + int slice_i = i - indices_i * slice_size; // offset inside the slice + int scatter_i = indices[indices_i]; + int out_i = scatter_i * slice_size + slice_i; + *(output + out_i) = *(params + i); + } +} + +// Implementation of GPU copy: +template +struct GPUScatterAssign { + void operator()(const T* src, const int* index, const int slice_size, + const int index_size, T* output) { + int block = 512; + int n = slice_size * index_size; + int grid = (n + block - 1) / block; + // printf("grid, block: %d %d\n", grid, block); + ScatterCUDAKernel<<>>(src, index, output, index_size, + slice_size); + } +}; + +/** + * A thin wrapper on gpu tensor + * Return a new updated tensor from source tensor, scatter-assigned according to + * index + * input[src]: type-T source Tensor + * input[index]: type-int index Tensor (1-D) + * return: output tensor + */ +template +void GPUTScatter(const platform::Place& place, + const paddle::framework::Tensor* src, + const paddle::framework::Tensor* index, + paddle::framework::Tensor* output) { + PADDLE_ENFORCE(platform::is_gpu_place(place)); + // check index of shape 1-D + PADDLE_ENFORCE(index->dims().size() == 1); + int index_size = index->dims()[0]; + + auto src_dims = src->dims(); + framework::DDim output_dims(src_dims); + output_dims[0] = index_size; + + // slice size + int slice_size = 1; + for (int i = 1; i < src_dims.size(); ++i) slice_size *= src_dims[i]; + + // Scatter Assign + GPUScatterAssign scatter_functor; + scatter_functor(src->data(), index->data(), slice_size, index_size, + output->data()); +} + +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/scatter.h b/paddle/operators/scatter.h index 6b542675c2..670204b4dd 100644 --- a/paddle/operators/scatter.h +++ b/paddle/operators/scatter.h @@ -24,49 +24,33 @@ namespace paddle { namespace operators { using Tensor = framework::Tensor; -template -using EigenVector = framework::EigenVector; // Implementation of CPU copy template -void CPUScatterUpdate(const paddle::framework::Tensor* src, const int* index, - const size_t index_size, - paddle::framework::Tensor* output) { - paddle::framework::DDim output_dims = output->dims(); +void CPUScatterAssign(const T* src, const int* index, const int slice_size, + const int index_size, T* output) { + // paddle::framework::DDim output_dims = output->dims(); + const size_t slice_bytes = slice_size * sizeof(T); - for (size_t i = 0; i < index_size; ++i) { + for (int i = 0; i < index_size; ++i) { int index_ = index[i]; - - paddle::framework::Tensor src_ = *src; - paddle::framework::Tensor output_ = *output; - if (index_size > 1) src_ = src->Slice(i, i + 1); - if (output_dims[0] > 1) output_ = output->Slice(index_, index_ + 1); - - auto X = EigenVector::Flatten(src_); - auto Y = EigenVector::Flatten(output_); - - Y = X + Y; + memcpy(output + index_ * slice_size, src + i * slice_size, slice_bytes); } } -// Implementation of GPU scatter: -template -void GPUScatterUpdate(const T* src, const int* index, const int slice_size, - const int index_size, T* output); - /** * Return a updated tensor from source tensor, scattered according to index: - * dst[i] += src[index[i]] + * dst[i] = src[index[i]] * input[src]: type-T source Tensor * input[index]: type-int index Tensor (1-D) * return: output tensor */ template -void ScatterUpdate(const platform::Place& place, +void ScatterAssign(const platform::Place& place, const paddle::framework::Tensor* src, const paddle::framework::Tensor* index, paddle::framework::Tensor* output) { + PADDLE_ENFORCE(platform::is_cpu_place(place)); // check index of shape 1-D PADDLE_ENFORCE(index->dims().size() == 1); int index_size = index->dims()[0]; @@ -74,18 +58,19 @@ void ScatterUpdate(const platform::Place& place, auto src_dims = src->dims(); auto dst_dims = output->dims(); + const T* p_src = src->data(); + const int* p_index = index->data(); + T* p_output = output->data(); + // check src shape and dst shape should match for (int i = 1; i < src_dims.size(); i++) PADDLE_ENFORCE(src_dims[i] == dst_dims[i]); // slice size size_t slice_size = 1; - for (int i = 0; i < src_dims.size(); ++i) slice_size *= src_dims[i]; + for (int i = 1; i < src_dims.size(); ++i) slice_size *= src_dims[i]; - if (platform::is_cpu_place(place)) { - CPUScatterUpdate(src, index->data(), index_size, output); - } else { - } + CPUScatterAssign(p_src, p_index, slice_size, index_size, p_output); } } // namespace operators diff --git a/paddle/operators/scatter_op.cc b/paddle/operators/scatter_op.cc index cadd8841b6..d15ba15153 100644 --- a/paddle/operators/scatter_op.cc +++ b/paddle/operators/scatter_op.cc @@ -97,8 +97,5 @@ Out[Index] = Ref[Index] + Updates namespace ops = paddle::operators; REGISTER_OP(scatter, ops::ScatterOp, ops::ScatterOpMaker, scatter_grad, ops::ScatterGradOp); -REGISTER_OP_CPU_KERNEL(scatter, - ops::ScatterOpKernel); -REGISTER_OP_CPU_KERNEL( - scatter_grad, - ops::ScatterGradientOpKernel); +REGISTER_OP_CPU_KERNEL(scatter, ops::ScatterOpKernel); +REGISTER_OP_CPU_KERNEL(scatter_grad, ops::ScatterGradientOpKernel); diff --git a/paddle/operators/scatter_op.cu b/paddle/operators/scatter_op.cu new file mode 100644 index 0000000000..e27a926c6a --- /dev/null +++ b/paddle/operators/scatter_op.cu @@ -0,0 +1,63 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#include "gather.cu.h" +#include "paddle/operators/gather_op.h" +#include "scatter.cu.h" + +namespace paddle { +namespace operators { + +template +class ScatterOpCUDAKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext &ctx) const override { + PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), + "This kernel only runs on GPU device."); + auto *Ref = ctx.Input("Ref"); + auto *Index = ctx.Input("Index"); + auto *Updates = ctx.Input("Updates"); + auto *Out = ctx.Output("Out"); + + Out->ShareDataWith(*Ref); + + GPUTScatter(ctx.GetPlace(), Updates, Index, Out); + } +}; + +template +class ScatterGradOpCUDAKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext &ctx) const override { + PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), + "This kernel only runs on GPU device."); + auto *dRef = ctx.Output(framework::GradVarName("Ref")); + auto *dUpdates = ctx.Output(framework::GradVarName("Updates")); + auto *Index = ctx.Input("Index"); + auto *dOut = ctx.Input(framework::GradVarName("Out")); + + // In place gradient: dRef = dO + dRef->ShareDataWith(*dOut); + dUpdates->mutable_data(ctx.GetPlace()); + // Gradient by Gather: dUpdates = dO[Index] + GPUTGather(ctx.GetPlace(), dOut, Index, dUpdates); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP_GPU_KERNEL(scatter, ops::ScatterOpCUDAKernel); +REGISTER_OP_GPU_KERNEL(scatter_grad, ops::ScatterGradOpCUDAKernel); diff --git a/paddle/operators/scatter_op.h b/paddle/operators/scatter_op.h index a8eb54399a..74b2718f43 100644 --- a/paddle/operators/scatter_op.h +++ b/paddle/operators/scatter_op.h @@ -23,10 +23,12 @@ namespace operators { using Tensor = framework::Tensor; -template +template class ScatterOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &ctx) const override { + PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()), + "This kernel only runs on CPU."); auto *Ref = ctx.Input("Ref"); auto *Index = ctx.Input("Index"); auto *Updates = ctx.Input("Updates"); @@ -35,14 +37,16 @@ class ScatterOpKernel : public framework::OpKernel { // In place output: Out = Ref, Out[Index] += Updates Out->ShareDataWith(*Ref); // Apply ScatterUpdate: Out[index] += Updates[:] - ScatterUpdate(ctx.GetPlace(), Updates, Index, Out); + ScatterAssign(ctx.GetPlace(), Updates, Index, Out); } }; -template +template class ScatterGradientOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &ctx) const override { + PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()), + "This kernel only runs on CPU."); auto *dRef = ctx.Output(framework::GradVarName("Ref")); auto *dUpdates = ctx.Output(framework::GradVarName("Updates")); auto *Index = ctx.Input("Index"); @@ -52,7 +56,7 @@ class ScatterGradientOpKernel : public framework::OpKernel { dRef->ShareDataWith(*dOut); dUpdates->mutable_data(ctx.GetPlace()); // Gradient by Gather: dUpdates += dO[Index] - Gather(ctx.GetPlace(), dOut, Index, dUpdates); + CPUTGather(ctx.GetPlace(), dOut, Index, dUpdates); } }; diff --git a/paddle/operators/scatter_test.cc b/paddle/operators/scatter_test.cc index 26fdaff146..bace6419d0 100644 --- a/paddle/operators/scatter_test.cc +++ b/paddle/operators/scatter_test.cc @@ -40,7 +40,7 @@ TEST(scatter, ScatterUpdate) { float* p_output = output->mutable_data(make_ddim({4, 4}), CPUPlace()); - ScatterUpdate(CPUPlace(), src, index, output); + ScatterAssign(CPUPlace(), src, index, output); for (size_t i = 0; i < 4; ++i) EXPECT_EQ(p_output[i], float(0)); for (size_t i = 0; i < 4; ++i) EXPECT_EQ(output->data()[i], float(0)); diff --git a/python/paddle/v2/framework/tests/test_scatter_op.py b/python/paddle/v2/framework/tests/test_scatter_op.py index 33c73c5263..1032269d5d 100644 --- a/python/paddle/v2/framework/tests/test_scatter_op.py +++ b/python/paddle/v2/framework/tests/test_scatter_op.py @@ -10,7 +10,7 @@ class TestScatterOp(OpTest): index_np = np.array([1, 2]).astype("int32") updates_np = np.random.random((2, 3)).astype("float32") output_np = np.copy(ref_np) - output_np[index_np] += updates_np + output_np[index_np] = updates_np self.inputs = {'Ref': ref_np, 'Index': index_np, 'Updates': updates_np} self.outputs = {'Out': output_np} @@ -18,7 +18,7 @@ class TestScatterOp(OpTest): self.check_output() def test_check_grad(self): - self.check_grad(['Updates', 'Ref'], 'Out', in_place=True) + self.check_grad(['Updates'], 'Out', in_place=True) if __name__ == "__main__": From 757c76b83f3701c29efc88c546ec90a18952f98a Mon Sep 17 00:00:00 2001 From: Helin Wang Date: Thu, 28 Sep 2017 15:07:17 -0700 Subject: [PATCH 058/342] update according to comments --- doc/design/refactor/session.md | 74 +++++++++++++++++++++------------- 1 file changed, 47 insertions(+), 27 deletions(-) diff --git a/doc/design/refactor/session.md b/doc/design/refactor/session.md index 5f58148f01..9a7451ece5 100644 --- a/doc/design/refactor/session.md +++ b/doc/design/refactor/session.md @@ -5,17 +5,17 @@ The *session* object encapsulates the environment in which the computation graph is executed. -We will have *local* session and *remote* session, they offer the +We will have the *local* session and *remote* session, they offer the same [interface](#interface). The local session encapsulates the local runtime environment and the remote session encapsulates the cluster -runtime envrionment. +runtime environment. -The local runtime envrionment contains: +The local runtime environment contains: 1. computation devices (i.e., CPU, GPU) handles, and 1. the [scope](../scope.md) which holds all variables. -The remote runtime envrionment contains: +The remote runtime environment contains: 1. computation devices (i.e., CPU and GPU on node 0, 1) in a cluster, and @@ -29,12 +29,12 @@ remote computation resource in a cluster from his local computer. ## Background -The current design has an implicit global session on which +The current design has an implicit global session in which `paddle.eval()` is executed. The pain point is: Since the user is not able to explicitly switch between runtime -environments such as the scope and the device contexts, the user -cannot run a topology in two independent environments. +environments, the user cannot run a topology in two independent +environments. For example, in reinforcement learning, the user may want to have a stale model for inference and a fresh model for training, and only @@ -49,12 +49,12 @@ We need the session object to address above issues. ## Session A session is an object that owns the runtime environment. All -computations are executed through `session.eval`. +computations are executed through `session.eval()`. ### Interface -``` +```python eval( targets, feed_dict=None, @@ -64,37 +64,57 @@ eval( Evaluates the target Operations or Variables in `targets`. - *targets*: the evaluation targets. Can be a single Operation or - Variable, or a list with the Operations or Variables as elements. + Variable, or a list with the Operations or Variables as + elements. The value returned by `eval()` has the same shape as the + `target` argument. + + The PaddlePaddle program is represented by + the [ProgramDesc](../design/program.md), `eval()` will infer the + ProgramDesc from the given targets and run the PaddlePaddle + program. Please + see + [this graph](./distributed_architecture.md#local-training-architecture) for + the detailed illustration for the local session + and + [this graph](./distributed_architecture.md#distributed-training-architecture) for + the detailed illustration for the remote session. + +- *feed_dict*: a dictionary that contains the tensors which override + the edges of the computation graph. - The value returned by `eval()` has the same shape as the `target` - argument. + feed_dict not only can provide the input data, it can override any + OP's input as well: - The computation graph is implicitly inferred from the targets. + ```python + a = pd.constant(1.0, name="a") + b = pd.constant(2.0) + c = pd.mul(a,b) + sess.eval(targets=c, feed_dict={"a":3.0}) # returns 6.0 + ``` -- *feed_dict*: a dictionary that contains the tensors which overrides - the edges of the computation graph. - -``` +```python close() ``` -Closes the session. Calling this method releases the scope. +Closes the session and releases the scope that the session owns. ### Create a Local Session -``` +```python session( - gpu_ids=None + devices=None ) ``` Creates a new session. One session owns one scope, so creating multiple sessions will create different scopes. -- *gpu_ids*: a single `int` or a list of `int` of the GPU IDs to be - used as the computation devices. If not specified, all avaiable GPUs - will be used. +- *devices*: a single `string` or a list of `string` of device names, + the corresponding devices will be the computation devices for + `eval()`. If not specified, all available devices (e.g., all GPUs) + will be used. The user doesn't need to specify the CPU device since + it will be always used. #### Example @@ -103,14 +123,14 @@ multiple sessions will create different scopes. a = paddle.constant(1.0) b = paddle.constant(2.0) c = a + b -sess = paddle.session(gpu_ids=[0,1]) +sess = paddle.session(devices=["gpu:0", "gpu:1", "fpga:0"]) sess.eval(c) sess.close() ``` ### Create a Remote Session -``` +```python create_cloud_job( name, num_trainer, @@ -125,7 +145,7 @@ create_cloud_job( Creates a Paddle Cloud job. Fails if the job name exists. -``` +```python get_cloud_job( name ) @@ -133,7 +153,7 @@ get_cloud_job( Gets a Paddle Cloud job. -``` +```python remote_session( job ) From b851515b16d179f35410836a17f855b9b6a9c268 Mon Sep 17 00:00:00 2001 From: zchen0211 Date: Thu, 28 Sep 2017 15:41:20 -0700 Subject: [PATCH 059/342] merge new op grammar --- paddle/operators/gather_op.cu | 9 ++------- paddle/operators/scatter_op.cu | 4 ++-- 2 files changed, 4 insertions(+), 9 deletions(-) diff --git a/paddle/operators/gather_op.cu b/paddle/operators/gather_op.cu index f3ed692666..f7533cdd64 100644 --- a/paddle/operators/gather_op.cu +++ b/paddle/operators/gather_op.cu @@ -20,13 +20,8 @@ namespace paddle { namespace operators { -// template -__global__ void print_arr(const float *params, const int N) { - CUDA_1D_KERNEL_LOOP(i, N) { printf("device: %d, %f\n", i, params[i]); } -} - template -class GatherOpCUDAKernel : public framework::OpKernel { +class GatherOpCUDAKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &ctx) const override { PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), @@ -42,7 +37,7 @@ class GatherOpCUDAKernel : public framework::OpKernel { }; template -class GatherGradOpCUDAKernel : public framework::OpKernel { +class GatherGradOpCUDAKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &ctx) const override { PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), diff --git a/paddle/operators/scatter_op.cu b/paddle/operators/scatter_op.cu index e27a926c6a..89d23945e0 100644 --- a/paddle/operators/scatter_op.cu +++ b/paddle/operators/scatter_op.cu @@ -20,7 +20,7 @@ namespace paddle { namespace operators { template -class ScatterOpCUDAKernel : public framework::OpKernel { +class ScatterOpCUDAKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &ctx) const override { PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), @@ -37,7 +37,7 @@ class ScatterOpCUDAKernel : public framework::OpKernel { }; template -class ScatterGradOpCUDAKernel : public framework::OpKernel { +class ScatterGradOpCUDAKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &ctx) const override { PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), From 708cff32b1f8b9cf6da2c1a4a5d8458fea6eea0a Mon Sep 17 00:00:00 2001 From: Kavya Srinet Date: Thu, 28 Sep 2017 16:13:06 -0700 Subject: [PATCH 060/342] Few typos in refactorization file --- doc/design/refactorization.md | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/doc/design/refactorization.md b/doc/design/refactorization.md index a07675b3e0..715f209d3f 100644 --- a/doc/design/refactorization.md +++ b/doc/design/refactorization.md @@ -137,19 +137,18 @@ Compile Time -> IR -> Runtime * `Eigen::Tensor` contains basic math and element-wise functions. * Note that `Eigen::Tensor` has broadcast implementation. * Limit the number of `tensor.device(dev) = ` in your code. -* `thrust::tranform` and `std::transform`. - * `thrust` has the same API as C++ standard library. Using `transform`, one can quickly implement customized elementwise kernels. +* `thrust::transform` and `std::transform`. + * `thrust` has the same API as C++ standard library. Using `transform`, one can quickly implement customized element-wise kernels. * `thrust` also has more complex APIs, like `scan`, `reduce`, `reduce_by_key`. * Hand-writing `GPUKernel` and `CPU` code * Do not write in header (`.h`) files. CPU Kernel should be in cpp source (`.cc`) and GPU kernels should be in cuda (`.cu`) files. (GCC cannot compile GPU code.) --- # Operator Registration -## Why registration is necessary? +## Why is registration necessary? We need a method to build mappings between Op type names and Op classes. ## How is registration implemented? - Maintaining a map, whose key is the type name and the value is the corresponding Op constructor. --- @@ -170,7 +169,7 @@ Maintaining a map, whose key is the type name and the value is the corresponding # Related Concepts ### Op_Maker -It's constructor takes `proto` and `checker`. They are compeleted during Op_Maker's construction. ([ScaleOpMaker](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/scale_op.cc#L37)) +It's constructor takes `proto` and `checker`. They are completed during Op_Maker's construction. ([ScaleOpMaker](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/scale_op.cc#L37)) ### Register Macros ```cpp @@ -200,7 +199,7 @@ Make sure the registration process is executed and linked. --- # Backward Module (2/2) ### Build Backward Network -- **Input**: graph of forwarding operators +- **Input**: graph of forward operators - **Output**: graph of backward operators - **Corner cases in construction** - Shared Variables => insert an `Add` operator to combine gradients @@ -224,7 +223,7 @@ Make sure the registration process is executed and linked. --- # Block (in design) -## the difference with original RNNOp +## the difference between original RNNOp and Block - As an operator is more intuitive than `RNNOp`, - Offers a new interface `Eval(targets)` to deduce the minimal block to `Run`, - Fits the compile-time/ runtime separation design paradigm. From eea9bedd9b6d449fab174798b15cf70993483ffc Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Thu, 28 Sep 2017 16:32:34 -0700 Subject: [PATCH 061/342] Add grad_op_desc_builder --- paddle/framework/CMakeLists.txt | 2 +- paddle/framework/grad_op_builder.cc | 39 ++++++++++++++++++++++++ paddle/framework/grad_op_builder.h | 3 ++ paddle/framework/grad_op_builder_test.cc | 37 ++++++++++++++++++++++ paddle/framework/op_desc.cc | 11 +++++++ paddle/framework/op_desc.h | 6 ++++ 6 files changed, 97 insertions(+), 1 deletion(-) diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt index 4aaa43d796..8a5d8532bb 100644 --- a/paddle/framework/CMakeLists.txt +++ b/paddle/framework/CMakeLists.txt @@ -26,7 +26,7 @@ cc_library(op_info SRCS op_info.cc DEPS attribute framework_proto) cc_library(operator SRCS operator.cc DEPS op_info device_context tensor scope) cc_test(operator_test SRCS operator_test.cc DEPS operator op_registry) -cc_library(grad_op_builder SRCS grad_op_builder.cc DEPS operator) +cc_library(grad_op_builder SRCS grad_op_builder.cc DEPS operator proto_desc) cc_library(op_registry SRCS op_registry.cc DEPS grad_op_builder op_proto_maker op_info) cc_test(op_registry_test SRCS op_registry_test.cc DEPS op_registry) cc_test(grad_op_builder_test SRCS grad_op_builder_test.cc DEPS grad_op_builder op_registry add_op) diff --git a/paddle/framework/grad_op_builder.cc b/paddle/framework/grad_op_builder.cc index b02a599a80..3661ce41be 100644 --- a/paddle/framework/grad_op_builder.cc +++ b/paddle/framework/grad_op_builder.cc @@ -54,5 +54,44 @@ OperatorBase* BuildGradOp(const OperatorBase* op) { return grad_info.Creator()(info.grad_op_type_, inputs, outputs, op->Attrs()); } +static void TransOpDescArg(const OpDescBind* src_op, const OpArgType& src_type, + bool is_grad, OpDescBind* dst_op, + const OpArgType& dst_type) { + PADDLE_ENFORCE(dst_op != nullptr, + "Protobuf desc of gradient op must be initialized first."); + const auto& proto = OpInfoMap::Instance().Get(src_op->Type()).Proto(); + const auto& src_arg_list = + src_type == OpArgType::IN ? proto.inputs() : proto.outputs(); + for (const auto& arg : src_arg_list) { + if (arg.not_in_gradient() && !is_grad) continue; + const std::string src_name = arg.name(); + std::vector vars = src_type == OpArgType::IN + ? src_op->Input(src_name) + : src_op->Output(src_name); + if (is_grad) { + for (std::string& var : vars) { + var = GradVarName(var); + } + } + std::string dst_name = is_grad ? GradVarName(src_name) : src_name; + dst_type == OpArgType::IN ? dst_op->SetInput(dst_name, vars) + : dst_op->SetOutput(dst_name, vars); + } +} + +void CompleteGradOpDesc(const OpDescBind* forw_op, OpDescBind* grad_op) { + auto& info = OpInfoMap::Instance().Get(forw_op->Type()); + PADDLE_ENFORCE(info.HasGradientOp()); + + grad_op->SetType(info.grad_op_type_); + + TransOpDescArg(forw_op, OpArgType::IN, false, grad_op, OpArgType::IN); + TransOpDescArg(forw_op, OpArgType::OUT, false, grad_op, OpArgType::IN); + TransOpDescArg(forw_op, OpArgType::OUT, true, grad_op, OpArgType::IN); + TransOpDescArg(forw_op, OpArgType::IN, true, grad_op, OpArgType::OUT); + + grad_op->SetAttrMap(forw_op->GetAttrMap()); +} + } // namespace framework } // namespace paddle diff --git a/paddle/framework/grad_op_builder.h b/paddle/framework/grad_op_builder.h index 998f8ebbb5..b601406061 100644 --- a/paddle/framework/grad_op_builder.h +++ b/paddle/framework/grad_op_builder.h @@ -14,6 +14,7 @@ limitations under the License. */ #pragma once +#include "paddle/framework/op_desc.h" #include "paddle/framework/operator.h" namespace paddle { @@ -21,5 +22,7 @@ namespace framework { OperatorBase* BuildGradOp(const OperatorBase* op); +void CompleteGradOpDesc(const OpDescBind* forw_op, OpDescBind* grad_op); + } // namespace framework } // namespace paddle diff --git a/paddle/framework/grad_op_builder_test.cc b/paddle/framework/grad_op_builder_test.cc index 9e3ca563c6..85184e02b6 100644 --- a/paddle/framework/grad_op_builder_test.cc +++ b/paddle/framework/grad_op_builder_test.cc @@ -120,3 +120,40 @@ TEST(GradOpBuilder, IOIgnoredInGradient) { std::vector( {f::GradVarName("in3_1"), f::GradVarName("in3_2")})); } + +TEST(GradOpDescBuilder, MutiInOut) { + f::OpDescBind *forw_op = new f::OpDescBind(); + forw_op->SetType("mult_io"); + forw_op->SetInput("In1", {"in1"}); + forw_op->SetInput("In2_mult", {"in2_1", "in2_2", "in2_3"}); + forw_op->SetInput("In3", {"in3"}); + forw_op->SetOutput("Out1", {"out1"}); + forw_op->SetOutput("Out2_mult", {"out2_1", "out2_2"}); + + f::OpDescBind *grad_op = new f::OpDescBind(); + f::CompleteGradOpDesc(forw_op, grad_op); + + ASSERT_EQ(grad_op->InputNames().size(), 3UL + 2UL + 2UL); + EXPECT_EQ(grad_op->Input("In1"), std::vector({"in1"})); + EXPECT_EQ(grad_op->Input("In2_mult"), + std::vector({"in2_1", "in2_2", "in2_3"})); + EXPECT_EQ(grad_op->Input("In3"), std::vector({"in3"})); + EXPECT_EQ(grad_op->Input("Out1"), std::vector({"out1"})); + EXPECT_EQ(grad_op->Input("Out2_mult"), + std::vector({"out2_1", "out2_2"})); + EXPECT_EQ(grad_op->Input(f::GradVarName("Out1")), + std::vector({f::GradVarName("out1")})); + EXPECT_EQ(grad_op->Input(f::GradVarName("Out2_mult")), + std::vector( + {f::GradVarName("out2_1"), f::GradVarName("out2_2")})); + + ASSERT_EQ(grad_op->OutputNames().size(), 3UL); + EXPECT_EQ(grad_op->Output(f::GradVarName("In1")), + std::vector({f::GradVarName("in1")})); + EXPECT_EQ(grad_op->Output(f::GradVarName("In2_mult")), + std::vector({f::GradVarName("in2_1"), + f::GradVarName("in2_2"), + f::GradVarName("in2_3")})); + EXPECT_EQ(grad_op->Output(f::GradVarName("In3")), + std::vector({f::GradVarName("in3")})); +} diff --git a/paddle/framework/op_desc.cc b/paddle/framework/op_desc.cc index 99b5a9c377..0c12c55dc0 100644 --- a/paddle/framework/op_desc.cc +++ b/paddle/framework/op_desc.cc @@ -89,6 +89,12 @@ void OpDescBind::SetAttr(const std::string &name, const Attribute &v) { need_update_ = true; } +void OpDescBind::SetAttrMap( + const std::unordered_map &attr_map) { + attrs_ = attr_map; + need_update_ = true; +} + Attribute OpDescBind::GetAttr(const std::string &name) const { auto it = attrs_.find(name); PADDLE_ENFORCE(it != attrs_.end(), "Attribute %s is not found", name); @@ -101,6 +107,11 @@ int OpDescBind::GetBlockAttr(const std::string &name) const { return boost::get(it->second)->idx(); } +const std::unordered_map &OpDescBind::GetAttrMap() + const { + return attrs_; +} + void OpDescBind::Sync() { if (need_update_) { this->op_desc_.mutable_inputs()->Clear(); diff --git a/paddle/framework/op_desc.h b/paddle/framework/op_desc.h index ffc8ac61ab..0cf7d13971 100644 --- a/paddle/framework/op_desc.h +++ b/paddle/framework/op_desc.h @@ -60,10 +60,16 @@ class OpDescBind { void SetBlockAttr(const std::string &name, BlockDescBind &block); + // Only be used in C++ + void SetAttrMap(const std::unordered_map &attr_map); + Attribute GetAttr(const std::string &name) const; int GetBlockAttr(const std::string &name) const; + // Only be used in C++ + const std::unordered_map &GetAttrMap() const; + private: struct SetAttrDescVisitor : public boost::static_visitor { explicit SetAttrDescVisitor(OpDesc::Attr *attr) : attr_(attr) {} From 6efcbc4fcb5dc2dcf85b877e9dabdc09fb1534f6 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 28 Sep 2017 16:45:13 -0700 Subject: [PATCH 062/342] Fix bug in test_prelu and test_xe They were using float64 for FP32 kernel before. --- python/paddle/v2/framework/tests/test_cross_entropy_op.py | 2 +- python/paddle/v2/framework/tests/test_prelu_op.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/python/paddle/v2/framework/tests/test_cross_entropy_op.py b/python/paddle/v2/framework/tests/test_cross_entropy_op.py index 1de514dff4..4ea14da7fd 100644 --- a/python/paddle/v2/framework/tests/test_cross_entropy_op.py +++ b/python/paddle/v2/framework/tests/test_cross_entropy_op.py @@ -80,7 +80,7 @@ class TestCrossEntropyOp3(OpTest): cross_entropy2 = (-label * np.log(X)).sum( axis=1, keepdims=True).astype("float32") - self.inputs = {"X": X, "Label": label} + self.inputs = {"X": X, "Label": label.astype(np.float32)} self.outputs = {"Y": cross_entropy} self.attrs = {"softLabel": True} diff --git a/python/paddle/v2/framework/tests/test_prelu_op.py b/python/paddle/v2/framework/tests/test_prelu_op.py index 676fd9f7c5..7be932ac8f 100644 --- a/python/paddle/v2/framework/tests/test_prelu_op.py +++ b/python/paddle/v2/framework/tests/test_prelu_op.py @@ -17,7 +17,7 @@ class PReluTest(OpTest): x_np_sign = np.sign(x_np) x_np = x_np_sign * np.maximum(x_np, .005) - alpha_np = np.array([.1]) + alpha_np = np.array([.1], dtype="float32") self.inputs = {'X': x_np, 'Alpha': alpha_np} out_np = np.maximum(self.inputs['X'], 0.) out_np = out_np + np.minimum(self.inputs['X'], From 099b2c19e1c549376b1dba0de1349f03bb9bb659 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Thu, 28 Sep 2017 16:45:14 -0700 Subject: [PATCH 063/342] Add unit tests --- paddle/framework/grad_op_builder_test.cc | 42 ++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/paddle/framework/grad_op_builder_test.cc b/paddle/framework/grad_op_builder_test.cc index 85184e02b6..d09892f81b 100644 --- a/paddle/framework/grad_op_builder_test.cc +++ b/paddle/framework/grad_op_builder_test.cc @@ -133,6 +133,7 @@ TEST(GradOpDescBuilder, MutiInOut) { f::OpDescBind *grad_op = new f::OpDescBind(); f::CompleteGradOpDesc(forw_op, grad_op); + EXPECT_EQ(grad_op->Type(), "mult_io_grad"); ASSERT_EQ(grad_op->InputNames().size(), 3UL + 2UL + 2UL); EXPECT_EQ(grad_op->Input("In1"), std::vector({"in1"})); EXPECT_EQ(grad_op->Input("In2_mult"), @@ -156,4 +157,45 @@ TEST(GradOpDescBuilder, MutiInOut) { f::GradVarName("in2_3")})); EXPECT_EQ(grad_op->Output(f::GradVarName("In3")), std::vector({f::GradVarName("in3")})); + delete forw_op; + delete grad_op; } + +TEST(GradOpDescBuilder, IOIgnoredInGradient) { + f::OpDescBind *forw_op = new f::OpDescBind(); + forw_op->SetType("io_ignored"); + forw_op->SetInput("In1", {"in1"}); + forw_op->SetInput("In2_mult", {"in2_1", "in2_2"}); + forw_op->SetInput("In3_mult", {"in3_1", "in3_2"}); + forw_op->SetOutput("Out1_mult", {"out1_1", "out1_2"}); + forw_op->SetOutput("Out2", {"out2"}); + + f::OpDescBind *grad_op = new f::OpDescBind(); + f::CompleteGradOpDesc(forw_op, grad_op); + + EXPECT_EQ(grad_op->Type(), "io_ignored_grad"); + // 'In2' and 'Out2' are ignored in gradient calculating + ASSERT_EQ(grad_op->InputNames().size(), 2UL + 1UL + 2UL); + EXPECT_EQ(grad_op->Input("In1"), std::vector({"in1"})); + EXPECT_EQ(grad_op->Input("In3_mult"), + std::vector({"in3_1", "in3_2"})); + EXPECT_EQ(grad_op->Input("Out1_mult"), + std::vector({"out1_1", "out1_2"})); + EXPECT_EQ(grad_op->Input(f::GradVarName("Out1_mult")), + std::vector( + {f::GradVarName("out1_1"), f::GradVarName("out1_2")})); + EXPECT_EQ(grad_op->Input(f::GradVarName("Out2")), + std::vector({f::GradVarName("out2")})); + + ASSERT_EQ(grad_op->OutputNames().size(), 3UL); + EXPECT_EQ(grad_op->Output(f::GradVarName("In1")), + std::vector({f::GradVarName("in1")})); + EXPECT_EQ(grad_op->Output(f::GradVarName("In2_mult")), + std::vector( + {f::GradVarName("in2_1"), f::GradVarName("in2_2")})); + EXPECT_EQ(grad_op->Output(f::GradVarName("In3_mult")), + std::vector( + {f::GradVarName("in3_1"), f::GradVarName("in3_2")})); + delete forw_op; + delete grad_op; +} \ No newline at end of file From 337b7ebe7787a522f679344509523f02047c67aa Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 28 Sep 2017 16:56:51 -0700 Subject: [PATCH 064/342] Unify Activation functions and simplify register code --- paddle/operators/activation_op.cc | 83 ++------ paddle/operators/activation_op.cu | 94 +-------- paddle/operators/activation_op.h | 324 +++++++++++++++--------------- 3 files changed, 190 insertions(+), 311 deletions(-) diff --git a/paddle/operators/activation_op.cc b/paddle/operators/activation_op.cc index f77e1c572e..33826fc206 100644 --- a/paddle/operators/activation_op.cc +++ b/paddle/operators/activation_op.cc @@ -195,111 +195,54 @@ class STanhOpMaker : public framework::OpProtoAndCheckerMaker { } // namespace paddle namespace ops = paddle::operators; + REGISTER_OP(sigmoid, ops::ActivationOp, ops::SigmoidOpMaker, sigmoid_grad, ops::ActivationOpGrad); -REGISTER_OP_CPU_KERNEL(sigmoid, - ops::ActivationKernel>); -REGISTER_OP_CPU_KERNEL( - sigmoid_grad, ops::ActivationGradKernel>); REGISTER_OP(exp, ops::ActivationOp, ops::ExpOpMaker, exp_grad, ops::ActivationOpGrad); -REGISTER_OP_CPU_KERNEL( - exp, - ops::ActivationKernel); -REGISTER_OP_CPU_KERNEL(exp_grad, - ops::ActivationGradKernel); REGISTER_OP(relu, ops::ActivationOp, ops::ReluOpMaker, relu_grad, ops::ActivationOpGrad); -REGISTER_OP_CPU_KERNEL(relu, - ops::ActivationKernel>); -REGISTER_OP_CPU_KERNEL( - relu_grad, ops::ActivationGradKernel>); REGISTER_OP(tanh, ops::ActivationOp, ops::TanhOpMaker, tanh_grad, ops::ActivationOpGrad); -REGISTER_OP_CPU_KERNEL( - tanh, - ops::ActivationKernel); -REGISTER_OP_CPU_KERNEL( - tanh_grad, ops::ActivationGradKernel>); REGISTER_OP(sqrt, ops::ActivationOp, ops::SqrtOpMaker, sqrt_grad, ops::ActivationOpGrad); -REGISTER_OP_CPU_KERNEL( - sqrt, - ops::ActivationKernel); -REGISTER_OP_CPU_KERNEL( - sqrt_grad, ops::ActivationGradKernel>); REGISTER_OP(abs, ops::ActivationOp, ops::AbsOpMaker, abs_grad, ops::ActivationOpGrad); -REGISTER_OP_CPU_KERNEL( - abs, - ops::ActivationKernel); -REGISTER_OP_CPU_KERNEL(abs_grad, - ops::ActivationGradKernel); REGISTER_OP(reciprocal, ops::ActivationOp, ops::ReciprocalOpMaker, reciprocal_grad, ops::ActivationOpGrad); -REGISTER_OP_CPU_KERNEL(reciprocal, - ops::ActivationKernel>); -REGISTER_OP_CPU_KERNEL( - reciprocal_grad, - ops::ActivationGradKernel>); REGISTER_OP(log, ops::ActivationOp, ops::LogOpMaker, log_grad, ops::ActivationOpGrad); -REGISTER_OP_CPU_KERNEL( - log, - ops::ActivationKernel); -REGISTER_OP_CPU_KERNEL( - log_grad, ops::ActivationGradKernel>); REGISTER_OP(square, ops::ActivationOp, ops::SquareOpMaker, square_grad, ops::ActivationOpGrad); -REGISTER_OP_CPU_KERNEL(square, - ops::ActivationKernel); -REGISTER_OP_CPU_KERNEL( - square_grad, ops::ActivationGradKernel>); REGISTER_OP(brelu, ops::ActivationOp, ops::BReluOpMaker, brelu_grad, ops::ActivationOpGrad); -REGISTER_OP_CPU_KERNEL(brelu, - ops::BReluKernel); -REGISTER_OP_CPU_KERNEL(brelu_grad, - ops::BReluGradKernel); REGISTER_OP(soft_relu, ops::ActivationOp, ops::SoftReluOpMaker, soft_relu_grad, ops::ActivationOpGrad); -REGISTER_OP_CPU_KERNEL(soft_relu, - ops::SoftReluKernel); -REGISTER_OP_CPU_KERNEL( - soft_relu_grad, ops::SoftReluGradKernel); REGISTER_OP(pow, ops::ActivationOp, ops::PowOpMaker, pow_grad, ops::ActivationOpGrad); -REGISTER_OP_CPU_KERNEL(pow, ops::PowKernel); -REGISTER_OP_CPU_KERNEL(pow_grad, - ops::PowGradKernel); REGISTER_OP(stanh, ops::ActivationOp, ops::STanhOpMaker, stanh_grad, ops::ActivationOpGrad); -REGISTER_OP_CPU_KERNEL(stanh, - ops::STanhKernel); -REGISTER_OP_CPU_KERNEL(stanh_grad, - ops::STanhGradKernel); + +#define REGISTER_ACTIVATION_CPU_KERNEL(act_type, functor, grad_functor) \ + REGISTER_OP_CPU_KERNEL( \ + act_type, \ + paddle::operators::ActivationKernel>); \ + REGISTER_OP_CPU_KERNEL(act_type##_grad, \ + paddle::operators::ActivationGradKernel< \ + paddle::platform::CPUPlace, \ + paddle::operators::grad_functor>); + +FOR_EACH_KERNEL_FUNCTOR(REGISTER_ACTIVATION_CPU_KERNEL); diff --git a/paddle/operators/activation_op.cu b/paddle/operators/activation_op.cu index feed1302b2..93e9f1c694 100644 --- a/paddle/operators/activation_op.cu +++ b/paddle/operators/activation_op.cu @@ -15,86 +15,14 @@ #define EIGEN_USE_GPU #include "paddle/operators/activation_op.h" -namespace ops = paddle::operators; - -REGISTER_OP_GPU_KERNEL(sigmoid, - ops::ActivationKernel>); -REGISTER_OP_GPU_KERNEL( - sigmoid_grad, ops::ActivationGradKernel>); - -REGISTER_OP_GPU_KERNEL( - exp, - ops::ActivationKernel); -REGISTER_OP_GPU_KERNEL(exp_grad, - ops::ActivationGradKernel); -REGISTER_OP_GPU_KERNEL(relu, - ops::ActivationKernel>); -REGISTER_OP_GPU_KERNEL( - relu_grad, ops::ActivationGradKernel>); - -REGISTER_OP_GPU_KERNEL( - tanh, - ops::ActivationKernel); -REGISTER_OP_GPU_KERNEL( - tanh_grad, ops::ActivationGradKernel>); - -REGISTER_OP_GPU_KERNEL( - sqrt, - ops::ActivationKernel); -REGISTER_OP_GPU_KERNEL( - sqrt_grad, ops::ActivationGradKernel>); - -REGISTER_OP_GPU_KERNEL( - abs, - ops::ActivationKernel); -REGISTER_OP_GPU_KERNEL(abs_grad, - ops::ActivationGradKernel); - -REGISTER_OP_GPU_KERNEL(reciprocal, - ops::ActivationKernel>); -REGISTER_OP_GPU_KERNEL( - reciprocal_grad, - ops::ActivationGradKernel>); - -REGISTER_OP_GPU_KERNEL( - log, - ops::ActivationKernel); -REGISTER_OP_GPU_KERNEL( - log_grad, ops::ActivationGradKernel>); - -REGISTER_OP_GPU_KERNEL(square, - ops::ActivationKernel); -REGISTER_OP_GPU_KERNEL( - square_grad, ops::ActivationGradKernel>); - -REGISTER_OP_GPU_KERNEL(brelu, - ops::BReluKernel); -REGISTER_OP_GPU_KERNEL(brelu_grad, - ops::BReluGradKernel); - -REGISTER_OP_GPU_KERNEL(soft_relu, - ops::SoftReluKernel); -REGISTER_OP_GPU_KERNEL( - soft_relu_grad, ops::SoftReluGradKernel); - -REGISTER_OP_GPU_KERNEL(pow, ops::PowKernel); -REGISTER_OP_GPU_KERNEL(pow_grad, - ops::PowGradKernel); - -REGISTER_OP_GPU_KERNEL(stanh, - ops::STanhKernel); -REGISTER_OP_GPU_KERNEL(stanh_grad, - ops::STanhGradKernel); +#define REGISTER_ACTIVATION_GPU_KERNEL(act_type, functor, grad_functor) \ + REGISTER_OP_GPU_KERNEL( \ + act_type, \ + paddle::operators::ActivationKernel>); \ + REGISTER_OP_GPU_KERNEL(act_type##_grad, \ + paddle::operators::ActivationGradKernel< \ + paddle::platform::GPUPlace, \ + paddle::operators::grad_functor>); + +FOR_EACH_KERNEL_FUNCTOR(REGISTER_ACTIVATION_GPU_KERNEL); diff --git a/paddle/operators/activation_op.h b/paddle/operators/activation_op.h index e400992ae2..d25bb204ff 100644 --- a/paddle/operators/activation_op.h +++ b/paddle/operators/activation_op.h @@ -19,9 +19,12 @@ namespace paddle { namespace operators { -template -class ActivationKernel : public framework::OpKernel { +template +class ActivationKernel + : public framework::OpKernel { public: + using T = typename Functor::ELEMENT_TYPE; + void Compute(const framework::ExecutionContext& context) const override { auto* X = context.Input("X"); auto* Y = context.Output("Y"); @@ -31,13 +34,20 @@ class ActivationKernel : public framework::OpKernel { auto y = framework::EigenVector::Flatten(*Y); auto place = context.GetEigenDevice(); Functor functor; + + auto attrs = functor.GetAttrs(); + for (auto& attr : attrs) { + *attr.second = context.Attr(attr.first); + } functor(place, x, y); } }; -template -class ActivationGradKernel : public framework::OpKernel { +template +class ActivationGradKernel + : public framework::OpKernel { public: + using T = typename Functor::ELEMENT_TYPE; void Compute(const framework::ExecutionContext& context) const override { auto* X = context.Input("X"); auto* Y = context.Input("Y"); @@ -51,303 +61,301 @@ class ActivationGradKernel : public framework::OpKernel { auto dx = framework::EigenVector::Flatten(*dX); auto place = context.GetEigenDevice(); Functor functor; + auto attrs = functor.GetAttrs(); + for (auto& attr : attrs) { + *attr.second = context.Attr(attr.first); + } functor(place, x, y, dy, dx); } }; +template +struct BaseActivationFunctor { + using ELEMENT_TYPE = T; + + using AttrPair = std::vector>; + + AttrPair GetAttrs() { return AttrPair(); } +}; + // sigmoid(x) = 1 / (1 + exp(-x)) template -struct SigmoidFunctor { +struct SigmoidFunctor : public BaseActivationFunctor { template - void operator()(Device d, X x, Y y) { + void operator()(Device d, X x, Y y) const { y.device(d) = static_cast(1) / (static_cast(1) + (-x).exp()); } }; template -struct SigmoidGradFunctor { +struct SigmoidGradFunctor : public BaseActivationFunctor { template - void operator()(Device d, X x, Y y, dY dy, dX dx) { + void operator()(Device d, X x, Y y, dY dy, dX dx) const { dx.device(d) = dy * y * (static_cast(1) - y); } }; // exp(x) = e^x -struct ExpFunctor { +template +struct ExpFunctor : public BaseActivationFunctor { template - void operator()(Device d, X x, Y y) { + void operator()(Device d, X x, Y y) const { y.device(d) = x.exp(); } }; -struct ExpGradFunctor { +template +struct ExpGradFunctor : public BaseActivationFunctor { template - void operator()(Device d, X x, Y y, dY dy, dX dx) { + void operator()(Device d, X x, Y y, dY dy, dX dx) const { dx.device(d) = dy * y; } }; // relu(x) = max(x, 0) template -struct ReluFunctor { +struct ReluFunctor : public BaseActivationFunctor { template - void operator()(Device d, X x, Y y) { + void operator()(Device d, X x, Y y) const { y.device(d) = x.cwiseMax(static_cast(0)); } }; template -struct ReluGradFunctor { +struct ReluGradFunctor : public BaseActivationFunctor { template - void operator()(Device d, X x, Y y, dY dy, dX dx) { + void operator()(Device d, X x, Y y, dY dy, dX dx) const { dx.device(d) = dy * (x > static_cast(0)).template cast(); } }; // tanh(x) = (exp(x) - exp(-x)) / (exp(x) + exp(-x)) -struct TanhFunctor { +template +struct TanhFunctor : public BaseActivationFunctor { template - void operator()(Device d, X x, Y y) { + void operator()(Device d, X x, Y y) const { y.device(d) = x.tanh(); } }; template -struct TanhGradFunctor { +struct TanhGradFunctor : public BaseActivationFunctor { template - void operator()(Device d, X x, Y y, dY dy, dX dx) { + void operator()(Device d, X x, Y y, dY dy, dX dx) const { dx.device(d) = dy * (static_cast(1) - y * y); } }; // sqrt(x) = x^(1/2) -struct SqrtFunctor { +template +struct SqrtFunctor : public BaseActivationFunctor { template - void operator()(Device d, X x, Y y) { + void operator()(Device d, X x, Y y) const { y.device(d) = x.sqrt(); } }; template -struct SqrtGradFunctor { +struct SqrtGradFunctor : public BaseActivationFunctor { template - void operator()(Device d, X x, Y y, dY dy, dX dx) { + void operator()(Device d, X x, Y y, dY dy, dX dx) const { const Y y_conj = Eigen::numext::conj(y); dx.device(d) = static_cast(0.5) * dy / y_conj; } }; // abs(x) = |x| -struct AbsFunctor { +template +struct AbsFunctor : public BaseActivationFunctor { template - void operator()(Device d, X x, Y y) { + void operator()(Device d, X x, Y y) const { y.device(d) = x.abs(); } }; -struct AbsGradFunctor { +template +struct AbsGradFunctor : public BaseActivationFunctor { template - void operator()(Device d, X x, Y y, dY dy, dX dx) { + void operator()(Device d, X x, Y y, dY dy, dX dx) const { dx.device(d) = dy * x.sign(); } }; // reciprocal(x) = 1 / x template -struct ReciprocalFunctor { +struct ReciprocalFunctor : public BaseActivationFunctor { template - void operator()(Device d, X x, Y y) { + void operator()(Device d, X x, Y y) const { y.device(d) = static_cast(1) / x; } }; template -struct ReciprocalGradFunctor { +struct ReciprocalGradFunctor : public BaseActivationFunctor { template - void operator()(Device d, X x, Y y, dY dy, dX dx) { + void operator()(Device d, X x, Y y, dY dy, dX dx) const { dx.device(d) = dy * static_cast(-1) * y * y; } }; // log(x) = natural logarithm of x -struct LogFunctor { +template +struct LogFunctor : public BaseActivationFunctor { template - void operator()(Device d, X x, Y y) { + void operator()(Device d, X x, Y y) const { y.device(d) = x.log(); } }; template -struct LogGradFunctor { +struct LogGradFunctor : public BaseActivationFunctor { template - void operator()(Device d, X x, Y y, dY dy, dX dx) { + void operator()(Device d, X x, Y y, dY dy, dX dx) const { dx.device(d) = dy * (static_cast(1) / x); } }; // square(x) = x^2 -struct SquareFunctor { +template +struct SquareFunctor : public BaseActivationFunctor { template - void operator()(Device d, X x, Y y) { + void operator()(Device d, X x, Y y) const { y.device(d) = x.square(); } }; template -struct SquareGradFunctor { +struct SquareGradFunctor : public BaseActivationFunctor { template - void operator()(Device d, X x, Y y, dY dy, dX dx) { + void operator()(Device d, X x, Y y, dY dy, dX dx) const { dx.device(d) = dy * static_cast(2) * x; } }; -template -class BReluKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext& context) const override { - auto* X = context.Input("X"); - auto* Y = context.Output("Y"); - auto t_min = static_cast(context.Attr("t_min")); - auto t_max = static_cast(context.Attr("t_max")); - Y->mutable_data(context.GetPlace()); +template +struct BReluFunctor : public BaseActivationFunctor { + float t_min; + float t_max; + + // NOTE: Explicit hides the `BaseActivationFunctor::GetAttrs` + // not polymorphism for speed. + typename BaseActivationFunctor::AttrPair GetAttrs() { + return {{"t_min", &t_min}, {"t_max", &t_max}}; + } - auto x = framework::EigenVector::Flatten(*X); - auto y = framework::EigenVector::Flatten(*Y); - auto place = context.GetEigenDevice(); - y.device(place) = x.cwiseMax(t_min).cwiseMin(t_max); + template + void operator()(Device d, X x, Y y) const { + y.device(d) = x.cwiseMax(t_min).cwiseMin(t_max); } }; -template -class BReluGradKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext& context) const override { - auto* X = context.Input("X"); - auto* dY = context.Input(framework::GradVarName("Y")); - auto* dX = context.Output(framework::GradVarName("X")); - auto t_min = static_cast(context.Attr("t_min")); - auto t_max = static_cast(context.Attr("t_max")); - dX->mutable_data(context.GetPlace()); - - auto dy = framework::EigenVector::Flatten(*dY); - auto x = framework::EigenVector::Flatten(*X); - auto dx = framework::EigenVector::Flatten(*dX); - auto place = context.GetEigenDevice(); - - dx.device(place) = dy * ((x > t_min) * (x < t_max)).template cast(); +template +struct BReluGradFunctor : public BaseActivationFunctor { + float t_min; + float t_max; + typename BaseActivationFunctor::AttrPair GetAttrs() { + return {{"t_min", &t_min}, {"t_max", &t_max}}; + } + template + void operator()(Device d, X x, Y y, dY dy, dX dx) const { + dx.device(d) = dy * ((x > t_min) * (x < t_max)).template cast(); } }; -template -class SoftReluKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext& context) const override { - auto* X = context.Input("X"); - auto* Y = context.Output("Y"); - auto threshold = static_cast(context.Attr("threshold")); - Y->mutable_data(context.GetPlace()); +template +struct SoftReluFunctor : public BaseActivationFunctor { + float threshold; + typename BaseActivationFunctor::AttrPair GetAttrs() { + return {{"threshold", &threshold}}; + } - auto x = framework::EigenVector::Flatten(*X); - auto y = framework::EigenVector::Flatten(*Y); - auto place = context.GetEigenDevice(); - auto temp = x.cwiseMax(-threshold).cwiseMin(threshold).eval(); - y.device(place) = (static_cast(1) + temp.exp()).log(); + template + void operator()(Device d, X x, Y y) const { + auto temp = x.cwiseMax(-threshold).cwiseMin(threshold); + y.device(d) = (static_cast(1) + temp.exp()).log(); } }; -template -class SoftReluGradKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext& context) const override { - auto* X = context.Input("X"); - auto* Y = context.Input("Y"); - auto* dY = context.Input(framework::GradVarName("Y")); - auto* dX = context.Output(framework::GradVarName("X")); - auto threshold = static_cast(context.Attr("threshold")); - dX->mutable_data(context.GetPlace()); - - auto x = framework::EigenVector::Flatten(*X); - auto y = framework::EigenVector::Flatten(*Y); - auto dy = framework::EigenVector::Flatten(*dY); - auto dx = framework::EigenVector::Flatten(*dX); - auto place = context.GetEigenDevice(); +template +struct SoftReluGradFunctor : public BaseActivationFunctor { + float threshold; + typename BaseActivationFunctor::AttrPair GetAttrs() { + return {{"threshold", &threshold}}; + } + template + void operator()(Device d, X x, Y y, dY dy, dX dx) const { auto temp = ((x > -threshold) * (x < threshold)).template cast().eval(); - dx.device(place) = dy * (static_cast(1) - (-y).exp()) * temp; + dx.device(d) = dy * (static_cast(1) - (-y).exp()) * temp; } }; -template -class PowKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext& context) const override { - auto* X = context.Input("X"); - auto* Y = context.Output("Y"); - auto factor = static_cast(context.Attr("factor")); - Y->mutable_data(context.GetPlace()); - - auto x = framework::EigenVector::Flatten(*X); - auto y = framework::EigenVector::Flatten(*Y); - auto place = context.GetEigenDevice(); - y.device(place) = x.pow(factor); +template +struct PowFunctor : public BaseActivationFunctor { + float factor; + typename BaseActivationFunctor::AttrPair GetAttrs() { + return {{"factor", &factor}}; + } + template + void operator()(Device d, X x, Y y) const { + y.device(d) = x.pow(factor); } }; -template -class PowGradKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext& context) const override { - auto* X = context.Input("X"); - auto* dY = context.Input(framework::GradVarName("Y")); - auto* dX = context.Output(framework::GradVarName("X")); - auto factor = static_cast(context.Attr("factor")); - dX->mutable_data(context.GetPlace()); - - auto dy = framework::EigenVector::Flatten(*dY); - auto x = framework::EigenVector::Flatten(*X); - auto dx = framework::EigenVector::Flatten(*dX); - auto place = context.GetEigenDevice(); - - dx.device(place) = dy * factor * x.pow(factor - static_cast(1)); +template +struct PowGradFunctor : public BaseActivationFunctor { + float factor; + typename BaseActivationFunctor::AttrPair GetAttrs() { + return {{"factor", &factor}}; + } + template + void operator()(Device d, X x, Y y, dY dy, dX dx) const { + dx.device(d) = dy * factor * x.pow(factor - static_cast(1)); } }; -template -class STanhKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext& context) const override { - auto* X = context.Input("X"); - auto* Y = context.Output("Y"); - auto scale_a = static_cast(context.Attr("scale_a")); - auto scale_b = static_cast(context.Attr("scale_b")); - Y->mutable_data(context.GetPlace()); +template +struct STanhFunctor : public BaseActivationFunctor { + float scale_a; + float scale_b; + typename BaseActivationFunctor::AttrPair GetAttrs() { + return {{"scale_a", &scale_a}, {"scale_b", &scale_b}}; + } - auto x = framework::EigenVector::Flatten(*X); - auto y = framework::EigenVector::Flatten(*Y); - auto place = context.GetEigenDevice(); - y.device(place) = scale_b * (scale_a * x).tanh(); + template + void operator()(Device d, X x, Y y) const { + y.device(d) = scale_b * (scale_a * x).tanh(); } }; -template -class STanhGradKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext& context) const override { - auto* X = context.Input("X"); - auto* dY = context.Input(framework::GradVarName("Y")); - auto* dX = context.Output(framework::GradVarName("X")); - auto scale_a = static_cast(context.Attr("scale_a")); - auto scale_b = static_cast(context.Attr("scale_b")); - dX->mutable_data(context.GetPlace()); - - auto dy = framework::EigenVector::Flatten(*dY); - auto x = framework::EigenVector::Flatten(*X); - auto dx = framework::EigenVector::Flatten(*dX); - auto place = context.GetEigenDevice(); +template +struct STanhGradFunctor : public BaseActivationFunctor { + float scale_a; + float scale_b; + typename BaseActivationFunctor::AttrPair GetAttrs() { + return {{"scale_a", &scale_a}, {"scale_b", &scale_b}}; + } + template + void operator()(Device d, X x, Y y, dY dy, dX dx) const { auto temp = (scale_a * x).tanh() * (scale_a * x).tanh(); - dx.device(place) = dy * scale_a * scale_b * (static_cast(1) - temp); + dx.device(d) = dy * scale_a * scale_b * (static_cast(1) - temp); } }; } // namespace operators } // namespace paddle + +#define FOR_EACH_KERNEL_FUNCTOR(__macro) \ + __macro(sigmoid, SigmoidFunctor, SigmoidGradFunctor); \ + __macro(exp, ExpFunctor, ExpGradFunctor); \ + __macro(relu, ReluFunctor, ReluGradFunctor); \ + __macro(tanh, TanhFunctor, TanhGradFunctor); \ + __macro(sqrt, SqrtFunctor, SqrtGradFunctor); \ + __macro(abs, AbsFunctor, AbsGradFunctor); \ + __macro(reciprocal, ReciprocalFunctor, ReciprocalGradFunctor); \ + __macro(log, LogFunctor, LogGradFunctor); \ + __macro(square, SquareFunctor, SquareGradFunctor); \ + __macro(brelu, BReluFunctor, BReluGradFunctor); \ + __macro(soft_relu, SoftReluFunctor, SoftReluGradFunctor); \ + __macro(pow, PowFunctor, PowGradFunctor); \ + __macro(stanh, STanhFunctor, STanhGradFunctor) From 78808b20911dd95e1a49495c99d814b59e3290c9 Mon Sep 17 00:00:00 2001 From: zchen0211 Date: Thu, 28 Sep 2017 17:27:37 -0700 Subject: [PATCH 065/342] 1 api --- paddle/operators/cond_op.cc | 4 ++-- paddle/operators/gather.cu.h | 30 ++++++++++---------------- paddle/operators/gather.h | 38 +++++++++++++-------------------- paddle/operators/gather_op.cu | 4 ++-- paddle/operators/gather_op.h | 2 +- paddle/operators/gather_test.cc | 2 +- paddle/operators/scatter.cu.h | 36 ++++++++++++------------------- paddle/operators/scatter.h | 20 ++++++----------- paddle/operators/scatter_op.cu | 4 ++-- paddle/operators/scatter_op.h | 2 +- 10 files changed, 55 insertions(+), 87 deletions(-) diff --git a/paddle/operators/cond_op.cc b/paddle/operators/cond_op.cc index 157656786a..983b5142b1 100644 --- a/paddle/operators/cond_op.cc +++ b/paddle/operators/cond_op.cc @@ -169,8 +169,8 @@ void CondOp::Run(const Scope& scope, tensor_child->Resize(dim); tensor_child->mutable_data(dim, platform::CPUPlace()); - CPUTGather(dev_ctx.GetPlace(), tensor_parent, &index_tensors[i], - tensor_child); + CPUGather(dev_ctx.GetPlace(), tensor_parent, &index_tensors[i], + tensor_child); } } diff --git a/paddle/operators/gather.cu.h b/paddle/operators/gather.cu.h index c96071e295..b400c10440 100644 --- a/paddle/operators/gather.cu.h +++ b/paddle/operators/gather.cu.h @@ -38,19 +38,6 @@ __global__ void GatherCUDAKernel(const T* params, const int* indices, T* output, } } -// Implementation of GPU copy: -template -struct GPUGather { - void operator()(const T* src, const int* index, const int slice_size, - const int index_size, T* output) { - int block = 512; - int n = slice_size * index_size; - int grid = (n + block - 1) / block; - GatherCUDAKernel<<>>(src, index, output, index_size, - slice_size); - } -}; - /** * A thin wrapper on gpu tensor * Return a new tensor from source tensor, gathered according to index @@ -59,8 +46,8 @@ struct GPUGather { * return: output tensor */ template -void GPUTGather(const Place& place, const Tensor* src, const Tensor* index, - Tensor* output) { +void GPUGather(const Place& place, const Tensor* src, const Tensor* index, + Tensor* output) { PADDLE_ENFORCE(platform::is_gpu_place(place)); // check index of shape 1-D PADDLE_ENFORCE(index->dims().size() == 1); @@ -74,10 +61,15 @@ void GPUTGather(const Place& place, const Tensor* src, const Tensor* index, int slice_size = 1; for (int i = 1; i < src_dims.size(); ++i) slice_size *= src_dims[i]; - // Gathering - GPUGather gather_functor; - gather_functor(src->data(), index->data(), slice_size, index_size, - output->data()); + const T* p_src = src->data(); + const int* p_index = index->data(); + T* p_output = output->data(); + + int block = 512; + int n = slice_size * index_size; + int grid = (n + block - 1) / block; + GatherCUDAKernel<<>>(p_src, p_index, p_output, index_size, + slice_size); } } // namespace operators diff --git a/paddle/operators/gather.h b/paddle/operators/gather.h index a3db17bd3d..cb635f6825 100644 --- a/paddle/operators/gather.h +++ b/paddle/operators/gather.h @@ -24,32 +24,18 @@ limitations under the License. */ namespace paddle { namespace operators { -// Implementation of CPU copy -template -struct CPUGather { - void operator()(const T* src, const int* indices, const int slice_size, - const int index_size, T* output) { - const size_t slice_bytes = slice_size * sizeof(T); - - for (int i = 0; i < index_size; ++i) { - int index_ = indices[i]; - memcpy(output + i * slice_size, src + index_ * slice_size, slice_bytes); - } - } -}; - /** - * A thin wrapper on cpu tensor + * A thin wrapper for gathering on cpu tensor * Return a new tensor from source tensor, gathered according to index * input[src]: type-T source Tensor * input[index]: type-int index Tensor (1-D) * return: output tensor */ template -void CPUTGather(const platform::Place& place, - const paddle::framework::Tensor* src, - const paddle::framework::Tensor* index, - paddle::framework::Tensor* output) { +void CPUGather(const platform::Place& place, + const paddle::framework::Tensor* src, + const paddle::framework::Tensor* index, + paddle::framework::Tensor* output) { PADDLE_ENFORCE(platform::is_cpu_place(place)); // check index of shape 1-D PADDLE_ENFORCE(index->dims().size() == 1); @@ -59,14 +45,20 @@ void CPUTGather(const platform::Place& place, framework::DDim output_dims(src_dims); output_dims[0] = index_size; + const T* p_src = src->data(); + const int* p_index = index->data(); + T* p_output = output->data(); + // slice size int slice_size = 1; for (int i = 1; i < src_dims.size(); ++i) slice_size *= src_dims[i]; - // Gathering - CPUGather gather_functor; - gather_functor(src->data(), index->data(), slice_size, index_size, - output->data()); + const size_t slice_bytes = slice_size * sizeof(T); + + for (int i = 0; i < index_size; ++i) { + int index_ = p_index[i]; + memcpy(p_output + i * slice_size, p_src + index_ * slice_size, slice_bytes); + } } } // namespace operators diff --git a/paddle/operators/gather_op.cu b/paddle/operators/gather_op.cu index f7533cdd64..06004614b2 100644 --- a/paddle/operators/gather_op.cu +++ b/paddle/operators/gather_op.cu @@ -32,7 +32,7 @@ class GatherOpCUDAKernel : public framework::OpKernel { output->mutable_data(ctx.GetPlace()); - GPUTGather(ctx.GetPlace(), x, index, output); + GPUGather(ctx.GetPlace(), x, index, output); } }; @@ -53,7 +53,7 @@ class GatherGradOpCUDAKernel : public framework::OpKernel { auto place = ctx.GetEigenDevice(); dxt.device(place) = dxt.constant(static_cast(0)); - GPUTScatter(ctx.GetPlace(), dO, Index, dX); + GPUScatterAssign(ctx.GetPlace(), dO, Index, dX); } }; diff --git a/paddle/operators/gather_op.h b/paddle/operators/gather_op.h index b80a4ab370..fb065b8da7 100644 --- a/paddle/operators/gather_op.h +++ b/paddle/operators/gather_op.h @@ -36,7 +36,7 @@ class GatherOpKernel : public framework::OpKernel { output->mutable_data(ctx.GetPlace()); - CPUTGather(ctx.GetPlace(), x, index, output); + CPUGather(ctx.GetPlace(), x, index, output); } }; diff --git a/paddle/operators/gather_test.cc b/paddle/operators/gather_test.cc index ea06ae2847..3c1d06ccd1 100644 --- a/paddle/operators/gather_test.cc +++ b/paddle/operators/gather_test.cc @@ -41,7 +41,7 @@ TEST(Gather, GatherData) { int* p_output = output->mutable_data(make_ddim({2, 4}), CPUPlace()); - CPUTGather(CPUPlace(), src, index, output); + CPUGather(CPUPlace(), src, index, output); for (int i = 0; i < 4; ++i) EXPECT_EQ(p_output[i], i + 4); for (int i = 4; i < 8; ++i) EXPECT_EQ(p_output[i], i - 4); diff --git a/paddle/operators/scatter.cu.h b/paddle/operators/scatter.cu.h index 82e5040305..add4791a79 100644 --- a/paddle/operators/scatter.cu.h +++ b/paddle/operators/scatter.cu.h @@ -36,20 +36,6 @@ __global__ void ScatterCUDAKernel(const T* params, const int* indices, } } -// Implementation of GPU copy: -template -struct GPUScatterAssign { - void operator()(const T* src, const int* index, const int slice_size, - const int index_size, T* output) { - int block = 512; - int n = slice_size * index_size; - int grid = (n + block - 1) / block; - // printf("grid, block: %d %d\n", grid, block); - ScatterCUDAKernel<<>>(src, index, output, index_size, - slice_size); - } -}; - /** * A thin wrapper on gpu tensor * Return a new updated tensor from source tensor, scatter-assigned according to @@ -59,10 +45,10 @@ struct GPUScatterAssign { * return: output tensor */ template -void GPUTScatter(const platform::Place& place, - const paddle::framework::Tensor* src, - const paddle::framework::Tensor* index, - paddle::framework::Tensor* output) { +void GPUScatterAssign(const platform::Place& place, + const paddle::framework::Tensor* src, + const paddle::framework::Tensor* index, + paddle::framework::Tensor* output) { PADDLE_ENFORCE(platform::is_gpu_place(place)); // check index of shape 1-D PADDLE_ENFORCE(index->dims().size() == 1); @@ -76,10 +62,16 @@ void GPUTScatter(const platform::Place& place, int slice_size = 1; for (int i = 1; i < src_dims.size(); ++i) slice_size *= src_dims[i]; - // Scatter Assign - GPUScatterAssign scatter_functor; - scatter_functor(src->data(), index->data(), slice_size, index_size, - output->data()); + const T* p_src = src->data(); + const int* p_index = index->data(); + T* p_output = output->data(); + + int block = 512; + int n = slice_size * index_size; + int grid = (n + block - 1) / block; + + ScatterCUDAKernel<<>>(p_src, p_index, p_output, index_size, + slice_size); } } // namespace operators diff --git a/paddle/operators/scatter.h b/paddle/operators/scatter.h index 670204b4dd..f895f22e28 100644 --- a/paddle/operators/scatter.h +++ b/paddle/operators/scatter.h @@ -25,19 +25,6 @@ namespace operators { using Tensor = framework::Tensor; -// Implementation of CPU copy -template -void CPUScatterAssign(const T* src, const int* index, const int slice_size, - const int index_size, T* output) { - // paddle::framework::DDim output_dims = output->dims(); - const size_t slice_bytes = slice_size * sizeof(T); - - for (int i = 0; i < index_size; ++i) { - int index_ = index[i]; - memcpy(output + index_ * slice_size, src + i * slice_size, slice_bytes); - } -} - /** * Return a updated tensor from source tensor, scattered according to index: * dst[i] = src[index[i]] @@ -70,7 +57,12 @@ void ScatterAssign(const platform::Place& place, size_t slice_size = 1; for (int i = 1; i < src_dims.size(); ++i) slice_size *= src_dims[i]; - CPUScatterAssign(p_src, p_index, slice_size, index_size, p_output); + const size_t slice_bytes = slice_size * sizeof(T); + + for (int i = 0; i < index_size; ++i) { + int index_ = p_index[i]; + memcpy(p_output + index_ * slice_size, p_src + i * slice_size, slice_bytes); + } } } // namespace operators diff --git a/paddle/operators/scatter_op.cu b/paddle/operators/scatter_op.cu index 89d23945e0..831eabdae4 100644 --- a/paddle/operators/scatter_op.cu +++ b/paddle/operators/scatter_op.cu @@ -32,7 +32,7 @@ class ScatterOpCUDAKernel : public framework::OpKernel { Out->ShareDataWith(*Ref); - GPUTScatter(ctx.GetPlace(), Updates, Index, Out); + GPUScatterAssign(ctx.GetPlace(), Updates, Index, Out); } }; @@ -51,7 +51,7 @@ class ScatterGradOpCUDAKernel : public framework::OpKernel { dRef->ShareDataWith(*dOut); dUpdates->mutable_data(ctx.GetPlace()); // Gradient by Gather: dUpdates = dO[Index] - GPUTGather(ctx.GetPlace(), dOut, Index, dUpdates); + GPUGather(ctx.GetPlace(), dOut, Index, dUpdates); } }; diff --git a/paddle/operators/scatter_op.h b/paddle/operators/scatter_op.h index 74b2718f43..771a1f2ddb 100644 --- a/paddle/operators/scatter_op.h +++ b/paddle/operators/scatter_op.h @@ -56,7 +56,7 @@ class ScatterGradientOpKernel : public framework::OpKernel { dRef->ShareDataWith(*dOut); dUpdates->mutable_data(ctx.GetPlace()); // Gradient by Gather: dUpdates += dO[Index] - CPUTGather(ctx.GetPlace(), dOut, Index, dUpdates); + CPUGather(ctx.GetPlace(), dOut, Index, dUpdates); } }; From 61cc3ae4d13a798f341ceb5b2240b92526b3f43f Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 28 Sep 2017 11:52:03 -0700 Subject: [PATCH 066/342] Stablize elementwise_mul by using double precision --- paddle/pybind/pybind.cc | 16 +++-- paddle/pybind/tensor_py.h | 15 ++++- python/paddle/v2/framework/tests/op_test.py | 60 +++++++++++++------ .../tests/test_elementwise_mul_op.py | 32 +++++----- 4 files changed, 78 insertions(+), 45 deletions(-) diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc index d85bf6c7fa..f4121e9d71 100644 --- a/paddle/pybind/pybind.cc +++ b/paddle/pybind/pybind.cc @@ -77,20 +77,18 @@ PYBIND11_PLUGIN(core) { }) .def("set", PyCPUTensorSetFromArray) .def("set", PyCPUTensorSetFromArray) + .def("set", PyCPUTensorSetFromArray) #ifndef PADDLE_ONLY_CPU .def("set", PyCUDATensorSetFromArray) .def("set", PyCUDATensorSetFromArray) + .def("set", PyCUDATensorSetFromArray) #endif .def("shape", [](Tensor &self) { return vectorize(self.dims()); }) - .def("set_float_element", - [](Tensor &self, size_t offset, float f) { - // TODO(yuyang18): Only support GPU now. - self.data()[offset] = f; - }) - .def("get_float_element", [](Tensor &self, size_t offset) -> float { - // TODO(yuyang18): Only support GPU now. - return self.data()[offset]; - }); + .def("set_float_element", TensorSetElement) + .def("get_float_element", TensorGetElement) + .def("set_double_element", TensorSetElement) + .def("get_double_element", TensorGetElement) + .def("dtype", [](Tensor &self) { return ToDataType(self.type()); }); py::class_(m, "LoDTensor") .def_buffer( diff --git a/paddle/pybind/tensor_py.h b/paddle/pybind/tensor_py.h index 10621e90ee..3e3e6bc031 100644 --- a/paddle/pybind/tensor_py.h +++ b/paddle/pybind/tensor_py.h @@ -73,10 +73,23 @@ struct CastToPyBufferImpl { }; } // namespace details inline py::buffer_info CastToPyBuffer(framework::Tensor &tensor) { - auto buffer_info = details::CastToPyBufferImpl()(tensor); + auto buffer_info = + details::CastToPyBufferImpl()(tensor); return buffer_info; } +template +T TensorGetElement(framework::Tensor &self, size_t offset) { + PADDLE_ENFORCE(platform::is_cpu_place(self.place())); + return self.data()[offset]; +} + +template +void TensorSetElement(framework::Tensor &self, size_t offset, T elem) { + PADDLE_ENFORCE(platform::is_cpu_place(self.place())); + self.data()[offset] = elem; +} + template void PyCPUTensorSetFromArray( framework::Tensor &self, diff --git a/python/paddle/v2/framework/tests/op_test.py b/python/paddle/v2/framework/tests/op_test.py index 89979044f2..70ae50d401 100644 --- a/python/paddle/v2/framework/tests/op_test.py +++ b/python/paddle/v2/framework/tests/op_test.py @@ -69,24 +69,27 @@ def set_input(scope, op, inputs, place): def set_output_grad(scope, op, outputs, place): + def __set_tensor__(name): + out_tensor = scope.find_var(name).get_tensor() + grad_tensor = scope.new_var(grad_var_name(name)).get_tensor() + out_dtype = out_tensor.dtype() + if out_dtype == core.DataType.FP64: + data = np.ones(out_tensor.shape(), dtype=np.float64) + elif out_dtype == core.DataType.FP32: + data = np.ones(out_tensor.shape(), dtype=np.float32) + else: + raise ValueError("Not supported data type " + str(out_dtype)) + + grad_tensor.set(data, place) + for out_name, out_dup in Operator.get_op_outputs(op.type()): if out_name in outputs: if out_dup: sub_out = outputs[out_name] for sub_out_name, _ in sub_out: - out_tensor = scope.find_var(sub_out_name).get_tensor() - grad_tensor = scope.new_var(grad_var_name( - sub_out_name)).get_tensor() - grad_tensor.set_dims(out_tensor.shape()) - data = np.ones(out_tensor.shape(), dtype=np.float32) - grad_tensor.set(data, place) + __set_tensor__(sub_out_name) else: - out_tensor = scope.find_var(out_name).get_tensor() - grad_tensor = scope.new_var(grad_var_name(out_name)).get_tensor( - ) - grad_tensor.set_dims(out_tensor.shape()) - data = np.ones(out_tensor.shape(), dtype=np.float32) - grad_tensor.set(data, place) + __set_tensor__(out_name) def get_numeric_gradient(scope, @@ -96,7 +99,6 @@ def get_numeric_gradient(scope, output_names, delta=0.005, in_place=False): - set_input(scope, op, inputs, core.CPUPlace()) tensor_to_check = scope.find_var(input_to_check).get_tensor() @@ -115,7 +117,29 @@ def get_numeric_gradient(scope, tensor_to_check = scope.find_var(input_to_check).get_tensor() tensor_size = product(tensor_to_check.get_dims()) - gradient_flat = np.zeros(shape=(tensor_size, ), dtype='float32') + tensor_to_check_dtype = tensor_to_check.dtype() + if tensor_to_check_dtype == core.DataType.FP32: + tensor_to_check_dtype = np.float32 + elif tensor_to_check_dtype == core.DataType.FP64: + tensor_to_check_dtype = np.float64 + else: + raise ValueError("Not supported data type " + str( + tensor_to_check_dtype)) + + gradient_flat = np.zeros(shape=(tensor_size, ), dtype=tensor_to_check_dtype) + + def __get_elem__(tensor, i): + if tensor_to_check_dtype == np.float32: + return tensor.get_float_element(i) + else: + return tensor.get_double_element(i) + + def __set_elem__(tensor, i, e): + if tensor_to_check_dtype == np.float32: + tensor.set_float_element(i, e) + else: + tensor.set_double_element(i, e) + # we only compute gradient of one element each time. # we use a for loop to compute the gradient of every element. for i in xrange(tensor_size): @@ -123,20 +147,20 @@ def get_numeric_gradient(scope, set_input(scope, op, inputs, core.CPUPlace()) # get one input element throw it's index i. - origin = tensor_to_check.get_float_element(i) + origin = __get_elem__(tensor_to_check, i) # add delta to it, run op and then get the sum of the result tensor. x_pos = origin + delta - tensor_to_check.set_float_element(i, x_pos) + __set_elem__(tensor_to_check, i, x_pos) y_pos = get_output() if in_place: set_input(scope, op, inputs, core.CPUPlace()) x_neg = origin - delta - tensor_to_check.set_float_element(i, x_neg) + __set_elem__(tensor_to_check, i, x_neg) y_neg = get_output() - tensor_to_check.set_float_element(i, origin) + __set_elem__(tensor_to_check, i, origin) gradient_flat[i] = (y_pos - y_neg) / delta / 2 return gradient_flat.reshape(tensor_to_check.get_dims()) diff --git a/python/paddle/v2/framework/tests/test_elementwise_mul_op.py b/python/paddle/v2/framework/tests/test_elementwise_mul_op.py index cee4385a81..261ca9cb3d 100644 --- a/python/paddle/v2/framework/tests/test_elementwise_mul_op.py +++ b/python/paddle/v2/framework/tests/test_elementwise_mul_op.py @@ -7,8 +7,8 @@ class ElementwiseMulOp(OpTest): def setUp(self): self.op_type = "elementwise_mul" self.inputs = { - 'X': np.random.uniform(0.1, 1, [13, 17]).astype("float32"), - 'Y': np.random.uniform(0.1, 1, [13, 17]).astype("float32") + 'X': np.random.uniform(0.1, 1, [13, 17]).astype("float64"), + 'Y': np.random.uniform(0.1, 1, [13, 17]).astype("float64") } self.outputs = {'Out': np.multiply(self.inputs['X'], self.inputs['Y'])} @@ -16,23 +16,21 @@ class ElementwiseMulOp(OpTest): self.check_output() def test_check_grad_normal(self): - self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.1) + self.check_grad(['X', 'Y'], 'Out') def test_check_grad_ingore_x(self): - self.check_grad( - ['Y'], 'Out', max_relative_error=0.1, no_grad_set=set("X")) + self.check_grad(['Y'], 'Out', no_grad_set=set("X")) def test_check_grad_ingore_y(self): - self.check_grad( - ['X'], 'Out', max_relative_error=0.1, no_grad_set=set('Y')) + self.check_grad(['X'], 'Out', no_grad_set=set('Y')) class TestElementwiseMulOp_Vector(ElementwiseMulOp): def setUp(self): self.op_type = "elementwise_mul" self.inputs = { - 'X': np.random.random((32, )).astype("float32"), - 'Y': np.random.random((32, )).astype("float32") + 'X': np.random.random((32, )).astype("float64"), + 'Y': np.random.random((32, )).astype("float64") } self.outputs = {'Out': np.multiply(self.inputs['X'], self.inputs['Y'])} @@ -41,8 +39,8 @@ class TestElementwiseMulOp_broadcast_0(ElementwiseMulOp): def setUp(self): self.op_type = "elementwise_mul" self.inputs = { - 'X': np.random.rand(2, 3, 4).astype(np.float32), - 'Y': np.random.rand(2).astype(np.float32) + 'X': np.random.rand(2, 3, 4).astype(np.float64), + 'Y': np.random.rand(2).astype(np.float64) } self.attrs = {'axis': 0} @@ -55,8 +53,8 @@ class TestElementwiseMulOp_broadcast_1(ElementwiseMulOp): def setUp(self): self.op_type = "elementwise_mul" self.inputs = { - 'X': np.random.rand(2, 3, 4).astype(np.float32), - 'Y': np.random.rand(3).astype(np.float32) + 'X': np.random.rand(2, 3, 4).astype(np.float64), + 'Y': np.random.rand(3).astype(np.float64) } self.attrs = {'axis': 1} @@ -69,8 +67,8 @@ class TestElementwiseMulOp_broadcast_2(ElementwiseMulOp): def setUp(self): self.op_type = "elementwise_mul" self.inputs = { - 'X': np.random.rand(2, 3, 4).astype(np.float32), - 'Y': np.random.rand(4).astype(np.float32) + 'X': np.random.rand(2, 3, 4).astype(np.float64), + 'Y': np.random.rand(4).astype(np.float64) } self.outputs = { @@ -82,8 +80,8 @@ class TestElementwiseMulOp_broadcast_3(ElementwiseMulOp): def setUp(self): self.op_type = "elementwise_mul" self.inputs = { - 'X': np.random.rand(2, 3, 4, 5).astype(np.float32), - 'Y': np.random.rand(3, 4).astype(np.float32) + 'X': np.random.rand(2, 3, 4, 5).astype(np.float64), + 'Y': np.random.rand(3, 4).astype(np.float64) } self.attrs = {'axis': 1} From 54892c079735aaffafc7388486482e06ff139439 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 28 Sep 2017 11:59:17 -0700 Subject: [PATCH 067/342] Simplify op_test --- python/paddle/v2/framework/tests/op_test.py | 42 +++++++++------------ 1 file changed, 18 insertions(+), 24 deletions(-) diff --git a/python/paddle/v2/framework/tests/op_test.py b/python/paddle/v2/framework/tests/op_test.py index 70ae50d401..23794151bd 100644 --- a/python/paddle/v2/framework/tests/op_test.py +++ b/python/paddle/v2/framework/tests/op_test.py @@ -12,17 +12,19 @@ def grad_var_name(var_name): def create_op(scope, op_type, inputs, outputs, attrs): kwargs = dict() + def __create_var__(name, var_name): + scope.new_var(var_name) + kwargs[name].append(var_name) + for in_name, in_dup in Operator.get_op_inputs(op_type): if in_name in inputs: kwargs[in_name] = [] if in_dup: sub_in = inputs[in_name] for sub_in_name, _ in sub_in: - var = scope.new_var(sub_in_name) - kwargs[in_name].append(sub_in_name) + __create_var__(in_name, sub_in_name) else: - var = scope.new_var(in_name) - kwargs[in_name].append(in_name) + __create_var__(in_name, in_name) for out_name, out_dup in Operator.get_op_outputs(op_type): if out_name in outputs: @@ -30,11 +32,9 @@ def create_op(scope, op_type, inputs, outputs, attrs): if out_dup: sub_out = outputs[out_name] for sub_out_name, _ in sub_out: - var = scope.new_var(sub_out_name) - kwargs[out_name].append(sub_out_name) + __create_var__(out_name, sub_out_name) else: - var = scope.new_var(out_name) - kwargs[out_name].append(out_name) + __create_var__(out_name, out_name) for attr_name in Operator.get_op_attr_names(op_type): if attr_name in attrs: @@ -44,28 +44,22 @@ def create_op(scope, op_type, inputs, outputs, attrs): def set_input(scope, op, inputs, place): + def __set_input__(var_name, var): + tensor = scope.find_var(var_name).get_tensor() + if isinstance(var, tuple): + tensor.set_lod(var[1]) + var = var[0] + tensor.set_dims(var.shape) + tensor.set(var, place) + for in_name, in_dup in Operator.get_op_inputs(op.type()): if in_name in inputs: if in_dup: sub_in = inputs[in_name] for sub_in_name, sub_in_val in sub_in: - var = scope.find_var(sub_in_name) - tensor = var.get_tensor() - sub_in_array = sub_in_val[0] \ - if isinstance(sub_in_val, tuple) else sub_in_val - tensor.set_dims(sub_in_array.shape) - tensor.set(sub_in_array, place) - if isinstance(sub_in_val, tuple): - tensor.set_lod(sub_in_val[1]) + __set_input__(sub_in_name, sub_in_val) else: - var = scope.find_var(in_name) - tensor = var.get_tensor() - in_val = inputs[in_name] - in_array = in_val[0] if isinstance(in_val, tuple) else in_val - tensor.set_dims(in_array.shape) - tensor.set(in_array, place) - if isinstance(in_val, tuple): - tensor.set_lod(in_val[1]) + __set_input__(in_name, inputs[in_name]) def set_output_grad(scope, op, outputs, place): From 279178e457dfb12c15ddb4e51d8f75e75ad6db1f Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 28 Sep 2017 16:45:13 -0700 Subject: [PATCH 068/342] Fix bug in test_prelu and test_xe They were using float64 for FP32 kernel before. --- python/paddle/v2/framework/tests/test_cross_entropy_op.py | 2 +- python/paddle/v2/framework/tests/test_prelu_op.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/python/paddle/v2/framework/tests/test_cross_entropy_op.py b/python/paddle/v2/framework/tests/test_cross_entropy_op.py index 1de514dff4..4ea14da7fd 100644 --- a/python/paddle/v2/framework/tests/test_cross_entropy_op.py +++ b/python/paddle/v2/framework/tests/test_cross_entropy_op.py @@ -80,7 +80,7 @@ class TestCrossEntropyOp3(OpTest): cross_entropy2 = (-label * np.log(X)).sum( axis=1, keepdims=True).astype("float32") - self.inputs = {"X": X, "Label": label} + self.inputs = {"X": X, "Label": label.astype(np.float32)} self.outputs = {"Y": cross_entropy} self.attrs = {"softLabel": True} diff --git a/python/paddle/v2/framework/tests/test_prelu_op.py b/python/paddle/v2/framework/tests/test_prelu_op.py index 676fd9f7c5..7be932ac8f 100644 --- a/python/paddle/v2/framework/tests/test_prelu_op.py +++ b/python/paddle/v2/framework/tests/test_prelu_op.py @@ -17,7 +17,7 @@ class PReluTest(OpTest): x_np_sign = np.sign(x_np) x_np = x_np_sign * np.maximum(x_np, .005) - alpha_np = np.array([.1]) + alpha_np = np.array([.1], dtype="float32") self.inputs = {'X': x_np, 'Alpha': alpha_np} out_np = np.maximum(self.inputs['X'], 0.) out_np = out_np + np.minimum(self.inputs['X'], From 7a6fcc7d30ab8dd8f452a9974e16798dbbe05dfe Mon Sep 17 00:00:00 2001 From: qijun Date: Thu, 28 Sep 2017 17:39:50 -0700 Subject: [PATCH 069/342] move EigenDeviceConverter to device_context.h --- paddle/framework/operator.cc | 4 ++-- paddle/framework/operator.h | 19 ++----------------- paddle/platform/device_context.cc | 15 ++++++++------- paddle/platform/device_context.h | 19 +++++++++++++++++-- 4 files changed, 29 insertions(+), 28 deletions(-) diff --git a/paddle/framework/operator.cc b/paddle/framework/operator.cc index d7beff5bc1..8b5560ffa1 100644 --- a/paddle/framework/operator.cc +++ b/paddle/framework/operator.cc @@ -22,14 +22,14 @@ namespace framework { template <> Eigen::DefaultDevice& ExecutionContext::GetEigenDevice< platform::CPUPlace, Eigen::DefaultDevice>() const { - return *device_context_.get_eigen_device(); + return *device_context_.GetEigenDevice(); } #ifndef PADDLE_ONLY_CPU template <> Eigen::GpuDevice& ExecutionContext::GetEigenDevice() const { - return *device_context_.get_eigen_device(); + return *device_context_.GetEigenDevice(); } #endif diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h index ba697a43e9..310d68d7c1 100644 --- a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -296,21 +296,6 @@ template <> std::vector InferShapeContext::MultiOutput( const std::string& name) const; -template -struct EigenDeviceConverter; - -template <> -struct EigenDeviceConverter { - using EigenDeviceType = Eigen::DefaultDevice; -}; - -#ifndef PADDLE_ONLY_CPU -template <> -struct EigenDeviceConverter { - using EigenDeviceType = Eigen::GpuDevice; -}; -#endif - class ExecutionContext : public InferShapeContext { public: ExecutionContext(const OperatorBase& op, const Scope& scope, @@ -318,8 +303,8 @@ class ExecutionContext : public InferShapeContext { : InferShapeContext(op, scope), device_context_(device_context) {} template ::EigenDeviceType> + typename DeviceType = typename platform::EigenDeviceConverter< + PlaceType>::EigenDeviceType> DeviceType& GetEigenDevice() const; platform::Place GetPlace() const { return device_context_.GetPlace(); } diff --git a/paddle/platform/device_context.cc b/paddle/platform/device_context.cc index 93b472b41c..36af1ac677 100644 --- a/paddle/platform/device_context.cc +++ b/paddle/platform/device_context.cc @@ -16,8 +16,8 @@ namespace paddle { namespace platform { template <> -Eigen::DefaultDevice* DeviceContext::get_eigen_device() - const { +Eigen::DefaultDevice* DeviceContext::GetEigenDevice< + platform::CPUPlace, Eigen::DefaultDevice>() const { return reinterpret_cast(this)->eigen_device(); } @@ -37,6 +37,12 @@ Place CPUDeviceContext::GetPlace() const { return CPUPlace(); } #ifndef PADDLE_ONLY_CPU +template <> +Eigen::GpuDevice* +DeviceContext::GetEigenDevice() const { + return reinterpret_cast(this)->eigen_device(); +} + class EigenCudaStreamDevice : public Eigen::StreamInterface { public: EigenCudaStreamDevice() : scratch_(nullptr), semaphore_(nullptr) { @@ -90,11 +96,6 @@ class EigenCudaStreamDevice : public Eigen::StreamInterface { mutable unsigned int* semaphore_; }; -template <> -Eigen::GpuDevice* DeviceContext::get_eigen_device() const { - return reinterpret_cast(this)->eigen_device(); -} - CUDADeviceContext::CUDADeviceContext(GPUPlace place) : place_(place) { SetDeviceId(place_.device); PADDLE_ENFORCE(cudaStreamCreate(&stream_)); diff --git a/paddle/platform/device_context.h b/paddle/platform/device_context.h index f6a39a8e26..d805d2ab08 100644 --- a/paddle/platform/device_context.h +++ b/paddle/platform/device_context.h @@ -27,13 +27,23 @@ limitations under the License. */ namespace paddle { namespace platform { +template +struct EigenDeviceConverter; + +template <> +struct EigenDeviceConverter { + using EigenDeviceType = Eigen::DefaultDevice; +}; + class DeviceContext { public: virtual ~DeviceContext() {} virtual Place GetPlace() const = 0; - template - DeviceType* get_eigen_device() const; + template ::EigenDeviceType> + DeviceType* GetEigenDevice() const; virtual void Wait() const {} }; @@ -52,6 +62,11 @@ class CPUDeviceContext : public DeviceContext { }; #ifndef PADDLE_ONLY_CPU +template <> +struct EigenDeviceConverter { + using EigenDeviceType = Eigen::GpuDevice; +}; + class EigenCudaStreamDevice; class CUDADeviceContext : public DeviceContext { From c634a8480addf2e3cbbd271853f4c8aa4b10832b Mon Sep 17 00:00:00 2001 From: qijun Date: Thu, 28 Sep 2017 17:53:50 -0700 Subject: [PATCH 070/342] add SetConstant method in math_function.h --- paddle/operators/math/CMakeLists.txt | 3 ++- paddle/operators/math/math_function.h | 8 ++++++++ paddle/operators/math/math_function_test.cc | 21 +++++++++++++++++++++ 3 files changed, 31 insertions(+), 1 deletion(-) diff --git a/paddle/operators/math/CMakeLists.txt b/paddle/operators/math/CMakeLists.txt index 91ae3d49f1..6bea9817f1 100644 --- a/paddle/operators/math/CMakeLists.txt +++ b/paddle/operators/math/CMakeLists.txt @@ -1,6 +1,7 @@ if(WITH_GPU) nv_library(math_function SRCS math_function.cc math_function.cu im2col.cc im2col.cu DEPS cblas device_context operator) + nv_test(math_function_test SRCS math_function_test.cc DEPS math_function tensor) nv_library(softmax_function SRCS softmax.cc softmax.cu DEPS operator) nv_library(cross_entropy_function SRCS cross_entropy.cc cross_entropy.cu @@ -8,9 +9,9 @@ if(WITH_GPU) else() cc_library(math_function SRCS math_function.cc im2col.cc DEPS cblas device_context operator) + cc_test(math_function_test SRCS math_function_test.cc DEPS math_function tensor) cc_library(softmax_function SRCS softmax.cc DEPS operator) cc_library(cross_entropy_function SRCS cross_entropy.cc DEPS operator) endif() -nv_test(math_function_test SRCS math_function_test.cc DEPS math_function tensor) cc_test(im2col_test SRCS im2col_test.cc DEPS math_function tensor) diff --git a/paddle/operators/math/math_function.h b/paddle/operators/math/math_function.h index 43306fca73..473eff4d19 100644 --- a/paddle/operators/math/math_function.h +++ b/paddle/operators/math/math_function.h @@ -52,6 +52,7 @@ int LAPACKE_dgetri(int matrix_layout, int n, double* a, int lda, #include +#include "paddle/framework/eigen.h" #include "paddle/framework/tensor.h" #include "paddle/platform/device_context.h" #include "paddle/platform/enforce.h" @@ -84,6 +85,13 @@ void matmul(const platform::DeviceContext& context, const framework::Tensor& matrix_b, bool trans_b, T alpha, framework::Tensor* matrix_out, T beta); +template +void SetConstant(const platform::DeviceContext& context, + framework::Tensor* tensor, T num) { + auto t = framework::EigenVector::Flatten(*tensor); + t.device(*context.GetEigenDevice()) = t.constant(static_cast(num)); +} + } // namespace math } // namespace operators } // namespace paddle diff --git a/paddle/operators/math/math_function_test.cc b/paddle/operators/math/math_function_test.cc index f272f7e513..22468a0c4a 100644 --- a/paddle/operators/math/math_function_test.cc +++ b/paddle/operators/math/math_function_test.cc @@ -243,3 +243,24 @@ TEST(math_function, gemm_trans_clbas) { EXPECT_EQ(input3_ptr[6], 86); EXPECT_EQ(input3_ptr[7], 99); } + +TEST(math_function, zero) { + paddle::framework::Tensor tensor; + auto* cpu_place = new paddle::platform::CPUPlace(); + float* t = tensor.mutable_data({2, 2}, *cpu_place); + paddle::platform::CPUDeviceContext context(*cpu_place); + paddle::operators::math::SetConstant( + context, &tensor, 0); + EXPECT_EQ(t[0], 0); + EXPECT_EQ(t[1], 0); + EXPECT_EQ(t[2], 0); + EXPECT_EQ(t[3], 0); + + paddle::operators::math::SetConstant( + context, &tensor, 1); + + EXPECT_EQ(t[0], 1); + EXPECT_EQ(t[1], 1); + EXPECT_EQ(t[2], 1); + EXPECT_EQ(t[3], 1); +} From 79def5e6347228773a9d77966108653fb7a16c60 Mon Sep 17 00:00:00 2001 From: qijun Date: Thu, 28 Sep 2017 18:03:36 -0700 Subject: [PATCH 071/342] refine CrossEntropyFunctor --- paddle/operators/cross_entropy_op.cu | 14 +------------ paddle/operators/cross_entropy_op.h | 6 +++--- paddle/operators/math/cross_entropy.cc | 6 +++--- paddle/operators/math/cross_entropy.cu | 20 +++++++++---------- paddle/operators/math/cross_entropy.h | 4 +--- .../operators/softmax_with_cross_entropy_op.h | 3 ++- 6 files changed, 19 insertions(+), 34 deletions(-) diff --git a/paddle/operators/cross_entropy_op.cu b/paddle/operators/cross_entropy_op.cu index 76d63f77ad..04ae66de91 100644 --- a/paddle/operators/cross_entropy_op.cu +++ b/paddle/operators/cross_entropy_op.cu @@ -18,14 +18,6 @@ namespace paddle { namespace operators { namespace { -// TODO(qingqing): make zero setting a common function. -template -__global__ void Zero(T* X, const int N) { - for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; - i += blockDim.x * gridDim.x) { - X[i] = 0.0; - } -} template __global__ void CrossEntropyGradientKernel(T* dX, const T* dY, const T* X, @@ -99,11 +91,7 @@ class CrossEntropyGradientOpCUDAKernel : public framework::OpKernel { .stream()>>>(dx_data, dy_data, x_data, label_data, batch_size, class_num); } else { - Zero<<( - ctx.device_context()) - .stream()>>>(dx_data, batch_size * class_num); - + math::SetConstant(ctx.device_context(), dx, 0); auto* label_data = label->data(); grid = (batch_size + block - 1) / block; CrossEntropyGradientKernel<<< diff --git a/paddle/operators/cross_entropy_op.h b/paddle/operators/cross_entropy_op.h index fa81d3b431..d2d321aa7e 100644 --- a/paddle/operators/cross_entropy_op.h +++ b/paddle/operators/cross_entropy_op.h @@ -16,6 +16,7 @@ limitations under the License. */ #include "paddle/framework/eigen.h" #include "paddle/framework/op_registry.h" #include "paddle/operators/math/cross_entropy.h" +#include "paddle/operators/math/math_function.h" namespace paddle { namespace operators { @@ -37,7 +38,7 @@ class CrossEntropyOpKernel : public framework::OpKernel { y->mutable_data(ctx.GetPlace()); math::CrossEntropyFunctor()( - ctx, y, x, labels, ctx.Attr("softLabel")); + ctx.device_context(), y, x, labels, ctx.Attr("softLabel")); } }; @@ -69,8 +70,7 @@ class CrossEntropyGradientOpKernel : public framework::OpKernel { const T* x_data = x->data(); const int* label_data = label->data(); - // TODO(qingqing): make zero setting a common function. - memset(dx_data, 0, sizeof(T) * batch_size * class_num); + math::SetConstant(ctx.device_context(), dx, 0); for (int i = 0; i < batch_size; ++i) { PADDLE_ASSERT(label_data[i] >= 0 || label_data[i] < class_num); diff --git a/paddle/operators/math/cross_entropy.cc b/paddle/operators/math/cross_entropy.cc index a5a426bc7b..150a65f275 100644 --- a/paddle/operators/math/cross_entropy.cc +++ b/paddle/operators/math/cross_entropy.cc @@ -26,8 +26,8 @@ using EigenMatrix = framework::EigenMatrix; template class CrossEntropyFunctor { public: - void operator()(const framework::ExecutionContext& ctx, - framework::Tensor* out, const framework::Tensor* prob, + void operator()(const platform::DeviceContext& ctx, framework::Tensor* out, + const framework::Tensor* prob, const framework::Tensor* labels, const bool softLabel) { const int batch_size = prob->dims()[0]; if (softLabel) { @@ -35,7 +35,7 @@ class CrossEntropyFunctor { auto lbl = EigenMatrix::From(*labels); auto loss = EigenMatrix::From(*out); - loss.device(ctx.GetEigenDevice()) = + loss.device(*ctx.GetEigenDevice()) = -((lbl * in.log().unaryExpr(math::TolerableValue())) .sum(Eigen::DSizes(1)) .reshape(Eigen::DSizes(batch_size, 1))); diff --git a/paddle/operators/math/cross_entropy.cu b/paddle/operators/math/cross_entropy.cu index d14a75a30c..2c589521c1 100644 --- a/paddle/operators/math/cross_entropy.cu +++ b/paddle/operators/math/cross_entropy.cu @@ -74,8 +74,8 @@ using Tensor = framework::Tensor; template class CrossEntropyFunctor { public: - void operator()(const framework::ExecutionContext& ctx, - framework::Tensor* out, const framework::Tensor* prob, + void operator()(const framework::DeviceContext& ctx, framework::Tensor* out, + const framework::Tensor* prob, const framework::Tensor* labels, bool softLabel) { const T* prob_data = prob->data(); T* loss_data = out->mutable_data(ctx.GetPlace()); @@ -87,20 +87,18 @@ class CrossEntropyFunctor { const T* label_data = labels->data(); int block = class_num > 512 ? 512 : pow(2, int(std::log2(class_num))); - SoftCrossEntropyKernel< - T><<( - ctx.device_context()) - .stream()>>>(loss_data, prob_data, label_data, class_num); + SoftCrossEntropyKernel<<< + batch_size, block, block * sizeof(T), + reinterpret_cast(ctx).stream()>>>( + loss_data, prob_data, label_data, class_num); } else { const int* label_data = labels->data(); int block = 512; int grid = (batch_size + block - 1) / block; CrossEntropyKernel<<< - grid, block, 0, reinterpret_cast( - ctx.device_context()) - .stream()>>>(loss_data, prob_data, label_data, - batch_size, class_num); + grid, block, 0, + reinterpret_cast(ctx).stream()>>>( + loss_data, prob_data, label_data, batch_size, class_num); } } }; diff --git a/paddle/operators/math/cross_entropy.h b/paddle/operators/math/cross_entropy.h index 18e637cf91..0ab6827ffa 100644 --- a/paddle/operators/math/cross_entropy.h +++ b/paddle/operators/math/cross_entropy.h @@ -37,9 +37,7 @@ struct TolerableValue { template class CrossEntropyFunctor { public: - // (TODO caoying) it is much better to use DeviceContext as the first - // parameter. - void operator()(const framework::ExecutionContext& context, + void operator()(const platform::DeviceContext& context, framework::Tensor* out, const framework::Tensor* prob, const framework::Tensor* labels, const bool softLabel); }; diff --git a/paddle/operators/softmax_with_cross_entropy_op.h b/paddle/operators/softmax_with_cross_entropy_op.h index a8b18504e1..7dcb6ad9b4 100644 --- a/paddle/operators/softmax_with_cross_entropy_op.h +++ b/paddle/operators/softmax_with_cross_entropy_op.h @@ -42,7 +42,8 @@ class SoftmaxWithCrossEntropyKernel : public framework::OpKernel { math::SoftmaxFunctor()(context, logits, softmax); math::CrossEntropyFunctor()( - context, loss, softmax, labels, context.Attr("softLabel")); + context.device_context(), loss, softmax, labels, + context.Attr("softLabel")); } }; From 0c3eee09ff7c00c3c279b21a20278c154f045923 Mon Sep 17 00:00:00 2001 From: Abhinav Arora Date: Thu, 28 Sep 2017 18:15:16 -0700 Subject: [PATCH 072/342] Implementing the SoftSign activation operator --- paddle/operators/activation_op.cc | 20 +++++++++++++++++++ paddle/operators/activation_op.cu | 7 +++++++ paddle/operators/activation_op.h | 20 +++++++++++++++++++ .../v2/framework/tests/test_activation_op.py | 17 ++++++++++++++++ 4 files changed, 64 insertions(+) diff --git a/paddle/operators/activation_op.cc b/paddle/operators/activation_op.cc index f77e1c572e..1e1d3cf7f7 100644 --- a/paddle/operators/activation_op.cc +++ b/paddle/operators/activation_op.cc @@ -132,6 +132,17 @@ class SquareOpMaker : public framework::OpProtoAndCheckerMaker { } }; +class SoftsignOpMaker : public framework::OpProtoAndCheckerMaker { + public: + SoftsignOpMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "Input of Softsign operator"); + AddOutput("Y", "Output of Softsign operator"); + AddComment("Softsign activation operator, softsign(x) = x / (1 + |x|)"); + } +}; + template class BReluOpMaker : public framework::OpProtoAndCheckerMaker { public: @@ -277,6 +288,15 @@ REGISTER_OP_CPU_KERNEL( square_grad, ops::ActivationGradKernel>); +REGISTER_OP(softsign, ops::ActivationOp, ops::SoftsignOpMaker, softsign_grad, + ops::ActivationOpGrad); +REGISTER_OP_CPU_KERNEL(softsign, + ops::ActivationKernel>); +REGISTER_OP_CPU_KERNEL( + softsign_grad, ops::ActivationGradKernel>); + REGISTER_OP(brelu, ops::ActivationOp, ops::BReluOpMaker, brelu_grad, ops::ActivationOpGrad); REGISTER_OP_CPU_KERNEL(brelu, diff --git a/paddle/operators/activation_op.cu b/paddle/operators/activation_op.cu index feed1302b2..56886d8b1b 100644 --- a/paddle/operators/activation_op.cu +++ b/paddle/operators/activation_op.cu @@ -80,6 +80,13 @@ REGISTER_OP_GPU_KERNEL( square_grad, ops::ActivationGradKernel>); +REGISTER_OP_GPU_KERNEL(softsign, + ops::ActivationKernel>); +REGISTER_OP_GPU_KERNEL( + softsign_grad, ops::ActivationGradKernel>); + REGISTER_OP_GPU_KERNEL(brelu, ops::BReluKernel); REGISTER_OP_GPU_KERNEL(brelu_grad, diff --git a/paddle/operators/activation_op.h b/paddle/operators/activation_op.h index e400992ae2..b9f52e1af3 100644 --- a/paddle/operators/activation_op.h +++ b/paddle/operators/activation_op.h @@ -201,6 +201,26 @@ struct SquareGradFunctor { } }; +// softsign(x) = x / (1 + |x|) +template +struct SoftsignFunctor { + template + void operator()(Device d, X x, Y y) { + y.device(d) = x / (static_cast(1) + x.abs()); + } +}; + +// d(softsign(x))/dx = 1 / (1 + |x|)^2 +// Taken from https://en.wikipedia.org/wiki/Activation_function +template +struct SoftsignGradFunctor { + template + void operator()(Device d, X x, Y y, dY dy, dX dx) { + dx.device(d) = + dy * (static_cast(1) / (static_cast(1) + x.abs()).square()); + } +}; + template class BReluKernel : public framework::OpKernel { public: diff --git a/python/paddle/v2/framework/tests/test_activation_op.py b/python/paddle/v2/framework/tests/test_activation_op.py index 8f6d2be177..c44eb84906 100644 --- a/python/paddle/v2/framework/tests/test_activation_op.py +++ b/python/paddle/v2/framework/tests/test_activation_op.py @@ -219,5 +219,22 @@ class TestSTanh(OpTest): self.check_grad(['X'], 'Y', max_relative_error=0.007) +class TestSoftsign(OpTest): + def setUp(self): + self.op_type = "softsign" + self.inputs = { + 'X': np.random.uniform(-1, 1, [11, 17]).astype("float32") + } + self.outputs = { + 'Y': np.divide(self.inputs['X'], 1 + np.abs(self.inputs['X'])) + } + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['X'], 'Y', max_relative_error=0.007) + + if __name__ == "__main__": unittest.main() From f60f0eae11f9712f2f7955bb351f82dc2c2c412c Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 28 Sep 2017 20:00:23 -0700 Subject: [PATCH 073/342] Using double precision to stablize lstm gradient check --- paddle/operators/lstm_unit_op.cc | 13 +++++++------ paddle/operators/lstm_unit_op.cu | 14 ++++++++------ paddle/operators/lstm_unit_op.h | 8 ++++---- .../paddle/v2/framework/tests/test_lstm_unit_op.py | 6 +++--- 4 files changed, 22 insertions(+), 19 deletions(-) diff --git a/paddle/operators/lstm_unit_op.cc b/paddle/operators/lstm_unit_op.cc index bd75b001cb..dad56731de 100644 --- a/paddle/operators/lstm_unit_op.cc +++ b/paddle/operators/lstm_unit_op.cc @@ -47,7 +47,6 @@ class LstmUnitOp : public framework::OperatorWithKernel { } }; -template class LstmUnitOpMaker : public framework::OpProtoAndCheckerMaker { public: LstmUnitOpMaker(framework::OpProto* proto, @@ -68,7 +67,7 @@ Equation: H = C * sigm(o) )DOC"); - AddAttr("forget_bias", "The forget bias of Lstm Unit.") + AddAttr("forget_bias", "The forget bias of Lstm Unit.") .SetDefault(0.0); } }; @@ -93,9 +92,11 @@ class LstmUnitGradOp : public framework::OperatorWithKernel { } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP(lstm_unit, ops::LstmUnitOp, ops::LstmUnitOpMaker, - lstm_unit_grad, ops::LstmUnitGradOp); +REGISTER_OP(lstm_unit, ops::LstmUnitOp, ops::LstmUnitOpMaker, lstm_unit_grad, + ops::LstmUnitGradOp); REGISTER_OP_CPU_KERNEL(lstm_unit, - ops::LstmUnitKernel); + ops::LstmUnitKernel, + ops::LstmUnitKernel); REGISTER_OP_CPU_KERNEL( - lstm_unit_grad, ops::LstmUnitGradKernel); + lstm_unit_grad, ops::LstmUnitGradKernel, + ops::LstmUnitGradKernel); diff --git a/paddle/operators/lstm_unit_op.cu b/paddle/operators/lstm_unit_op.cu index b1db0d5322..49ea550b6f 100644 --- a/paddle/operators/lstm_unit_op.cu +++ b/paddle/operators/lstm_unit_op.cu @@ -89,7 +89,7 @@ __global__ void LSTMUnitGradientKernel(const int nthreads, const int dim, } } -template +template class LstmUnitOpCUDAKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { @@ -101,7 +101,7 @@ class LstmUnitOpCUDAKernel : public framework::OpKernel { auto* c_tensor = ctx.Output("C"); auto* h_tensor = ctx.Output("H"); - auto forget_bias = static_cast(ctx.Attr("forget_bias")); + auto forget_bias = static_cast(ctx.Attr("forget_bias")); int b_size = c_tensor->dims()[0]; int D = c_tensor->dims()[1]; @@ -120,7 +120,7 @@ class LstmUnitOpCUDAKernel : public framework::OpKernel { } }; -template +template class LstmUnitGradOpCUDAKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { @@ -153,7 +153,7 @@ class LstmUnitGradOpCUDAKernel : public framework::OpKernel { int N = c_tensor->dims()[0]; int D = c_tensor->dims()[1]; - auto forget_bias = static_cast(ctx.Attr("forget_bias")); + auto forget_bias = static_cast(ctx.Attr("forget_bias")); int block = 512; int n = N * D; @@ -169,5 +169,7 @@ class LstmUnitGradOpCUDAKernel : public framework::OpKernel { } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL(lstm_unit, ops::LstmUnitOpCUDAKernel); -REGISTER_OP_GPU_KERNEL(lstm_unit_grad, ops::LstmUnitGradOpCUDAKernel); +REGISTER_OP_GPU_KERNEL(lstm_unit, ops::LstmUnitOpCUDAKernel, + ops::LstmUnitOpCUDAKernel); +REGISTER_OP_GPU_KERNEL(lstm_unit_grad, ops::LstmUnitGradOpCUDAKernel, + ops::LstmUnitGradOpCUDAKernel); diff --git a/paddle/operators/lstm_unit_op.h b/paddle/operators/lstm_unit_op.h index 0dc9a7d9a7..a0ff498c1d 100644 --- a/paddle/operators/lstm_unit_op.h +++ b/paddle/operators/lstm_unit_op.h @@ -32,7 +32,7 @@ inline T tanh(T x) { return 2. * sigmoid(2. * x) - 1.; } -template +template class LstmUnitKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { @@ -44,7 +44,7 @@ class LstmUnitKernel : public framework::OpKernel { auto* c_tensor = ctx.Output("C"); auto* h_tensor = ctx.Output("H"); - auto forget_bias = static_cast(ctx.Attr("forget_bias")); + auto forget_bias = static_cast(ctx.Attr("forget_bias")); int b_size = c_tensor->dims()[0]; int D = c_tensor->dims()[1]; @@ -75,7 +75,7 @@ class LstmUnitKernel : public framework::OpKernel { } }; -template +template class LstmUnitGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { @@ -108,7 +108,7 @@ class LstmUnitGradKernel : public framework::OpKernel { int N = c_tensor->dims()[0]; int D = c_tensor->dims()[1]; - auto forget_bias = static_cast(ctx.Attr("forget_bias")); + auto forget_bias = static_cast(ctx.Attr("forget_bias")); for (int n = 0; n < N; ++n) { for (int d = 0; d < D; ++d) { diff --git a/python/paddle/v2/framework/tests/test_lstm_unit_op.py b/python/paddle/v2/framework/tests/test_lstm_unit_op.py index 8ce65bfc31..365ee560e1 100644 --- a/python/paddle/v2/framework/tests/test_lstm_unit_op.py +++ b/python/paddle/v2/framework/tests/test_lstm_unit_op.py @@ -14,8 +14,8 @@ def tanh_np(x): class LstmUnitTest(OpTest): def setUp(self): self.op_type = "lstm_unit" - x_np = np.random.normal(size=(5, 16)).astype("float32") - c_np = np.random.normal(size=(5, 4)).astype("float32") + x_np = np.random.normal(size=(5, 16)).astype("float64") + c_np = np.random.normal(size=(5, 4)).astype("float64") i_np, f_np, o_np, j_np = np.split(x_np, 4, axis=1) forget_bias_np = 0. self.attrs = {'forget_bias': 0.} @@ -31,7 +31,7 @@ class LstmUnitTest(OpTest): self.check_output() def test_check_grad(self): - self.check_grad(['X', 'C_prev'], ['C', 'H'], max_relative_error=0.01) + self.check_grad(['X', 'C_prev'], ['C', 'H']) if __name__ == "__main__": From 84ff7e97842890e70f1baf6bf41ef54513d1a4a3 Mon Sep 17 00:00:00 2001 From: qijun Date: Thu, 28 Sep 2017 20:05:15 -0700 Subject: [PATCH 074/342] refine SoftmaxFunctor --- paddle/operators/math/softmax.h | 6 +++--- paddle/operators/softmax_op.h | 2 +- paddle/operators/softmax_with_cross_entropy_op.cu | 3 ++- paddle/operators/softmax_with_cross_entropy_op.h | 3 ++- 4 files changed, 8 insertions(+), 6 deletions(-) diff --git a/paddle/operators/math/softmax.h b/paddle/operators/math/softmax.h index 3d2f0d0aec..225323f05a 100644 --- a/paddle/operators/math/softmax.h +++ b/paddle/operators/math/softmax.h @@ -36,7 +36,7 @@ struct ValueClip { template class SoftmaxFunctor { public: - void operator()(const framework::ExecutionContext& context, + void operator()(const platform::DeviceContext& context, const framework::Tensor* X, framework::Tensor* Y) { auto logits = EigenMatrix::From(*X); auto softmax = EigenMatrix::From(*Y); @@ -58,8 +58,8 @@ class SoftmaxFunctor { .broadcast(one_by_class)) .unaryExpr(ValueClip()); - softmax.device(context.GetEigenDevice()) = shifted_logits.exp(); - softmax.device(context.GetEigenDevice()) = + softmax.device(*context.GetEigenDevice()) = shifted_logits.exp(); + softmax.device(*context.GetEigenDevice()) = (softmax * softmax.sum(along_class) .inverse() diff --git a/paddle/operators/softmax_op.h b/paddle/operators/softmax_op.h index 9996536454..8fdda8b1df 100644 --- a/paddle/operators/softmax_op.h +++ b/paddle/operators/softmax_op.h @@ -35,7 +35,7 @@ class SoftmaxKernel : public framework::OpKernel { // allocate memory on device. Y->mutable_data(context.GetPlace()); - math::SoftmaxFunctor()(context, X, Y); + math::SoftmaxFunctor()(context.device_context(), X, Y); } }; diff --git a/paddle/operators/softmax_with_cross_entropy_op.cu b/paddle/operators/softmax_with_cross_entropy_op.cu index c3086e729e..b5a7cda734 100644 --- a/paddle/operators/softmax_with_cross_entropy_op.cu +++ b/paddle/operators/softmax_with_cross_entropy_op.cu @@ -66,7 +66,8 @@ class SoftmaxWithCrossEntropyCUDAKernel : public framework::OpKernel { softmax->mutable_data(context.GetPlace()); loss->mutable_data(context.GetPlace()); - math::SoftmaxFunctor()(context, logits, softmax); + math::SoftmaxFunctor()(context.device_context(), + logits, softmax); math::CrossEntropyFunctor()( context, loss, softmax, labels, context.Attr("softLabel")); } diff --git a/paddle/operators/softmax_with_cross_entropy_op.h b/paddle/operators/softmax_with_cross_entropy_op.h index 7dcb6ad9b4..cffd422f18 100644 --- a/paddle/operators/softmax_with_cross_entropy_op.h +++ b/paddle/operators/softmax_with_cross_entropy_op.h @@ -40,7 +40,8 @@ class SoftmaxWithCrossEntropyKernel : public framework::OpKernel { softmax->mutable_data(context.GetPlace()); loss->mutable_data(context.GetPlace()); - math::SoftmaxFunctor()(context, logits, softmax); + math::SoftmaxFunctor()(context.device_context(), + logits, softmax); math::CrossEntropyFunctor()( context.device_context(), loss, softmax, labels, context.Attr("softLabel")); From e1e3859e8817adfa733a7f963963c5fe1d792b16 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Fri, 29 Sep 2017 10:15:56 +0800 Subject: [PATCH 075/342] remove custom attr checker and fix code format --- paddle/operators/math/pooling.cc | 66 ++++++++-------- paddle/operators/math/pooling.cu | 132 ++++++++++++++++--------------- paddle/operators/math/pooling.h | 11 ++- paddle/operators/pool_op.cc | 124 ++++++++--------------------- paddle/operators/pool_op.h | 30 ++++--- 5 files changed, 153 insertions(+), 210 deletions(-) diff --git a/paddle/operators/math/pooling.cc b/paddle/operators/math/pooling.cc index 1918c8b169..3b706529d8 100644 --- a/paddle/operators/math/pooling.cc +++ b/paddle/operators/math/pooling.cc @@ -24,7 +24,7 @@ class Pool2dFunctor { void operator()(const platform::DeviceContext& context, const framework::Tensor& input, framework::Tensor& output, std::vector& ksize, std::vector& strides, - std::vector& paddings, PoolProcess pool_compute) { + std::vector& paddings, PoolProcess pool_process) { const int batch_size = input.dims()[0]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; @@ -54,14 +54,15 @@ class Pool2dFunctor { int wstart = pw * stride_width - padding_width; int wend = std::min(wstart + ksize_width, input_width); wstart = std::max(wstart, 0); - T ele = pool_compute.initial(); + + T ele = pool_process.initial(); for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { - pool_compute.compute(ele, input_data[h * input_width + w]); + pool_process.compute(ele, input_data[h * input_width + w]); } } int pool_size = (hend - hstart) * (wend - wstart); - pool_compute.finalize(ele, (static_cast(pool_size))); + pool_process.finalize(ele, (static_cast(pool_size))); output_data[ph * output_width + pw] = ele; } } @@ -80,7 +81,7 @@ class Pool2dGradFunctor { const framework::Tensor& output, const framework::Tensor& output_grad, std::vector& ksize, std::vector& strides, std::vector& paddings, - PoolProcess pool_compute) { + PoolProcess pool_grad_process) { const int batch_size = input.dims()[0]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; @@ -115,11 +116,12 @@ class Pool2dGradFunctor { float scale = 1.0 / pool_size; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { - pool_compute.compute(input_data[h * input_width + w], - output_data[ph * output_width + pw], - output_grad_data[ph * output_width + pw], - input_grad_data[h * input_width + w], - static_cast(scale)); + pool_grad_process.compute( + input_data[h * input_width + w], + output_data[ph * output_width + pw], + output_grad_data[ph * output_width + pw], + input_grad_data[h * input_width + w], + static_cast(scale)); } } } @@ -198,21 +200,21 @@ template class MaxPool2dGradFunctor; // template class MaxPool2dGradFunctor; template class Pool2dFunctor, float>; + paddle::operators::math::MaxPool, float>; template class Pool2dFunctor, float>; + paddle::operators::math::AvgPool, float>; template class Pool2dGradFunctor< - platform::CPUPlace, paddle::operators::math::maxPoolGrad, float>; + platform::CPUPlace, paddle::operators::math::MaxPoolGrad, float>; template class Pool2dGradFunctor< - platform::CPUPlace, paddle::operators::math::avgPoolGrad, float>; + platform::CPUPlace, paddle::operators::math::AvgPoolGrad, float>; template class Pool2dFunctor, double>; + paddle::operators::math::MaxPool, double>; template class Pool2dFunctor, double>; + paddle::operators::math::AvgPool, double>; template class Pool2dGradFunctor< - platform::CPUPlace, paddle::operators::math::maxPoolGrad, double>; + platform::CPUPlace, paddle::operators::math::MaxPoolGrad, double>; template class Pool2dGradFunctor< - platform::CPUPlace, paddle::operators::math::avgPoolGrad, double>; + platform::CPUPlace, paddle::operators::math::AvgPoolGrad, double>; template class Pool3dFunctor { @@ -220,7 +222,7 @@ class Pool3dFunctor { void operator()(const platform::DeviceContext& context, const framework::Tensor& input, framework::Tensor& output, std::vector& ksize, std::vector& strides, - std::vector& paddings, PoolProcess pool_compute) { + std::vector& paddings, PoolProcess pool_process) { const int batch_size = input.dims()[0]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; @@ -260,11 +262,11 @@ class Pool3dFunctor { int wend = std::min(wstart + ksize_width, input_width); wstart = std::max(wstart, 0); int output_idx = (pd * output_height + ph) * output_width + pw; - T ele = pool_compute.initial(); + T ele = pool_process.initial(); for (int d = dstart; d < dend; ++d) { for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { - pool_compute.compute( + pool_process.compute( ele, input_data[(d * input_height + h) * input_width + w]); } @@ -272,7 +274,7 @@ class Pool3dFunctor { } int pool_size = (dend - dstart) * (hend - hstart) * (wend - wstart); - pool_compute.finalize(ele, static_cast(pool_size)); + pool_process.finalize(ele, static_cast(pool_size)); output_data[output_idx] = ele; } } @@ -292,7 +294,7 @@ class Pool3dGradFunctor { const framework::Tensor& output, const framework::Tensor& output_grad, std::vector& ksize, std::vector& strides, std::vector& paddings, - PoolProcess pool_compute) { + PoolProcess pool_grad_process) { const int batch_size = input.dims()[0]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; @@ -343,7 +345,7 @@ class Pool3dGradFunctor { int input_idx = (d * input_height + h) * input_width + w; int output_idx = (pd * output_height + ph) * output_width + pw; - pool_compute.compute( + pool_grad_process.compute( input_data[input_idx], output_data[output_idx], output_grad_data[output_idx], input_grad_data[input_idx], static_cast(scale)); @@ -441,21 +443,21 @@ template class MaxPool3dGradFunctor; // template class MaxPool3dGradFunctor; template class Pool3dFunctor, float>; + paddle::operators::math::MaxPool, float>; template class Pool3dFunctor, float>; + paddle::operators::math::AvgPool, float>; template class Pool3dGradFunctor< - platform::CPUPlace, paddle::operators::math::maxPoolGrad, float>; + platform::CPUPlace, paddle::operators::math::MaxPoolGrad, float>; template class Pool3dGradFunctor< - platform::CPUPlace, paddle::operators::math::avgPoolGrad, float>; + platform::CPUPlace, paddle::operators::math::AvgPoolGrad, float>; template class Pool3dFunctor, double>; + paddle::operators::math::MaxPool, double>; template class Pool3dFunctor, double>; + paddle::operators::math::AvgPool, double>; template class Pool3dGradFunctor< - platform::CPUPlace, paddle::operators::math::maxPoolGrad, double>; + platform::CPUPlace, paddle::operators::math::MaxPoolGrad, double>; template class Pool3dGradFunctor< - platform::CPUPlace, paddle::operators::math::avgPoolGrad, double>; + platform::CPUPlace, paddle::operators::math::AvgPoolGrad, double>; } // namespace math } // namespace operators } // namespace paddle diff --git a/paddle/operators/math/pooling.cu b/paddle/operators/math/pooling.cu index 4164920035..8aeccd1f8e 100644 --- a/paddle/operators/math/pooling.cu +++ b/paddle/operators/math/pooling.cu @@ -20,14 +20,16 @@ namespace operators { namespace math { template -__global__ void KernelPool2dForward( - const int nthreads, const T* input_data, T* output_data, const int channels, - const int input_height, const int input_width, const int output_height, - const int output_width, const int ksize_height, const int ksize_width, - const int stride_height, const int stride_width, const int padding_height, - const int padding_width, PoolProcess pool_compute) { - int index = blockIdx.x * blockDim.x + threadIdx.x; - if (index < nthreads) { +__global__ void KernelPool2D(const int nthreads, const T* input_data, + T* output_data, const int channels, + const int input_height, const int input_width, + const int output_height, const int output_width, + const int ksize_height, const int ksize_width, + const int stride_height, const int stride_width, + const int padding_height, const int padding_width, + PoolProcess pool_process) { + for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; + index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % output_height; int c = (index / output_width / output_height) % channels; @@ -42,28 +44,28 @@ __global__ void KernelPool2dForward( wstart = max(wstart, 0); input_data += (batch_idx * channels + c) * input_height * input_width; - T ele = pool_compute.initial(); + T ele = pool_process.initial(); for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { - pool_compute.compute(ele, input_data[h * input_width + w]); + pool_process.compute(ele, input_data[h * input_width + w]); } } int pool_size = (hend - hstart) * (wend - wstart); - pool_compute.finalize(ele, (static_cast(pool_size))); + pool_process.finalize(ele, (static_cast(pool_size))); output_data[index] = ele; } } template -__global__ void KernelPool2dBackward( +__global__ void KernelPool2DGrad( const int nthreads, const T* input_data, const T* output_data, const T* output_grad, T* input_grad, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, - const int padding_width, PoolProcess pool_compute) { - int index = blockIdx.x * blockDim.x + threadIdx.x; - if (index < nthreads) { + const int padding_width, PoolProcess pool_process) { + for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; + index += blockDim.x * gridDim.x) { int offsetW = index % input_width + padding_width; int offsetH = (index / input_width) % input_height + padding_height; int offsetC = (index / input_width / input_height) % channels; @@ -93,7 +95,7 @@ __global__ void KernelPool2dBackward( wstart = max(wstart, 0); int pool_size = (hend - hstart) * (wend - wstart); int output_sub_idx = ph * output_width + pw; - pool_compute.compute(input, output_data[output_sub_idx], + pool_process.compute(input, output_data[output_sub_idx], output_grad[output_sub_idx], gradient, static_cast(1.0 / pool_size)); } @@ -103,15 +105,15 @@ __global__ void KernelPool2dBackward( } template -__global__ void KernelMaxPool2dBackward( +__global__ void KernelMaxPool2DGrad( const int nthreads, const T* input_data, const T* output_data, const T* output_grad, T* input_grad, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width) { - int index = blockIdx.x * blockDim.x + threadIdx.x; - if (index < nthreads) { + for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; + index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % output_height; int c = (index / output_width / output_height) % channels; @@ -153,7 +155,7 @@ class Pool2dFunctor { void operator()(const platform::DeviceContext& context, const framework::Tensor& input, framework::Tensor& output, std::vector& ksize, std::vector& strides, - std::vector& paddings, PoolProcess pool_compute) { + std::vector& paddings, PoolProcess pool_process) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; @@ -176,7 +178,7 @@ class Pool2dFunctor { dim3 threads(1024, 1); dim3 grid(blocks, 1); - KernelPool2dForward< + KernelPool2D< PoolProcess, T><<(context) @@ -184,7 +186,7 @@ class Pool2dFunctor { input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, - padding_width, pool_compute); + padding_width, pool_process); } }; @@ -196,7 +198,7 @@ class Pool2dGradFunctor { const framework::Tensor& output, const framework::Tensor& output_grad, std::vector& ksize, std::vector& strides, std::vector& paddings, - PoolProcess pool_compute) { + PoolProcess pool_process) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; @@ -220,7 +222,7 @@ class Pool2dGradFunctor { dim3 threads(1024, 1); dim3 grid(blocks, 1); - KernelPool2dBackward< + KernelPool2DGrad< PoolProcess, T><<(context) @@ -228,7 +230,7 @@ class Pool2dGradFunctor { nthreads, input_data, output_data, output_grad_data, input_grad_data, input_channels, input_height, input_width, output_height, output_width, ksize_height, ksize_width, stride_height, stride_width, padding_height, - padding_width, pool_compute); + padding_width, pool_process); } }; @@ -264,7 +266,7 @@ class MaxPool2dGradFunctor { dim3 threads(1024, 1); dim3 grid(blocks, 1); - KernelMaxPool2dBackward< + KernelMaxPool2DGrad< T><<(context) .stream()>>>( @@ -276,35 +278,37 @@ class MaxPool2dGradFunctor { }; template class MaxPool2dGradFunctor; -// template class MaxPool2dGradFunctor; +// template class MaxPool2dGradFunctor; // The +// 64-bit floating-point version of atomicAdd() is only supported by devices of +// compute capability 6.x and higher. template class Pool2dFunctor, float>; + paddle::operators::math::MaxPool, float>; template class Pool2dFunctor, float>; + paddle::operators::math::AvgPool, float>; template class Pool2dGradFunctor< - platform::GPUPlace, paddle::operators::math::maxPoolGrad, float>; + platform::GPUPlace, paddle::operators::math::MaxPoolGrad, float>; template class Pool2dGradFunctor< - platform::GPUPlace, paddle::operators::math::avgPoolGrad, float>; + platform::GPUPlace, paddle::operators::math::AvgPoolGrad, float>; template class Pool2dFunctor, double>; + paddle::operators::math::MaxPool, double>; template class Pool2dFunctor, double>; + paddle::operators::math::AvgPool, double>; template class Pool2dGradFunctor< - platform::GPUPlace, paddle::operators::math::maxPoolGrad, double>; + platform::GPUPlace, paddle::operators::math::MaxPoolGrad, double>; template class Pool2dGradFunctor< - platform::GPUPlace, paddle::operators::math::avgPoolGrad, double>; + platform::GPUPlace, paddle::operators::math::AvgPoolGrad, double>; template -__global__ void KernelPool3DForward( +__global__ void KernelPool3D( const int nthreads, const T* input_data, T* output_data, const int channels, const int input_depth, const int input_height, const int input_width, const int output_depth, const int output_height, const int output_width, const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, - PoolProcess pool_compute) { - for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (nthreads); + PoolProcess pool_process) { + for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % output_height; @@ -321,25 +325,25 @@ __global__ void KernelPool3DForward( dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); - T ele = pool_compute.initial(); + T ele = pool_process.initial(); input_data += (batch_idx * channels + c) * input_depth * input_height * input_width; for (int d = dstart; d < dend; ++d) { for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { - pool_compute.compute( + pool_process.compute( ele, input_data[(d * input_height + h) * input_width + w]); } } } int pool_size = (dend - dstart) * (hend - hstart) * (wend - wstart); - pool_compute.finalize(ele, static_cast(pool_size)); + pool_process.finalize(ele, static_cast(pool_size)); output_data[index] = ele; } } template -__global__ void KernelPool3DBackward( +__global__ void KernelPool3DGrad( const int nthreads, const T* input_data, const T* output_data, const T* output_grad, T* input_grad, const int channels, const int input_depth, const int input_height, const int input_width, @@ -347,8 +351,8 @@ __global__ void KernelPool3DBackward( const int ksize_depth, const int ksize_height, const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width, - PoolProcess pool_compute) { - for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (nthreads); + PoolProcess pool_process) { + for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int offsetW = index % input_width + padding_width; int offsetH = (index / input_width) % input_height + padding_height; @@ -392,7 +396,7 @@ __global__ void KernelPool3DBackward( wstart = max(wstart, 0); int pool_size = (dend - dstart) * (hend - hstart) * (wend - wstart); int output_sub_idx = (pd * output_height + ph) * output_width + pw; - pool_compute.compute(input, output_data[output_sub_idx], + pool_process.compute(input, output_data[output_sub_idx], output_grad[output_sub_idx], gradient, static_cast(1.0 / pool_size)); } @@ -403,7 +407,7 @@ __global__ void KernelPool3DBackward( } template -__global__ void KernelMaxPool3DBackward( +__global__ void KernelMaxPool3DGrad( const int nthreads, const T* input_data, const T* output_data, const T* output_grad, T* input_grad, const int channels, const int input_depth, const int input_height, const int input_width, @@ -412,7 +416,7 @@ __global__ void KernelMaxPool3DBackward( const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width) { - for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (nthreads); + for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % output_height; @@ -460,7 +464,7 @@ class Pool3dFunctor { void operator()(const platform::DeviceContext& context, const framework::Tensor& input, framework::Tensor& output, std::vector& ksize, std::vector& strides, - std::vector& paddings, PoolProcess pool_compute) { + std::vector& paddings, PoolProcess pool_process) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; @@ -489,7 +493,7 @@ class Pool3dFunctor { dim3 threads(1024, 1); dim3 grid(blocks, 1); - KernelPool3DForward< + KernelPool3D< PoolProcess, T><<(context) @@ -498,7 +502,7 @@ class Pool3dFunctor { input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, padding_height, padding_width, - pool_compute); + pool_process); } }; @@ -510,7 +514,7 @@ class Pool3dGradFunctor { const framework::Tensor& output, const framework::Tensor& output_grad, std::vector& ksize, std::vector& strides, std::vector& paddings, - PoolProcess pool_compute) { + PoolProcess pool_process) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; @@ -541,7 +545,7 @@ class Pool3dGradFunctor { dim3 threads(1024, 1); dim3 grid(blocks, 1); - KernelPool3DBackward< + KernelPool3DGrad< PoolProcess, T><<(context) @@ -550,7 +554,7 @@ class Pool3dGradFunctor { input_channels, input_depth, input_height, input_width, output_depth, output_height, output_width, ksize_depth, ksize_height, ksize_width, stride_depth, stride_height, stride_width, padding_depth, - padding_height, padding_width, pool_compute); + padding_height, padding_width, pool_process); } }; @@ -592,7 +596,7 @@ class MaxPool3dGradFunctor { dim3 threads(1024, 1); dim3 grid(blocks, 1); - KernelMaxPool3DBackward< + KernelMaxPool3DGrad< T><<(context) .stream()>>>( @@ -605,24 +609,26 @@ class MaxPool3dGradFunctor { }; template class MaxPool3dGradFunctor; -// template class MaxPool3dGradFunctor; +// template class MaxPool3dGradFunctor; // The +// 64-bit floating-point version of atomicAdd() is only supported by devices of +// compute capability 6.x and higher. template class Pool3dFunctor, float>; + paddle::operators::math::MaxPool, float>; template class Pool3dFunctor, float>; + paddle::operators::math::AvgPool, float>; template class Pool3dGradFunctor< - platform::GPUPlace, paddle::operators::math::maxPoolGrad, float>; + platform::GPUPlace, paddle::operators::math::MaxPoolGrad, float>; template class Pool3dGradFunctor< - platform::GPUPlace, paddle::operators::math::avgPoolGrad, float>; + platform::GPUPlace, paddle::operators::math::AvgPoolGrad, float>; template class Pool3dFunctor, double>; + paddle::operators::math::MaxPool, double>; template class Pool3dFunctor, double>; + paddle::operators::math::AvgPool, double>; template class Pool3dGradFunctor< - platform::GPUPlace, paddle::operators::math::maxPoolGrad, double>; + platform::GPUPlace, paddle::operators::math::MaxPoolGrad, double>; template class Pool3dGradFunctor< - platform::GPUPlace, paddle::operators::math::avgPoolGrad, double>; + platform::GPUPlace, paddle::operators::math::AvgPoolGrad, double>; } // namespace math } // namespace operators diff --git a/paddle/operators/math/pooling.h b/paddle/operators/math/pooling.h index cf0ab7ecae..d214c68923 100644 --- a/paddle/operators/math/pooling.h +++ b/paddle/operators/math/pooling.h @@ -22,11 +22,10 @@ namespace paddle { namespace operators { namespace math { ////////////////////// -#define FLT_MAX __FLT_MAX__ -///////////////////// +#define FLT_MAX __FLT_MAX__ // template -class maxPool { +class MaxPool { public: DEVICE inline T initial() { return static_cast(-FLT_MAX); } DEVICE inline void compute(T& y, const T& x) { y = y > x ? y : x; } @@ -34,14 +33,14 @@ class maxPool { }; template -class avgPool { +class AvgPool { public: DEVICE inline T initial() { return static_cast(0); } DEVICE inline void compute(T& y, const T& x) { y += x; } DEVICE inline void finalize(T& y, const T& poo_size) { y /= poo_size; } }; template -class maxPoolGrad { +class MaxPoolGrad { public: DEVICE inline void compute(const T& x, const T& y, const T& dy, T& dx, T scale) { @@ -50,7 +49,7 @@ class maxPoolGrad { }; template -class avgPoolGrad { +class AvgPoolGrad { public: DEVICE inline void compute(const T& x, const T& y, const T& dy, T& dx, T scale) { diff --git a/paddle/operators/pool_op.cc b/paddle/operators/pool_op.cc index 6219c6f97c..c29f51f056 100644 --- a/paddle/operators/pool_op.cc +++ b/paddle/operators/pool_op.cc @@ -51,7 +51,7 @@ class PoolOp : public framework::OperatorWithKernel { ksize[i] = static_cast(in_x_dims[i + 2]); } - PADDLE_ENFORCE(in_x_dims.size() - ksize.size() == 2, + PADDLE_ENFORCE(in_x_dims.size() - ksize.size() == 2U, "Input size and Pooling size should be consistent."); PADDLE_ENFORCE(ksize.size() == 2 || ksize.size() == 3, "Pooling size should be 2 elements. or 3 elements."); @@ -79,7 +79,6 @@ class PoolOpGrad : public framework::OperatorWithKernel { "X(Input) of Pooling should not be null."); PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")), "Input@Grad of Pooling should not be null."); - ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X")); } }; @@ -98,66 +97,36 @@ class Pool2dOpMaker : public framework::OpProtoAndCheckerMaker { "The format of output tensor is also NCHW."); AddAttr("poolingType", - "poolingType of pooling operator." - "str constant equal to 'max' or 'avg'"); + "PoolingType of pooling operator." + "Str constant equal to 'max' or 'avg'.") + .InEnum({"max", "avg"}); AddAttr>( "ksize", "Pooling size(depth, height, width) of pooling operator." - "If globalPooling = true, ksize is ignored and need not be specified."); + "If globalPooling = true, ksize is ignored and need not be " + "specified."); // TODO(Add checker) AddAttr( "globalPooling", - "whether to use the globalPooling." - "int constant equal to false or true" - "default false" + "Whether to use the globalPooling." + "Bool constant equal to false or true." + "Default false." "If globalPooling = true, ksize is ignored and need not be specified.") .SetDefault(false); AddAttr>("strides", - "strides(height, width) of pooling operator." - "default {1,1}") - .SetDefault({1, 1}) - .AddCustomChecker(GreaterThanChecker_pool({0, 0})); + "Strides(height, width) of pooling operator." + "Default {1,1}") + .SetDefault({1, 1}); // TODO(Add checker) AddAttr>("paddings", - "paddings(height, width) of pooling operator." - "default {0,0}") - .SetDefault({0, 0}) - .AddCustomChecker(EqualGreaterThanChecker_pool({0, 0})); + "Paddings(height, width) of pooling operator." + "Default {0,0}.") + .SetDefault({0, 0}); // TODO(Add checker) AddComment(R"DOC( The pooling2d operation calculates the output based on the input, poolingType and ksize, strides, paddings parameters. )DOC"); } - - private: - struct GreaterThanChecker_pool { - public: - explicit GreaterThanChecker_pool(std::vector lower_bound) - : lower_bound_(lower_bound) {} - void operator()(std::vector &value) const { - PADDLE_ENFORCE(value.size() == lower_bound_.size(), "equal check fails."); - for (size_t i = 0; i < value.size(); ++i) { - PADDLE_ENFORCE(value[i] > lower_bound_[i], "larger_than check fails."); - } - } - - private: - std::vector lower_bound_; - }; - - struct EqualGreaterThanChecker_pool { - public: - explicit EqualGreaterThanChecker_pool(std::vector lower_bound) - : lower_bound_(lower_bound) {} - void operator()(std::vector &value) const { - PADDLE_ENFORCE(value.size() == lower_bound_.size(), "equal check fails."); - for (size_t i = 0; i < value.size(); ++i) { - PADDLE_ENFORCE(value[i] >= lower_bound_[i], "larger_than check fails."); - } - } - - private: - std::vector lower_bound_; - }; }; + class Pool3dOpMaker : public framework::OpProtoAndCheckerMaker { public: Pool3dOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) @@ -173,67 +142,36 @@ class Pool3dOpMaker : public framework::OpProtoAndCheckerMaker { "The format of output tensor is also NCDHW."); AddAttr("poolingType", - "poolingType of pooling operator." - "str constant equal to 'max' or 'avg'"); + "PoolingType of pooling operator." + "str constant equal to 'max' or 'avg'.") + .InEnum({"max", "avg"}); AddAttr>( "ksize", - "pooling size(depth, height, width) of pooling operator." - "If globalPooling = true, ksize is ignored and need not be specified."); + "Pooling size(depth, height, width) of pooling operator." + "If globalPooling = true, ksize is ignored and need not be " + "specified."); // TODO(Add checker) AddAttr( "globalPooling", - "whether to use the globalPooling." - "int constant equal to false or true" - "default false" + "Whether to use the globalPooling." + "Bool constant equal to false or true." + "Default false." "If globalPooling = true, ksize is ignored and need not be specified.") .SetDefault(false); AddAttr>( "strides", - "strides(depth, height, width) of pooling operator." - "default {1,1,1}") - .SetDefault({1, 1, 1}) - .AddCustomChecker(GreaterThanChecker_pool({0, 0, 0})); + "Strides(depth, height, width) of pooling operator." + "Default {1,1,1}.") + .SetDefault({1, 1, 1}); // TODO(Add checker) AddAttr>( "paddings", - "paddings(depth, height, width) of pooling operator." - "default {0,0,0}") - .SetDefault({0, 0, 0}) - .AddCustomChecker(EqualGreaterThanChecker_pool({0, 0, 0})); + "Paddings(depth, height, width) of pooling operator." + "Default {0,0,0}.") + .SetDefault({0, 0, 0}); // TODO(Add checker) AddComment(R"DOC( The pooling3d operation calculates the output based on the input, poolingType and ksize, strides, paddings parameters. )DOC"); } - - private: - struct GreaterThanChecker_pool { - public: - explicit GreaterThanChecker_pool(std::vector lower_bound) - : lower_bound_(lower_bound) {} - void operator()(std::vector &value) const { - PADDLE_ENFORCE(value.size() == lower_bound_.size(), "equal check fails."); - for (size_t i = 0; i < value.size(); ++i) { - PADDLE_ENFORCE(value[i] > lower_bound_[i], "larger_than check fails."); - } - } - - private: - std::vector lower_bound_; - }; - - struct EqualGreaterThanChecker_pool { - public: - explicit EqualGreaterThanChecker_pool(std::vector lower_bound) - : lower_bound_(lower_bound) {} - void operator()(std::vector &value) const { - PADDLE_ENFORCE(value.size() == lower_bound_.size(), "equal check fails."); - for (size_t i = 0; i < value.size(); ++i) { - PADDLE_ENFORCE(value[i] >= lower_bound_[i], "larger_than check fails."); - } - } - - private: - std::vector lower_bound_; - }; }; } // namespace operators } // namespace paddle diff --git a/paddle/operators/pool_op.h b/paddle/operators/pool_op.h index 73c9721624..0c246b38ef 100644 --- a/paddle/operators/pool_op.h +++ b/paddle/operators/pool_op.h @@ -31,12 +31,11 @@ class PoolKernel : public framework::OpKernel { const Tensor* in_x = context.Input("X"); Tensor* out = context.Output("Out"); - bool global_pooling = context.Attr("globalPooling"); std::string pooling_type = context.Attr("poolingType"); std::vector ksize = context.Attr>("ksize"); std::vector strides = context.Attr>("strides"); std::vector paddings = context.Attr>("paddings"); - if (global_pooling) { + if (context.Attr("globalPooling")) { for (size_t i = 0; i < ksize.size(); ++i) { ksize[i] = static_cast(in_x->dims()[i + 2]); } @@ -46,17 +45,17 @@ class PoolKernel : public framework::OpKernel { case 2: { if (pooling_type == "max") { paddle::operators::math::Pool2dFunctor< - Place, paddle::operators::math::maxPool, T> + Place, paddle::operators::math::MaxPool, T> pool2d_forward; - paddle::operators::math::maxPool pool_process; + paddle::operators::math::MaxPool pool_process; pool2d_forward(context.device_context(), *in_x, *out, ksize, strides, paddings, pool_process); } else if (pooling_type == "avg") { paddle::operators::math::Pool2dFunctor< - Place, paddle::operators::math::avgPool, T> + Place, paddle::operators::math::AvgPool, T> pool2d_forward; - paddle::operators::math::avgPool pool_process; + paddle::operators::math::AvgPool pool_process; pool2d_forward(context.device_context(), *in_x, *out, ksize, strides, paddings, pool_process); } @@ -64,16 +63,16 @@ class PoolKernel : public framework::OpKernel { case 3: { if (pooling_type == "max") { paddle::operators::math::Pool3dFunctor< - Place, paddle::operators::math::maxPool, T> + Place, paddle::operators::math::MaxPool, T> pool3d_forward; - paddle::operators::math::maxPool pool_process; + paddle::operators::math::MaxPool pool_process; pool3d_forward(context.device_context(), *in_x, *out, ksize, strides, paddings, pool_process); } else if (pooling_type == "avg") { paddle::operators::math::Pool3dFunctor< - Place, paddle::operators::math::avgPool, T> + Place, paddle::operators::math::AvgPool, T> pool3d_forward; - paddle::operators::math::avgPool pool_process; + paddle::operators::math::AvgPool pool_process; pool3d_forward(context.device_context(), *in_x, *out, ksize, strides, paddings, pool_process); } @@ -92,13 +91,12 @@ class PoolGradKernel : public framework::OpKernel { context.Input(framework::GradVarName("Out")); Tensor* in_x_grad = context.Output(framework::GradVarName("X")); - bool global_pooling = context.Attr("globalPooling"); std::string pooling_type = context.Attr("poolingType"); std::vector ksize = context.Attr>("ksize"); std::vector strides = context.Attr>("strides"); std::vector paddings = context.Attr>("paddings"); - if (global_pooling) { + if (context.Attr("globalPooling")) { for (size_t i = 0; i < ksize.size(); ++i) ksize[i] = static_cast(in_x->dims()[i + 2]); } @@ -118,9 +116,9 @@ class PoolGradKernel : public framework::OpKernel { *out_grad, ksize, strides, paddings); } else if (pooling_type == "avg") { paddle::operators::math::Pool2dGradFunctor< - Place, paddle::operators::math::avgPoolGrad, T> + Place, paddle::operators::math::AvgPoolGrad, T> pool2d_backward; - paddle::operators::math::avgPoolGrad pool_process; + paddle::operators::math::AvgPoolGrad pool_process; pool2d_backward(context.device_context(), *in_x, *in_x_grad, *out, *out_grad, ksize, strides, paddings, pool_process); } @@ -133,9 +131,9 @@ class PoolGradKernel : public framework::OpKernel { *out_grad, ksize, strides, paddings); } else if (pooling_type == "avg") { paddle::operators::math::Pool3dGradFunctor< - Place, paddle::operators::math::avgPoolGrad, T> + Place, paddle::operators::math::AvgPoolGrad, T> pool3d_backward; - paddle::operators::math::avgPoolGrad pool_process; + paddle::operators::math::AvgPoolGrad pool_process; pool3d_backward(context.device_context(), *in_x, *in_x_grad, *out, *out_grad, ksize, strides, paddings, pool_process); } From b611a479fcf687367c9a6808242f6a348854c645 Mon Sep 17 00:00:00 2001 From: qijun Date: Thu, 28 Sep 2017 20:48:55 -0700 Subject: [PATCH 076/342] fix gpu build error --- paddle/operators/cross_entropy_op.cu | 2 +- paddle/operators/math/cross_entropy.cu | 2 +- paddle/operators/softmax_with_cross_entropy_op.cu | 3 ++- paddle/platform/device_context_test.cc | 2 +- 4 files changed, 5 insertions(+), 4 deletions(-) diff --git a/paddle/operators/cross_entropy_op.cu b/paddle/operators/cross_entropy_op.cu index 04ae66de91..5e2024e0ea 100644 --- a/paddle/operators/cross_entropy_op.cu +++ b/paddle/operators/cross_entropy_op.cu @@ -56,7 +56,7 @@ class CrossEntropyOpCUDAKernel : public framework::OpKernel { y->mutable_data(ctx.GetPlace()); math::CrossEntropyFunctor()( - ctx, y, x, label, ctx.Attr("softLabel")); + ctx.device_context(), y, x, label, ctx.Attr("softLabel")); } }; diff --git a/paddle/operators/math/cross_entropy.cu b/paddle/operators/math/cross_entropy.cu index 2c589521c1..367190e6b0 100644 --- a/paddle/operators/math/cross_entropy.cu +++ b/paddle/operators/math/cross_entropy.cu @@ -74,7 +74,7 @@ using Tensor = framework::Tensor; template class CrossEntropyFunctor { public: - void operator()(const framework::DeviceContext& ctx, framework::Tensor* out, + void operator()(const platform::DeviceContext& ctx, framework::Tensor* out, const framework::Tensor* prob, const framework::Tensor* labels, bool softLabel) { const T* prob_data = prob->data(); diff --git a/paddle/operators/softmax_with_cross_entropy_op.cu b/paddle/operators/softmax_with_cross_entropy_op.cu index b5a7cda734..2bc53ecf87 100644 --- a/paddle/operators/softmax_with_cross_entropy_op.cu +++ b/paddle/operators/softmax_with_cross_entropy_op.cu @@ -69,7 +69,8 @@ class SoftmaxWithCrossEntropyCUDAKernel : public framework::OpKernel { math::SoftmaxFunctor()(context.device_context(), logits, softmax); math::CrossEntropyFunctor()( - context, loss, softmax, labels, context.Attr("softLabel")); + context.device_context(), loss, softmax, labels, + context.Attr("softLabel")); } }; diff --git a/paddle/platform/device_context_test.cc b/paddle/platform/device_context_test.cc index 5883a55272..f4b00c57de 100644 --- a/paddle/platform/device_context_test.cc +++ b/paddle/platform/device_context_test.cc @@ -24,7 +24,7 @@ TEST(Device, Init) { for (int i = 0; i < count; i++) { DeviceContext* device_context = new CUDADeviceContext(GPUPlace(i)); Eigen::GpuDevice* gpu_device = - device_context->template get_eigen_device(); + device_context->template GetEigenDevice(); ASSERT_NE(nullptr, gpu_device); delete device_context; } From 4dfc10ccf73aa7872b41684cda0e407cc6a3ba00 Mon Sep 17 00:00:00 2001 From: Peng Li Date: Fri, 29 Sep 2017 14:34:40 +0800 Subject: [PATCH 077/342] a patch for fixing random seeds in gradient checkers --- python/paddle/v2/framework/tests/op_test.py | 16 ++++++++++++++++ .../tests/test_softmax_with_cross_entropy_op.py | 2 +- 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/python/paddle/v2/framework/tests/op_test.py b/python/paddle/v2/framework/tests/op_test.py index 23794151bd..75df2eeddf 100644 --- a/python/paddle/v2/framework/tests/op_test.py +++ b/python/paddle/v2/framework/tests/op_test.py @@ -1,5 +1,6 @@ import unittest import numpy as np +import random import itertools import paddle.v2.framework.core as core from paddle.v2.framework.op import Operator @@ -192,6 +193,21 @@ def get_gradient(scope, op, inputs, outputs, grad_name, place, class OpTest(unittest.TestCase): + @classmethod + def setUpClass(cls): + '''Fix random seeds to remove randomness from tests''' + cls._np_rand_state = np.random.get_state() + cls._py_rand_state = random.getstate() + + np.random.seed(123) + random.seed(124) + + @classmethod + def tearDownClass(cls): + '''Restore random seeds''' + np.random.set_state(cls._np_rand_state) + random.setstate(cls._py_rand_state) + def check_output_with_place(self, place, atol): self.scope = core.Scope() op_inputs = self.inputs if hasattr(self, "inputs") else dict() diff --git a/python/paddle/v2/framework/tests/test_softmax_with_cross_entropy_op.py b/python/paddle/v2/framework/tests/test_softmax_with_cross_entropy_op.py index 428395b76c..377d07fb59 100644 --- a/python/paddle/v2/framework/tests/test_softmax_with_cross_entropy_op.py +++ b/python/paddle/v2/framework/tests/test_softmax_with_cross_entropy_op.py @@ -43,7 +43,7 @@ class TestSoftmaxWithCrossEntropyOp2(OpTest): def setUp(self): self.op_type = "softmax_with_cross_entropy" batch_size = 2 - class_num = 17 + class_num = 37 logits = np.random.uniform(0.1, 1.0, [batch_size, class_num]).astype("float32") From 884e31a59b72856ea1a807561f01a623c1138053 Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Fri, 29 Sep 2017 15:28:25 +0800 Subject: [PATCH 078/342] add interpolation op --- paddle/operators/interp_op.cc | 107 ++++++++++++++++++ .../v2/framework/tests/test_interp_op.py | 28 +++++ 2 files changed, 135 insertions(+) create mode 100644 paddle/operators/interp_op.cc create mode 100644 python/paddle/v2/framework/tests/test_interp_op.py diff --git a/paddle/operators/interp_op.cc b/paddle/operators/interp_op.cc new file mode 100644 index 0000000000..04bcb9ade8 --- /dev/null +++ b/paddle/operators/interp_op.cc @@ -0,0 +1,107 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/framework/op_registry.h" +#include "paddle/operators/net_op.h" + +namespace paddle { +namespace operators { + +class InterpOp : public NetOp { + public: + InterpOp(const std::string &type, const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : NetOp(type, inputs, outputs, attrs) { + PADDLE_ENFORCE_NE(Input("X"), framework::kEmptyVarName, + "Input(X) of InterpOp should not be null."); + PADDLE_ENFORCE_NE(Input("Y"), framework::kEmptyVarName, + "Input(Y) of InterpOp should not be null."); + PADDLE_ENFORCE_NE(Input("W"), framework::kEmptyVarName, + "Input(W) of InterpOp should not be null."); + PADDLE_ENFORCE_NE(Output("MinusOut"), framework::kEmptyVarName, + "Output(MinusOut) of InterpOp should not be null."); + PADDLE_ENFORCE_NE(Output("MulOut"), framework::kEmptyVarName, + "Output(MulOut) of InterpOp should not be null."); + PADDLE_ENFORCE_NE(Output("Out"), framework::kEmptyVarName, + "Output(Out) of InterpOp should not be null."); + + // MinusOut = X - Y + auto x = Input("X"); + auto y = Input("Y"); + auto minus_out = Output("MinusOut"); + AppendOp(framework::OpRegistry::CreateOp("elementwise_sub", + {{"X", {x}}, {"Y", {y}}}, + {{"Out", {minus_out}}}, {})); + + // MulOut = MinusOut * W = (X - Y) * W + auto w = Input("W"); + auto mul_out = Output("MulOut"); + AppendOp(framework::OpRegistry::CreateOp( + "elementwise_mul", {{"X", {minus_out}}, {"Y", {w}}}, + {{"Out", {mul_out}}}, {{"axis", 0}})); + + // Out = MulOut + Y = (X - Y) * W + Y = X * W + Y * (1 - W) + AppendOp(framework::OpRegistry::CreateOp("elementwise_add", + {{"X", {mul_out}}, {"Y", {y}}}, + {{"Out", {Output("Out")}}}, {})); + + CompleteAddOp(false); + LOG(INFO) << DebugString(); + } +}; + +class InterpOpMaker : public framework::OpProtoAndCheckerMaker { + public: + InterpOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "A 2-D Tensor, the first input of interp_op"); + AddInput("Y", "A 2-D Tensor, the second input of interp_op"); + AddInput("W", "A 1-D Tensor, the interpolated values"); + AddOutput("MinusOut", + "A 2-D Tensor, the intermediate outputs, saving X - Y.") + .AsIntermediate(); + AddOutput("MulOut", + "A 2-D Tensor, the intermediate outputs," + "saving the mul mul of (X - Y) and W") + .AsIntermediate(); + AddOutput("Out", + "A 2-D Tensor, the output of interp_op, same shape with X"); + AddComment(R"DOC( + Linear Interpolation with two inputs, used in NEURAL TURING MACHINE. + + Equation: + Out.row[i] = X.row[i] * W[i] + Y.row[i] * (1 - W[i]) + = (X.row[i] - Y.row[i]) * W[i] + Y.row[i] + + Example: + X = [[1,2],[3,4]], + Y = [[2,1],[4,3]], + W = [0.3, 0.4] + + Then, Out = [[1.7,1.3],[3.6,3.4]] + + where 1.7 = 1*0.3+2*(1-0.3), + 1.3 = 2*0.3+1*(1-0.3), + 3.6 = 3*0.4+4*(1-0.4), + 3.4 = 4*0.4+3*(1-0.4) +)DOC"); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP_WITHOUT_GRADIENT(interp, ops::InterpOp, ops::InterpOpMaker); diff --git a/python/paddle/v2/framework/tests/test_interp_op.py b/python/paddle/v2/framework/tests/test_interp_op.py new file mode 100644 index 0000000000..f82dcc7f50 --- /dev/null +++ b/python/paddle/v2/framework/tests/test_interp_op.py @@ -0,0 +1,28 @@ +import unittest +import numpy as np +from op_test import OpTest + + +class TestInterpOp(OpTest): + def setUp(self): + self.op_type = "interp" + x = np.random.random((2, 3)).astype("float32") + y = np.random.random((2, 3)).astype("float32") + w = np.random.random(2).astype("float32") + + minus_out = x - y + mul_out = minus_out * w.reshape(2, 1) + out = mul_out + y + + self.inputs = {'X': x, 'Y': y, 'W': w} + self.outputs = {'Out': out, 'MinusOut': minus_out, 'MulOut': mul_out} + + def test_check_output(self): + self.check_output() + + def test_check_grad_normal(self): + self.check_grad(['X', 'Y'], 'Out') + + +if __name__ == "__main__": + unittest.main() From bb7f5558034e6d28da096d91d6341f829591d9a2 Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Fri, 29 Sep 2017 16:56:27 +0800 Subject: [PATCH 079/342] remove rowwise_add_op --- paddle/operators/fc_op.cc | 2 +- paddle/operators/rowwise_add_op.cc | 109 ------------------ paddle/operators/rowwise_add_op.cu | 23 ---- paddle/operators/rowwise_add_op.h | 80 ------------- .../v2/framework/tests/test_rowwise_add_op.py | 51 -------- 5 files changed, 1 insertion(+), 264 deletions(-) delete mode 100644 paddle/operators/rowwise_add_op.cc delete mode 100644 paddle/operators/rowwise_add_op.cu delete mode 100644 paddle/operators/rowwise_add_op.h delete mode 100644 python/paddle/v2/framework/tests/test_rowwise_add_op.py diff --git a/paddle/operators/fc_op.cc b/paddle/operators/fc_op.cc index 5ac0e8cc45..7c422c81fc 100644 --- a/paddle/operators/fc_op.cc +++ b/paddle/operators/fc_op.cc @@ -100,7 +100,7 @@ class FCOp : public NetOp { add_out = Output("AddOut"); AppendOp(framework::OpRegistry::CreateOp( - "rowwise_add", {{"X", {sum_out}}, {"b", {Input("B")}}}, + "elementwise_add", {{"X", {sum_out}}, {"Y", {Input("B")}}}, {{"Out", {add_out}}}, {})); } else { if (Output("AddOut") != framework::kEmptyVarName) { diff --git a/paddle/operators/rowwise_add_op.cc b/paddle/operators/rowwise_add_op.cc deleted file mode 100644 index 1fcf0959df..0000000000 --- a/paddle/operators/rowwise_add_op.cc +++ /dev/null @@ -1,109 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ - -#include "paddle/operators/rowwise_add_op.h" - -namespace paddle { -namespace operators { - -using framework::Tensor; - -class RowwiseAddOp : public framework::OperatorWithKernel { - public: - using framework::OperatorWithKernel::OperatorWithKernel; - - protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("X"), - "Input(X) of RowwiseAddOp should not be null."); - PADDLE_ENFORCE(ctx->HasInput("b"), - "Input(b) of RowwiseAddOp should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("Out"), - "Output(Out) of RowwiseAddOp should not be null."); - - auto x_dims = ctx->GetInputDim("X"); - auto b_dims = ctx->GetInputDim("b"); - PADDLE_ENFORCE_GT( - x_dims.size(), b_dims.size(), - "The rank of input `X` must be larger than the one of input `b`."); - - int num_col_dims = x_dims.size() - b_dims.size(); - - PADDLE_ENFORCE_EQ( - framework::slice_ddim(x_dims, num_col_dims, x_dims.size()), b_dims, - "The width of two operands must be same"); - PADDLE_ENFORCE_EQ(ctx->Outputs("Out").size(), 1, - "The output size must be 1"); - ctx->SetOutputDim("Out", x_dims); - ctx->ShareLoD("X", /*->*/ "Out"); - } -}; - -class RowwiseAddOpMaker : public framework::OpProtoAndCheckerMaker { - public: - RowwiseAddOpMaker(framework::OpProto* proto, - framework::OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "The left input of row-wise add op, must be matrix"); - AddInput("b", "The right input of row-wise add op, must be vector"); - AddOutput("Out", "The output of row-wise add op"); - AddComment(R"DOC(Row-wise Add operator - -for i in xrange(X.shape[0]): - Out = X[i] + b -)DOC"); - } -}; -class RowwiseAddGradOp : public framework::OperatorWithKernel { - public: - using framework::OperatorWithKernel::OperatorWithKernel; - - protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("X"), "X should not be null"); - PADDLE_ENFORCE(ctx->HasInput("b"), "b should not be null"); - PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), - "Input(Out@GRAD) should not be null"); - auto x_dims = ctx->GetInputDim("X"); - auto b_dims = ctx->GetInputDim("b"); - PADDLE_ENFORCE_GT( - x_dims.size(), b_dims.size(), - "The rank of input `X` must be larger than the one of input `b`."); - - int64_t num_col_dims = x_dims.size() - b_dims.size(); - PADDLE_ENFORCE_EQ( - framework::slice_ddim(x_dims, num_col_dims, x_dims.size()), b_dims, - "The width of two operands must be same"); - auto x_grad_name = framework::GradVarName("X"); - auto b_grad_name = framework::GradVarName("b"); - if (ctx->HasOutput(x_grad_name)) { - ctx->SetOutputDim(x_grad_name, x_dims); - } - if (ctx->HasOutput(b_grad_name)) { - ctx->SetOutputDim(b_grad_name, b_dims); - } - } -}; - -} // namespace operators -} // namespace paddle - -namespace ops = paddle::operators; -REGISTER_OP(rowwise_add, ops::RowwiseAddOp, ops::RowwiseAddOpMaker, - rowwise_add_grad, ops::RowwiseAddGradOp); -REGISTER_OP_CPU_KERNEL( - rowwise_add, ops::RowwiseAddKernel); -REGISTER_OP_CPU_KERNEL( - rowwise_add_grad, - ops::RowwiseAddGradKernel); diff --git a/paddle/operators/rowwise_add_op.cu b/paddle/operators/rowwise_add_op.cu deleted file mode 100644 index 4a57f64c89..0000000000 --- a/paddle/operators/rowwise_add_op.cu +++ /dev/null @@ -1,23 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ - -#define EIGEN_USE_GPU -#include "paddle/operators/rowwise_add_op.h" - -namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL( - rowwise_add, ops::RowwiseAddKernel); -REGISTER_OP_GPU_KERNEL( - rowwise_add_grad, - ops::RowwiseAddGradKernel); diff --git a/paddle/operators/rowwise_add_op.h b/paddle/operators/rowwise_add_op.h deleted file mode 100644 index b43e5d868b..0000000000 --- a/paddle/operators/rowwise_add_op.h +++ /dev/null @@ -1,80 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" - -namespace paddle { -namespace operators { - -using Tensor = framework::Tensor; -template -using EigenVector = framework::EigenVector; -template -using EigenMatrix = framework::EigenMatrix; - -template -class RowwiseAddKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext& context) const override { - auto out = context.Output("Out"); - out->mutable_data(context.GetPlace()); - int num_col_dims = context.Input("X")->dims().size() - - context.Input("b")->dims().size(); - auto input = - EigenMatrix::Reshape(*context.Input("X"), num_col_dims); - auto bias = EigenVector::Flatten(*context.Input("b")); - auto output = EigenMatrix::Reshape(*out, num_col_dims); - - const int bias_size = bias.dimension(0); - const int rest_size = input.size() / bias_size; - Eigen::DSizes one_d(input.size()); - Eigen::DSizes bcast(rest_size); - output.reshape(one_d).device(context.GetEigenDevice()) = - input.reshape(one_d) + bias.broadcast(bcast).reshape(one_d); - } -}; - -template -class RowwiseAddGradKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext& context) const override { - auto* dout = context.Input(framework::GradVarName("Out")); - auto* dx = context.Output(framework::GradVarName("X")); - auto* db = context.Output(framework::GradVarName("b")); - int num_col_dims = context.Input("X")->dims().size() - - context.Input("b")->dims().size(); - - auto out_grad = EigenMatrix::Reshape(*dout, num_col_dims); - auto place = context.GetEigenDevice(); - - if (dx) { - dx->mutable_data(context.GetPlace()); - EigenMatrix::Reshape(*dx, num_col_dims).device(place) = out_grad; - } - - if (db) { - db->mutable_data(context.GetPlace()); - // https://eigen.tuxfamily.org/dox/unsupported/TensorBase_8h_source.html - // colwise add - Eigen::array dims{{0}}; /* dimension to reduce */ - EigenVector::Flatten(*db).device(place) = out_grad.sum(dims); - } - } -}; -} // namespace operators -} // namespace paddle diff --git a/python/paddle/v2/framework/tests/test_rowwise_add_op.py b/python/paddle/v2/framework/tests/test_rowwise_add_op.py deleted file mode 100644 index 336645bd99..0000000000 --- a/python/paddle/v2/framework/tests/test_rowwise_add_op.py +++ /dev/null @@ -1,51 +0,0 @@ -import unittest -import numpy as np -from op_test import OpTest - - -class TestRowwiseAddOp(OpTest): - def setUp(self): - self.op_type = "rowwise_add" - self.inputs = { - 'X': np.random.uniform(0.1, 1, [5, 10]).astype("float32"), - 'b': np.random.uniform(0.1, 1, [10]).astype("float32") - } - self.outputs = {'Out': np.add(self.inputs['X'], self.inputs['b'])} - - def test_check_output(self): - self.check_output() - - def test_check_grad_normal(self): - self.check_grad(['X', 'b'], 'Out') - - def test_check_grad_ingore_b(self): - self.check_grad(['X'], 'Out', no_grad_set=set('b')) - - def test_check_grad_ingore_x(self): - self.check_grad(['b'], 'Out', no_grad_set=set('X')) - - -class TestRowwiseAddOp2(OpTest): - def setUp(self): - self.op_type = "rowwise_add" - self.inputs = { - 'X': np.random.uniform(0.1, 1, [2, 3, 2, 5]).astype("float32"), - 'b': np.random.uniform(0.1, 1, [2, 5]).astype("float32") - } - self.outputs = {'Out': np.add(self.inputs['X'], self.inputs['b'])} - - def test_check_output(self): - self.check_output() - - def test_check_grad_normal(self): - self.check_grad(['X', 'b'], 'Out') - - def test_check_grad_ignore_b(self): - self.check_grad(['X'], 'Out', no_grad_set=set('b')) - - def test_check_grad_ignore_x(self): - self.check_grad(['b'], 'Out', no_grad_set=set('X')) - - -if __name__ == "__main__": - unittest.main() From 0a37f2adf0ecab50f979f655e9173a2f13095ce3 Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Fri, 29 Sep 2017 17:20:30 +0800 Subject: [PATCH 080/342] fix typo --- paddle/gserver/tests/test_MKLDNN.cpp | 5 +++-- paddle/scripts/submit_local.sh.in | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/paddle/gserver/tests/test_MKLDNN.cpp b/paddle/gserver/tests/test_MKLDNN.cpp index 857d07df3e..a70b2f17f4 100644 --- a/paddle/gserver/tests/test_MKLDNN.cpp +++ b/paddle/gserver/tests/test_MKLDNN.cpp @@ -215,13 +215,13 @@ struct testActDesc { static void getAddtoConfig(TestConfig& cfg, const testActDesc& pm) { cfg.biasSize = 0; cfg.layerConfig.set_type("addto"); - size_t layerSize = pm.ih * pm.ih * pm.iw; + size_t layerSize = pm.ic * pm.ih * pm.iw; cfg.layerConfig.set_size(layerSize); cfg.inputDefs.push_back({INPUT_DATA, "layer_0", layerSize, 0}); cfg.layerConfig.add_inputs(); } -void testActivation(std::string& actType, const testActDesc& pm) { +void testActivation(std::string actType, const testActDesc& pm) { // TODO(TJ): remove me when paddle support elu activation if (actType == "mkldnn_elu") { return; @@ -240,6 +240,7 @@ TEST(MKLDNNActivation, Activations) { for (auto type : types) { /* bs, c, h, w*/ testActivation(type, {16, 64, 32, 32}); + testActivation(type, {2, 8, 1, 1}); } } diff --git a/paddle/scripts/submit_local.sh.in b/paddle/scripts/submit_local.sh.in index 26f9c0fcd4..5c4b5a2495 100755 --- a/paddle/scripts/submit_local.sh.in +++ b/paddle/scripts/submit_local.sh.in @@ -18,7 +18,7 @@ function version(){ echo "PaddlePaddle @PADDLE_VERSION@, compiled with" echo " with_avx: @WITH_AVX@" echo " with_gpu: @WITH_GPU@" - echo " with_mkldnn: @WITH_MKLDNN" + echo " with_mkldnn: @WITH_MKLDNN@" echo " with_mklml: @WITH_MKLML@" echo " with_double: @WITH_DOUBLE@" echo " with_python: @WITH_PYTHON@" From a815d6abcf49d4778d0a49c852c45264bd8a684a Mon Sep 17 00:00:00 2001 From: zhouxiao-coder Date: Fri, 29 Sep 2017 17:29:52 +0800 Subject: [PATCH 081/342] elu: Optimize gradient calculation;Add more comments --- paddle/operators/activation_op.cc | 25 ++++++++++++ paddle/operators/activation_op.cu | 4 ++ paddle/operators/activation_op.h | 40 +++++++++++++++++++ .../v2/framework/tests/test_activation_op.py | 20 ++++++++++ 4 files changed, 89 insertions(+) diff --git a/paddle/operators/activation_op.cc b/paddle/operators/activation_op.cc index 1e1d3cf7f7..e83666c9f9 100644 --- a/paddle/operators/activation_op.cc +++ b/paddle/operators/activation_op.cc @@ -174,6 +174,25 @@ class SoftReluOpMaker : public framework::OpProtoAndCheckerMaker { } }; +template +class ELUOpMaker : public framework::OpProtoAndCheckerMaker { + public: + ELUOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", + "Input of ELU operator, it shouldn't be empty. Input is flattened " + "and treated as a 1D array."); + AddOutput("Y", "Output of ELU operator, has same shape as the input."); + AddComment( + "ELU activation operator. It applies this element-wise computation on " + "the input: f(x) = max(0, x) + min(0, alpha * (exp(x) - 1))." + "Check .. _Link: https://arxiv.org/abs/1511.07289 for more details"); + AddAttr("alpha", + "alpha value in the elu formulation, default to 1.") + .SetDefault(static_cast(1.)); + } +}; + template class PowOpMaker : public framework::OpProtoAndCheckerMaker { public: @@ -311,6 +330,12 @@ REGISTER_OP_CPU_KERNEL(soft_relu, REGISTER_OP_CPU_KERNEL( soft_relu_grad, ops::SoftReluGradKernel); +REGISTER_OP(elu, ops::ActivationOp, ops::ELUOpMaker, elu_grad, + ops::ActivationOpGrad); +REGISTER_OP_CPU_KERNEL(elu, ops::ELUKernel); +REGISTER_OP_CPU_KERNEL(elu_grad, + ops::ELUGradKernel); + REGISTER_OP(pow, ops::ActivationOp, ops::PowOpMaker, pow_grad, ops::ActivationOpGrad); REGISTER_OP_CPU_KERNEL(pow, ops::PowKernel); diff --git a/paddle/operators/activation_op.cu b/paddle/operators/activation_op.cu index 56886d8b1b..48800b11ec 100644 --- a/paddle/operators/activation_op.cu +++ b/paddle/operators/activation_op.cu @@ -97,6 +97,10 @@ REGISTER_OP_GPU_KERNEL(soft_relu, REGISTER_OP_GPU_KERNEL( soft_relu_grad, ops::SoftReluGradKernel); +REGISTER_OP_GPU_KERNEL(elu, ops::ELUKernel); +REGISTER_OP_GPU_KERNEL(elu_grad, + ops::ELUGradKernel); + REGISTER_OP_GPU_KERNEL(pow, ops::PowKernel); REGISTER_OP_GPU_KERNEL(pow_grad, ops::PowGradKernel); diff --git a/paddle/operators/activation_op.h b/paddle/operators/activation_op.h index b9f52e1af3..3428aca817 100644 --- a/paddle/operators/activation_op.h +++ b/paddle/operators/activation_op.h @@ -296,6 +296,46 @@ class SoftReluGradKernel : public framework::OpKernel { } }; +template +class ELUKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + auto* X = context.Input("X"); + auto* Y = context.Output("Y"); + auto alpha = static_cast(context.Attr("alpha")); + Y->mutable_data(context.GetPlace()); + + auto x = framework::EigenVector::Flatten(*X); + auto y = framework::EigenVector::Flatten(*Y); + auto place = context.GetEigenDevice(); + y.device(place) = + x.cwiseMax(static_cast(0)) + + (alpha * (x.exp() - static_cast(1))).cwiseMin(static_cast(0)); + } +}; + +template +class ELUGradKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + auto* X = context.Input("X"); + auto* Y = context.Input("Y"); + auto* dY = context.Input(framework::GradVarName("Y")); + auto* dX = context.Output(framework::GradVarName("X")); + auto alpha = static_cast(context.Attr("alpha")); + dX->mutable_data(context.GetPlace()); + + auto x = framework::EigenVector::Flatten(*X); + auto y = framework::EigenVector::Flatten(*Y); + auto dy = framework::EigenVector::Flatten(*dY); + auto dx = framework::EigenVector::Flatten(*dX); + auto place = context.GetEigenDevice(); + dx.device(place) = + dy * (x > static_cast(0)).template cast() + + dy * (y + alpha) * (x < static_cast(0)).template cast(); + } +}; + template class PowKernel : public framework::OpKernel { public: diff --git a/python/paddle/v2/framework/tests/test_activation_op.py b/python/paddle/v2/framework/tests/test_activation_op.py index c44eb84906..9ea01d43c5 100644 --- a/python/paddle/v2/framework/tests/test_activation_op.py +++ b/python/paddle/v2/framework/tests/test_activation_op.py @@ -144,6 +144,26 @@ class TestSoftRelu(OpTest): self.check_grad(['X'], 'Y', max_relative_error=0.02) +class TestELU(OpTest): + def setUp(self): + self.op_type = "elu" + x = np.random.uniform(-3, 3, [4, 4]).astype("float32") + alpha = 1. + # Note: unlike other Relu extensions, point 0 on standard ELU function (i.e. alpha = 1) + # is differentiable, so we can skip modifications like x[np.abs(x) < 0.005] = 0.02 here + self.inputs = {'X': x} + self.attrs = {'alpha': alpha} + self.outputs = { + 'Y': np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x) - 1)) + } + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['X'], 'Y', max_relative_error=0.02) + + class TestReciprocal(OpTest): def setUp(self): self.op_type = "reciprocal" From 4436ba0c56d105b0c1305a739158fdc08258f7a9 Mon Sep 17 00:00:00 2001 From: zhouxiao-coder Date: Fri, 29 Sep 2017 17:52:18 +0800 Subject: [PATCH 082/342] elu: Optimize gradient calculation;Add more comments --- paddle/operators/activation_op.cc | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/paddle/operators/activation_op.cc b/paddle/operators/activation_op.cc index e83666c9f9..7d086ac5df 100644 --- a/paddle/operators/activation_op.cc +++ b/paddle/operators/activation_op.cc @@ -180,16 +180,18 @@ class ELUOpMaker : public framework::OpProtoAndCheckerMaker { ELUOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", - "Input of ELU operator, it shouldn't be empty. Input is flattened " - "and treated as a 1D array."); - AddOutput("Y", "Output of ELU operator, has same shape as the input."); - AddComment( - "ELU activation operator. It applies this element-wise computation on " - "the input: f(x) = max(0, x) + min(0, alpha * (exp(x) - 1))." - "Check .. _Link: https://arxiv.org/abs/1511.07289 for more details"); - AddAttr("alpha", - "alpha value in the elu formulation, default to 1.") + "(Tensor) The input of ELU operator, it shouldn't be empty. Input " + "is flattened and treated as a 1D array."); + AddOutput("Y", + "(Tensor) The output of ELU operator. It has the same shape as " + "the input."); + AddAttr( + "alpha", "(float, default 1.0) Alpha value in the elu formulation.") .SetDefault(static_cast(1.)); + AddComment(R"DOC( + ELU activation operator. It applies this element-wise computation on + the input: f(x) = max(0, x) + min(0, alpha * (exp(x) - 1)). + Check .. _Link: https://arxiv.org/abs/1511.07289 for more details.)DOC"); } }; From ff061c25022c0b4ed41ccf3cdbc43c7f04940fe4 Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Fri, 29 Sep 2017 18:05:06 +0800 Subject: [PATCH 083/342] refine "Testing Backward Operators" in new_op_cn/en.md --- doc/howto/dev/new_op_cn.md | 36 +++++++++++------------------------- doc/howto/dev/new_op_en.md | 34 ++++++++++------------------------ 2 files changed, 21 insertions(+), 49 deletions(-) diff --git a/doc/howto/dev/new_op_cn.md b/doc/howto/dev/new_op_cn.md index 264b998f50..9d3d02ffc3 100644 --- a/doc/howto/dev/new_op_cn.md +++ b/doc/howto/dev/new_op_cn.md @@ -285,41 +285,27 @@ class TestMulGradOp(GradientChecker): 'Y': np.random.random((84, 100)).astype("float32") } - def test_cpu_gpu_compare(self): - self.compare_grad(self.op, self.inputs) - - def test_normal(self): + def test_check_grad_normal(self): # mul op will enlarge the relative error - self.check_grad( - self.op, self.inputs, ["X", "Y"], "Out", max_relative_error=0.5) + self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.5) - def test_ignore_x(self): + def test_check_grad_ingore_x(self): self.check_grad( - self.op, - self.inputs, ["Y"], - "Out", - max_relative_error=0.5, - no_grad_set={"X"}) + ['Y'], 'Out', max_relative_error=0.5, no_grad_set=set("X")) - def test_ignore_y(self): + def test_check_grad_ingore_y(self): self.check_grad( - self.op, - self.inputs, ["X"], - "Out", - max_relative_error=0.5, - no_grad_set={"Y"}) + ['X'], 'Out', max_relative_error=0.5, no_grad_set=set('Y')) ``` 下面解释代码中一些关键的地方: - 调用`create_op("mul")`创建反向Op对应的前向Op。 -- 调用`compare_grad`函数对比CPU、GPU计算结果。 -- `test_normal`中调用`check_grad`使用数值法检测梯度正确性和稳定性。 - - 第一个参数`self.op` : 前向Op。 - - 第二个参数`self.inputs` : 输入词典,词典的Key和`ProtoMaker`定义保持一致。 - - 第三个参数`["X", "Y"]` : 指定对输入变量`X`、`Y`做梯度检测。 - - 第四个参数`"Out"` : 指定前向网络最终的输出目标变量`Out` -- `test_ignore_x`和`test_ignore_y`分支用来测试只需要计算一个输入梯度的情况。 +- `test_check_grad_normal`中调用`check_grad`使用数值法检测梯度正确性和稳定性。 + - 第一个参数`["X", "Y"]` : 指定对输入变量`X`、`Y`做梯度检测。 + - 第二个参数`"Out"` : 指定前向网络最终的输出目标变量`Out`。 + - 第三个参数`max_relative_error`:指定检测梯度时能容忍的最大错误值。 +- `test_check_grad_ingore_x`和`test_check_grad_ingore_y`分支用来测试只需要计算一个输入梯度的情况。 ### 编译和执行单元测试 diff --git a/doc/howto/dev/new_op_en.md b/doc/howto/dev/new_op_en.md index bad1dbc1de..57ff7caad1 100644 --- a/doc/howto/dev/new_op_en.md +++ b/doc/howto/dev/new_op_en.md @@ -293,41 +293,27 @@ class TestMulGradOp(GradientChecker): 'Y': np.random.random((84, 100)).astype("float32") } - def test_cpu_gpu_compare(self): - self.compare_grad(self.op, self.inputs) - - def test_normal(self): + def test_check_grad_normal(self): # mul op will enlarge the relative error - self.check_grad( - self.op, self.inputs, ["X", "Y"], "Out", max_relative_error=0.5) + self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.5) - def test_ignore_x(self): + def test_check_grad_ingore_x(self): self.check_grad( - self.op, - self.inputs, ["Y"], - "Out", - max_relative_error=0.5, - no_grad_set={"X"}) + ['Y'], 'Out', max_relative_error=0.5, no_grad_set=set("X")) - def test_ignore_y(self): + def test_check_grad_ingore_y(self): self.check_grad( - self.op, - self.inputs, ["X"], - "Out", - max_relative_error=0.5, - no_grad_set={"Y"}) + ['X'], 'Out', max_relative_error=0.5, no_grad_set=set('Y')) ``` Some key points in the code above include: - `create_op("mul")` creates the backward operator's corresponding forward operator. -- `compare_grad` compares results between utilizing the CPU and the GPU. - `test_normal` calls `check_grad` to validate scaling tests' correctness and stability through numeric methods. - - The first variable `self.op` denotes the forward operator. - - The second variable `self.inputs` denotes the input dictionary, which has its key value identical to its `ProtoMaker` definitions. - - The third variable `["X", "Y"]` appoints `X` and `Y` to be scale tested. - - The fourth variable `"Out"` points to the network's final output target `Out`. -- `test_ignore_x` and `test_ignore_y`branches test the cases where there is only one scaling input. + - The first variable `["X", "Y"]` appoints `X` and `Y` to be scale tested. + - The second variable `"Out"` points to the network's final output target `Out`. + - The third variable `max_relative_error` points to the maximum relative tolerance error during scaling tests. +- `test_check_grad_ingore_x` and `test_check_grad_ingore_y`branches test the cases where there is only one scaling input. ### Compiling and Running From 46641c6361ebcf4c9fab79a70c4251ce4329a0f7 Mon Sep 17 00:00:00 2001 From: guosheng Date: Fri, 29 Sep 2017 19:16:33 +0800 Subject: [PATCH 084/342] Fix infer when input is empty in v2/inference.py --- python/paddle/v2/inference.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/python/paddle/v2/inference.py b/python/paddle/v2/inference.py index e80456d9bb..9148cb56cf 100644 --- a/python/paddle/v2/inference.py +++ b/python/paddle/v2/inference.py @@ -96,6 +96,9 @@ class Inference(object): for i, item in enumerate(result): retv[i].append(item) + if retv == None: + return [] + if flatten_result: retv = [numpy.concatenate(out) for out in retv] From 3c66b307f7b6173a69cd4ccc9cf9f7541de964d2 Mon Sep 17 00:00:00 2001 From: hedaoyuan Date: Fri, 29 Sep 2017 19:57:02 +0800 Subject: [PATCH 085/342] Remove the pserver, trainer, evaluators and some useless gradientmachines when compile mobile inference library. --- CMakeLists.txt | 8 +++ cmake/util.cmake | 57 ++++++++++++------- paddle/CMakeLists.txt | 35 +++++++----- paddle/capi/CMakeLists.txt | 8 +-- paddle/gserver/CMakeLists.txt | 22 +++++++ .../gradientmachines/GradientMachine.cpp | 13 ++++- .../gradientmachines/GradientMachine.h | 7 ++- .../gradientmachines/NeuralNetwork.cpp | 18 ++++-- .../gserver/gradientmachines/NeuralNetwork.h | 3 + paddle/gserver/layers/Layer.cpp | 2 + 10 files changed, 128 insertions(+), 45 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 4921226ec1..ec4e6e2e86 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -86,6 +86,14 @@ if(ANDROID OR IOS) "Disable MKLDNN when cross-compiling for Android and iOS" FORCE) set(WITH_MKLML OFF CACHE STRING "Disable MKLML package when cross-compiling for Android and iOS" FORCE) + + if(WITH_C_API) + # Compile PaddlePaddle mobile inference library + set(MOBILE_INFERENCE ON) + add_definitions(-DPADDLE_MOBILE_INFERENCE) + endif() + set(WITH_TESTING OFF CACHE STRING "Disable TESTING when cross-compiling + for Android and iOS" FORCE) endif() set(THIRD_PARTY_PATH "${CMAKE_BINARY_DIR}/third_party" CACHE STRING diff --git a/cmake/util.cmake b/cmake/util.cmake index d1aee3e170..5ebfc0945f 100644 --- a/cmake/util.cmake +++ b/cmake/util.cmake @@ -73,25 +73,44 @@ function(link_paddle_exe TARGET_NAME) generate_rdma_links() endif() - target_circle_link_libraries(${TARGET_NAME} - ARCHIVE_START - paddle_gserver - paddle_function - ARCHIVE_END - paddle_pserver - paddle_trainer_lib - paddle_network - paddle_math - paddle_utils - paddle_parameter - paddle_proto - paddle_cuda - paddle_optimizer - ${EXTERNAL_LIBS} - ${CMAKE_THREAD_LIBS_INIT} - ${CMAKE_DL_LIBS} - ${RDMA_LD_FLAGS} - ${RDMA_LIBS}) + if(MOBILE_INFERENCE) + target_circle_link_libraries(${TARGET_NAME} + ARCHIVE_START + paddle_gserver + paddle_function + ARCHIVE_END + paddle_math + paddle_utils + paddle_parameter + paddle_proto + paddle_cuda + paddle_optimizer + ${EXTERNAL_LIBS} + ${CMAKE_THREAD_LIBS_INIT} + ${CMAKE_DL_LIBS} + ${RDMA_LD_FLAGS} + ${RDMA_LIBS}) + else() + target_circle_link_libraries(${TARGET_NAME} + ARCHIVE_START + paddle_gserver + paddle_function + ARCHIVE_END + paddle_pserver + paddle_trainer_lib + paddle_network + paddle_math + paddle_utils + paddle_parameter + paddle_proto + paddle_cuda + paddle_optimizer + ${EXTERNAL_LIBS} + ${CMAKE_THREAD_LIBS_INIT} + ${CMAKE_DL_LIBS} + ${RDMA_LD_FLAGS} + ${RDMA_LIBS}) + endif() if(ANDROID) target_link_libraries(${TARGET_NAME} log) diff --git a/paddle/CMakeLists.txt b/paddle/CMakeLists.txt index b435de80a2..3eb494ae47 100644 --- a/paddle/CMakeLists.txt +++ b/paddle/CMakeLists.txt @@ -3,25 +3,30 @@ add_subdirectory(function) add_subdirectory(utils) add_subdirectory(testing) add_subdirectory(math) -add_subdirectory(parameter) add_subdirectory(gserver) -add_subdirectory(pserver) -add_subdirectory(trainer) add_subdirectory(scripts) add_subdirectory(string) +add_subdirectory(parameter) -if(Boost_FOUND) - add_subdirectory(memory) - add_subdirectory(platform) - add_subdirectory(framework) - add_subdirectory(operators) - add_subdirectory(pybind) -endif() - -if(WITH_C_API) +if(MOBILE_INFERENCE) add_subdirectory(capi) -endif() +else() + add_subdirectory(pserver) + add_subdirectory(trainer) + + if(WITH_C_API) + add_subdirectory(capi) + endif() + + if(Boost_FOUND) + add_subdirectory(memory) + add_subdirectory(platform) + add_subdirectory(framework) + add_subdirectory(operators) + add_subdirectory(pybind) + endif() -if(WITH_SWIG_PY) - add_subdirectory(api) + if(WITH_SWIG_PY) + add_subdirectory(api) + endif() endif() diff --git a/paddle/capi/CMakeLists.txt b/paddle/capi/CMakeLists.txt index b9bbe58951..a19a19d719 100644 --- a/paddle/capi/CMakeLists.txt +++ b/paddle/capi/CMakeLists.txt @@ -37,9 +37,7 @@ set(PADDLE_CAPI_INFER_LIBS paddle_cuda paddle_function paddle_gserver - paddle_proto - paddle_pserver - paddle_network) + paddle_proto) cc_library(paddle_capi_whole DEPS paddle_capi ${PADDLE_CAPI_INFER_LIBS}) @@ -50,7 +48,9 @@ if(NOT IOS) add_library(paddle_capi_shared SHARED ${CAPI_SOURCES}) set_target_properties(paddle_capi_shared PROPERTIES LINK_FLAGS "${LINK_FLAGS}") target_include_directories(paddle_capi_shared PUBLIC ${CMAKE_CURRENT_BINARY_DIR}) - link_paddle_exe(paddle_capi_shared) + +link_paddle_exe(paddle_capi_shared) + endif() # install library & headers. diff --git a/paddle/gserver/CMakeLists.txt b/paddle/gserver/CMakeLists.txt index 62cff9361c..cd469875df 100644 --- a/paddle/gserver/CMakeLists.txt +++ b/paddle/gserver/CMakeLists.txt @@ -60,6 +60,28 @@ if(NOT WITH_PYTHON) dataproviders/PyDataProvider.h) endif() +if(MOBILE_INFERENCE) + # Remove evaluators + list(REMOVE_ITEM GSERVER_SOURCES + layers/ValidationLayer.cpp + evaluators/Evaluator.cpp + evaluators/DetectionMAPEvaluator.cpp + evaluators/CTCErrorEvaluator.cpp + evaluators/ChunkEvaluator.cpp) + + # Remove useless gradientmachines + list(REMOVE_ITEM GSERVER_SOURCES + gradientmachines/MultiNetwork.cpp + gradientmachines/RecurrentGradientMachine.cpp + gradientmachines/ParallelNeuralNetwork.cpp + gradientmachines/GradientMachineMode.cpp + gradientmachines/MultiGradientMachine.cpp) + + # Remove useless layers + list(REMOVE_ITEM GSERVER_SOURCES + layers/RecurrentLayerGroup.cpp) +endif() + if(WITH_GPU) cuda_add_library(paddle_gserver ${GSERVER_SOURCES}) else() diff --git a/paddle/gserver/gradientmachines/GradientMachine.cpp b/paddle/gserver/gradientmachines/GradientMachine.cpp index b44e4dc202..de5faf5e1e 100644 --- a/paddle/gserver/gradientmachines/GradientMachine.cpp +++ b/paddle/gserver/gradientmachines/GradientMachine.cpp @@ -17,12 +17,15 @@ limitations under the License. */ #include #include "paddle/utils/Logging.h" +#include "NeuralNetwork.h" +#include "hl_gpu.h" + +#ifndef PADDLE_MOBILE_INFERENCE #include "GradientMachineMode.h" #include "MultiGradientMachine.h" #include "MultiNetwork.h" -#include "NeuralNetwork.h" #include "ParallelNeuralNetwork.h" -#include "hl_gpu.h" +#endif namespace paddle { @@ -30,13 +33,16 @@ GradientMachine* GradientMachine::create( const ModelConfig& config, int mode, const std::vector& parameterTypes) { +#ifndef PADDLE_MOBILE_INFERENCE if (auto gm = IGradientMachineMode::tryCreateGradientMachine(mode, config)) { return gm; } if (FLAGS_trainer_count > 1) { return new MultiGradientMachine(config, FLAGS_use_gpu); } +#endif if (FLAGS_trainer_count == 1) { // single +#ifndef PADDLE_MOBILE_INFERENCE NeuralNetwork* nn; if (config.type() == "multi_nn") { /* multi submodel calculate, thread(s) will be initialized inside */ @@ -48,6 +54,9 @@ GradientMachine* GradientMachine::create( /* single thread calculate */ nn = NeuralNetwork::create(config); } +#else + NeuralNetwork* nn = NeuralNetwork::create(config); +#endif ParamInitCallback testParamInitCb = [](int paramId, Parameter* para) { para->enableType(PARAMETER_VALUE); }; diff --git a/paddle/gserver/gradientmachines/GradientMachine.h b/paddle/gserver/gradientmachines/GradientMachine.h index f9c82a2bef..ebfe0573cf 100644 --- a/paddle/gserver/gradientmachines/GradientMachine.h +++ b/paddle/gserver/gradientmachines/GradientMachine.h @@ -20,13 +20,16 @@ limitations under the License. */ #include "ModelConfig.pb.h" #include "TrainerConfig.pb.h" #include "paddle/gserver/dataproviders/DataProvider.h" -#include "paddle/gserver/evaluators/Evaluator.h" #include "paddle/gserver/layers/Layer.h" #include "paddle/math/Matrix.h" #include "paddle/parameter/Parameter.h" #include "paddle/parameter/ParameterUpdaterBase.h" #include "paddle/utils/Thread.h" +#ifndef PADDLE_MOBILE_INFERENCE +#include "paddle/gserver/evaluators/Evaluator.h" +#endif + namespace paddle { /** * @brief A gradient machine is capable of calculating some outputs given @@ -147,6 +150,7 @@ public: virtual void onPassEnd() = 0; +#ifndef PADDLE_MOBILE_INFERENCE /** * Create an evaluator which can be used for eval() */ @@ -156,6 +160,7 @@ public: * evaluate using the given evaluator */ virtual void eval(Evaluator* evaluator) const = 0; +#endif std::vector& getParameters() { return parameters_; } diff --git a/paddle/gserver/gradientmachines/NeuralNetwork.cpp b/paddle/gserver/gradientmachines/NeuralNetwork.cpp index 26cff3e677..dcf0acb5a2 100644 --- a/paddle/gserver/gradientmachines/NeuralNetwork.cpp +++ b/paddle/gserver/gradientmachines/NeuralNetwork.cpp @@ -14,15 +14,17 @@ limitations under the License. */ #include "paddle/utils/Util.h" +#include "NeuralNetwork.h" +#include "hl_gpu.h" +#include "paddle/gserver/layers/AgentLayer.h" #include "paddle/utils/CustomStackTrace.h" #include "paddle/utils/Logging.h" +#include "paddle/utils/Stat.h" +#ifndef PADDLE_MOBILE_INFERENCE #include "MultiNetwork.h" -#include "NeuralNetwork.h" #include "RecurrentGradientMachine.h" -#include "hl_gpu.h" -#include "paddle/gserver/layers/AgentLayer.h" -#include "paddle/utils/Stat.h" +#endif namespace paddle { void parameterInitNN(int paramId, @@ -54,6 +56,7 @@ void parameterInitNN(int paramId, } NeuralNetwork* NeuralNetwork::create(const ModelConfig& config) { +#ifndef PADDLE_MOBILE_INFERENCE if (config.type() == "recurrent_nn") { return newNeuralNetwork("root"); } else if (config.type() == "multi_nn") { @@ -61,6 +64,9 @@ NeuralNetwork* NeuralNetwork::create(const ModelConfig& config) { } else { return newNeuralNetwork(); } +#else + return new NeuralNetwork(); +#endif } std::map NeuralNetwork::dllInitMap; @@ -304,6 +310,8 @@ void NeuralNetwork::onPassEnd() { } } +#ifndef PADDLE_MOBILE_INFERENCE + class CombinedEvaluator : public Evaluator { public: void addEvaluator(std::unique_ptr&& evaluator) { @@ -466,6 +474,8 @@ Evaluator* NeuralNetwork::makeEvaluator() const { void NeuralNetwork::eval(Evaluator* evaluator) const { evaluator->eval(*this); } +#endif + void NeuralNetwork::setOutputGrad(const std::vector& args) { CHECK_GE(outputLayers_.size(), args.size()); for (size_t i = 0; i < args.size(); ++i) { diff --git a/paddle/gserver/gradientmachines/NeuralNetwork.h b/paddle/gserver/gradientmachines/NeuralNetwork.h index 12810f6425..56a1ec7846 100644 --- a/paddle/gserver/gradientmachines/NeuralNetwork.h +++ b/paddle/gserver/gradientmachines/NeuralNetwork.h @@ -97,9 +97,12 @@ public: virtual void onPassEnd(); +#ifndef PADDLE_MOBILE_INFERENCE virtual Evaluator* makeEvaluator() const; virtual void eval(Evaluator* evaluator) const; +#endif + virtual void resetState(); virtual void setOutputGrad(const std::vector& args); diff --git a/paddle/gserver/layers/Layer.cpp b/paddle/gserver/layers/Layer.cpp index e95f42c863..075e8166ef 100644 --- a/paddle/gserver/layers/Layer.cpp +++ b/paddle/gserver/layers/Layer.cpp @@ -103,10 +103,12 @@ LayerPtr Layer::create(const LayerConfig& config) { return LayerPtr(new MultiClassCrossEntropy(config)); else if (type == "rank-cost") return LayerPtr(new RankingCost(config)); +#ifndef PADDLE_MOBILE_INFERENCE else if (type == "auc-validation") return LayerPtr(new AucValidation(config)); else if (type == "pnpair-validation") return LayerPtr(new PnpairValidation(config)); +#endif return LayerPtr(registrar_.createByType(config.type(), config)); } From bb07120b64528ba37de75c01ec2d1d71a2e9cb03 Mon Sep 17 00:00:00 2001 From: hedaoyuan Date: Fri, 29 Sep 2017 20:16:04 +0800 Subject: [PATCH 086/342] Remove dataproviders. --- paddle/gserver/CMakeLists.txt | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/paddle/gserver/CMakeLists.txt b/paddle/gserver/CMakeLists.txt index cd469875df..5f39167afc 100644 --- a/paddle/gserver/CMakeLists.txt +++ b/paddle/gserver/CMakeLists.txt @@ -69,6 +69,14 @@ if(MOBILE_INFERENCE) evaluators/CTCErrorEvaluator.cpp evaluators/ChunkEvaluator.cpp) + # Remove dataproviders + list(REMOVE_ITEM GSERVER_SOURCES + dataproviders/DataProvider.cpp + dataproviders/MultiDataProvider.cpp + dataproviders/ProtoDataProvider.cpp + dataproviders/PyDataProvider2.cpp + dataproviders/PyDataProvider.cpp) + # Remove useless gradientmachines list(REMOVE_ITEM GSERVER_SOURCES gradientmachines/MultiNetwork.cpp From 33299ef972302c310cc2b117f4cb58377daa6bd1 Mon Sep 17 00:00:00 2001 From: hedaoyuan Date: Fri, 29 Sep 2017 20:39:36 +0800 Subject: [PATCH 087/342] Remove cuda. --- cmake/util.cmake | 1 - 1 file changed, 1 deletion(-) diff --git a/cmake/util.cmake b/cmake/util.cmake index 5ebfc0945f..45a8d66120 100644 --- a/cmake/util.cmake +++ b/cmake/util.cmake @@ -83,7 +83,6 @@ function(link_paddle_exe TARGET_NAME) paddle_utils paddle_parameter paddle_proto - paddle_cuda paddle_optimizer ${EXTERNAL_LIBS} ${CMAKE_THREAD_LIBS_INIT} From ea4672bea0bdef1e73f18da8802cd8a467739299 Mon Sep 17 00:00:00 2001 From: hedaoyuan Date: Fri, 29 Sep 2017 20:47:51 +0800 Subject: [PATCH 088/342] Remove optimizer. --- CMakeLists.txt | 8 +++++--- cmake/util.cmake | 1 - 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index ec4e6e2e86..70b35154aa 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -168,9 +168,11 @@ endif(USE_NNPACK) add_subdirectory(proto) -# "add_subdirectory(go)" should be placed after the following loine, -# because it depends on paddle/optimizer. -add_subdirectory(paddle/optimizer) +if(NOT MOBILE_INFERENCE) + # "add_subdirectory(go)" should be placed after the following loine, + # because it depends on paddle/optimizer. + add_subdirectory(paddle/optimizer) +endif() # "add_subdirectory(paddle)" and "add_subdirectory(python)" should be # placed after this block, because they depends on it. diff --git a/cmake/util.cmake b/cmake/util.cmake index 45a8d66120..2ab1e8c8e4 100644 --- a/cmake/util.cmake +++ b/cmake/util.cmake @@ -83,7 +83,6 @@ function(link_paddle_exe TARGET_NAME) paddle_utils paddle_parameter paddle_proto - paddle_optimizer ${EXTERNAL_LIBS} ${CMAKE_THREAD_LIBS_INIT} ${CMAKE_DL_LIBS} From d94dd75e3b932fb7e792d6d1b56701ac2d76bd16 Mon Sep 17 00:00:00 2001 From: hedaoyuan Date: Fri, 29 Sep 2017 21:19:26 +0800 Subject: [PATCH 089/342] Remove string, scripts and retain cuda. --- cmake/util.cmake | 1 + paddle/CMakeLists.txt | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/cmake/util.cmake b/cmake/util.cmake index 2ab1e8c8e4..117ab7f49c 100644 --- a/cmake/util.cmake +++ b/cmake/util.cmake @@ -83,6 +83,7 @@ function(link_paddle_exe TARGET_NAME) paddle_utils paddle_parameter paddle_proto + paddle_cuda ${EXTERNAL_LIBS} ${CMAKE_THREAD_LIBS_INIT} ${CMAKE_DL_LIBS} diff --git a/paddle/CMakeLists.txt b/paddle/CMakeLists.txt index 3eb494ae47..7d2becbdd7 100644 --- a/paddle/CMakeLists.txt +++ b/paddle/CMakeLists.txt @@ -1,18 +1,18 @@ add_subdirectory(cuda) add_subdirectory(function) add_subdirectory(utils) -add_subdirectory(testing) add_subdirectory(math) add_subdirectory(gserver) -add_subdirectory(scripts) -add_subdirectory(string) add_subdirectory(parameter) +add_subdirectory(testing) if(MOBILE_INFERENCE) add_subdirectory(capi) else() add_subdirectory(pserver) add_subdirectory(trainer) + add_subdirectory(string) + add_subdirectory(scripts) if(WITH_C_API) add_subdirectory(capi) From 762a99cc064564102c6d8a6f6e68494ab74a3acd Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 28 Sep 2017 19:37:42 -0700 Subject: [PATCH 090/342] Remove add_op since it can be replaced by sum_op --- paddle/framework/CMakeLists.txt | 2 +- paddle/framework/grad_op_builder_test.cc | 25 ++++--- paddle/operators/add_op.cc | 68 ------------------- paddle/operators/add_op.cu | 18 ----- paddle/operators/add_op.h | 48 ------------- paddle/operators/sum_op.cc | 6 +- .../paddle/v2/framework/tests/test_add_op.py | 20 ------ 7 files changed, 21 insertions(+), 166 deletions(-) delete mode 100644 paddle/operators/add_op.cc delete mode 100644 paddle/operators/add_op.cu delete mode 100644 paddle/operators/add_op.h delete mode 100644 python/paddle/v2/framework/tests/test_add_op.py diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt index 8a5d8532bb..c0ad888b70 100644 --- a/paddle/framework/CMakeLists.txt +++ b/paddle/framework/CMakeLists.txt @@ -29,7 +29,7 @@ cc_test(operator_test SRCS operator_test.cc DEPS operator op_registry) cc_library(grad_op_builder SRCS grad_op_builder.cc DEPS operator proto_desc) cc_library(op_registry SRCS op_registry.cc DEPS grad_op_builder op_proto_maker op_info) cc_test(op_registry_test SRCS op_registry_test.cc DEPS op_registry) -cc_test(grad_op_builder_test SRCS grad_op_builder_test.cc DEPS grad_op_builder op_registry add_op) +cc_test(grad_op_builder_test SRCS grad_op_builder_test.cc DEPS grad_op_builder op_registry sum_op) py_proto_compile(framework_py_proto SRCS framework.proto) # Generate an empty __init__.py to make framework_py_proto as a valid python module. diff --git a/paddle/framework/grad_op_builder_test.cc b/paddle/framework/grad_op_builder_test.cc index d09892f81b..55c5fa420e 100644 --- a/paddle/framework/grad_op_builder_test.cc +++ b/paddle/framework/grad_op_builder_test.cc @@ -3,7 +3,7 @@ #include "paddle/framework/op_registry.h" #include "paddle/framework/operator.h" -USE_OP(add); +USE_OP(sum); namespace paddle { namespace framework { @@ -41,17 +41,24 @@ namespace f = paddle::framework; TEST(GradOpBuilder, AddTwo) { std::shared_ptr add_op(f::OpRegistry::CreateOp( - "add", {{"X", {"x"}}, {"Y", {"y"}}}, {{"Out", {"out"}}}, {})); + "sum", {{"X", {"x", "y"}}}, {{"Out", {"out"}}}, {})); std::shared_ptr grad_add_op = f::OpRegistry::CreateGradOp(*add_op); - EXPECT_EQ(grad_add_op->Inputs().size(), 4UL); - EXPECT_EQ(grad_add_op->Outputs().size(), 2UL); - EXPECT_EQ(grad_add_op->Input("X"), "x"); - EXPECT_EQ(grad_add_op->Input("Y"), "y"); - EXPECT_EQ(grad_add_op->Input("Out"), "out"); + + EXPECT_EQ(grad_add_op->Inputs().size(), 1UL); + EXPECT_EQ(grad_add_op->Outputs().size(), 1UL); EXPECT_EQ(grad_add_op->Input(f::GradVarName("Out")), f::GradVarName("out")); - EXPECT_EQ(grad_add_op->Output(f::GradVarName("X")), f::GradVarName("x")); - EXPECT_EQ(grad_add_op->Output(f::GradVarName("Y")), f::GradVarName("y")); + auto &outputs = grad_add_op->Outputs(f::GradVarName("X")); + EXPECT_EQ(2UL, outputs.size()); + auto in_output = [&outputs](const std::string &name) { + for (auto &output_name : outputs) { + if (output_name == name) return true; + } + return false; + }; + + EXPECT_TRUE(in_output(f::GradVarName("x"))); + EXPECT_TRUE(in_output(f::GradVarName("y"))); } REGISTER_OP(mult_io, f::NOP, f::MutiInOutOpMaker, mult_io_grad, f::NOP); diff --git a/paddle/operators/add_op.cc b/paddle/operators/add_op.cc deleted file mode 100644 index 3914d13230..0000000000 --- a/paddle/operators/add_op.cc +++ /dev/null @@ -1,68 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#include "paddle/operators/add_op.h" - -namespace paddle { -namespace operators { - -class AddOp : public framework::OperatorWithKernel { - public: - using framework::OperatorWithKernel::OperatorWithKernel; - - protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of AddOp should not be null."); - PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) of AddOp should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("Out"), - "Output(Out) of AddOp should not be null."); - - auto x_dims = ctx->GetInputDim("X"); - auto y_dims = ctx->GetInputDim("Y"); - PADDLE_ENFORCE_EQ(x_dims, y_dims, - "Two input of Add Op's dimension must be same."); - ctx->SetOutputDim("Out", x_dims); - } -}; - -class AddOpMaker : public framework::OpProtoAndCheckerMaker { - public: - AddOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "The first input of add op"); - AddInput("Y", "The second input of add op"); - AddOutput("Out", "The output of add op"); - AddComment(R"DOC( -Two Element Add Operator. - -The equation is: Out = X + Y -)DOC"); - } -}; - -class AddOpGrad : public framework::OperatorWithKernel { - public: - using framework::OperatorWithKernel::OperatorWithKernel; - - protected: - void InferShape(framework::InferShapeContextBase* ctx) const override {} -}; - -} // namespace operators -} // namespace paddle - -namespace ops = paddle::operators; -REGISTER_OP(add, ops::AddOp, ops::AddOpMaker, add_grad, ops::AddOpGrad); - -REGISTER_OP_CPU_KERNEL(add, ops::AddKernel); diff --git a/paddle/operators/add_op.cu b/paddle/operators/add_op.cu deleted file mode 100644 index d9c6d20a6c..0000000000 --- a/paddle/operators/add_op.cu +++ /dev/null @@ -1,18 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ - -#include "paddle/operators/add_op.h" - -namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL(add, ops::AddKernel); diff --git a/paddle/operators/add_op.h b/paddle/operators/add_op.h deleted file mode 100644 index 75163032a1..0000000000 --- a/paddle/operators/add_op.h +++ /dev/null @@ -1,48 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" - -namespace paddle { -namespace operators { - -using Tensor = framework::Tensor; -template -using EigenVector = framework::EigenVector; - -template -class AddKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext& context) const override { - auto* input0 = context.Input("X"); - auto* input1 = context.Input("Y"); - auto* output = context.Output("Out"); - - output->mutable_data(context.GetPlace()); - - auto X = EigenVector::Flatten(*input0); - auto Y = EigenVector::Flatten(*input1); - auto Z = EigenVector::Flatten(*output); - - auto place = context.GetEigenDevice(); - - Z.device(place) = X + Y; - } -}; - -} // namespace operators -} // namespace paddle diff --git a/paddle/operators/sum_op.cc b/paddle/operators/sum_op.cc index 8f62a9f4db..5d76313aeb 100644 --- a/paddle/operators/sum_op.cc +++ b/paddle/operators/sum_op.cc @@ -43,8 +43,10 @@ class SumOpMaker : public framework::OpProtoAndCheckerMaker { public: SumOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "the input tensors of sum operator.").AsDuplicable(); - AddOutput("Out", "the output tensor of sum operator."); + AddInput("X", "the input tensors of sum operator.") + .AsDuplicable() + .NotInGradient(); + AddOutput("Out", "the output tensor of sum operator.").NotInGradient(); AddComment(R"DOC( Sum the input tensors. diff --git a/python/paddle/v2/framework/tests/test_add_op.py b/python/paddle/v2/framework/tests/test_add_op.py deleted file mode 100644 index 3ca34d9b9f..0000000000 --- a/python/paddle/v2/framework/tests/test_add_op.py +++ /dev/null @@ -1,20 +0,0 @@ -import unittest -import numpy as np -from op_test import OpTest - - -class TestAddOp(OpTest): - def setUp(self): - self.op_type = "add" - self.inputs = { - 'X': np.random.random((102, 105)).astype("float32"), - 'Y': np.random.random((102, 105)).astype("float32") - } - self.outputs = {'Out': self.inputs['X'] + self.inputs['Y']} - - def test_check_output(self): - self.check_output() - - -if __name__ == "__main__": - unittest.main() From 37c9bc609c807d200e08e6dc11492887fa7673d9 Mon Sep 17 00:00:00 2001 From: Kavya Srinet Date: Fri, 29 Sep 2017 10:52:50 -0700 Subject: [PATCH 091/342] Few more edits --- doc/design/refactorization.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/design/refactorization.md b/doc/design/refactorization.md index 715f209d3f..629422e774 100644 --- a/doc/design/refactorization.md +++ b/doc/design/refactorization.md @@ -17,7 +17,7 @@ The goals of refactoring include: 1. A graph is composed of *variables* and *operators*. -1. The description of graphs must be capable of being serialized/deserialized, so that +1. The description of graphs must be capable of being serialized/deserialized, so that: 1. It can to be sent to the cloud for distributed execution, and 1. It can be sent to clients for mobile or enterprise deployment. From 6164b8986e473daa00e1a71332cc472e21627f55 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Fri, 29 Sep 2017 12:48:40 -0700 Subject: [PATCH 092/342] Fix CI --- .../framework/tests/test_gradient_checker.py | 46 ------------------- python/paddle/v2/framework/tests/test_net.py | 4 +- .../v2/framework/tests/test_operator.py | 4 +- 3 files changed, 4 insertions(+), 50 deletions(-) delete mode 100644 python/paddle/v2/framework/tests/test_gradient_checker.py diff --git a/python/paddle/v2/framework/tests/test_gradient_checker.py b/python/paddle/v2/framework/tests/test_gradient_checker.py deleted file mode 100644 index 85117bf960..0000000000 --- a/python/paddle/v2/framework/tests/test_gradient_checker.py +++ /dev/null @@ -1,46 +0,0 @@ -import unittest -import numpy as np -import paddle.v2.framework.core as core -from op_test import get_numeric_gradient -from op_test import create_op - - -class GetNumericGradientTest(unittest.TestCase): - def test_add_op(self): - x = np.random.random((10, 1)).astype("float32") - y = np.random.random((10, 1)).astype("float32") - z = x + y - scope = core.Scope() - add_op = create_op(scope, "add", {'X': x, 'Y': y}, {'Out': z}, dict()) - arr = get_numeric_gradient(scope, add_op, {'X': x, - 'Y': y}, 'X', ['Out']) - self.assertAlmostEqual(arr.mean(), 1.0, delta=1e-4) - - def test_softmax_op(self): - def stable_softmax(x): - """Compute the softmax of vector x in a numerically stable way.""" - shiftx = x - np.max(x) - exps = np.exp(shiftx) - return exps / np.sum(exps) - - def label_softmax_grad(Y, dY): - dX = Y * 0.0 - for i in range(Y.shape[0]): - d = np.dot(Y[i, :], dY[i, :]) - dX[i, :] = Y[i, :] * (dY[i, :] - d) - return dX - - X = np.random.random((2, 2)).astype("float32") - Y = np.apply_along_axis(stable_softmax, 1, X) - dY = np.ones(Y.shape) - dX = label_softmax_grad(Y, dY) - - scope = core.Scope() - softmax_op = create_op(scope, "softmax", {"X": X}, {"Y": Y}, dict()) - - arr = get_numeric_gradient(scope, softmax_op, {"X": X}, "X", "Y") - np.testing.assert_almost_equal(arr, dX, decimal=1e-2) - - -if __name__ == "__main__": - unittest.main() diff --git a/python/paddle/v2/framework/tests/test_net.py b/python/paddle/v2/framework/tests/test_net.py index 50cfb855f2..8503257feb 100644 --- a/python/paddle/v2/framework/tests/test_net.py +++ b/python/paddle/v2/framework/tests/test_net.py @@ -15,7 +15,7 @@ def fc(X, W, Y): class TestNet(unittest.TestCase): def test_net_all(self): net = core.Net.create() - op1 = Operator("add", X="X", Y="Y", Out="Out") + op1 = Operator("sum", X=["X", "Y"], Out="Out") net.append_op(op1) net2 = core.Net.create() @@ -26,7 +26,7 @@ class TestNet(unittest.TestCase): expected = ''' Op(plain_net), inputs:{all[W, X, Y]}, outputs:{all[Out, fc.out, pre_activation]}. - Op(add), inputs:{X[X], Y[Y]}, outputs:{Out[Out]}. + Op(sum), inputs:{X[X, Y]}, outputs:{Out[Out]}. Op(plain_net), inputs:{all[W, X]}, outputs:{all[fc.out, pre_activation]}. Op(plain_net), inputs:{all[W, X]}, outputs:{all[fc.out, pre_activation]}. Op(mul), inputs:{X[X], Y[W]}, outputs:{Out[pre_activation]}. diff --git a/python/paddle/v2/framework/tests/test_operator.py b/python/paddle/v2/framework/tests/test_operator.py index 040556322d..98f6b2f5ee 100644 --- a/python/paddle/v2/framework/tests/test_operator.py +++ b/python/paddle/v2/framework/tests/test_operator.py @@ -193,10 +193,10 @@ class TestOpDescCreationMethod(unittest.TestCase): class TestOpCreations(unittest.TestCase): def test_all(self): - add_op = op.Operator("add", X="a", Y="b", Out="z") + add_op = op.Operator("sum", X=["a", "b"], Out="z") self.assertIsNotNone(add_op) # Invoke C++ DebugString() - self.assertEqual('Op(add), inputs:{X[a], Y[b]}, outputs:{Out[z]}.', + self.assertEqual('Op(sum), inputs:{X[a, b]}, outputs:{Out[z]}.', str(add_op)) From 12713e92e4a28fa7de762d9be76125efdace7ae2 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Fri, 29 Sep 2017 13:49:18 -0700 Subject: [PATCH 093/342] Design doc of compile time register gradient operators --- doc/design/register_grad_op.md | 63 ++++++++++++++++++++++++++++++++++ 1 file changed, 63 insertions(+) create mode 100644 doc/design/register_grad_op.md diff --git a/doc/design/register_grad_op.md b/doc/design/register_grad_op.md new file mode 100644 index 0000000000..1c961a6158 --- /dev/null +++ b/doc/design/register_grad_op.md @@ -0,0 +1,63 @@ +# Design Doc: Register Gradient Operator + +## Problem + +Since we separate users program in two stages, compile time and runtime, we should record and look up the mapping relationship between an operator and its gradient operators when compile. However, we register this relationship in runtime by these `OpInfo` fields. + +```cpp +struct OpInfo { + std::function creator_; + std::string grad_op_type_; + ... +}; +``` + +OpInfos store in a association map which key is the operator type. The `grad_op_type` indicate associated gradient operator type. Operator can create gradient operator by `OpInfo::creator_` of gradient. The pseudo code is + +```cpp +map OpInfoMap; + +OperatorBase* CreateGradientOperator(const OperatorBase& op) { + return OpInfoMap.at(op.Type()).creator_(...); +} +``` + +At the same time, an operator's gradient operator could be composed of many forward operators. For example, the gradient operator of `minus_op` could consist of an `identity` operator and a `scale` operator. To compose a gradient operator by forwarding operators could: 1) Reuse forwarding operator; 2) Calculate second derivative, third derivative, etc. + +We use `NetOp` to represent a composed operator since the `NetOp` is `vector`. However, `NetOp` is also a runtime concept. We should provide a mechanism to compose operators as a gradient operator. + +In conclusion, the problem that we want to resolve in this design doc is to register the mapping relationship between the forward operator and its gradient operators during compile time. + + +## Solution + +The mapping relationship between an operator and its gradient operators is a function. The interface of that function is: + +```cpp +// (OpDesc) --> vector +using GradOpDescMaker = std::function(const OpDesc&)>; +``` + +The function take a `OpDesc` of the forward operator and return one or many gradient operator descriptions. + +The `GradOpDescMaker` will be registered in `OpInfo`, to replace `grad_op_type_` field. The `OpInfo` should be + +```cpp +struct OpInfo { + GradOpDescMaker grad_op_maker_; + ... +}; +``` + +The `grad_op_maker_ ` is `nullptr` if the operator does not have associated gradient operators. + +We should chagne register macros at the same time. In the current solution, there is no difference between forwarding operators and backward operators. So `REGISTER_OP` just register one operator. If the `REGISTER_OP` contains `OpProtoAndCheckerMaker` and `GradOpDescMaker ƒ`, we just list them in the same macro. It can be done by a macro contains `__VA_ARGS__`. + +The user interface should be + +```cpp +vector SumOpGradMakerƒ(OpDesc) {...} +REGISTER_OP(sum, SumOp, SumOpProtoAndCheckerMaker, SumOpGradMaker); +// Developers can still manually implement gradient operator. +REGISTER_OP(sum_grad, SumGradOp); +``` From 5ee709380b5e03f334d5eff485cc6e37ac32e432 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Fri, 29 Sep 2017 13:53:44 -0700 Subject: [PATCH 094/342] Not change macro before --- doc/design/register_grad_op.md | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/doc/design/register_grad_op.md b/doc/design/register_grad_op.md index 1c961a6158..a793cfe96b 100644 --- a/doc/design/register_grad_op.md +++ b/doc/design/register_grad_op.md @@ -51,13 +51,19 @@ struct OpInfo { The `grad_op_maker_ ` is `nullptr` if the operator does not have associated gradient operators. -We should chagne register macros at the same time. In the current solution, there is no difference between forwarding operators and backward operators. So `REGISTER_OP` just register one operator. If the `REGISTER_OP` contains `OpProtoAndCheckerMaker` and `GradOpDescMaker ƒ`, we just list them in the same macro. It can be done by a macro contains `__VA_ARGS__`. +We should chagne register macros at the same time. In the current solution, there is no difference between forwarding operators and backward operators. So `REGISTER_OP` just register one operator. If the `REGISTER_OPERATOR ` contains `OpProtoAndCheckerMaker` and `GradOpDescMaker`, we just list them in the same macro. It can be done by a macro contains `__VA_ARGS__`. The user interface should be ```cpp vector SumOpGradMakerƒ(OpDesc) {...} -REGISTER_OP(sum, SumOp, SumOpProtoAndCheckerMaker, SumOpGradMaker); +REGISTER_OPERATOR(sum, SumOp, SumOpProtoAndCheckerMaker, SumOpGradMaker); // Developers can still manually implement gradient operator. -REGISTER_OP(sum_grad, SumGradOp); +REGISTER_OPERATOR(sum_grad, SumGradOp); +``` + +The interface of current `REGISTER_OP` macro could not be changed. In `REGISTER_OP`, it will invoke `REGISTER_OPERATOR` two times and generate GradOpDescMaker inside. + +```cpp +REGISTER_OP(sum, SumOp, SumOpProtoAndCheckerMaker, sum_grad, SumGradOp); ``` From 56f60ee554696d42606fc6765dd2002cebafabe8 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Fri, 29 Sep 2017 17:09:12 -0700 Subject: [PATCH 095/342] Update example --- doc/design/register_grad_op.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/design/register_grad_op.md b/doc/design/register_grad_op.md index a793cfe96b..02c4d554d0 100644 --- a/doc/design/register_grad_op.md +++ b/doc/design/register_grad_op.md @@ -56,14 +56,14 @@ We should chagne register macros at the same time. In the current solution, ther The user interface should be ```cpp -vector SumOpGradMakerƒ(OpDesc) {...} -REGISTER_OPERATOR(sum, SumOp, SumOpProtoAndCheckerMaker, SumOpGradMaker); +vector MinusOpGradMaker(OpDesc) {...} +REGISTER_OPERATOR(minus, MinusOp, MinusOpProtoAndCheckerMaker, SumOpGradMaker); // Developers can still manually implement gradient operator. -REGISTER_OPERATOR(sum_grad, SumGradOp); +REGISTER_OPERATOR(minus_grad, MinusGradOp); ``` The interface of current `REGISTER_OP` macro could not be changed. In `REGISTER_OP`, it will invoke `REGISTER_OPERATOR` two times and generate GradOpDescMaker inside. ```cpp -REGISTER_OP(sum, SumOp, SumOpProtoAndCheckerMaker, sum_grad, SumGradOp); +REGISTER_OP(minus, MinusOp, MinusOpProtoAndCheckerMaker, minus_grad, MinusGradOp); ``` From 7fe0297e6451db4f18df81a8716414acca819529 Mon Sep 17 00:00:00 2001 From: Qiao Longfei Date: Fri, 29 Sep 2017 18:38:36 -0700 Subject: [PATCH 096/342] remove Runtime InferShape for cond op (#4518) * init remove cond_op infershape * optimize code * add PrepareDataForSubnet and MergeDataFromSubnet --- paddle/operators/cond_op.cc | 242 +++++++++--------- paddle/operators/cond_op.h | 33 +-- paddle/operators/scatter.h | 4 - .../paddle/v2/framework/tests/test_cond_op.py | 3 - 4 files changed, 140 insertions(+), 142 deletions(-) diff --git a/paddle/operators/cond_op.cc b/paddle/operators/cond_op.cc index aaffa6661f..db20b69f3f 100644 --- a/paddle/operators/cond_op.cc +++ b/paddle/operators/cond_op.cc @@ -14,12 +14,7 @@ limitations under the License. */ #include "paddle/operators/cond_op.h" -#include -#include - -#include "paddle/framework/op_registry.h" #include "paddle/operators/gather.h" -#include "paddle/operators/net_op.h" #include "paddle/operators/scatter.h" namespace paddle { @@ -31,142 +26,104 @@ using Tensor = framework::Tensor; using LoDTensor = framework::LoDTensor; using DDim = framework::DDim; -void CondOp::CreateScope(const Scope& scope) const { +framework::Scope& CondOp::AddSubScope(const Scope& scope) const { auto sub_scopes_var = scope.FindVar("SubScopes"); PADDLE_ENFORCE_NOT_NULL(sub_scopes_var, "Output(SubScopes) of CondOp should not be null."); auto sub_scopes = sub_scopes_var->GetMutable>(); auto& sub_scope = scope.NewScope(); sub_scopes->push_back(&sub_scope); + return sub_scope; } -void CondOp::CreateIndexTensor(const Scope& scope) const { +std::vector& CondOp::GetSubScopes( + const framework::Scope& scope) const { + auto sub_scopes_var = scope.FindVar("SubScopes"); + PADDLE_ENFORCE_NOT_NULL(sub_scopes_var, + "Output(SubScopes) of CondOp should not be null."); + return *sub_scopes_var->GetMutable>(); +} + +LoDTensor& CondOp::AddIndexTensor(const Scope& scope) const { auto index_tensors_var = scope.FindVar("IndexTensors"); PADDLE_ENFORCE_NOT_NULL(index_tensors_var, "Output(IndexTensors) of CondOp should not be null."); auto& index_tensors = *index_tensors_var->GetMutable>(); index_tensors.push_back(LoDTensor()); + return index_tensors.back(); } -void CondOp::InferShape(const Scope& scope) const { - auto sub_scopes_var = scope.FindVar("SubScopes"); - PADDLE_ENFORCE_NOT_NULL(sub_scopes_var, - "Output(SubScopes) of CondOp should not be null."); - auto& sub_scopes = *sub_scopes_var->GetMutable>(); - - for (int i = 0; i < 2; ++i) { - // Create two sub scopes for true and false branches - // sub_scopes[0] for the true branch and sub_scopes[1] for the false - // branch - CreateScope(scope); - - // Create two tensors for true and false indices - // index_tensors[0] for the true branch and index_tensors[1] for the false - // branch - CreateIndexTensor(scope); - - PADDLE_ENFORCE(!Inputs("Xs").empty(), - "Inputs(Xs) of CondOp can't be empty."); - for (auto& input : Inputs("Xs")) { - // Create a new tensor in sub-scope for input-type tensor - Variable* v = sub_scopes[i]->NewVar(input); - LoDTensor* sub_input = v->GetMutable(); - sub_input->Resize(scope.FindVar(input)->GetMutable()->dims()); - } - - for (auto& output : (*sub_net_op_[i]).Outputs()) { - for (auto& var_name : output.second) { - sub_scopes[i]->NewVar(var_name); - } - } - - // each net calls InferShape - // sub_net_op_[i]->InferShape(*sub_scopes[i]); - } - - for (auto& output : Outputs("Outs")) { - LoDTensor* tensor_t_out = - sub_scopes[0]->FindVar(output)->GetMutable(); - PADDLE_ENFORCE_NOT_NULL(tensor_t_out, "True output should not be NULL"); - LoDTensor* tensor_f_out = - sub_scopes[1]->FindVar(output)->GetMutable(); - PADDLE_ENFORCE_NOT_NULL(tensor_f_out, "False output should not be NULL"); - - auto* tensor_out_var = scope.FindVar(output); - PADDLE_ENFORCE_NOT_NULL(tensor_out_var, "Output not found"); - LoDTensor* tensor_out = tensor_out_var->GetMutable(); - PADDLE_ENFORCE_NOT_NULL(tensor_t_out, - "True output tensor should not be NULL"); - - // check output size should be same - PADDLE_ENFORCE_EQ(tensor_t_out->dims(), tensor_f_out->dims(), - "Outputs not of the same shape"); - tensor_out->Resize(tensor_t_out->dims()); - // tensor_out->mutable_data(tensor_out->dims(), - // platform::CPUPlace()); - tensor_out->mutable_data(platform::CPUPlace()); - } -} - -void CondOp::Run(const Scope& scope, - const platform::DeviceContext& dev_ctx) const { - auto* sub_scopes_var = scope.FindVar("SubScopes"); - PADDLE_ENFORCE_NOT_NULL(sub_scopes_var, - "Output(SubScopes) of CondOp should not be null."); - auto sub_scopes = sub_scopes_var->Get>(); +std::vector& CondOp::GetIndexTensors( + const framework::Scope& scope) const { auto* index_tensors_var = scope.FindVar("IndexTensors"); PADDLE_ENFORCE_NOT_NULL(index_tensors_var, "Output(IndexTensors) of CondOp should not be null."); - auto index_tensors = index_tensors_var->Get>(); + return *index_tensors_var->GetMutable>(); +} - std::string cond_name = Input("Cond"); - Variable* cond_var = scope.FindVar(cond_name); +void CondOp::PrepareDataForSubnet( + const framework::Scope& scope, + const platform::DeviceContext& dev_ctx) const { + PADDLE_ENFORCE(!Inputs("Xs").empty(), "Inputs(Xs) of CondOp can't be empty."); + + for (int i = 0; i < BRANCH_NUM; ++i) { + // Create two sub scopes for true and false branches + // sub_scopes[0] for the true branch + // sub_scopes[1] for the false branch + AddSubScope(scope); + // Create two tensors for true and false indices: + // index_tensors[0] for the true branch + // index_tensors[1] for the false branch + AddIndexTensor(scope); + } + + Variable* cond_var = scope.FindVar(Input("Cond")); PADDLE_ENFORCE_NOT_NULL(cond_var, "Input(Cond) of CondOp should not be null."); const LoDTensor* cond = cond_var->GetMutable(); - // Step 1: get the true/false index at runtime - // index_[0]: vector, contains all index for cond[i] == true - // index_[1]: vector, contains all index for cond[i] == false - for (int i = 0; i < 2; ++i) index_[i].clear(); + // get the true/false index at runtime according to cond tensor + // index_vectors[0]: vector, contains all index for cond[i] == true + // index_vectors[1]: vector, contains all index for cond[i] == false + std::vector> index_vectors; + index_vectors.resize(BRANCH_NUM); const int* cond_data = cond->data(); for (int i = 0; i < cond->dims()[0]; ++i) { if (cond_data[i]) - index_[0].push_back(i); + index_vectors[TRUE_BRANCH].push_back(i); else - index_[1].push_back(i); + index_vectors[FALSE_BRANCH].push_back(i); } - // put index_[0] and index_[1] into two tensors: - // index_tensor_[0] and index_tensor_[1] - DDim dim = paddle::framework::make_ddim({0}); - for (int i = 0; i < 2; ++i) { - dim[0] = index_[i].size(); - int* tmp_ptr = + // put index_vectors[0] and index_vectors[1] into two tensors: + // index_tensors[0] and index_tensors[1] + std::vector& index_tensors = GetIndexTensors(scope); + std::vector& sub_scopes = GetSubScopes(scope); + + for (int i = 0; i < BRANCH_NUM; ++i) { + DDim dim = {static_cast(index_vectors[i].size())}; + int* index_tensor_data_ptr = index_tensors[i].mutable_data(dim, platform::CPUPlace()); - index_tensors[i].Resize(dim); - memcpy(tmp_ptr, index_[i].data(), dim[0] * sizeof(int)); + memcpy(index_tensor_data_ptr, index_vectors[i].data(), + dim[0] * sizeof(int)); } - // Step 2: collect data by calling gather - for (int i = 0; i < 2; ++i) { - // i= 0/i for True and False branches respectively - for (auto& input : Inputs("Xs")) { - // find Tensor - Variable* v = scope.FindVar(input); - PADDLE_ENFORCE_NOT_NULL(v); - LoDTensor* tensor_parent = v->GetMutable(); + // create input in subscopes according to index_vectors + for (auto& input : Inputs("Xs")) { + Variable* var_parent = scope.FindVar(input); + PADDLE_ENFORCE_NOT_NULL(var_parent); + const auto* tensor_parent = &var_parent->Get(); - v = sub_scopes[i]->FindVar(input); - PADDLE_ENFORCE_NOT_NULL(v); - LoDTensor* tensor_child = v->GetMutable(); + for (int i = 0; i < BRANCH_NUM; ++i) { + Variable* var_child = sub_scopes[i]->FindVar(input); + PADDLE_ENFORCE_NOT_NULL(var_child); + auto* tensor_child = var_child->GetMutable(); // Resize child - DDim dim = tensor_child->dims(); - dim[0] = index_[i].size(); - tensor_child->Resize(dim); + DDim dim = tensor_parent->dims(); + dim[0] = index_tensors[i].dims()[0]; tensor_child->mutable_data(dim, platform::CPUPlace()); Gather(dev_ctx.GetPlace(), tensor_parent, &index_tensors[i], @@ -174,32 +131,79 @@ void CondOp::Run(const Scope& scope, } } - // Step 3: run - for (int i = 0; i < 2; ++i) { - sub_net_op_[i]->Run(*sub_scopes[i], dev_ctx); + // create output_tensors in subscope for sub_net + for (int i = 0; i < BRANCH_NUM; ++i) { + for (auto& output : (*sub_net_op_[i]).Outputs()) { + for (auto& var_name : output.second) { + sub_scopes[i]->NewVar(var_name); + } + } } +} - // Step 4: merge output results +void CondOp::MergeDataFromSubnet(const framework::Scope& scope, + const platform::DeviceContext& dev_ctx) const { + std::vector& sub_scopes = GetSubScopes(scope); + const std::vector& index_tensors = + GetIndexTensors(scope); + + // Infer the output dim, out_dim[0] = true_dim[0] + false_dim[0] PADDLE_ENFORCE(!Outputs("Outs").empty(), "Outputs(Outs) of CondOp can't be empty."); - for (int i = 0; i < 2; ++i) { - // i= 0/i for True and False branches respectively - for (auto& output : Outputs("Outs")) { - // find Tensor - Variable* v = scope.FindVar(output); - PADDLE_ENFORCE_NOT_NULL(v); - LoDTensor* tensor_parent = v->GetMutable(); - - v = sub_scopes[i]->FindVar(output); - PADDLE_ENFORCE_NOT_NULL(v); - LoDTensor* tensor_child = v->GetMutable(); + for (auto& output : Outputs("Outs")) { + const LoDTensor* tensor_t_out = + &sub_scopes[TRUE_BRANCH]->FindVar(output)->Get(); + PADDLE_ENFORCE_NOT_NULL(tensor_t_out, "True output should not be NULL"); + const LoDTensor* tensor_f_out = + &sub_scopes[FALSE_BRANCH]->FindVar(output)->Get(); + PADDLE_ENFORCE_NOT_NULL(tensor_f_out, "False output should not be NULL"); + + auto* var_out = scope.FindVar(output); + PADDLE_ENFORCE_NOT_NULL(var_out, "Output not found"); + LoDTensor* tensor_out = var_out->GetMutable(); + PADDLE_ENFORCE_NOT_NULL(tensor_t_out, + "True output tensor should not be NULL"); + + DDim true_dim = tensor_t_out->dims(); + DDim false_dim = tensor_f_out->dims(); + true_dim[0] = 0; + false_dim[0] = 0; + PADDLE_ENFORCE_EQ(true_dim, false_dim, + "Outputs not of the same shape except the first dim"); + + DDim out_dim = tensor_t_out->dims(); + out_dim[0] = tensor_t_out->dims()[0] + tensor_f_out->dims()[0]; + tensor_out->Resize(out_dim); + tensor_out->mutable_data(platform::CPUPlace()); + } + // merge output results: + // output_tensor = true_output_tensor + false_output_tensor + for (auto& output : Outputs("Outs")) { + Variable* var_parent = scope.FindVar(output); + PADDLE_ENFORCE_NOT_NULL(var_parent); + auto* tensor_parent = var_parent->GetMutable(); + + for (int i = 0; i < BRANCH_NUM; ++i) { + Variable* var_child = sub_scopes[i]->FindVar(output); + PADDLE_ENFORCE_NOT_NULL(var_child); + auto* tensor_child = &var_child->Get(); ScatterUpdate(dev_ctx.GetPlace(), tensor_child, &index_tensors[i], tensor_parent); } } } +void CondOp::Run(const Scope& scope, + const platform::DeviceContext& dev_ctx) const { + PrepareDataForSubnet(scope, dev_ctx); + std::vector& sub_scopes = GetSubScopes(scope); + for (int i = 0; i < BRANCH_NUM; ++i) { + sub_net_op_[i]->Run(*sub_scopes[i], dev_ctx); + } + MergeDataFromSubnet(scope, dev_ctx); +} + class CondOpProtoAndCheckerMaker : public framework::OpProtoAndCheckerMaker { public: CondOpProtoAndCheckerMaker(framework::OpProto* proto, diff --git a/paddle/operators/cond_op.h b/paddle/operators/cond_op.h index 9a88ee35f1..93121fb31b 100644 --- a/paddle/operators/cond_op.h +++ b/paddle/operators/cond_op.h @@ -40,8 +40,7 @@ class CondOp : public framework::OperatorBase { const framework::VariableNameMap& outputs, const framework::AttributeMap& attrs) : OperatorBase(type, inputs, outputs, attrs) { - index_.resize(2); - sub_net_op_.resize(2); + sub_net_op_.resize(BRANCH_NUM); } CondOp(const CondOp& o) @@ -51,42 +50,44 @@ class CondOp : public framework::OperatorBase { PADDLE_THROW("Not implemented"); } - void CreateScope(const framework::Scope& scope) const; + framework::Scope& AddSubScope(const framework::Scope& scope) const; + std::vector& GetSubScopes( + const framework::Scope& scope) const; - void CreateIndexTensor(const framework::Scope& scope) const; + framework::LoDTensor& AddIndexTensor(const framework::Scope& scope) const; + std::vector& GetIndexTensors( + const framework::Scope& scope) const; - /* - * InferShape must be called before Run. - * FIXME(yuyang18): Since InferShape has been removed, this implementation - * could be wrong. - */ - void InferShape(const framework::Scope& scope) const; + void PrepareDataForSubnet(const framework::Scope& scope, + const platform::DeviceContext& dev_ctx) const; + void MergeDataFromSubnet(const framework::Scope& scope, + const platform::DeviceContext& dev_ctx) const; /* * Set True Block */ void set_truenet(std::unique_ptr&& net) { - sub_net_op_[0] = std::move(net); + sub_net_op_[TRUE_BRANCH] = std::move(net); } /* * Set False Block */ void set_falsenet(std::unique_ptr&& net) { - sub_net_op_[1] = std::move(net); + sub_net_op_[FALSE_BRANCH] = std::move(net); } void Run(const framework::Scope& scope, const platform::DeviceContext& dev_ctx) const override; private: + const int TRUE_BRANCH = 0; + const int FALSE_BRANCH = 1; + const int BRANCH_NUM = 2; + // sub_net_op_[0]: subnet_t // sub_net_op_[1]: subnet_f std::vector> sub_net_op_; - - // index_[0]: True_index; - // index_[1]: False_index; - mutable std::vector> index_; }; } // namespace operators diff --git a/paddle/operators/scatter.h b/paddle/operators/scatter.h index 6b542675c2..fec1046a97 100644 --- a/paddle/operators/scatter.h +++ b/paddle/operators/scatter.h @@ -78,10 +78,6 @@ void ScatterUpdate(const platform::Place& place, for (int i = 1; i < src_dims.size(); i++) PADDLE_ENFORCE(src_dims[i] == dst_dims[i]); - // slice size - size_t slice_size = 1; - for (int i = 0; i < src_dims.size(); ++i) slice_size *= src_dims[i]; - if (platform::is_cpu_place(place)) { CPUScatterUpdate(src, index->data(), index_size, output); } else { diff --git a/python/paddle/v2/framework/tests/test_cond_op.py b/python/paddle/v2/framework/tests/test_cond_op.py index e7a506f277..3698ce9c8e 100644 --- a/python/paddle/v2/framework/tests/test_cond_op.py +++ b/python/paddle/v2/framework/tests/test_cond_op.py @@ -112,7 +112,4 @@ class TestCondOp(unittest.TestCase): if __name__ == "__main__": - exit( - 0 - ) # FIXME(yuyang18): Since infer_shape has been removed, cond op may error unittest.main() From 2d8a5b97cc9096a84483b92be458d1388533c84f Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Fri, 29 Sep 2017 20:33:38 +0800 Subject: [PATCH 097/342] fix unit test --- paddle/operators/math/CMakeLists.txt | 5 ++- .../v2/framework/tests/test_pool2d_op.py | 36 ++++++++++++------- .../v2/framework/tests/test_pool3d_op.py | 32 +++++++++++------ 3 files changed, 48 insertions(+), 25 deletions(-) diff --git a/paddle/operators/math/CMakeLists.txt b/paddle/operators/math/CMakeLists.txt index f3ed0c78be..a0ceb029e3 100644 --- a/paddle/operators/math/CMakeLists.txt +++ b/paddle/operators/math/CMakeLists.txt @@ -1,11 +1,10 @@ if(WITH_GPU) - nv_library(math_function SRCS math_function.cc math_function.cu im2col.cc im2col.cu pooling.cc pooling.cu DEPS cblas device_context) + nv_library(math_function SRCS math_function.cc math_function.cu im2col.cc im2col.cu pooling.cc pooling.cu DEPS cblas device_context operator) nv_test(math_function_test SRCS math_function_test.cc DEPS math_function tensor) nv_library(softmax SRCS softmax.cc softmax.cu DEPS operator) nv_library(cross_entropy SRCS cross_entropy.cc cross_entropy.cu DEPS operator) else() - cc_library(math_function SRCS math_function.cc im2col.cc pooling.cc - DEPS cblas device_context operator) + cc_library(math_function SRCS math_function.cc im2col.cc pooling.cc DEPS cblas device_context operator) cc_test(math_function_test SRCS math_function_test.cc DEPS math_function tensor) cc_library(softmax SRCS softmax.cc DEPS operator) cc_library(cross_entropy SRCS cross_entropy.cc DEPS operator) diff --git a/python/paddle/v2/framework/tests/test_pool2d_op.py b/python/paddle/v2/framework/tests/test_pool2d_op.py index e8e5218073..0f9a548f29 100644 --- a/python/paddle/v2/framework/tests/test_pool2d_op.py +++ b/python/paddle/v2/framework/tests/test_pool2d_op.py @@ -47,7 +47,6 @@ def avg_pool2D_forward_naive(x, ksize, strides, paddings=[0, 0], global_pool=0): class TestPool2d_Op(OpTest): def setUp(self): self.initTestCase() - self.op_type = "pool2d" input = np.random.random(self.shape).astype("float32") output = self.pool2D_forward_naive(input, self.ksize, self.strides, self.paddings, self.global_pool) @@ -71,7 +70,8 @@ class TestPool2d_Op(OpTest): self.check_grad(set(['X']), 'Out', max_relative_error=0.07) def initTestCase(self): - self.global_pool = 0 + self.global_pool = True + self.op_type = "pool2d" self.pool_type = "avg" self.pool2D_forward_naive = avg_pool2D_forward_naive self.shape = [2, 3, 5, 5] @@ -82,23 +82,23 @@ class TestPool2d_Op(OpTest): class TestCase1(TestPool2d_Op): def initTestCase(self): - self.global_pool = 0 + self.global_pool = False self.op_type = "pool2d" self.pool_type = "avg" self.pool2D_forward_naive = avg_pool2D_forward_naive - self.shape = [2, 3, 5, 5] + self.shape = [2, 3, 7, 7] self.ksize = [3, 3] self.strides = [1, 1] - self.paddings = [1, 1] + self.paddings = [0, 0] class TestCase2(TestPool2d_Op): def initTestCase(self): - self.global_pool = 1 + self.global_pool = False self.op_type = "pool2d" self.pool_type = "avg" self.pool2D_forward_naive = avg_pool2D_forward_naive - self.shape = [2, 3, 5, 5] + self.shape = [2, 3, 7, 7] self.ksize = [3, 3] self.strides = [1, 1] self.paddings = [1, 1] @@ -106,23 +106,35 @@ class TestCase2(TestPool2d_Op): class TestCase3(TestPool2d_Op): def initTestCase(self): - self.global_pool = 0 + self.global_pool = True self.op_type = "pool2d" self.pool_type = "max" self.pool2D_forward_naive = max_pool2D_forward_naive self.shape = [2, 3, 5, 5] self.ksize = [3, 3] self.strides = [1, 1] - self.paddings = [1, 1] + self.paddings = [0, 0] -class TestCase4(TestPool2d_Op): +class TestCase3(TestPool2d_Op): def initTestCase(self): - self.global_pool = 1 + self.global_pool = False self.op_type = "pool2d" self.pool_type = "max" self.pool2D_forward_naive = max_pool2D_forward_naive - self.shape = [2, 3, 5, 5] + self.shape = [2, 3, 7, 7] + self.ksize = [3, 3] + self.strides = [1, 1] + self.paddings = [0, 0] + + +class TestCase3(TestPool2d_Op): + def initTestCase(self): + self.global_pool = False + self.op_type = "pool2d" + self.pool_type = "max" + self.pool2D_forward_naive = max_pool2D_forward_naive + self.shape = [2, 3, 7, 7] self.ksize = [3, 3] self.strides = [1, 1] self.paddings = [1, 1] diff --git a/python/paddle/v2/framework/tests/test_pool3d_op.py b/python/paddle/v2/framework/tests/test_pool3d_op.py index 2a3de89a70..f041ff9178 100644 --- a/python/paddle/v2/framework/tests/test_pool3d_op.py +++ b/python/paddle/v2/framework/tests/test_pool3d_op.py @@ -55,7 +55,6 @@ def avg_pool3D_forward_naive(x, ksize, strides, paddings=[0, 0], global_pool=0): class TestPool3d_Op(OpTest): def setUp(self): self.initTestCase() - self.op_type = "pool3d" input = np.random.random(self.shape).astype("float32") output = self.pool3D_forward_naive(input, self.ksize, self.strides, self.paddings, self.global_pool) @@ -79,7 +78,8 @@ class TestPool3d_Op(OpTest): self.check_grad(set(['X']), 'Out', max_relative_error=0.07) def initTestCase(self): - self.global_pool = 0 + self.global_pool = True + self.op_type = "pool3d" self.pool_type = "avg" self.pool3D_forward_naive = avg_pool3D_forward_naive self.shape = [2, 3, 5, 5, 5] @@ -90,19 +90,19 @@ class TestPool3d_Op(OpTest): class TestCase1(TestPool3d_Op): def initTestCase(self): - self.global_pool = 0 + self.global_pool = False self.op_type = "pool3d" self.pool_type = "avg" self.pool3D_forward_naive = avg_pool3D_forward_naive self.shape = [2, 3, 7, 7, 7] self.ksize = [3, 3, 3] self.strides = [1, 1, 1] - self.paddings = [1, 1, 1] + self.paddings = [0, 0, 0] class TestCase2(TestPool3d_Op): def initTestCase(self): - self.global_pool = 1 + self.global_pool = False self.op_type = "pool3d" self.pool_type = "avg" self.pool3D_forward_naive = avg_pool3D_forward_naive @@ -114,23 +114,35 @@ class TestCase2(TestPool3d_Op): class TestCase3(TestPool3d_Op): def initTestCase(self): - self.global_pool = 0 + self.global_pool = True self.op_type = "pool3d" self.pool_type = "max" self.pool3D_forward_naive = max_pool3D_forward_naive self.shape = [2, 3, 5, 5, 5] self.ksize = [3, 3, 3] self.strides = [1, 1, 1] - self.paddings = [1, 1, 1] + self.paddings = [0, 0, 0] -class TestCase4(TestPool3d_Op): +class TestCase3(TestPool3d_Op): def initTestCase(self): - self.global_pool = 1 + self.global_pool = False self.op_type = "pool3d" self.pool_type = "max" self.pool3D_forward_naive = max_pool3D_forward_naive - self.shape = [2, 3, 5, 5, 5] + self.shape = [2, 3, 7, 7, 7] + self.ksize = [3, 3, 3] + self.strides = [1, 1, 1] + self.paddings = [0, 0, 0] + + +class TestCase3(TestPool3d_Op): + def initTestCase(self): + self.global_pool = False + self.op_type = "pool3d" + self.pool_type = "max" + self.pool3D_forward_naive = max_pool3D_forward_naive + self.shape = [2, 3, 7, 7, 7] self.ksize = [3, 3, 3] self.strides = [1, 1, 1] self.paddings = [1, 1, 1] From 6abcb74c8f8918a52c7016cb4cc924635f6f3539 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Sat, 30 Sep 2017 10:49:02 +0800 Subject: [PATCH 098/342] fix unit test class name --- python/paddle/v2/framework/tests/test_pool2d_op.py | 4 ++-- python/paddle/v2/framework/tests/test_pool3d_op.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/python/paddle/v2/framework/tests/test_pool2d_op.py b/python/paddle/v2/framework/tests/test_pool2d_op.py index 0f9a548f29..2941fda81b 100644 --- a/python/paddle/v2/framework/tests/test_pool2d_op.py +++ b/python/paddle/v2/framework/tests/test_pool2d_op.py @@ -116,7 +116,7 @@ class TestCase3(TestPool2d_Op): self.paddings = [0, 0] -class TestCase3(TestPool2d_Op): +class TestCase4(TestPool2d_Op): def initTestCase(self): self.global_pool = False self.op_type = "pool2d" @@ -128,7 +128,7 @@ class TestCase3(TestPool2d_Op): self.paddings = [0, 0] -class TestCase3(TestPool2d_Op): +class TestCase5(TestPool2d_Op): def initTestCase(self): self.global_pool = False self.op_type = "pool2d" diff --git a/python/paddle/v2/framework/tests/test_pool3d_op.py b/python/paddle/v2/framework/tests/test_pool3d_op.py index f041ff9178..8792b492e3 100644 --- a/python/paddle/v2/framework/tests/test_pool3d_op.py +++ b/python/paddle/v2/framework/tests/test_pool3d_op.py @@ -124,7 +124,7 @@ class TestCase3(TestPool3d_Op): self.paddings = [0, 0, 0] -class TestCase3(TestPool3d_Op): +class TestCase4(TestPool3d_Op): def initTestCase(self): self.global_pool = False self.op_type = "pool3d" @@ -136,7 +136,7 @@ class TestCase3(TestPool3d_Op): self.paddings = [0, 0, 0] -class TestCase3(TestPool3d_Op): +class TestCase5(TestPool3d_Op): def initTestCase(self): self.global_pool = False self.op_type = "pool3d" From e6f2da489c2d69bae2150edd7537d6d40094b8b3 Mon Sep 17 00:00:00 2001 From: Yi Wang Date: Fri, 29 Sep 2017 20:19:55 -0700 Subject: [PATCH 099/342] Update --- doc/design/python_api.md | 220 ++++++++++++++++++++++----------------- 1 file changed, 125 insertions(+), 95 deletions(-) diff --git a/doc/design/python_api.md b/doc/design/python_api.md index 3122f5bfd8..5c68354274 100644 --- a/doc/design/python_api.md +++ b/doc/design/python_api.md @@ -1,174 +1,206 @@ # Design Doc: Python API -The top level user API in Python should be as same as API in `paddle.v2` after refactoring Paddle from a layer based framework to an operator based framework. There are many new classes in C++ in [compile time] for describing neural networks, such as `Variable`, `Operator`, `Block`. The issue about current design is how to give a proper way to wrap the C++ API to `paddle.v2` API and write layers in Python. +Due to the refactorization of the PaddlePaddle core, we need Python classes to construct corresponding protobuf messages that describe a DL program. -This implementation of Python API includes two steps. - -1. Implement the Python API using current C++ runtime concepts. -2. Replace the implementation by using compile-time concepts when they are completed. - -The implementation of the first step is a temporary implementation. We should design our Python API concepts based on `compile-time` concepts. We just use `runtime` classes to implement it for now. - - -## Python Class and compile-time protobuf - -Since we design our Python API concepts based on `compile-time`, we try to map our Python classes to every compile-time result, i.e., the protobuf messages. They are: - - -| Python Class | Compile-time protobuf | +| Python classes | Protobuf messages | | --- | --- | | Program | ProgramDesc | | Block | BlockDesc | | Operator | OpDesc | | Variable | VarDesc | +Please be aware that these Python classes need to maintain some construction-time information, which are not part of the protobuf messages. + +## Core Concepts + ### Program -`Program` is the description of the whole training process and there can only be one `Program` object, which is created automatically by the system at the very beginning. `Program` is formed by a series of `Block`. +A `ProgramDesc` describes a [DL program](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/program.md), which is composed of an array of `BlockDesc`s. A `BlockDesc` refers to its parent block by its index in the array. For example, operators in the step block of an RNN operator needs to be able to access variables in its ancessor blocks. + +Whenever we create a block, we need set its parent block to the current block, so the Python class `Program` needs to maintain a data member `current_block`. ```python class Program(objects): def __init__(self): + self.proto = core.NewProgram() # a C++ ProgramDesc pointer. self.blocks = vector() - self.blocks.append(Block(None)) - self.current_block_idx = 0 + self.blocks.append(Block(self, -1)) # the global block + self.current_block = 0 # initialized to the global block - def get_block(block_idx): - return self.blocks[block_idx] + def global_block(): + return self.blocks[0] def current_block(): - return self.get_block(self.current_block_idx) - - def fallback_current_block(): - self.current_block_idx = self.current_block().parent_idx + return self.get_block(self.current_block) + def rollback(): + self.current_block = self.current_block().parent_idx def create_block(): new_block_idx = len(self.block) - self.blocks.append(Block(parent_idx=self.current_block_idx, - idx=new_block_idx)) - self.current_block_idx = new_block_idx + self.blocks.append(Block(self, self.current_block)) + self.current_block = new_block_idx + return current_block() ``` -`Program` will create the first block in its constructor. The first block is called 'global block'. It is where all parameters are stored. +`Program` is an accessor to the protobuf message `ProgramDesc`, which is created in C++ space, because the InferShape function is in C++, which manipulates `VarDesc` messages, which are in turn members of `BlockDesc`, which is a member of `ProgramDesc`. -### Block +`Program` creates the first block as the global block in its constructor. All parameters and their initializer operators are in the global block. -Block is just like programming languages `{}`, which contains many operators and variables. There are two data fields in `Block`. 1) An associate map, whose key is variable name and value is variable itself; 2) A list of operators. +### Block -The block is hierarchical because PaddlePaddle supports RNN and IfElse. For example, RNN is like `for-loop` in programming languages. There is new `block` inside a `for-loop`. To represent hierarchies, `Block` stores the index of `parent Block` inside. The 'index' means the block's position in `Program`'s `blocks`. If `parent_idx=None`, the block itself is the outermost block, i.e., the 'global block'. +A [Block](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/block.md) includes +1. a map from variable names to an instance of the Python `Variable` class, and +1. a list of `Operator` instances. ```python class Block(objects): - def __init__(self, parent_idx, idx): + def __init__(self, program, parent_idx): + self.proto = core.NewBlock(program.proto) + self.program = program self.vars = map() self.ops = vector() - self.idx = idx self.parent_idx = parent_idx - + def create_var(self, ...): - # create variable in `self.vars` - return Variable(...) - - - def create_global_var(self, ...): - if self.parent_idx is not None: - parent_block = program.get_block(parent_idx) - return parent_block.create_global_var(...) - else: - return self.create_var(...) - - def create_parameter(self, ...): - return self.create_global_var(...) - + return Variable(self, ...) + + def _create_global_var(self, ...): + program.global_block().create_var(...) + + def create_parameter(self, name, ...): + # Parameter is a subclass of variable. See Parameter section for details. + self.vars[name] = Parameter(self._create_global_var(...), ...) + return self.vars[name] + def append_operator(self, ...): - self.ops.append(...) - - def prepend_operator(self, ...): - self.ops.prepend(...) -``` + self.ops.append(Operator(self, ...)) -Users are able to create a global variable inside any block since they many create parameters inside a RNN or IfElse. All parameters should be stored in the global block, not the step block in RNN. + def prepend_operator(self, ...): # Parameter's ctor prepands initialize operators. + self.ops.prepend(Operator(self, ...)) +``` -Users can create local variables for outputs of operators. Users can also append and prepend an operator in current block. Prepending `random initialize` operator or `load` operator is very useful to initialize parameters before training. +`create_parameter` is necessary because parameters are global variables, those defined in the global block, but can be created in some sub-blocks, e.g., an FC layer in the step block of an RNN operator. +`prepand_operator` is necessary because the constructor of `Parameter` needs to create the initialize (or load) operator of the parameter, and would like to put it in the *preamble* of the global block. ### Operator -Operator class will take inputs, outputs and attributes of the operator into `protobuf` OpDesc and create a C++ `OpDesc` instance. The `infer_shape` perform on C++ objects. +The `Operator` class fills in the `OpDesc` message and calls the C++ function `InferShape` to infer output shape from input shape. ```python class Operator(object): - def __init__(self, type, inputs, outputs, attrs): - # create OpDesc in Python - op_desc = ... - self.cpp_op_desc_ptr = core.OpDesc(op_desc) - cpp.infer_shape(self.cpp_op_desc_ptr, inputs, outputs) + def __init__(self, + block, # Block + type, # string + inputs, # dict + outputs,# dict + attrs # dict + ): + self.proto = core.NewOpDesc(block.proto, type, inputs, outputs, attrs) + core.infer_shape(self.proto, inputs, outputs) def type(self): - return self.cpp_op_desc_ptr.type() + return self.proto.type() ``` -After creating a C++ `OpDesc`, `Operator` in Python can only reads the attribute from C++ side. +`Operator` creates the `OpDesc` message in C++ space, so could it call the `InferShape` function, which is in C++. ### Variable -Operators' inputs, outputs, and parameters are all variables. In our design, a variable has four key attributes: its name(`name`), the block it belongs to(`block`), a pointer pointed to its C++ Protobuf object(`cpp_var_desc_ptr`), and the operator it is created by(`op`). All of these attributes are initialized in the constructor, except the `op`. The `op` will keep being `None` till the variable is taken as an operator's output. +Operators take Variables as its inputs and outputs. ```python class Variable(object): - def __init__(self, shape, dtype="float32", name=None, block=None): + def __init__(self, + block=None, # Block + name=None, # string + shape, # tuple + dtype="float32", # string + lod_level=None # int + ): if name is None: name = unique_name_generator() self.name = name self.block = block - # build C++ Protobuf object - self.cpp_var_desc_ptr = ... - self.op = None - - def shape(self): - cpp_shape = self.cpp_var_desc_ptr.shape() - return [None if elem < 0 else elem for elem in cpp_shape] + self.proto = core.NewVarDesc(block.proto, name, shape, lod_level) + self.writer = None ``` -The Protobuf object should be created in C++ not Python because it is needed by infershape, and infershape is implemented by C++ code. The C++ Protobuf object is accessible for Python through the `cpp_var_desc_ptr`, just like how `shape()` function does. - -The user is allowed to build a variable without specifying its name. If so, it is going to be assigned with an automatically generated unique name. +Please be aware of `self.writer`, that tracks operator who creates the variable. It possible that there are more than one operators who write a variable, but in Python space, each writes to a variable is represented by a Variable class. This is guaranteed by the fact that **`core.NewVarDesc` must NOT create a new `VarDesc` message if its name already exists in the specified block**. ### Parameter -The parameter is a kind of special variable. They need to be initialized at the very beginning and updated after each batch training. So if a variable is a parameter, our compiler will add an initializer op and an optimizer op for it during the building process of computation graph. Apart from these, there is no more difference between variable and parameter. In other words, 'parameter' is only a label attached to variables, to tell the compiler these ones require additional processing. +A parameter is a global variable with an initializer (or load) operator. ```python class Parameter(Variable): - def __init__(self, trainable, initialize_attrs, optimize_attrs): - pass + def __init__(self, + block=None, # Block + name=None, # string + shape, # tuple + dtype="float32", # string + lod_level=None # int + trainable, # bool + initialize_op_attrs, + optimize_op_attrs): + super(Parameter, self).__init__(block, name, shape, dtype, lod_level) + self.trainable = trainable + self.optimize_op_attrs = optimize_op_attrs + block.prepend(Operator(block, # Block + initialize_op_attrs['type'], # string + None, # no inputs + self, # output is the parameter + initialize_op_attrs) ``` -The class `Parameter` is derived from class `Variable`. In addition to variables have, parameters are able to hold their initializing and updating information. A parameter's `self.op` will always be `None` because it can never be an operator's output. +When users create a parameter, s/he can call +```python +program.create_parameter( + ..., + init_attr={ + type: "uniform_random", + min: -1.0, + max: 1.0, + }) +) +``` -## Layer Functions +In above example, `init_attr.type` names an initialize operator. It can also name the load operator + +```python +init_attr={ + type: "load", + filename: "something.numpy", +} +``` -A layer is a Python function. When it is invoked, it creates a series of operators and variables then inserts them into the block. It is something like the macro in C++. It is called 'Layer' because the combination of added operators acts just like what a neural network layer does. +`optimize_op_attrs` is not in the `VarDesc` message, but kept in the Python instance, as it will be used in the Python space when creating the optimize operator's `OpDesc`, and will be in the `OpDesc` message. + +## Layer Functions -Here are examples of how to write a data layer and FC layer: +A layer is a Python function that creates some operators and variables. Layers simplify the work of application programmers. ### Data Layer ```python -def data_layer(name, type): - block = program.current_block() - # type = dense_vector(size=10) / integer_value(range=10) - return block.create_global_var( - name=name, - shape=[None] + type.dims(), +def data_layer(name, type, column_name): + block = the_current_program.glolal_block() + var = block.create_global_var( + name=name, + shape=[None] + type.dims(), dtype=type.dtype) + block.prepend_operator(block, + type="Feed", + inputs = None, + outputs = [var], + {column_name: column_name}) + return var +``` -``` - -All the new variables and operators will be built in the `current block`. In the above `data_layer` code, a variable is created and be inserted into the root block to make it global. This variable is going to be used as input data of the whole network. +The input to the feed operator is a special variable in the global scope, which is the output of [Python readers](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/reader/README.md). ### FC Layer @@ -178,9 +210,7 @@ def fc_layer(input, size, ...): w = block.create_parameter(...) b = block.create_parameter(...) out = block.create_var() - op = block.append_operator(Operator("FC", X=input, W=w, b=b, Out=out)) - out.op = op + op = block.append_operator("FC", X=input, W=w, b=b, out=out) + out.writer = op return out ``` - -In the `fc_layer` code, we create two parameters(`w` and `b`), one variable(`out`) and one operator(`FC operator`), then insert all of them into the `current block`. From 2d8c7e310a23f62f3a9aad7823beec8f714bd368 Mon Sep 17 00:00:00 2001 From: Yancey Date: Sat, 30 Sep 2017 12:10:21 +0800 Subject: [PATCH 100/342] Add Inputs/Outputs check interface (#4438) Add multiple Inputs/Outputs check interface --- paddle/framework/operator.h | 26 ++++++++++++++++++++++++++ paddle/framework/shape_inference.h | 4 ++++ 2 files changed, 30 insertions(+) diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h index 310d68d7c1..0af527c88c 100644 --- a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -334,6 +334,32 @@ class RuntimeInferShapeContext : public InferShapeContextBase { return var != nullptr; } + bool HasInputs(const std::string& name) const { + auto inputs = op_.Inputs(name); + if (inputs.size() == 0UL) { + return false; + } + for (auto& input : inputs) { + if (scope_.FindVar(input) == nullptr) { + return false; + } + } + return true; + } + + bool HasOutputs(const std::string& name) const { + auto outputs = op_.Outputs(name); + if (outputs.size() == 0UL) { + return false; + } + for (auto& output : outputs) { + if (scope_.FindVar(output) == nullptr) { + return false; + } + } + return true; + } + DDim GetInputDim(const std::string& name) const { return GetDim(op_.Input(name)); } diff --git a/paddle/framework/shape_inference.h b/paddle/framework/shape_inference.h index b07fc78812..bc8af0eb3e 100644 --- a/paddle/framework/shape_inference.h +++ b/paddle/framework/shape_inference.h @@ -24,6 +24,10 @@ class InferShapeContextBase { virtual ~InferShapeContextBase() {} virtual bool HasInput(const std::string &name) const = 0; virtual bool HasOutput(const std::string &name) const = 0; + + virtual bool HasInputs(const std::string &name) const = 0; + virtual bool HasOutputs(const std::string &name) const = 0; + virtual framework::DDim GetInputDim(const std::string &name) const = 0; std::vector GetInputsDim(const std::string &name) const { const std::vector &names = Inputs(name); From 540cc2c1c1a203758346cd2ce226d7564c0dad88 Mon Sep 17 00:00:00 2001 From: qijun Date: Fri, 29 Sep 2017 22:11:48 -0700 Subject: [PATCH 101/342] add executor class and interface --- paddle/framework/CMakeLists.txt | 2 + paddle/framework/executor.cc | 108 ++++++++++++++++++++++++++++++ paddle/framework/executor.h | 32 +++++++++ paddle/framework/executor_test.cc | 18 +++++ 4 files changed, 160 insertions(+) create mode 100644 paddle/framework/executor.cc create mode 100644 paddle/framework/executor.h create mode 100644 paddle/framework/executor_test.cc diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt index 8a5d8532bb..3ee721ac93 100644 --- a/paddle/framework/CMakeLists.txt +++ b/paddle/framework/CMakeLists.txt @@ -43,3 +43,5 @@ add_custom_command(TARGET framework_py_proto POST_BUILD cc_library(backward SRCS backward.cc DEPS net_op) cc_test(backward_test SRCS backward_test.cc DEPS backward recurrent_op device_context) + +cc_library(executor SRCS executor.cc DEPS device_context framework_proto) diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc new file mode 100644 index 0000000000..ccf6716949 --- /dev/null +++ b/paddle/framework/executor.cc @@ -0,0 +1,108 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/framework/executor.h" + +#include "paddle/platform/device_context.h" + +namespace paddle { +namespace framework { + +class LinearListView; +class GraphView; + +// Immutable view of a ProgramDesc organized for efficient execution. +class ProgramDescView { + public: + virtual ~ProgramDescView() {} + virtual void Initialize(const ProgramDesc*) = 0; + static ProgramDescView* Create(bool is_linear); +}; + +class LinearListView : public ProgramDescView { + public: + void Initialize(const ProgramDesc*) override; +}; + +class GraphView : public ProgramDescView { + public: + void Initialize(const ProgramDesc*) override; +}; + +static ProgramDescView* Create(bool is_linear) { + if (is_linear) { + return new LinearListView(); + } else { + return new GraphView(); + } +} + +void LinearListView::Initialize(const ProgramDesc*) { + // get a LinearView of ProgramDesc +} + +void GraphView::Initialize(const ProgramDesc*) { + // get a GraphView of ProgramDesc +} + +class ExecutorImpl : public Executor { + public: + ExecutorImpl(const platform::DeviceContext* ctx, const ProgramDesc* pdesc, + bool is_linear) + : device_context_(ctx), + program_desc_(pdesc), + view_(ProgramDescView::Create(is_linear)) {} + + virtual ~ExecutorImpl() { + if (view_) delete view_; + } + + void Run() override; + + void Initialize(); + + private: + const platform::DeviceContext* device_context_; + const ProgramDesc* program_desc_; + ProgramDescView* view_; +}; + +static Executor* NewLocalExecutor(const platform::Place& place, + const ProgramDesc& pdesc, bool is_linear) { + platform::DeviceContext* device_context = nullptr; + if (platform::is_cpu_place(place)) { + device_context = + new platform::CPUDeviceContext(boost::get(place)); + } +#ifndef PADDLE_ONLY_CPU + else if { + device_context = + new platform::CUDADeviceContext(boost::get(place)); + } +#endif + return new ExecutorImpl(device_context, &pdesc, is_linear); +} + +void ExecutorImpl::Run() { + // operators running + device_context_->Wait(); +} + +void ExecutorImpl::Initialize() { + // Initialize the ProgramDescView + view_->Initialize(program_desc_); +} + +} // namespace framework +} // namespace paddle diff --git a/paddle/framework/executor.h b/paddle/framework/executor.h new file mode 100644 index 0000000000..69f0e3f18f --- /dev/null +++ b/paddle/framework/executor.h @@ -0,0 +1,32 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include "paddle/framework/framework.pb.h" +#include "paddle/platform/place.h" + +namespace paddle { +namespace framework { + +class Executor { + public: + virtual ~Executor() {} + virtual void Run() = 0; +}; + +static Executor* NewLocalExecutor(const platform::Place&, const ProgramDesc&); + +} // namespace framework +} // namespace paddle diff --git a/paddle/framework/executor_test.cc b/paddle/framework/executor_test.cc new file mode 100644 index 0000000000..f8a41b12ad --- /dev/null +++ b/paddle/framework/executor_test.cc @@ -0,0 +1,18 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/framework/executor.h" +#include "gtest/gtest.h" + +TEST(Executor, Init) {} \ No newline at end of file From 3481bdc865571d2cfac1576d0913ab3f827b5955 Mon Sep 17 00:00:00 2001 From: qijun Date: Fri, 29 Sep 2017 22:32:41 -0700 Subject: [PATCH 102/342] add global device context --- paddle/framework/executor.cc | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc index ccf6716949..8534e70f48 100644 --- a/paddle/framework/executor.cc +++ b/paddle/framework/executor.cc @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/framework/executor.h" - +#include #include "paddle/platform/device_context.h" namespace paddle { @@ -78,17 +78,28 @@ class ExecutorImpl : public Executor { ProgramDescView* view_; }; +template +std::unique_ptr make_unique(Args&&... args) { + return std::unique_ptr(new T(std::forward(args)...)); +} + +static std::unique_ptr g_cpu_device_context = + make_unique(platform::CPUPlace()); + +#ifndef PADDLE_ONLY_CPU +static std::unique_ptr g_cuda_device_context = + make_unique(platform::GPUPlace(0)); +#endif + static Executor* NewLocalExecutor(const platform::Place& place, const ProgramDesc& pdesc, bool is_linear) { platform::DeviceContext* device_context = nullptr; if (platform::is_cpu_place(place)) { - device_context = - new platform::CPUDeviceContext(boost::get(place)); + device_context = g_cpu_device_context.get(); } #ifndef PADDLE_ONLY_CPU else if { - device_context = - new platform::CUDADeviceContext(boost::get(place)); + device_context = g_cuda_device_context.get(); } #endif return new ExecutorImpl(device_context, &pdesc, is_linear); From e42cafb24f3868713958213777d798cd54140b40 Mon Sep 17 00:00:00 2001 From: qijun Date: Fri, 29 Sep 2017 22:50:40 -0700 Subject: [PATCH 103/342] add executor unittest --- paddle/framework/CMakeLists.txt | 1 + paddle/framework/executor.cc | 6 +++--- paddle/framework/executor.h | 2 +- paddle/framework/executor_test.cc | 10 +++++++++- 4 files changed, 14 insertions(+), 5 deletions(-) diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt index 3ee721ac93..2cad2e54fa 100644 --- a/paddle/framework/CMakeLists.txt +++ b/paddle/framework/CMakeLists.txt @@ -45,3 +45,4 @@ cc_library(backward SRCS backward.cc DEPS net_op) cc_test(backward_test SRCS backward_test.cc DEPS backward recurrent_op device_context) cc_library(executor SRCS executor.cc DEPS device_context framework_proto) +cc_test(executor_test SRCS executor_test.cc DEPS executor) diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc index 8534e70f48..7fda2332b8 100644 --- a/paddle/framework/executor.cc +++ b/paddle/framework/executor.cc @@ -40,7 +40,7 @@ class GraphView : public ProgramDescView { void Initialize(const ProgramDesc*) override; }; -static ProgramDescView* Create(bool is_linear) { +ProgramDescView* ProgramDescView::Create(bool is_linear) { if (is_linear) { return new LinearListView(); } else { @@ -91,8 +91,8 @@ static std::unique_ptr g_cuda_device_context = make_unique(platform::GPUPlace(0)); #endif -static Executor* NewLocalExecutor(const platform::Place& place, - const ProgramDesc& pdesc, bool is_linear) { +Executor* NewLocalExecutor(const platform::Place& place, + const ProgramDesc& pdesc, bool is_linear) { platform::DeviceContext* device_context = nullptr; if (platform::is_cpu_place(place)) { device_context = g_cpu_device_context.get(); diff --git a/paddle/framework/executor.h b/paddle/framework/executor.h index 69f0e3f18f..25ef2d4d48 100644 --- a/paddle/framework/executor.h +++ b/paddle/framework/executor.h @@ -26,7 +26,7 @@ class Executor { virtual void Run() = 0; }; -static Executor* NewLocalExecutor(const platform::Place&, const ProgramDesc&); +Executor* NewLocalExecutor(const platform::Place&, const ProgramDesc&, bool); } // namespace framework } // namespace paddle diff --git a/paddle/framework/executor_test.cc b/paddle/framework/executor_test.cc index f8a41b12ad..c046ae3158 100644 --- a/paddle/framework/executor_test.cc +++ b/paddle/framework/executor_test.cc @@ -15,4 +15,12 @@ limitations under the License. */ #include "paddle/framework/executor.h" #include "gtest/gtest.h" -TEST(Executor, Init) {} \ No newline at end of file +using namespace paddle::platform; +using namespace paddle::framework; + +TEST(Executor, Init) { + ProgramDesc pdesc; + CPUPlace cpu_place; + Executor* executor = NewLocalExecutor(cpu_place, pdesc, true); + executor->Run(); +} \ No newline at end of file From 8676423bc9a32496f2aca57e3853f743786fce21 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Fri, 29 Sep 2017 23:07:08 -0700 Subject: [PATCH 104/342] Update --- doc/design/register_grad_op.md | 28 +++++++++++++--------------- 1 file changed, 13 insertions(+), 15 deletions(-) diff --git a/doc/design/register_grad_op.md b/doc/design/register_grad_op.md index 02c4d554d0..12b04fb271 100644 --- a/doc/design/register_grad_op.md +++ b/doc/design/register_grad_op.md @@ -1,8 +1,17 @@ -# Design Doc: Register Gradient Operator +# Design Doc: Gradient Operators Registration -## Problem -Since we separate users program in two stages, compile time and runtime, we should record and look up the mapping relationship between an operator and its gradient operators when compile. However, we register this relationship in runtime by these `OpInfo` fields. +## The Problem Posed + +In our current operator registration mechanism, for each operator, the programmer should register a *gradient operator creator* function, which takes a C++ operator instance, and returns the corresponding gradient instance. + +However, as we decided to separate the *compilation* and *execution* of DL models, we need to reshape the creator to take a protobuf `OpDesc` message, and returns a corresponding message. + +More than that, the new registration mechanism need to support the fact that an operators' gradient computation might be a composition of operators. + +## Current Implementation + +OpInfos store in a association map which key is the operator type. The `grad_op_type` indicate associated gradient operator type. Operator can create gradient operator by `OpInfo::creator_` of gradient. The pseudo code is ```cpp struct OpInfo { @@ -10,11 +19,7 @@ struct OpInfo { std::string grad_op_type_; ... }; -``` -OpInfos store in a association map which key is the operator type. The `grad_op_type` indicate associated gradient operator type. Operator can create gradient operator by `OpInfo::creator_` of gradient. The pseudo code is - -```cpp map OpInfoMap; OperatorBase* CreateGradientOperator(const OperatorBase& op) { @@ -22,14 +27,7 @@ OperatorBase* CreateGradientOperator(const OperatorBase& op) { } ``` -At the same time, an operator's gradient operator could be composed of many forward operators. For example, the gradient operator of `minus_op` could consist of an `identity` operator and a `scale` operator. To compose a gradient operator by forwarding operators could: 1) Reuse forwarding operator; 2) Calculate second derivative, third derivative, etc. - -We use `NetOp` to represent a composed operator since the `NetOp` is `vector`. However, `NetOp` is also a runtime concept. We should provide a mechanism to compose operators as a gradient operator. - -In conclusion, the problem that we want to resolve in this design doc is to register the mapping relationship between the forward operator and its gradient operators during compile time. - - -## Solution +## Proposed Solution The mapping relationship between an operator and its gradient operators is a function. The interface of that function is: From bee95fc8917e09f61ba46586a94d2b9003cddf13 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Fri, 29 Sep 2017 11:45:04 +0800 Subject: [PATCH 105/342] fix code format and some bug --- paddle/operators/math/pooling.cc | 20 +-- paddle/operators/math/pooling.cu | 147 ++++++++++-------- paddle/operators/math/pooling.h | 1 - paddle/operators/pool_with_index_op.cc | 71 +++++---- paddle/operators/pool_with_index_op.h | 10 +- .../v2/framework/tests/test_pool_max_op.py | 52 +++++-- 6 files changed, 180 insertions(+), 121 deletions(-) diff --git a/paddle/operators/math/pooling.cc b/paddle/operators/math/pooling.cc index 0e4d9007a6..da0e8ff3d2 100644 --- a/paddle/operators/math/pooling.cc +++ b/paddle/operators/math/pooling.cc @@ -26,7 +26,6 @@ class MaxPool2dWithIndexFunctor { framework::Tensor& mask, std::vector& ksize, std::vector& strides, std::vector& paddings) { const int batch_size = input.dims()[0]; - const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_channels = output.dims()[1]; @@ -112,11 +111,11 @@ class MaxPool2dWithIndexGradFunctor { input_grad_data[input_idx] += output_grad_data[output_idx]; } } + // offset + input_grad_data += input_stride; + output_grad_data += output_stride; + mask_data += output_stride; } - // offset - input_grad_data += input_stride; - output_grad_data += output_stride; - mask_data += output_stride; } } }; @@ -152,6 +151,7 @@ class MaxPool3dWithIndexFunctor { const int padding_width = paddings[2]; const int input_stride = input_depth * input_height * input_width; const int output_stride = output_depth * output_height * output_width; + const T* input_data = input.data(); T* output_data = output.mutable_data(context.GetPlace()); T* mask_data = mask.mutable_data(context.GetPlace()); @@ -170,17 +170,17 @@ class MaxPool3dWithIndexFunctor { int wstart = pw * stride_width - padding_width; int wend = std::min(wstart + ksize_width, input_width); wstart = std::max(wstart, 0); + int output_idx = (pd * output_height + ph) * output_width + pw; T ele = static_cast(-FLT_MAX); int index = -1; for (int d = dstart; d < dend; ++d) { for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { - if (ele < - input_data[(d * input_height + h) * input_width + w]) { - index = (d * input_height + h) * input_width + w; - ele = - input_data[(d * input_height + h) * input_width + w]; + int input_idx = (d * input_height + h) * input_width + w; + if (ele < input_data[input_idx]) { + index = input_idx; + ele = input_data[input_idx]; } } } diff --git a/paddle/operators/math/pooling.cu b/paddle/operators/math/pooling.cu index f32e6a26d0..5321ed2163 100644 --- a/paddle/operators/math/pooling.cu +++ b/paddle/operators/math/pooling.cu @@ -20,14 +20,14 @@ namespace operators { namespace math { template -__global__ void KernelMaxPool2dWithIdxForward( +__global__ void KernelMaxPool2dWithIdx( const int nthreads, const T* input_data, T* output_data, T* mask_data, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width) { - int index = blockIdx.x * blockDim.x + threadIdx.x; - if (index < nthreads) { + for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (nthreads); + index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % output_height; int c = (index / output_width / output_height) % channels; @@ -43,51 +43,58 @@ __global__ void KernelMaxPool2dWithIdxForward( input_data += (batch_idx * channels + c) * input_height * input_width; T ele = -FLT_MAX; - int index = -1; + int max_index = -1; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { - if (ele < input_data[h * input_width + w]) { - index = h * input_width + w; - ele = input_data[h * input_width + w]; + int input_index = h * input_width + w; + if (ele < input_data[input_index]) { + max_index = input_index; + ele = input_data[input_index]; } } } output_data[index] = ele; - mask_data[index] = index; + mask_data[index] = max_index; } } template -__global__ void KernelMaxPool2DWithIdxBackward( +__global__ void KernelMaxPool2DWithIdxGrad( const int nthreads, T* input_grad, const T* output_grad, const T* mask_data, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width) { - int index = blockIdx.x * blockDim.x + threadIdx.x; - if (index < nthreads) { - int offsetW = index % input_width + padding_width; - int offsetH = (index / input_width) % input_height + padding_height; - int offsetC = (index / input_width / input_height) % channels; + for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (nthreads); + index += blockDim.x * gridDim.x) { + int w_offset = index % input_width; + int h_offset = (index / input_width) % input_height; + int c_offset = (index / input_width / input_height) % channels; int batch_idx = index / input_width / input_height / channels; - int phstart = (offsetH < ksize_height) - ? 0 - : (offsetH - ksize_height) / stride_height + 1; - int pwstart = (offsetW < ksize_width) - ? 0 - : (offsetW - ksize_width) / stride_width + 1; - int phend = min(offsetH / stride_height + 1, output_height); - int pwend = min(offsetW / stride_width + 1, output_width); + int ph_start = + (h_offset + padding_height < ksize_height) + ? 0 + : (h_offset + padding_height - ksize_height) / stride_height + 1; + int pw_start = + (w_offset + padding_width < ksize_width) + ? 0 + : (w_offset + padding_width - ksize_width) / stride_width + 1; + int ph_end = + min((h_offset + padding_height) / stride_height + 1, output_height); + int pw_end = + min((w_offset + padding_width) / stride_width + 1, output_width); + T gradient = 0; + int input_current_featuremap_idx = h_offset * input_width + w_offset; int output_idx = - (batch_idx * channels + offsetC) * output_height * output_width; + (batch_idx * channels + c_offset) * output_height * output_width; + mask_data += output_idx; output_grad += output_idx; - for (int ph = phstart; ph < phend; ++ph) { - for (int pw = pwstart; pw < pwend; ++pw) { - if ((offsetH * input_width + offsetW) == - mask_data[ph * output_width + pw]) + for (int ph = ph_start; ph < ph_end; ++ph) { + for (int pw = pw_start; pw < pw_end; ++pw) { + if (mask_data[ph * output_width + pw] == input_current_featuremap_idx) gradient += output_grad[ph * output_width + pw]; } } @@ -125,7 +132,7 @@ class MaxPool2dWithIndexFunctor { dim3 threads(1024, 1); dim3 grid(blocks, 1); - KernelMaxPool2dWithIdxForward< + KernelMaxPool2dWithIdx< T><<(context) .stream()>>>(nthreads, input_data, output_data, mask_data, @@ -167,7 +174,7 @@ class MaxPool2dWithIndexGradFunctor { dim3 threads(1024, 1); dim3 grid(blocks, 1); - KernelMaxPool2DWithIdxBackward< + KernelMaxPool2DWithIdxGrad< T><<(context) .stream()>>>(nthreads, input_grad_data, output_grad_data, @@ -184,7 +191,7 @@ template class MaxPool2dWithIndexFunctor; template class MaxPool2dWithIndexGradFunctor; template -__global__ void KernelMaxPool3DWithIdxForward( +__global__ void KernelMaxPool3DWithIdx( const int nthreads, const T* input_data, T* output_data, T* mask_data, const int channels, const int input_depth, const int input_height, const int input_width, const int output_depth, const int output_height, @@ -200,6 +207,7 @@ __global__ void KernelMaxPool3DWithIdxForward( int c = (index / output_width / output_height / output_depth) % channels; int batch_idx = index / output_width / output_height / output_depth / channels; + int dstart = pd * stride_depth - padding_depth; int hstart = ph * stride_height - padding_height; int wstart = pw * stride_width - padding_width; @@ -209,8 +217,9 @@ __global__ void KernelMaxPool3DWithIdxForward( dstart = max(dstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); + T ele = -FLT_MAX; - int index = -1; + int max_index = -1; input_data += (batch_idx * channels + c) * input_depth * input_height * input_width; @@ -218,19 +227,19 @@ __global__ void KernelMaxPool3DWithIdxForward( for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { if (ele < input_data[(d * input_height + h) * input_width + w]) { - index = (d * input_height + h) * input_width + w; - ele = input_data[(d * input_height + h) * input_width + w]; + max_index = (d * input_height + h) * input_width + w; + ele = input_data[max_index]; } } } } output_data[index] = ele; - mask_data[index] = index; + mask_data[index] = max_index; } } template -__global__ void KernelMaxPool3DWithIdxBackward( +__global__ void KernelMaxPool3DWithIdxGrad( const int nthreads, T* input_grad, const T* output_grad, const T* mask, const int channels, const int input_depth, const int input_height, const int input_width, const int output_depth, const int output_height, @@ -240,37 +249,45 @@ __global__ void KernelMaxPool3DWithIdxBackward( const int padding_width) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (nthreads); index += blockDim.x * gridDim.x) { - int offsetW = index % input_width + padding_width; - int offsetH = (index / input_width) % input_height + padding_height; - int offsetD = - (index / input_width / input_height) % input_depth + padding_depth; - int offsetC = (index / input_width / input_height / input_depth) % channels; + int w_offset = index % input_width; + int h_offset = (index / input_width) % input_height; + int d_offset = (index / input_width / input_height) % input_depth; + int c_offset = + (index / input_width / input_height / input_depth) % channels; int batch_idx = index / input_width / input_height / input_depth / channels; - int pdstart = (offsetD < ksize_depth) - ? 0 - : (offsetD - ksize_depth) / stride_depth + 1; - int phstart = (offsetH < ksize_height) - ? 0 - : (offsetH - ksize_height) / stride_height + 1; - int pwstart = (offsetW < ksize_width) - ? 0 - : (offsetW - ksize_width) / stride_width + 1; - int pdend = min((offsetD) / stride_depth + 1, output_depth); - int phend = min((offsetH) / stride_height + 1, output_height); - int pwend = min((offsetW) / stride_width + 1, output_width); + int pd_start = + (d_offset + padding_depth < ksize_depth) + ? 0 + : (d_offset + padding_depth - ksize_depth) / stride_depth + 1; + int ph_start = + (h_offset + padding_height < ksize_height) + ? 0 + : (h_offset + padding_height - ksize_height) / stride_height + 1; + int pw_start = + (w_offset + padding_width < ksize_width) + ? 0 + : (w_offset + padding_width - ksize_width) / stride_width + 1; + int pd_end = + min((d_offset + padding_depth) / stride_depth + 1, output_depth); + int ph_end = + min((h_offset + padding_height) / stride_height + 1, output_height); + int pw_end = + min((w_offset + padding_width) / stride_width + 1, output_width); T gradient = 0; - int output_idx = (batch_idx * channels + offsetC) * output_depth * + int input_current_feature_map_idx = + (d_offset * input_height + h_offset) * input_width + w_offset; + int output_idx = (batch_idx * channels + c_offset) * output_depth * output_height * output_width; mask += output_idx; output_grad += output_idx; - for (int pd = pdstart; pd < pdend; ++pd) { - for (int ph = phstart; ph < phend; ++ph) { - for (int pw = pwstart; pw < pwend; ++pw) { - if (((offsetD * input_height + offsetH) * input_width + offsetW) == - mask[(pd * output_height + ph) * output_width + pw]) + for (int pd = pd_start; pd < pd_end; ++pd) { + for (int ph = ph_start; ph < ph_end; ++ph) { + for (int pw = pw_start; pw < pw_end; ++pw) { + if (mask[(pd * output_height + ph) * output_width + pw] == + input_current_feature_map_idx) gradient += output_grad[(pd * output_height + ph) * output_width + pw]; } @@ -308,7 +325,7 @@ class MaxPool3dWithIndexFunctor { const T* input_data = input.data(); T* output_data = output.mutable_data(context.GetPlace()); - T* mask_data = output.mutable_data(context.GetPlace()); + T* mask_data = mask.mutable_data(context.GetPlace()); int nthreads = batch_size * output_channels * output_depth * output_height * output_width; @@ -316,7 +333,7 @@ class MaxPool3dWithIndexFunctor { dim3 threads(1024, 1); dim3 grid(blocks, 1); - KernelMaxPool3DWithIdxForward< + KernelMaxPool3DWithIdx< T><<(context) .stream()>>>( @@ -341,10 +358,10 @@ class MaxPool3dWithIndexGradFunctor { const int input_depth = input_grad.dims()[2]; const int input_height = input_grad.dims()[3]; const int input_width = input_grad.dims()[4]; - const int output_channels = input_grad.dims()[1]; - const int output_depth = input_grad.dims()[2]; - const int output_height = input_grad.dims()[3]; - const int output_width = input_grad.dims()[4]; + const int output_channels = output_grad.dims()[1]; + const int output_depth = output_grad.dims()[2]; + const int output_height = output_grad.dims()[3]; + const int output_width = output_grad.dims()[4]; const int ksize_depth = ksize[0]; const int ksize_height = ksize[1]; const int ksize_width = ksize[2]; @@ -365,7 +382,7 @@ class MaxPool3dWithIndexGradFunctor { dim3 threads(1024, 1); dim3 grid(blocks, 1); - KernelMaxPool3DWithIdxBackward< + KernelMaxPool3DWithIdxGrad< T><<(context) .stream()>>>( diff --git a/paddle/operators/math/pooling.h b/paddle/operators/math/pooling.h index 3a05cd98fe..308a9341b6 100644 --- a/paddle/operators/math/pooling.h +++ b/paddle/operators/math/pooling.h @@ -23,7 +23,6 @@ namespace operators { namespace math { ////////////////////// #define FLT_MAX __FLT_MAX__ -///////////////////// template class MaxPool2dWithIndexFunctor { diff --git a/paddle/operators/pool_with_index_op.cc b/paddle/operators/pool_with_index_op.cc index d7a07a403d..c51145b923 100644 --- a/paddle/operators/pool_with_index_op.cc +++ b/paddle/operators/pool_with_index_op.cc @@ -76,8 +76,8 @@ class MaxPoolWithIndexOpGrad : public framework::OperatorWithKernel { protected: void InferShape(framework::InferShapeContextBase *ctx) const override { - PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("X")), - "X(Input) of MaxPoolWithIndexOpGrad should not be null."); + PADDLE_ENFORCE(ctx->HasInput("X"), + "X(Input) of Pooling should not be null."); PADDLE_ENFORCE( ctx->HasOutput(framework::GradVarName("X")), "X@GRAD(Input@GRAD) of MaxPoolWithIndexOpGrad should not be null."); @@ -97,28 +97,37 @@ class MaxPool2dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker { "number of channels, H and W is the height and width of image."); AddOutput("Out", "The output tensor of pooling operator." - "The format of output tensor is also NCHW."); + "The format of output tensor is also NCHW." + "Where N is batch size, C is " + "the number of channels, H and W is the height and " + "width of image."); AddOutput("Mask", "The Mask tensor of pooling operator." - "The format of output tensor is also NCHW."); + "The format of output tensor is also NCHW." + "Where N is batch size, C is the number of channels, H and W " + "is the height and width of image." + "The value in it is the index in current feature map"); AddAttr>( - "ksize", "pooling size(height, width) of pooling operator."); + "ksize", + "Pooling size(height, width) of pooling operator." + "If globalPooling = true, ksize is ignored and need not be " + "specified."); // TODO(Add checker) AddAttr( "globalPooling", - "whether to use the globalPooling." - "int constant equal to false or true" - "default false" + "Whether to use the globalPooling." + "Bool constant equal to false or true." + "Default false." "If globalPooling = true, ksize is ignored and need not be specified.") .SetDefault(false); AddAttr>("strides", - "strides(height, width) of pooling operator." - "default {1,1}") - .SetDefault({1, 1}); + "Strides(height, width) of pooling operator." + "Default {1,1}.") + .SetDefault({1, 1}); // TODO(Add checker) AddAttr>("paddings", - "paddings(height, width) of pooling operator." - "default {0,0}") - .SetDefault({0, 0}); + "Paddings(height, width) of pooling operator." + "Default {0,0}.") + .SetDefault({0, 0}); // TODO(Add checker) AddComment(R"DOC( The maxPooling2d with index operation calculates the output and the mask based on @@ -140,30 +149,40 @@ class MaxPool3dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker { "image."); AddOutput("Out", "The output tensor of pooling operator." - "The format of output tensor is also NCDHW."); + "The format of output tensor is also NCDHW." + "Where N is batch size, C is " + "the number of channels, D, H and W is the depth, height and " + "width of image."); AddOutput("Mask", "The Mask tensor of pooling operator." - "The format of output tensor is also NCDHW."); + "The format of output tensor is also NCDHW." + "Where N is batch size, C is the number of channels, D, H and W " + "is the depth, height and width of image." + "The value in it is the index in current feature map"); AddAttr>( - "ksize", "pooling size(depth, height, width) of pooling operator."); + "ksize", + "Pooling size(depth, height, width) of pooling operator." + "If globalPooling = true, ksize is ignored and need not be " + "specified."); // TODO(Add checker) AddAttr( "globalPooling", - "whether to use the globalPooling." - "int constant equal to false or true" - "default false" + "Whether to use the globalPooling." + "Bool constant equal to false or true." + "Default false." "If globalPooling = true, ksize is ignored and need not be specified.") .SetDefault(false); AddAttr>( "strides", - "strides(depth, height, width) of pooling operator." - "default {1,1,1}") - .SetDefault({1, 1, 1}); + "Strides(depth, height, width) of pooling operator." + "Default {1,1,1}.") + .SetDefault({1, 1, 1}); // TODO(Add checker) AddAttr>( "paddings", - "paddings(depth, height, width) of pooling operator." - "default {0,0,0}") - .SetDefault({0, 0, 0}); + "Paddings(depth, height, width) of pooling operator." + "Default {0,0,0}.") + .SetDefault({0, 0, 0}); // TODO(Add checker) + AddComment(R"DOC( The maxpooling3d with index operation calculates the output and the mask based on the input and ksize, strides, paddings parameters. diff --git a/paddle/operators/pool_with_index_op.h b/paddle/operators/pool_with_index_op.h index 91abeed016..5fe2f5df93 100644 --- a/paddle/operators/pool_with_index_op.h +++ b/paddle/operators/pool_with_index_op.h @@ -32,11 +32,10 @@ class MaxPoolWithIndexKernel : public framework::OpKernel { Tensor* out = context.Output("Out"); Tensor* mask = context.Output("Mask"); - bool global_pooling = context.Attr("globalPooling"); std::vector ksize = context.Attr>("ksize"); std::vector strides = context.Attr>("strides"); std::vector paddings = context.Attr>("paddings"); - if (global_pooling) { + if (context.Attr("globalPooling")) { for (size_t i = 0; i < ksize.size(); ++i) { ksize[i] = static_cast(in_x->dims()[i + 2]); } @@ -63,7 +62,7 @@ template class MaxPoolWithIndexGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { - const Tensor* mask = context.Input("Maks"); + const Tensor* mask = context.Input("Mask"); const Tensor* out_grad = context.Input(framework::GradVarName("Out")); Tensor* in_x_grad = context.Output(framework::GradVarName("X")); @@ -71,6 +70,11 @@ class MaxPoolWithIndexGradKernel : public framework::OpKernel { std::vector ksize = context.Attr>("ksize"); std::vector strides = context.Attr>("strides"); std::vector paddings = context.Attr>("paddings"); + if (context.Attr("globalPooling")) { + for (size_t i = 0; i < ksize.size(); ++i) { + ksize[i] = static_cast(in_x_grad->dims()[i + 2]); + } + } if (in_x_grad) { in_x_grad->mutable_data(context.GetPlace()); diff --git a/python/paddle/v2/framework/tests/test_pool_max_op.py b/python/paddle/v2/framework/tests/test_pool_max_op.py index 2945c8b7a4..ffc345198d 100644 --- a/python/paddle/v2/framework/tests/test_pool_max_op.py +++ b/python/paddle/v2/framework/tests/test_pool_max_op.py @@ -3,7 +3,11 @@ import numpy as np from op_test import OpTest -def max_pool3D_forward_naive(x, ksize, strides, paddings=[0, 0], global_pool=0): +def max_pool3D_forward_naive(x, + ksize, + strides, + paddings=[0, 0, 0], + global_pool=0): N, C, D, H, W = x.shape if global_pool == 1: @@ -25,8 +29,19 @@ def max_pool3D_forward_naive(x, ksize, strides, paddings=[0, 0], global_pool=0): x_masked = x[:, :, d_start:d_end, h_start:h_end, w_start:w_end] out[:, :, k, i, j] = np.max(x_masked, axis=(2, 3, 4)) - # mask[:,:, k, i, j] = np.argmax(x_masked, axis=(2, 3, 4)) - return out + + for n in xrange(N): + for c in xrange(C): + arr = x_masked[n, c, :, :, :] + index = np.where(arr == np.max(arr)) + sub_deep = index[0][0] + sub_row = index[1][0] + sub_col = index[2][0] + index = ((d_start + sub_deep) * H + + (h_start + sub_row)) * W + w_start + sub_col + mask[n, c, k, i, j] = index + + return out, mask def max_pool2D_forward_naive(x, ksize, strides, paddings=[0, 0], global_pool=0): @@ -47,19 +62,25 @@ def max_pool2D_forward_naive(x, ksize, strides, paddings=[0, 0], global_pool=0): x_masked = x[:, :, r_start:r_end, c_start:c_end] out[:, :, i, j] = np.max(x_masked, axis=(2, 3)) - # mask[:,:, i, j] = np.argmax(x_masked, axis=(2, 3)) - return out + for n in xrange(N): + for c in xrange(C): + arr = x_masked[n, c, :, :] + index = np.where(arr == np.max(arr)) + sub_row = index[0][0] + sub_col = index[1][0] + index = (r_start + sub_row) * W + c_start + sub_col + mask[n, c, i, j] = index + + return out, mask class TestMaxPoolWithIndex_Op(OpTest): def setUp(self): self.initTestCase() - self.op_type = "maxPool3dWithIndex" input = np.random.random(self.shape).astype("float32") - output = self.pool_forward_naive(input, self.ksize, self.strides, - self.paddings, self.global_pool) - # mask = np.zeros(output.shape) + output, mask = self.pool_forward_naive(input, self.ksize, self.strides, + self.paddings, self.global_pool) self.attrs = { 'strides': self.strides, @@ -69,7 +90,7 @@ class TestMaxPoolWithIndex_Op(OpTest): } self.inputs = {'X': input} - self.outputs = {'Out': output} + self.outputs = {'Out': output, "Mask": mask} def test_check_output(self): self.check_output() @@ -78,7 +99,8 @@ class TestMaxPoolWithIndex_Op(OpTest): # self.check_grad(set(['X']), ['Out'], max_relative_error=0.07) def initTestCase(self): - self.global_pool = 0 + self.global_pool = False + self.op_type = "maxPool3dWithIndex" self.pool_forward_naive = max_pool3D_forward_naive self.shape = [2, 3, 7, 7, 7] self.ksize = [3, 3, 3] @@ -86,10 +108,9 @@ class TestMaxPoolWithIndex_Op(OpTest): self.paddings = [1, 1, 1] -"""" class TestCase1(TestMaxPoolWithIndex_Op): def initTestCase(self): - self.global_pool = 1 + self.global_pool = True self.op_type = "maxPool3dWithIndex" self.pool_forward_naive = max_pool3D_forward_naive self.shape = [2, 3, 5, 5, 5] @@ -100,7 +121,7 @@ class TestCase1(TestMaxPoolWithIndex_Op): class TestCase2(TestMaxPoolWithIndex_Op): def initTestCase(self): - self.global_pool = 0 + self.global_pool = False self.op_type = "maxPool2dWithIndex" self.pool_forward_naive = max_pool2D_forward_naive self.shape = [2, 3, 7, 7] @@ -111,7 +132,7 @@ class TestCase2(TestMaxPoolWithIndex_Op): class TestCase3(TestMaxPoolWithIndex_Op): def initTestCase(self): - self.global_pool = 1 + self.global_pool = True self.op_type = "maxPool2dWithIndex" self.pool_forward_naive = max_pool2D_forward_naive self.shape = [2, 3, 5, 5] @@ -122,4 +143,3 @@ class TestCase3(TestMaxPoolWithIndex_Op): if __name__ == '__main__': unittest.main() -""" From 6fc44800ed800e78822c6af5750e202c041d9173 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Sat, 30 Sep 2017 15:20:44 +0800 Subject: [PATCH 106/342] fix unit test --- .../v2/framework/tests/test_pool_max_op.py | 72 ++++++++++++++++++- 1 file changed, 69 insertions(+), 3 deletions(-) diff --git a/python/paddle/v2/framework/tests/test_pool_max_op.py b/python/paddle/v2/framework/tests/test_pool_max_op.py index ffc345198d..17028c3bf6 100644 --- a/python/paddle/v2/framework/tests/test_pool_max_op.py +++ b/python/paddle/v2/framework/tests/test_pool_max_op.py @@ -98,6 +98,28 @@ class TestMaxPoolWithIndex_Op(OpTest): # def test_check_grad(self): # self.check_grad(set(['X']), ['Out'], max_relative_error=0.07) + def initTestCase(self): + self.global_pool = True + self.op_type = "maxPool3dWithIndex" + self.pool_forward_naive = max_pool3D_forward_naive + self.shape = [2, 3, 5, 5, 5] + self.ksize = [3, 3, 3] + self.strides = [1, 1, 1] + self.paddings = [1, 1, 1] + + +class TestCase1(TestMaxPoolWithIndex_Op): + def initTestCase(self): + self.global_pool = True + self.op_type = "maxPool3dWithIndex" + self.pool_forward_naive = max_pool3D_forward_naive + self.shape = [2, 3, 5, 5, 5] + self.ksize = [3, 3, 3] + self.strides = [1, 1, 1] + self.paddings = [1, 1, 1] + + +class TestCase2(TestMaxPoolWithIndex_Op): def initTestCase(self): self.global_pool = False self.op_type = "maxPool3dWithIndex" @@ -108,7 +130,18 @@ class TestMaxPoolWithIndex_Op(OpTest): self.paddings = [1, 1, 1] -class TestCase1(TestMaxPoolWithIndex_Op): +class TestCase3(TestMaxPoolWithIndex_Op): + def initTestCase(self): + self.global_pool = False + self.op_type = "maxPool3dWithIndex" + self.pool_forward_naive = max_pool3D_forward_naive + self.shape = [2, 3, 7, 7, 7] + self.ksize = [3, 3, 3] + self.strides = [2, 2, 2] + self.paddings = [0, 0, 0] + + +class TestCase4(TestMaxPoolWithIndex_Op): def initTestCase(self): self.global_pool = True self.op_type = "maxPool3dWithIndex" @@ -116,10 +149,21 @@ class TestCase1(TestMaxPoolWithIndex_Op): self.shape = [2, 3, 5, 5, 5] self.ksize = [3, 3, 3] self.strides = [1, 1, 1] + self.paddings = [1, 1, 1] + + +class TestCase5(TestMaxPoolWithIndex_Op): + def initTestCase(self): + self.global_pool = True + self.op_type = "maxPool3dWithIndex" + self.pool_forward_naive = max_pool3D_forward_naive + self.shape = [2, 3, 5, 5, 5] + self.ksize = [3, 3, 3] + self.strides = [2, 2, 2] self.paddings = [0, 0, 0] -class TestCase2(TestMaxPoolWithIndex_Op): +class TestCase6(TestMaxPoolWithIndex_Op): def initTestCase(self): self.global_pool = False self.op_type = "maxPool2dWithIndex" @@ -130,7 +174,18 @@ class TestCase2(TestMaxPoolWithIndex_Op): self.paddings = [1, 1] -class TestCase3(TestMaxPoolWithIndex_Op): +class TestCase7(TestMaxPoolWithIndex_Op): + def initTestCase(self): + self.global_pool = False + self.op_type = "maxPool2dWithIndex" + self.pool_forward_naive = max_pool2D_forward_naive + self.shape = [2, 3, 7, 7] + self.ksize = [3, 3] + self.strides = [2, 2] + self.paddings = [0, 0] + + +class TestCase8(TestMaxPoolWithIndex_Op): def initTestCase(self): self.global_pool = True self.op_type = "maxPool2dWithIndex" @@ -138,6 +193,17 @@ class TestCase3(TestMaxPoolWithIndex_Op): self.shape = [2, 3, 5, 5] self.ksize = [3, 3] self.strides = [1, 1] + self.paddings = [1, 1] + + +class TestCase9(TestMaxPoolWithIndex_Op): + def initTestCase(self): + self.global_pool = True + self.op_type = "maxPool2dWithIndex" + self.pool_forward_naive = max_pool2D_forward_naive + self.shape = [2, 3, 5, 5] + self.ksize = [3, 3] + self.strides = [2, 2] self.paddings = [0, 0] From 6627801a586bf93f1d872c643c121e19d5c2f1bf Mon Sep 17 00:00:00 2001 From: hedaoyuan Date: Sat, 30 Sep 2017 15:32:29 +0800 Subject: [PATCH 107/342] Follow comments. --- CMakeLists.txt | 12 ++++++++---- paddle/capi/CMakeLists.txt | 4 +--- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 70b35154aa..7d549b864b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -87,11 +87,15 @@ if(ANDROID OR IOS) set(WITH_MKLML OFF CACHE STRING "Disable MKLML package when cross-compiling for Android and iOS" FORCE) - if(WITH_C_API) - # Compile PaddlePaddle mobile inference library - set(MOBILE_INFERENCE ON) - add_definitions(-DPADDLE_MOBILE_INFERENCE) + # Compile PaddlePaddle mobile inference library + if (NOT WITH_C_API) + set(WITH_C_API ON CACHE STRING + "Always compile the C_API when cross-compiling for Android and iOS" FORCE) endif() + set(MOBILE_INFERENCE ON) + add_definitions(-DPADDLE_MOBILE_INFERENCE) + + # TODO: Need Open the WITH_TESTING set(WITH_TESTING OFF CACHE STRING "Disable TESTING when cross-compiling for Android and iOS" FORCE) endif() diff --git a/paddle/capi/CMakeLists.txt b/paddle/capi/CMakeLists.txt index a19a19d719..2c458a78c5 100644 --- a/paddle/capi/CMakeLists.txt +++ b/paddle/capi/CMakeLists.txt @@ -48,9 +48,7 @@ if(NOT IOS) add_library(paddle_capi_shared SHARED ${CAPI_SOURCES}) set_target_properties(paddle_capi_shared PROPERTIES LINK_FLAGS "${LINK_FLAGS}") target_include_directories(paddle_capi_shared PUBLIC ${CMAKE_CURRENT_BINARY_DIR}) - -link_paddle_exe(paddle_capi_shared) - + link_paddle_exe(paddle_capi_shared) endif() # install library & headers. From 480154896cf3d32b646f3cab5b546c37fb2866aa Mon Sep 17 00:00:00 2001 From: caoying03 Date: Sat, 30 Sep 2017 16:06:52 +0800 Subject: [PATCH 108/342] add configuration helper for resize layer. --- doc/api/v1/index_cn.rst | 2 +- doc/api/v2/config/layer.rst | 5 ++++ .../paddle/trainer_config_helpers/layers.py | 25 ++++++++++++++++- .../tests/configs/file_list.sh | 2 +- .../protostr/test_resize_layer.protostr | 27 +++++++++++++++++++ .../tests/configs/test_resize_layer.py | 6 +++++ 6 files changed, 64 insertions(+), 3 deletions(-) create mode 100644 python/paddle/trainer_config_helpers/tests/configs/protostr/test_resize_layer.protostr create mode 100644 python/paddle/trainer_config_helpers/tests/configs/test_resize_layer.py diff --git a/doc/api/v1/index_cn.rst b/doc/api/v1/index_cn.rst index 3718cd73a2..cf146dc088 100644 --- a/doc/api/v1/index_cn.rst +++ b/doc/api/v1/index_cn.rst @@ -21,7 +21,7 @@ Model Config API trainer_config_helpers/optimizers.rst trainer_config_helpers/data_sources.rst trainer_config_helpers/layers.rst - trainer_config_helpers/activations.rst + trainer_config_helpers/activations.rst trainer_config_helpers/poolings.rst trainer_config_helpers/networks.rst trainer_config_helpers/evaluators.rst diff --git a/doc/api/v2/config/layer.rst b/doc/api/v2/config/layer.rst index c94627a728..d4e9d53e5c 100644 --- a/doc/api/v2/config/layer.rst +++ b/doc/api/v2/config/layer.rst @@ -345,6 +345,11 @@ clip .. autoclass:: paddle.v2.layer.clip :noindex: +resize +------ +.. autoclass:: paddle.v2.layer.resize + :noindex: + slope_intercept --------------- .. autoclass:: paddle.v2.layer.slope_intercept diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py index 74025d2a7b..d37f29d2c4 100644 --- a/python/paddle/trainer_config_helpers/layers.py +++ b/python/paddle/trainer_config_helpers/layers.py @@ -142,6 +142,7 @@ __all__ = [ 'img_pool3d_layer', 'scale_shift_layer', 'img_conv3d_layer', + 'resize_layer', ] @@ -250,6 +251,8 @@ class LayerType(object): KMAX_SEQ_SCORE = 'kmax_seq_score' SCALE_SHIFT_LAYER = 'scale_shift' + RESIZE = 'resize' + @staticmethod def is_layer_type(type_name): """ @@ -6473,7 +6476,7 @@ def switch_order_layer(input, act=None, layer_attr=None): """ - This layer switch dimension order of image input. + This layer switch dimension order of image input. From order "batchSize, channels, height, width" to order "batchSize, height, width, channels". @@ -6932,3 +6935,23 @@ def scale_shift_layer(input, name=None, param_attr=None, bias_attr=None): bias=ParamAttr.to_bias(bias_attr)) return LayerOutput( name, LayerType.SCALE_SHIFT_LAYER, parents=[input], size=input.size) + + +@wrap_name_default("resize") +def resize_layer(input, size, name=None): + """ + The resize layer resizes the input matrix with a shape of [Height, Width] + into the output matrix with a shape of [Height x Width / size, size], + where size is the parameter of this layer indicating the output dimension. + + :param input: The input to this layer. + :type input: LayerOutput. + :param name: The name of this layer. It is optional. + :type name: basestring + :param size: The resized output dimesion of this layer. + :type size: int + :return: A LayerOutput object. + :rtype: LayerOutput + """ + Layer(name=name, type=LayerType.RESIZE, inputs=Input(input.name), size=size) + return LayerOutput(name, LayerType.RESIZE, parents=[input], size=input.size) diff --git a/python/paddle/trainer_config_helpers/tests/configs/file_list.sh b/python/paddle/trainer_config_helpers/tests/configs/file_list.sh index 8a204a96f3..6a4550c209 100755 --- a/python/paddle/trainer_config_helpers/tests/configs/file_list.sh +++ b/python/paddle/trainer_config_helpers/tests/configs/file_list.sh @@ -10,6 +10,6 @@ test_prelu_layer test_row_conv test_detection_output_layer test_multibox_loss_la test_recursive_topology test_gated_unit_layer test_clip_layer test_row_l2_norm_layer test_kmax_seq_socre_layer test_sub_nested_seq_select_layer test_scale_shift_layer test_seq_slice_layer test_cross_entropy_over_beam test_pooling3D_layer -test_conv3d_layer test_deconv3d_layer test_BatchNorm3D) +test_conv3d_layer test_deconv3d_layer test_BatchNorm3D test_resize_layer) export whole_configs=(test_split_datasource) diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/test_resize_layer.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_resize_layer.protostr new file mode 100644 index 0000000000..9399252b23 --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_resize_layer.protostr @@ -0,0 +1,27 @@ +type: "nn" +layers { + name: "input" + type: "data" + size: 300 + active_type: "" +} +layers { + name: "__resize_0__" + type: "resize" + size: 150 + active_type: "" + inputs { + input_layer_name: "input" + } +} +input_layer_names: "input" +output_layer_names: "__resize_0__" +sub_models { + name: "root" + layer_names: "input" + layer_names: "__resize_0__" + input_layer_names: "input" + output_layer_names: "__resize_0__" + is_recurrent_layer_group: false +} + diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_resize_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_resize_layer.py new file mode 100644 index 0000000000..09a6f50733 --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/test_resize_layer.py @@ -0,0 +1,6 @@ +from paddle.trainer_config_helpers import * + +data = data_layer(name='input', size=300) +resized = resize_layer(input=data, size=150) + +outputs(resized) From bb33c2b3a5f2f2aa50e9ceaf1f2120edfe76455b Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Sat, 30 Sep 2017 15:44:02 +0800 Subject: [PATCH 109/342] fix kernel func --- paddle/operators/math/pooling.cc | 227 ++++++++++++++++++++++++++ paddle/operators/math/pooling.cu | 10 +- paddle/operators/math/pooling.h | 37 +++++ paddle/operators/pool_with_index_op.h | 4 +- 4 files changed, 270 insertions(+), 8 deletions(-) diff --git a/paddle/operators/math/pooling.cc b/paddle/operators/math/pooling.cc index 3b706529d8..5accde8b07 100644 --- a/paddle/operators/math/pooling.cc +++ b/paddle/operators/math/pooling.cc @@ -458,6 +458,233 @@ template class Pool3dGradFunctor< platform::CPUPlace, paddle::operators::math::MaxPoolGrad, double>; template class Pool3dGradFunctor< platform::CPUPlace, paddle::operators::math::AvgPoolGrad, double>; + +template +class MaxPool2dWithIndexFunctor { + public: + void operator()(const platform::DeviceContext& context, + const framework::Tensor& input, framework::Tensor& output, + framework::Tensor& mask, std::vector& ksize, + std::vector& strides, std::vector& paddings) { + const int batch_size = input.dims()[0]; + const int input_height = input.dims()[2]; + const int input_width = input.dims()[3]; + const int output_channels = output.dims()[1]; + const int output_height = output.dims()[2]; + const int output_width = output.dims()[3]; + const int ksize_height = ksize[0]; + const int ksize_width = ksize[1]; + const int stride_height = strides[0]; + const int stride_width = strides[1]; + const int padding_height = paddings[0]; + const int padding_width = paddings[1]; + const int input_stride = input_height * input_width; + const int output_stride = output_height * output_width; + + const T* input_data = input.data(); + T* output_data = output.mutable_data(context.GetPlace()); + T* mask_data = mask.mutable_data(context.GetPlace()); + + for (int i = 0; i < batch_size; i++) { + for (int c = 0; c < output_channels; ++c) { + for (int ph = 0; ph < output_height; ++ph) { + int hstart = ph * stride_height - padding_height; + int hend = std::min(hstart + ksize_height, input_height); + hstart = std::max(hstart, 0); + for (int pw = 0; pw < output_width; ++pw) { + int wstart = pw * stride_width - padding_width; + int wend = std::min(wstart + ksize_width, input_width); + wstart = std::max(wstart, 0); + + T ele = static_cast(-FLT_MAX); + int index = -1; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + if (ele < input_data[h * input_width + w]) { + ele = input_data[h * input_width + w]; + index = h * input_width + w; + } + } + } + output_data[ph * output_width + pw] = ele; + mask_data[ph * output_width + pw] = index; + } + } + // offset + input_data += input_stride; + output_data += output_stride; + mask_data += output_stride; + } + } + } +}; + +template +class MaxPool2dWithIndexGradFunctor { + public: + void operator()(const platform::DeviceContext& context, + framework::Tensor& input_grad, + const framework::Tensor& output_grad, + const framework::Tensor& mask, std::vector& ksize, + std::vector& strides, std::vector& paddings) { + const int batch_size = input_grad.dims()[0]; + const int input_height = input_grad.dims()[2]; + const int input_width = input_grad.dims()[3]; + const int output_channels = output_grad.dims()[1]; + const int output_height = output_grad.dims()[2]; + const int output_width = output_grad.dims()[3]; + const int input_stride = input_height * input_width; + const int output_stride = output_height * output_width; + + const T* mask_data = mask.data(); + const T* output_grad_data = output_grad.data(); + T* input_grad_data = input_grad.mutable_data(context.GetPlace()); + + for (int n = 0; n < batch_size; ++n) { + for (int c = 0; c < output_channels; ++c) { + for (int ph = 0; ph < output_height; ++ph) { + for (int pw = 0; pw < output_width; ++pw) { + const int output_idx = ph * output_width + pw; + const int input_idx = static_cast(mask_data[output_idx]); + input_grad_data[input_idx] += output_grad_data[output_idx]; + } + } + // offset + input_grad_data += input_stride; + output_grad_data += output_stride; + mask_data += output_stride; + } + } + } +}; + +template class MaxPool2dWithIndexFunctor; +template class MaxPool2dWithIndexGradFunctor; +template class MaxPool2dWithIndexFunctor; +template class MaxPool2dWithIndexGradFunctor; + +template +class MaxPool3dWithIndexFunctor { + public: + void operator()(const platform::DeviceContext& context, + const framework::Tensor& input, framework::Tensor& output, + framework::Tensor& mask, std::vector& ksize, + std::vector& strides, std::vector& paddings) { + const int batch_size = input.dims()[0]; + const int input_depth = input.dims()[2]; + const int input_height = input.dims()[3]; + const int input_width = input.dims()[4]; + const int output_channels = output.dims()[1]; + const int output_depth = output.dims()[2]; + const int output_height = output.dims()[3]; + const int output_width = output.dims()[4]; + const int ksize_depth = ksize[0]; + const int ksize_height = ksize[1]; + const int ksize_width = ksize[2]; + const int stride_depth = strides[0]; + const int stride_height = strides[1]; + const int stride_width = strides[2]; + const int padding_depth = paddings[0]; + const int padding_height = paddings[1]; + const int padding_width = paddings[2]; + const int input_stride = input_depth * input_height * input_width; + const int output_stride = output_depth * output_height * output_width; + + const T* input_data = input.data(); + T* output_data = output.mutable_data(context.GetPlace()); + T* mask_data = mask.mutable_data(context.GetPlace()); + + for (int i = 0; i < batch_size; i++) { + for (int c = 0; c < output_channels; ++c) { + for (int pd = 0; pd < output_depth; ++pd) { + int dstart = pd * stride_depth - padding_depth; + int dend = std::min(dstart + ksize_depth, input_depth); + dstart = std::max(dstart, 0); + for (int ph = 0; ph < output_height; ++ph) { + int hstart = ph * stride_height - padding_height; + int hend = std::min(hstart + ksize_height, input_height); + hstart = std::max(hstart, 0); + for (int pw = 0; pw < output_width; ++pw) { + int wstart = pw * stride_width - padding_width; + int wend = std::min(wstart + ksize_width, input_width); + wstart = std::max(wstart, 0); + + int output_idx = (pd * output_height + ph) * output_width + pw; + T ele = static_cast(-FLT_MAX); + int index = -1; + for (int d = dstart; d < dend; ++d) { + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + int input_idx = (d * input_height + h) * input_width + w; + if (ele < input_data[input_idx]) { + index = input_idx; + ele = input_data[input_idx]; + } + } + } + } + output_data[output_idx] = ele; + mask_data[output_idx] = index; + } + } + } + // offset + input_data += input_stride; + output_data += output_stride; + mask_data += output_stride; + } + } + } +}; + +template +class MaxPool3dWithIndexGradFunctor { + public: + void operator()(const platform::DeviceContext& context, + framework::Tensor& input_grad, + const framework::Tensor& output_grad, + const framework::Tensor& mask, std::vector& ksize, + std::vector& strides, std::vector& paddings) { + const int batch_size = input_grad.dims()[0]; + const int input_depth = input_grad.dims()[2]; + const int input_height = input_grad.dims()[3]; + const int input_width = input_grad.dims()[4]; + const int output_channels = output_grad.dims()[1]; + const int output_depth = output_grad.dims()[2]; + const int output_height = output_grad.dims()[3]; + const int output_width = output_grad.dims()[4]; + const int input_stride = input_depth * input_height * input_width; + const int output_stride = output_depth * output_height * output_width; + + const T* mask_data = mask.data(); + const T* output_grad_data = output_grad.data(); + T* input_grad_data = input_grad.mutable_data(context.GetPlace()); + + for (int n = 0; n < batch_size; ++n) { + for (int c = 0; c < output_channels; ++c) { + for (int pd = 0; pd < output_depth; ++pd) { + for (int ph = 0; ph < output_height; ++ph) { + for (int pw = 0; pw < output_width; ++pw) { + const int output_idx = + (pd * output_height + ph) * output_width + pw; + const int input_idx = static_cast(mask_data[output_idx]); + input_grad_data[input_idx] += output_grad_data[output_idx]; + } + } + } + // offset + input_grad_data += input_stride; + output_grad_data += output_stride; + mask_data += output_stride; + } + } + } +}; + +template class MaxPool3dWithIndexFunctor; +template class MaxPool3dWithIndexGradFunctor; +template class MaxPool3dWithIndexFunctor; +template class MaxPool3dWithIndexGradFunctor; } // namespace math } // namespace operators } // namespace paddle diff --git a/paddle/operators/math/pooling.cu b/paddle/operators/math/pooling.cu index 6aafedf912..06263737a9 100644 --- a/paddle/operators/math/pooling.cu +++ b/paddle/operators/math/pooling.cu @@ -637,7 +637,7 @@ __global__ void KernelMaxPool2dWithIdx( const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width) { - for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (nthreads); + for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % output_height; @@ -676,7 +676,7 @@ __global__ void KernelMaxPool2DWithIdxGrad( const int output_height, const int output_width, const int ksize_height, const int ksize_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width) { - for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (nthreads); + for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int w_offset = index % input_width; int h_offset = (index / input_width) % input_height; @@ -766,7 +766,6 @@ class MaxPool2dWithIndexGradFunctor { const int input_channels = input_grad.dims()[1]; const int input_height = input_grad.dims()[2]; const int input_width = input_grad.dims()[3]; - const int output_channels = output_grad.dims()[1]; const int output_height = output_grad.dims()[2]; const int output_width = output_grad.dims()[3]; const int ksize_height = ksize[0]; @@ -810,7 +809,7 @@ __global__ void KernelMaxPool3DWithIdx( const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width) { - for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (nthreads); + for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int pw = index % output_width; int ph = (index / output_width) % output_height; @@ -858,7 +857,7 @@ __global__ void KernelMaxPool3DWithIdxGrad( const int ksize_width, const int stride_depth, const int stride_height, const int stride_width, const int padding_depth, const int padding_height, const int padding_width) { - for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (nthreads); + for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { int w_offset = index % input_width; int h_offset = (index / input_width) % input_height; @@ -969,7 +968,6 @@ class MaxPool3dWithIndexGradFunctor { const int input_depth = input_grad.dims()[2]; const int input_height = input_grad.dims()[3]; const int input_width = input_grad.dims()[4]; - const int output_channels = output_grad.dims()[1]; const int output_depth = output_grad.dims()[2]; const int output_height = output_grad.dims()[3]; const int output_width = output_grad.dims()[4]; diff --git a/paddle/operators/math/pooling.h b/paddle/operators/math/pooling.h index d214c68923..d819e5986e 100644 --- a/paddle/operators/math/pooling.h +++ b/paddle/operators/math/pooling.h @@ -117,6 +117,43 @@ class MaxPool3dGradFunctor { std::vector& strides, std::vector& paddings); }; +template +class MaxPool2dWithIndexFunctor { + public: + void operator()(const platform::DeviceContext& context, + const framework::Tensor& input, framework::Tensor& output, + framework::Tensor& mask, std::vector& ksize, + std::vector& strides, std::vector& paddings); +}; + +template +class MaxPool2dWithIndexGradFunctor { + public: + void operator()(const platform::DeviceContext& context, + framework::Tensor& input_grad, + const framework::Tensor& output_grad, + const framework::Tensor& mask, std::vector& ksize, + std::vector& strides, std::vector& paddings); +}; + +template +class MaxPool3dWithIndexFunctor { + public: + void operator()(const platform::DeviceContext& context, + const framework::Tensor& input, framework::Tensor& output, + framework::Tensor& mask, std::vector& ksize, + std::vector& strides, std::vector& paddings); +}; + +template +class MaxPool3dWithIndexGradFunctor { + public: + void operator()(const platform::DeviceContext& context, + framework::Tensor& input_grad, + const framework::Tensor& output_grad, + const framework::Tensor& mask, std::vector& ksize, + std::vector& strides, std::vector& paddings); +}; } // namespace math } // namespace operators } // namespace paddle diff --git a/paddle/operators/pool_with_index_op.h b/paddle/operators/pool_with_index_op.h index 5fe2f5df93..01b961ca82 100644 --- a/paddle/operators/pool_with_index_op.h +++ b/paddle/operators/pool_with_index_op.h @@ -25,7 +25,7 @@ namespace operators { using Tensor = framework::Tensor; template -class MaxPoolWithIndexKernel : public framework::OpKernel { +class MaxPoolWithIndexKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { const Tensor* in_x = context.Input("X"); @@ -59,7 +59,7 @@ class MaxPoolWithIndexKernel : public framework::OpKernel { }; template -class MaxPoolWithIndexGradKernel : public framework::OpKernel { +class MaxPoolWithIndexGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { const Tensor* mask = context.Input("Mask"); From d9e3c4ff358cb6d9262375e37f3cc7fdd0dc7465 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Sat, 30 Sep 2017 12:51:05 -0700 Subject: [PATCH 110/342] Add GradOpDescMaker to OpInfo and complete OperatorRegistrar method --- paddle/framework/CMakeLists.txt | 2 +- paddle/framework/details/op_registry.h | 105 +++++++++++++++++++++++++ paddle/framework/op_info.h | 13 ++- paddle/framework/op_registry.h | 49 +++++------- paddle/framework/op_registry_test.cc | 11 +++ 5 files changed, 148 insertions(+), 32 deletions(-) create mode 100644 paddle/framework/details/op_registry.h diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt index 8a5d8532bb..feadc9be4b 100644 --- a/paddle/framework/CMakeLists.txt +++ b/paddle/framework/CMakeLists.txt @@ -22,7 +22,7 @@ cc_library(attribute SRCS attribute.cc DEPS framework_proto) cc_library(proto_desc SRCS var_desc.cc op_desc.cc block_desc.cc program_desc.cc DEPS attribute) cc_library(op_proto_maker SRCS op_proto_maker.cc DEPS framework_proto attribute) cc_test(op_proto_maker_test SRCS op_proto_maker_test.cc DEPS op_proto_maker) -cc_library(op_info SRCS op_info.cc DEPS attribute framework_proto) +cc_library(op_info SRCS op_info.cc DEPS attribute framework_proto proto_desc) cc_library(operator SRCS operator.cc DEPS op_info device_context tensor scope) cc_test(operator_test SRCS operator_test.cc DEPS operator op_registry) diff --git a/paddle/framework/details/op_registry.h b/paddle/framework/details/op_registry.h new file mode 100644 index 0000000000..d2516ccc1e --- /dev/null +++ b/paddle/framework/details/op_registry.h @@ -0,0 +1,105 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#pragma once + +#include "paddle/framework/op_info.h" +#include "paddle/framework/op_proto_maker.h" +#include "paddle/framework/operator.h" + +namespace paddle { +namespace framework { +namespace details { + +enum OpInfoFillType { + kOperator = 0, + kOpProtoAndCheckerMaker = 1, + kGradOpDescMaker = 2 +}; + +template +struct OpInfoFillTypeID { + static constexpr OpInfoFillType ID() { + return std::is_base_of::value + ? kOperator + : (std::is_base_of::value + ? kOpProtoAndCheckerMaker + : (std::is_base_of::value + ? kGradOpDescMaker + : static_cast(-1))); + } +}; + +template ::ID()> +struct OpInfoFiller; + +template +class OperatorRegistrarRecursive; + +template +class OperatorRegistrarRecursive { + public: + using T = typename std::tuple_element>::type; + OperatorRegistrarRecursive(const char* op_type, OpInfo* info) { + OpInfoFiller fill; + fill(op_type, info); + constexpr auto size = sizeof...(ARGS); + OperatorRegistrarRecursive reg(op_type, + info); + (void)(reg); + } +}; + +template +class OperatorRegistrarRecursive { + public: + OperatorRegistrarRecursive(const char* op_type, OpInfo* info) {} +}; + +template +struct OpInfoFiller { + void operator()(const char* op_type, OpInfo* info) const { + info->creator_ = [](const std::string& type, const VariableNameMap& inputs, + const VariableNameMap& outputs, + const AttributeMap& attrs) { + return new T(type, inputs, outputs, attrs); + }; + } +}; + +template +struct OpInfoFiller { + void operator()(const char* op_type, OpInfo* info) const { + info->proto_ = new OpProto; + info->checker_ = new OpAttrChecker(); + auto maker = T(info->proto_, info->checker_); + maker.Validate(); + info->proto_->set_type(op_type); + PADDLE_ENFORCE( + info->proto_->IsInitialized(), + "Fail to initialize %s's OpProto, because %s is not initialized", + op_type, info->proto_->InitializationErrorString()); + } +}; + +template +struct OpInfoFiller { + void operator()(const char* op_type, OpInfo* info) const { + info->grad_op_maker_ = new T(); + } +}; +} // namespace details + +} // namespace framework +} // namespace paddle diff --git a/paddle/framework/op_info.h b/paddle/framework/op_info.h index b98d8f23a1..6d1ee4dece 100644 --- a/paddle/framework/op_info.h +++ b/paddle/framework/op_info.h @@ -17,8 +17,8 @@ #include #include #include - #include "paddle/framework/attribute.h" +#include "paddle/framework/op_desc.h" namespace paddle { namespace framework { @@ -29,11 +29,18 @@ using OpCreator = std::function; +class GradOpDescMakerBase { + public: + virtual ~GradOpDescMakerBase() = default; + virtual std::vector operator()(const OpDescBind&) const = 0; +}; + struct OpInfo { OpCreator creator_; std::string grad_op_type_; - OpProto* proto_; - OpAttrChecker* checker_; + GradOpDescMakerBase* grad_op_maker_{nullptr}; + OpProto* proto_{nullptr}; + OpAttrChecker* checker_{nullptr}; bool HasOpProtoAndChecker() const { return proto_ != nullptr && checker_ != nullptr; diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h index 4db38badae..4ee2c7d275 100644 --- a/paddle/framework/op_registry.h +++ b/paddle/framework/op_registry.h @@ -21,49 +21,42 @@ limitations under the License. */ #include #include #include "paddle/framework/attribute.h" +#include "paddle/framework/details/op_registry.h" #include "paddle/framework/framework.pb.h" #include "paddle/framework/grad_op_builder.h" -#include "paddle/framework/op_info.h" -#include "paddle/framework/op_proto_maker.h" #include "paddle/framework/operator.h" #include "paddle/framework/scope.h" namespace paddle { namespace framework { +template +struct OperatorRegistrar { + explicit OperatorRegistrar(const char* op_type) : op_type(op_type) { + PADDLE_ENFORCE(!OpInfoMap::Instance().Has(op_type), + "'%s' is registered more than once.", op_type); + static_assert(sizeof...(ARGS) != 0, + "OperatorRegistrar should be invoked at least by OpClass"); + details::OperatorRegistrarRecursive<0, false, ARGS...>(op_type, &info); + } + + ~OperatorRegistrar() { OpInfoMap::Instance().Insert(op_type, info); } + + const char* op_type; + + OpInfo info; +}; + class OpRegistry { public: template static void RegisterOp(const std::string& op_type, const std::string& grad_op_type) { - PADDLE_ENFORCE(!OpInfoMap::Instance().Has(op_type), - "'%s' is registered more than once.", op_type); - OpInfo op_info; - op_info.creator_ = []( - const std::string& type, const VariableNameMap& inputs, - const VariableNameMap& outputs, const AttributeMap& attrs) { - return new OpType(type, inputs, outputs, attrs); - }; - op_info.grad_op_type_ = grad_op_type; - if (std::type_index(typeid(ProtoMakerType)) != - std::type_index(typeid(NOPMaker))) { - op_info.proto_ = new OpProto; - op_info.checker_ = new OpAttrChecker; - auto maker = ProtoMakerType(op_info.proto_, op_info.checker_); - maker.Validate(); - op_info.proto_->set_type(op_type); - PADDLE_ENFORCE( - op_info.proto_->IsInitialized(), - "Fail to initialize %s's OpProto, because %s is not initialized", - op_type, op_info.proto_->InitializationErrorString()); - } else { - op_info.proto_ = nullptr; - op_info.checker_ = nullptr; - } - OpInfoMap::Instance().Insert(op_type, op_info); + OperatorRegistrar reg(op_type.c_str()); + reg.info.grad_op_type_ = grad_op_type; // register gradient op if (!grad_op_type.empty()) { - RegisterOp(grad_op_type, ""); + OperatorRegistrar grad_reg(grad_op_type.c_str()); } } diff --git a/paddle/framework/op_registry_test.cc b/paddle/framework/op_registry_test.cc index b6fc0409d5..f89f40b444 100644 --- a/paddle/framework/op_registry_test.cc +++ b/paddle/framework/op_registry_test.cc @@ -173,3 +173,14 @@ TEST(OpRegistry, CustomChecker) { int test_attr = op->Attr("test_attr"); ASSERT_EQ(test_attr, 4); } + +class CosineOpComplete : public paddle::framework::CosineOp { + public: + DEFINE_OP_CONSTRUCTOR(CosineOpComplete, paddle::framework::CosineOp); + DEFINE_OP_CLONE_METHOD(CosineOpComplete); +}; + +TEST(OperatorRegistrar, Test) { + using namespace paddle::framework; + OperatorRegistrar reg("cos"); +} \ No newline at end of file From d4be9730fced2a8effaf06412fa48e2aa0a8c325 Mon Sep 17 00:00:00 2001 From: qijun Date: Fri, 29 Sep 2017 23:44:52 -0700 Subject: [PATCH 111/342] fix gpu build error --- paddle/framework/executor.cc | 26 +++++++++++++++++--------- paddle/framework/executor_test.cc | 1 + 2 files changed, 18 insertions(+), 9 deletions(-) diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc index 7fda2332b8..b38d6be16f 100644 --- a/paddle/framework/executor.cc +++ b/paddle/framework/executor.cc @@ -80,26 +80,34 @@ class ExecutorImpl : public Executor { template std::unique_ptr make_unique(Args&&... args) { - return std::unique_ptr(new T(std::forward(args)...)); + return std::unique_ptr(new T(std::forward(args)...)); } -static std::unique_ptr g_cpu_device_context = - make_unique(platform::CPUPlace()); +platform::CPUDeviceContext* GetCPUDeviceContext() { + static std::unique_ptr g_cpu_device_context = + make_unique(platform::CPUPlace()); + return g_cpu_device_context.get(); +} #ifndef PADDLE_ONLY_CPU -static std::unique_ptr g_cuda_device_context = - make_unique(platform::GPUPlace(0)); +platform::CUDADeviceContext* GetCUDADeviceContext() { + static std::unique_ptr g_cuda_device_context = + make_unique(platform::GPUPlace(0)); + return g_cuda_device_context.get(); +} #endif Executor* NewLocalExecutor(const platform::Place& place, const ProgramDesc& pdesc, bool is_linear) { platform::DeviceContext* device_context = nullptr; if (platform::is_cpu_place(place)) { - device_context = g_cpu_device_context.get(); - } + device_context = GetCPUDeviceContext(); + } else if (platform::is_gpu_place(place)) { #ifndef PADDLE_ONLY_CPU - else if { - device_context = g_cuda_device_context.get(); + device_context = GetCUDADeviceContext(); + } +#else + PADDLE_THROW("'GPUPlace' is not supported in CPU only device."); } #endif return new ExecutorImpl(device_context, &pdesc, is_linear); diff --git a/paddle/framework/executor_test.cc b/paddle/framework/executor_test.cc index c046ae3158..6f8ca38768 100644 --- a/paddle/framework/executor_test.cc +++ b/paddle/framework/executor_test.cc @@ -23,4 +23,5 @@ TEST(Executor, Init) { CPUPlace cpu_place; Executor* executor = NewLocalExecutor(cpu_place, pdesc, true); executor->Run(); + delete executor; } \ No newline at end of file From af6f3c0423f6ffd6c5480eeee5d340a5aad3e487 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Sat, 30 Sep 2017 15:34:52 -0700 Subject: [PATCH 112/342] use float32 in cond_op --- python/paddle/v2/framework/tests/test_cond_op.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/paddle/v2/framework/tests/test_cond_op.py b/python/paddle/v2/framework/tests/test_cond_op.py index 3698ce9c8e..76323b5e10 100644 --- a/python/paddle/v2/framework/tests/test_cond_op.py +++ b/python/paddle/v2/framework/tests/test_cond_op.py @@ -15,7 +15,7 @@ class PySimpleCond(object): for i in range(1, 10, 2): array[i] = 0 self.cond = np.array(array) - self.x = np.ones(shape=(10, 1)) + self.x = np.ones(shape=(10, 1)).astype("float32") def forward(self): self.index_t = np.where(self.cond == 1) From e3a642e027e3c749e5405f26478e28887cab504a Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Sat, 30 Sep 2017 15:39:42 -0700 Subject: [PATCH 113/342] Extract BaseClass of grad_op_desc_maker and add some common method --- paddle/framework/details/op_registry.h | 6 +- paddle/framework/grad_op_desc_maker.h | 115 +++++++++++++++++++++++++ paddle/framework/op_desc.h | 22 ++++- paddle/framework/op_info.h | 8 +- 4 files changed, 139 insertions(+), 12 deletions(-) create mode 100644 paddle/framework/grad_op_desc_maker.h diff --git a/paddle/framework/details/op_registry.h b/paddle/framework/details/op_registry.h index d2516ccc1e..daa474e8c5 100644 --- a/paddle/framework/details/op_registry.h +++ b/paddle/framework/details/op_registry.h @@ -14,6 +14,7 @@ #pragma once +#include "paddle/framework/grad_op_desc_maker.h" #include "paddle/framework/op_info.h" #include "paddle/framework/op_proto_maker.h" #include "paddle/framework/operator.h" @@ -96,7 +97,10 @@ struct OpInfoFiller { template struct OpInfoFiller { void operator()(const char* op_type, OpInfo* info) const { - info->grad_op_maker_ = new T(); + info->grad_op_maker_ = [](const OpDescBind& fwd_op) { + T maker(fwd_op); + return maker(); + }; } }; } // namespace details diff --git a/paddle/framework/grad_op_desc_maker.h b/paddle/framework/grad_op_desc_maker.h new file mode 100644 index 0000000000..cb4d160bd0 --- /dev/null +++ b/paddle/framework/grad_op_desc_maker.h @@ -0,0 +1,115 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#pragma once +#include "paddle/framework/op_desc.h" +#include "paddle/framework/operator.h" + +namespace paddle { +namespace framework { + +class GradOpDescMakerBase { + public: + explicit GradOpDescMakerBase(const OpDescBind& fwd_op) : fwd_op_(fwd_op) {} + + virtual ~GradOpDescMakerBase() = default; + virtual std::vector operator()() const = 0; + + protected: + static std::vector ToGradNames( + const std::vector& var_names) { + std::vector ret_val; + ret_val.reserve(var_names.size()); + std::transform(var_names.begin(), var_names.end(), + std::back_inserter(ret_val), GradVarName); + return ret_val; + } + + std::vector InputGrad(const std::string& name) const { + return ToGradNames(fwd_op_.Input(name)); + } + + std::vector OutputGrad(const std::string& name) const { + return ToGradNames(fwd_op_.Output(name)); + } + + std::vector InputParamNames() const { + return this->fwd_op_.InputParamNames(); + } + + std::vector OutputParamNames() const { + return this->fwd_op_.OutputParamNames(); + } + + std::vector Input(const std::string& name) const { + return fwd_op_.Input(name); + } + + std::vector Output(const std::string& name) const { + return fwd_op_.Output(name); + } + + const std::unordered_map& Attrs() const { + return fwd_op_.GetAttrMap(); + } + + const Attribute& GetAttr(const std::string& name) const { + auto& map = fwd_op_.GetAttrMap(); + auto it = map.find(name); + PADDLE_ENFORCE(it != map.end(), "Cannot find attribute %s", name); + return it->second; + } + + std::string ForwardOpType() const { return this->fwd_op_.Type(); } + + private: + const OpDescBind& fwd_op_; +}; + +class SingleGradOpDescMaker : public GradOpDescMakerBase { + public: + std::vector operator()() const { return {this->Apply()}; } + + protected: + virtual OpDescBind Apply() const = 0; +}; + +class DefaultGradOpDescMaker : public SingleGradOpDescMaker { + protected: + virtual OpDescBind Apply() const { + OpDescBind grad; + grad.SetType(this->GradOpType()); + + for (auto& input_param : this->InputParamNames()) { + grad.SetInput(input_param, this->Input(input_param)); + grad.SetOutput(GradVarName(input_param), this->InputGrad(input_param)); + } + + for (auto& output_param : this->OutputParamNames()) { + grad.SetInput(output_param, this->Output(output_param)); + grad.SetInput(GradVarName(output_param), this->OutputGrad(output_param)); + } + + grad.SetAttrMap(this->Attrs()); + + return grad; + } + + virtual std::string GradOpType() const { + return this->ForwardOpType() + "_grad"; + } +}; + +} // namespace framework +} // namespace paddle diff --git a/paddle/framework/op_desc.h b/paddle/framework/op_desc.h index 0cf7d13971..851a305061 100644 --- a/paddle/framework/op_desc.h +++ b/paddle/framework/op_desc.h @@ -60,17 +60,31 @@ class OpDescBind { void SetBlockAttr(const std::string &name, BlockDescBind &block); - // Only be used in C++ - void SetAttrMap(const std::unordered_map &attr_map); - Attribute GetAttr(const std::string &name) const; int GetBlockAttr(const std::string &name) const; - // Only be used in C++ + // The following methods should only be used in C++ const std::unordered_map &GetAttrMap() const; + void SetAttrMap(const std::unordered_map &attr_map); + + std::vector InputParamNames() const { return MapKeys(inputs_); } + std::vector OutputParamNames() const { + return MapKeys(outputs_); + } + private: + template + static std::vector MapKeys(const MapType &map) { + std::vector ret_val; + ret_val.reserve(map.size()); + std::transform( + map.begin(), map.end(), ret_val.begin(), + [](const typename MapType::value_type &pair) { return pair.first; }); + return ret_val; + } + struct SetAttrDescVisitor : public boost::static_visitor { explicit SetAttrDescVisitor(OpDesc::Attr *attr) : attr_(attr) {} mutable OpDesc::Attr *attr_; diff --git a/paddle/framework/op_info.h b/paddle/framework/op_info.h index 6d1ee4dece..8149c0061a 100644 --- a/paddle/framework/op_info.h +++ b/paddle/framework/op_info.h @@ -29,16 +29,10 @@ using OpCreator = std::function; -class GradOpDescMakerBase { - public: - virtual ~GradOpDescMakerBase() = default; - virtual std::vector operator()(const OpDescBind&) const = 0; -}; - struct OpInfo { OpCreator creator_; std::string grad_op_type_; - GradOpDescMakerBase* grad_op_maker_{nullptr}; + std::function(const OpDescBind&)> grad_op_maker_; OpProto* proto_{nullptr}; OpAttrChecker* checker_{nullptr}; From b630d4019a0bad74d694633930180912ec19a67c Mon Sep 17 00:00:00 2001 From: qijun Date: Sat, 30 Sep 2017 15:52:05 -0700 Subject: [PATCH 114/342] add scope --- paddle/framework/CMakeLists.txt | 2 +- paddle/framework/executor.cc | 24 +++++++++++++++++------- 2 files changed, 18 insertions(+), 8 deletions(-) diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt index 2cad2e54fa..df79bc0e8f 100644 --- a/paddle/framework/CMakeLists.txt +++ b/paddle/framework/CMakeLists.txt @@ -44,5 +44,5 @@ add_custom_command(TARGET framework_py_proto POST_BUILD cc_library(backward SRCS backward.cc DEPS net_op) cc_test(backward_test SRCS backward_test.cc DEPS backward recurrent_op device_context) -cc_library(executor SRCS executor.cc DEPS device_context framework_proto) +cc_library(executor SRCS executor.cc DEPS device_context scope framework_proto) cc_test(executor_test SRCS executor_test.cc DEPS executor) diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc index b38d6be16f..52963d20f0 100644 --- a/paddle/framework/executor.cc +++ b/paddle/framework/executor.cc @@ -14,6 +14,7 @@ limitations under the License. */ #include "paddle/framework/executor.h" #include +#include "paddle/framework/scope.h" #include "paddle/platform/device_context.h" namespace paddle { @@ -58,9 +59,10 @@ void GraphView::Initialize(const ProgramDesc*) { class ExecutorImpl : public Executor { public: - ExecutorImpl(const platform::DeviceContext* ctx, const ProgramDesc* pdesc, - bool is_linear) - : device_context_(ctx), + ExecutorImpl(Scope* scope, const platform::DeviceContext* ctx, + const ProgramDesc* pdesc, bool is_linear) + : scope_(scope), + device_context_(ctx), program_desc_(pdesc), view_(ProgramDescView::Create(is_linear)) {} @@ -73,6 +75,7 @@ class ExecutorImpl : public Executor { void Initialize(); private: + Scope* scope_; const platform::DeviceContext* device_context_; const ProgramDesc* program_desc_; ProgramDescView* view_; @@ -80,23 +83,29 @@ class ExecutorImpl : public Executor { template std::unique_ptr make_unique(Args&&... args) { - return std::unique_ptr(new T(std::forward(args)...)); + return std::unique_ptr(new T(std::forward(args)...)); } platform::CPUDeviceContext* GetCPUDeviceContext() { static std::unique_ptr g_cpu_device_context = - make_unique(platform::CPUPlace()); + make_unique(platform::CPUPlace()); return g_cpu_device_context.get(); } #ifndef PADDLE_ONLY_CPU platform::CUDADeviceContext* GetCUDADeviceContext() { static std::unique_ptr g_cuda_device_context = - make_unique(platform::GPUPlace(0)); + make_unique(platform::GPUPlace(0)); return g_cuda_device_context.get(); } #endif +framework::Scope* GetScope() { + static std::unique_ptr g_scope = + make_unique(); + return g_scope.get(); +} + Executor* NewLocalExecutor(const platform::Place& place, const ProgramDesc& pdesc, bool is_linear) { platform::DeviceContext* device_context = nullptr; @@ -110,11 +119,12 @@ Executor* NewLocalExecutor(const platform::Place& place, PADDLE_THROW("'GPUPlace' is not supported in CPU only device."); } #endif - return new ExecutorImpl(device_context, &pdesc, is_linear); + return new ExecutorImpl(GetScope(), device_context, &pdesc, is_linear); } void ExecutorImpl::Run() { // operators running + scope_->NewVar(); device_context_->Wait(); } From c23af80afe4d577f8e7b0c1c4bdd2dd53d5377f1 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Sat, 30 Sep 2017 16:11:40 -0700 Subject: [PATCH 115/342] Change macro --- paddle/framework/grad_op_desc_maker.h | 4 ++ paddle/framework/op_registry.h | 82 +++++++++++++++------------ 2 files changed, 50 insertions(+), 36 deletions(-) diff --git a/paddle/framework/grad_op_desc_maker.h b/paddle/framework/grad_op_desc_maker.h index cb4d160bd0..672cd7dbaf 100644 --- a/paddle/framework/grad_op_desc_maker.h +++ b/paddle/framework/grad_op_desc_maker.h @@ -79,6 +79,7 @@ class GradOpDescMakerBase { class SingleGradOpDescMaker : public GradOpDescMakerBase { public: + using GradOpDescMakerBase::GradOpDescMakerBase; std::vector operator()() const { return {this->Apply()}; } protected: @@ -86,6 +87,9 @@ class SingleGradOpDescMaker : public GradOpDescMakerBase { }; class DefaultGradOpDescMaker : public SingleGradOpDescMaker { + public: + using SingleGradOpDescMaker::SingleGradOpDescMaker; + protected: virtual OpDescBind Apply() const { OpDescBind grad; diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h index 4ee2c7d275..7db095369e 100644 --- a/paddle/framework/op_registry.h +++ b/paddle/framework/op_registry.h @@ -24,14 +24,27 @@ limitations under the License. */ #include "paddle/framework/details/op_registry.h" #include "paddle/framework/framework.pb.h" #include "paddle/framework/grad_op_builder.h" +#include "paddle/framework/grad_op_desc_maker.h" #include "paddle/framework/operator.h" #include "paddle/framework/scope.h" namespace paddle { namespace framework { +class Registrar { + public: + // In our design, various kinds of classes, e.g., operators and kernels, + // have their corresponding registry and registrar. The action of + // registration is in the constructor of a global registrar variable, which, + // however, are not used in the code that calls package framework, and would + // be removed from the generated binary file by the linker. To avoid such + // removal, we add Touch to all registrar classes and make USE_OP macros to + // call this method. So, as long as the callee code calls USE_OP, the global + // registrar variable won't be removed by the linker. + void Touch() {} +}; template -struct OperatorRegistrar { +struct OperatorRegistrar : public Registrar { explicit OperatorRegistrar(const char* op_type) : op_type(op_type) { PADDLE_ENFORCE(!OpInfoMap::Instance().Has(op_type), "'%s' is registered more than once.", op_type); @@ -70,19 +83,6 @@ class OpRegistry { static std::unique_ptr CreateGradOp(const OperatorBase& op); }; -class Registrar { - public: - // In our design, various kinds of classes, e.g., operators and kernels, - // have their corresponding registry and registrar. The action of - // registration is in the constructor of a global registrar variable, which, - // however, are not used in the code that calls package framework, and would - // be removed from the generated binary file by the linker. To avoid such - // removal, we add Touch to all registrar classes and make USE_OP macros to - // call this method. So, as long as the callee code calls USE_OP, the global - // registrar variable won't be removed by the linker. - void Touch() {} -}; - template class OpRegistrar : public Registrar { public: @@ -138,33 +138,43 @@ class OpKernelRegistrar : public Registrar { __test_global_namespace_##uniq_name##__>::value, \ msg) +#define VA_ARGS(...) , ##__VA_ARGS__ + +#define REGISTER_OPERATOR(op_type, op_class, ...) \ + STATIC_ASSERT_GLOBAL_NAMESPACE( \ + __reg_op__##op_type, \ + "REGISTER_OPERATOR must be called in global namespace"); \ + class _OpClass_##op_type##_ : public op_class { \ + public: \ + DEFINE_OP_CLONE_METHOD(_OpClass_##op_type##_); \ + DEFINE_OP_CONSTRUCTOR(_OpClass_##op_type##_, op_class); \ + }; \ + static ::paddle::framework::OperatorRegistrar<_OpClass_##op_type##_ VA_ARGS( \ + __VA_ARGS__)> \ + __op_registrar_##op_type##__(#op_type); \ + int TouchOpRegistrar_##op_type() { \ + __op_registrar_##op_type##__.Touch(); \ + return 0; \ + } + /** * Macro to register Operator. */ -#define REGISTER_OP(op_type, op_class, op_maker_class, grad_op_type, \ - grad_op_class) \ - STATIC_ASSERT_GLOBAL_NAMESPACE( \ - __reg_op__##op_type, "REGISTER_OP must be called in global namespace"); \ - class _OpClass_##op_type##_ : public op_class { \ - public: \ - DEFINE_OP_CLONE_METHOD(_OpClass_##op_type##_); \ - DEFINE_OP_CONSTRUCTOR(_OpClass_##op_type##_, op_class); \ - }; \ - class _OpGradClass_##op_type##_ : public grad_op_class { \ - public: \ - DEFINE_OP_CLONE_METHOD(_OpGradClass_##op_type##_); \ - DEFINE_OP_CONSTRUCTOR(_OpGradClass_##op_type##_, grad_op_class); \ - }; \ - static ::paddle::framework::OpRegistrar< \ - _OpClass_##op_type##_, op_maker_class, _OpGradClass_##op_type##_> \ - __op_registrar_##op_type##__(#op_type, #grad_op_type); \ - int TouchOpRegistrar_##op_type() { \ - __op_registrar_##op_type##__.Touch(); \ - return 0; \ - } +#define REGISTER_OP(op_type, op_class, op_maker_class, grad_op_type, \ + grad_op_class) \ + REGISTER_OPERATOR(grad_op_type, grad_op_class); \ + class _GradOpDescMaker_##grad_op_type##_ \ + : public ::paddle::framework::DefaultGradOpDescMaker { \ + using ::paddle::framework::DefaultGradOpDescMaker::DefaultGradOpDescMaker; \ + \ + protected: \ + virtual std::string GradOpType() const { return #grad_op_type; } \ + }; \ + REGISTER_OPERATOR(op_type, op_class, _GradOpDescMaker_##grad_op_type##_, \ + op_maker_class) #define REGISTER_OP_WITHOUT_GRADIENT(op_type, op_class, op_maker_class) \ - REGISTER_OP(op_type, op_class, op_maker_class, , ::paddle::framework::NOP) + REGISTER_OPERATOR(op_type, op_class, op_maker_class) /** * Macro to register OperatorKernel. From d64bedf638d66cc4fedb63bcfd389a1058359798 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Sat, 30 Sep 2017 16:44:16 -0700 Subject: [PATCH 116/342] Stash --- paddle/framework/backward.cc | 3 + paddle/framework/backward_test.cc | 31 ++-- paddle/framework/grad_op_builder.cc | 97 ----------- paddle/framework/grad_op_builder.h | 28 ---- paddle/framework/grad_op_builder_test.cc | 201 ----------------------- paddle/framework/op_desc.h | 12 ++ paddle/framework/op_registry.cc | 5 - paddle/framework/op_registry.h | 2 - 8 files changed, 25 insertions(+), 354 deletions(-) delete mode 100644 paddle/framework/grad_op_builder.cc delete mode 100644 paddle/framework/grad_op_builder.h delete mode 100644 paddle/framework/grad_op_builder_test.cc diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index 0ec18de5b8..ab2567a25c 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -154,6 +154,9 @@ static std::unique_ptr BackwardRecursive( net->InsertOp(pos.first + 1, std::move(pos.second)); } } else { + OpDescBind fwd_desc; + fwd_desc.SetInput(forwardOp.Inputs()); + std::unique_ptr grad_op(OpRegistry::CreateGradOp(forwardOp)); ForEachVarName(grad_op->Inputs(), [&no_grad_names, &net, &grad_op]( diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index 6932f5b989..28fc6f9ced 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -159,16 +159,16 @@ REGISTER_OP_WITHOUT_GRADIENT(fc, f::FcOp, f::FcOpMaker); REGISTER_OP(many_output_op, f::NOP, f::ManyOutputOpMaker, many_output_op_grad, f::NOP); -TEST(Backward, simple_op_grad) { - auto fwd = f::OpRegistry::CreateOp( - "rowwise_add", {{"X", {"x"}}, {"b", {"b"}}}, {{"Out", {"out"}}}, {}); - ASSERT_NE(fwd, nullptr); - auto gop = f::OpRegistry::CreateGradOp(*fwd); - ASSERT_EQ(1UL, gop->Inputs().size()); - ASSERT_EQ("rowwise_add_grad", gop->Type()); - ASSERT_EQ(f::GradVarName("x"), gop->Output(f::GradVarName("X"))); - ASSERT_EQ(f::GradVarName("b"), gop->Output(f::GradVarName("b"))); -} +// TEST(Backward, simple_op_grad) { +// auto fwd = f::OpRegistry::CreateOp( +// "rowwise_add", {{"X", {"x"}}, {"b", {"b"}}}, {{"Out", {"out"}}}, {}); +// ASSERT_NE(fwd, nullptr); +// auto gop = f::OpRegistry::CreateGradOp(*fwd); +// ASSERT_EQ(1UL, gop->Inputs().size()); +// ASSERT_EQ("rowwise_add_grad", gop->Type()); +// ASSERT_EQ(f::GradVarName("x"), gop->Output(f::GradVarName("X"))); +// ASSERT_EQ(f::GradVarName("b"), gop->Output(f::GradVarName("b"))); +//} TEST(Backward, simple_op_not_need_grad) { auto fwd = f::OpRegistry::CreateOp( @@ -286,17 +286,6 @@ TEST(Backward, net_shared_weight) { ASSERT_EQ("add", bwd_net->ops_[2]->Type()); } -TEST(Backward, op_register_grad_not_for_network) { - auto fwd = - f::OpRegistry::CreateOp("fc", {{"X", {"x"}}, {"W", {"w"}}, {"b", {"b"}}}, - {{"mul_result", {"mul_out"}}, - {"add_result", {"add_out"}}, - {"Out", {"out1"}}}, - {{"temporary_index", std::vector{0, 1}}}); - - ASSERT_THROW(f::OpRegistry::CreateGradOp(*fwd), EnforceNotMet); -} - TEST(Backward, op_all_input_are_not_need) { auto fwd = f::OpRegistry::CreateOp( "rowwise_add", {{"X", {"x"}}, {"b", {"b"}}}, {{"Out", {"out"}}}, {}); diff --git a/paddle/framework/grad_op_builder.cc b/paddle/framework/grad_op_builder.cc deleted file mode 100644 index 3661ce41be..0000000000 --- a/paddle/framework/grad_op_builder.cc +++ /dev/null @@ -1,97 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOpArgType::OUT WARRANTIES OR CONDITIONS OF ANY KOpArgType::IND, either -express or implied. See the License for the specific language governing -permissions and limitations under the License. */ - -#include "paddle/framework/grad_op_builder.h" -#include "paddle/framework/op_registry.h" - -namespace paddle { -namespace framework { -enum class OpArgType { IN, OUT }; - -static void TransOpArg(const OperatorBase* src_op, const OpArgType& src_type, - bool is_grad, VariableNameMap* vars) { - const auto& src_inout = - src_type == OpArgType::IN ? src_op->Inputs() : src_op->Outputs(); - auto& dst_inout = *vars; - auto& proto = OpInfoMap::Instance().Get(src_op->Type()).Proto(); - const auto& src_arg_list = - src_type == OpArgType::IN ? proto.inputs() : proto.outputs(); - for (const auto& arg : src_arg_list) { - if (arg.not_in_gradient() && !is_grad) continue; - const std::string src_name = arg.name(); - std::string dst_name = is_grad ? GradVarName(src_name) : src_name; - dst_inout[dst_name].reserve(src_inout.at(src_name).size()); - for (auto& var_name : src_inout.at(src_name)) { - std::string s = is_grad ? GradVarName(var_name) : var_name; - dst_inout[dst_name].emplace_back(s); - } - } -} - -OperatorBase* BuildGradOp(const OperatorBase* op) { - auto& info = OpInfoMap::Instance().Get(op->Type()); - PADDLE_ENFORCE(info.HasGradientOp()); - - VariableNameMap inputs; - VariableNameMap outputs; - TransOpArg(op, OpArgType::IN, false, &inputs); // I - TransOpArg(op, OpArgType::OUT, false, &inputs); // O - TransOpArg(op, OpArgType::OUT, true, &inputs); // OG - TransOpArg(op, OpArgType::IN, true, &outputs); // IG - - auto& grad_info = OpInfoMap::Instance().Get(info.grad_op_type_); - return grad_info.Creator()(info.grad_op_type_, inputs, outputs, op->Attrs()); -} - -static void TransOpDescArg(const OpDescBind* src_op, const OpArgType& src_type, - bool is_grad, OpDescBind* dst_op, - const OpArgType& dst_type) { - PADDLE_ENFORCE(dst_op != nullptr, - "Protobuf desc of gradient op must be initialized first."); - const auto& proto = OpInfoMap::Instance().Get(src_op->Type()).Proto(); - const auto& src_arg_list = - src_type == OpArgType::IN ? proto.inputs() : proto.outputs(); - for (const auto& arg : src_arg_list) { - if (arg.not_in_gradient() && !is_grad) continue; - const std::string src_name = arg.name(); - std::vector vars = src_type == OpArgType::IN - ? src_op->Input(src_name) - : src_op->Output(src_name); - if (is_grad) { - for (std::string& var : vars) { - var = GradVarName(var); - } - } - std::string dst_name = is_grad ? GradVarName(src_name) : src_name; - dst_type == OpArgType::IN ? dst_op->SetInput(dst_name, vars) - : dst_op->SetOutput(dst_name, vars); - } -} - -void CompleteGradOpDesc(const OpDescBind* forw_op, OpDescBind* grad_op) { - auto& info = OpInfoMap::Instance().Get(forw_op->Type()); - PADDLE_ENFORCE(info.HasGradientOp()); - - grad_op->SetType(info.grad_op_type_); - - TransOpDescArg(forw_op, OpArgType::IN, false, grad_op, OpArgType::IN); - TransOpDescArg(forw_op, OpArgType::OUT, false, grad_op, OpArgType::IN); - TransOpDescArg(forw_op, OpArgType::OUT, true, grad_op, OpArgType::IN); - TransOpDescArg(forw_op, OpArgType::IN, true, grad_op, OpArgType::OUT); - - grad_op->SetAttrMap(forw_op->GetAttrMap()); -} - -} // namespace framework -} // namespace paddle diff --git a/paddle/framework/grad_op_builder.h b/paddle/framework/grad_op_builder.h deleted file mode 100644 index b601406061..0000000000 --- a/paddle/framework/grad_op_builder.h +++ /dev/null @@ -1,28 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once - -#include "paddle/framework/op_desc.h" -#include "paddle/framework/operator.h" - -namespace paddle { -namespace framework { - -OperatorBase* BuildGradOp(const OperatorBase* op); - -void CompleteGradOpDesc(const OpDescBind* forw_op, OpDescBind* grad_op); - -} // namespace framework -} // namespace paddle diff --git a/paddle/framework/grad_op_builder_test.cc b/paddle/framework/grad_op_builder_test.cc deleted file mode 100644 index d09892f81b..0000000000 --- a/paddle/framework/grad_op_builder_test.cc +++ /dev/null @@ -1,201 +0,0 @@ -#include "paddle/framework/grad_op_builder.h" -#include -#include "paddle/framework/op_registry.h" -#include "paddle/framework/operator.h" - -USE_OP(add); - -namespace paddle { -namespace framework { - -class MutiInOutOpMaker : public OpProtoAndCheckerMaker { - public: - MutiInOutOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("In1", "a single input"); - AddInput("In2_mult", "a multiple input").AsDuplicable(); - AddInput("In3", "another single input"); - AddOutput("Out1", "a single output"); - AddOutput("Out2_mult", "a multiple output").AsDuplicable(); - AddComment("test op with multiple inputs and outputs"); - } -}; - -class IOIgnoredOpMaker : public OpProtoAndCheckerMaker { - public: - IOIgnoredOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("In1", "a single input"); - AddInput("In2_mult", "a multiple input").AsDuplicable().NotInGradient(); - AddInput("In3_mult", "another multiple input").AsDuplicable(); - AddOutput("Out1_mult", "a multiple output").AsDuplicable(); - AddOutput("Out2", "a single output").NotInGradient(); - AddComment("op with inputs and outputs ignored in gradient calculating"); - } -}; - -} // namespace framework -} // namespace paddle - -namespace f = paddle::framework; - -TEST(GradOpBuilder, AddTwo) { - std::shared_ptr add_op(f::OpRegistry::CreateOp( - "add", {{"X", {"x"}}, {"Y", {"y"}}}, {{"Out", {"out"}}}, {})); - std::shared_ptr grad_add_op = - f::OpRegistry::CreateGradOp(*add_op); - EXPECT_EQ(grad_add_op->Inputs().size(), 4UL); - EXPECT_EQ(grad_add_op->Outputs().size(), 2UL); - EXPECT_EQ(grad_add_op->Input("X"), "x"); - EXPECT_EQ(grad_add_op->Input("Y"), "y"); - EXPECT_EQ(grad_add_op->Input("Out"), "out"); - EXPECT_EQ(grad_add_op->Input(f::GradVarName("Out")), f::GradVarName("out")); - EXPECT_EQ(grad_add_op->Output(f::GradVarName("X")), f::GradVarName("x")); - EXPECT_EQ(grad_add_op->Output(f::GradVarName("Y")), f::GradVarName("y")); -} - -REGISTER_OP(mult_io, f::NOP, f::MutiInOutOpMaker, mult_io_grad, f::NOP); -REGISTER_OP(io_ignored, f::NOP, f::IOIgnoredOpMaker, io_ignored_grad, f::NOP); - -TEST(GradOpBuilder, MutiInOut) { - std::shared_ptr test_op(f::OpRegistry::CreateOp( - "mult_io", {{"In1", {"in1"}}, - {"In2_mult", {"in2_1", "in2_2", "in2_3"}}, - {"In3", {"in3"}}}, - {{"Out1", {"out1"}}, {"Out2_mult", {"out2_1", "out2_2"}}}, {})); - std::shared_ptr grad_test_op = - f::OpRegistry::CreateGradOp(*test_op); - - ASSERT_EQ(grad_test_op->Inputs().size(), 3UL + 2UL + 2UL); - EXPECT_EQ(grad_test_op->Input("In1"), "in1"); - EXPECT_EQ(grad_test_op->Inputs("In2_mult"), - std::vector({"in2_1", "in2_2", "in2_3"})); - EXPECT_EQ(grad_test_op->Input("In3"), "in3"); - EXPECT_EQ(grad_test_op->Input("Out1"), "out1"); - EXPECT_EQ(grad_test_op->Inputs("Out2_mult"), - std::vector({"out2_1", "out2_2"})); - EXPECT_EQ(grad_test_op->Input(f::GradVarName("Out1")), - f::GradVarName("out1")); - EXPECT_EQ(grad_test_op->Inputs(f::GradVarName("Out2_mult")), - std::vector( - {f::GradVarName("out2_1"), f::GradVarName("out2_2")})); - - ASSERT_EQ(grad_test_op->Outputs().size(), 3UL); - EXPECT_EQ(grad_test_op->Output(f::GradVarName("In1")), f::GradVarName("in1")); - EXPECT_EQ(grad_test_op->Outputs(f::GradVarName("In2_mult")), - std::vector({f::GradVarName("in2_1"), - f::GradVarName("in2_2"), - f::GradVarName("in2_3")})); - EXPECT_EQ(grad_test_op->Output(f::GradVarName("In3")), f::GradVarName("in3")); -} - -TEST(GradOpBuilder, IOIgnoredInGradient) { - std::shared_ptr test_op(f::OpRegistry::CreateOp( - "io_ignored", {{"In1", {"in1"}}, - {"In2_mult", {"in2_1", "in2_2"}}, - {"In3_mult", {"in3_1", "in3_2"}}}, - {{"Out1_mult", {"out1_1", "out1_2"}}, {"Out2", {"out2"}}}, {})); - std::shared_ptr grad_test_op = - f::OpRegistry::CreateGradOp(*test_op); - - // 'In2' and 'Out2' are ignored in gradient calculating - ASSERT_EQ(grad_test_op->Inputs().size(), 2UL + 1UL + 2UL); - EXPECT_EQ(grad_test_op->Input("In1"), "in1"); - EXPECT_EQ(grad_test_op->Inputs("In3_mult"), - std::vector({"in3_1", "in3_2"})); - EXPECT_EQ(grad_test_op->Inputs("Out1_mult"), - std::vector({"out1_1", "out1_2"})); - EXPECT_EQ(grad_test_op->Inputs(f::GradVarName("Out1_mult")), - std::vector( - {f::GradVarName("out1_1"), f::GradVarName("out1_2")})); - EXPECT_EQ(grad_test_op->Input(f::GradVarName("Out2")), - f::GradVarName("out2")); - - ASSERT_EQ(grad_test_op->Outputs().size(), 3UL); - EXPECT_EQ(grad_test_op->Output(f::GradVarName("In1")), f::GradVarName("in1")); - EXPECT_EQ(grad_test_op->Outputs(f::GradVarName("In2_mult")), - std::vector( - {f::GradVarName("in2_1"), f::GradVarName("in2_2")})); - EXPECT_EQ(grad_test_op->Outputs(f::GradVarName("In3_mult")), - std::vector( - {f::GradVarName("in3_1"), f::GradVarName("in3_2")})); -} - -TEST(GradOpDescBuilder, MutiInOut) { - f::OpDescBind *forw_op = new f::OpDescBind(); - forw_op->SetType("mult_io"); - forw_op->SetInput("In1", {"in1"}); - forw_op->SetInput("In2_mult", {"in2_1", "in2_2", "in2_3"}); - forw_op->SetInput("In3", {"in3"}); - forw_op->SetOutput("Out1", {"out1"}); - forw_op->SetOutput("Out2_mult", {"out2_1", "out2_2"}); - - f::OpDescBind *grad_op = new f::OpDescBind(); - f::CompleteGradOpDesc(forw_op, grad_op); - - EXPECT_EQ(grad_op->Type(), "mult_io_grad"); - ASSERT_EQ(grad_op->InputNames().size(), 3UL + 2UL + 2UL); - EXPECT_EQ(grad_op->Input("In1"), std::vector({"in1"})); - EXPECT_EQ(grad_op->Input("In2_mult"), - std::vector({"in2_1", "in2_2", "in2_3"})); - EXPECT_EQ(grad_op->Input("In3"), std::vector({"in3"})); - EXPECT_EQ(grad_op->Input("Out1"), std::vector({"out1"})); - EXPECT_EQ(grad_op->Input("Out2_mult"), - std::vector({"out2_1", "out2_2"})); - EXPECT_EQ(grad_op->Input(f::GradVarName("Out1")), - std::vector({f::GradVarName("out1")})); - EXPECT_EQ(grad_op->Input(f::GradVarName("Out2_mult")), - std::vector( - {f::GradVarName("out2_1"), f::GradVarName("out2_2")})); - - ASSERT_EQ(grad_op->OutputNames().size(), 3UL); - EXPECT_EQ(grad_op->Output(f::GradVarName("In1")), - std::vector({f::GradVarName("in1")})); - EXPECT_EQ(grad_op->Output(f::GradVarName("In2_mult")), - std::vector({f::GradVarName("in2_1"), - f::GradVarName("in2_2"), - f::GradVarName("in2_3")})); - EXPECT_EQ(grad_op->Output(f::GradVarName("In3")), - std::vector({f::GradVarName("in3")})); - delete forw_op; - delete grad_op; -} - -TEST(GradOpDescBuilder, IOIgnoredInGradient) { - f::OpDescBind *forw_op = new f::OpDescBind(); - forw_op->SetType("io_ignored"); - forw_op->SetInput("In1", {"in1"}); - forw_op->SetInput("In2_mult", {"in2_1", "in2_2"}); - forw_op->SetInput("In3_mult", {"in3_1", "in3_2"}); - forw_op->SetOutput("Out1_mult", {"out1_1", "out1_2"}); - forw_op->SetOutput("Out2", {"out2"}); - - f::OpDescBind *grad_op = new f::OpDescBind(); - f::CompleteGradOpDesc(forw_op, grad_op); - - EXPECT_EQ(grad_op->Type(), "io_ignored_grad"); - // 'In2' and 'Out2' are ignored in gradient calculating - ASSERT_EQ(grad_op->InputNames().size(), 2UL + 1UL + 2UL); - EXPECT_EQ(grad_op->Input("In1"), std::vector({"in1"})); - EXPECT_EQ(grad_op->Input("In3_mult"), - std::vector({"in3_1", "in3_2"})); - EXPECT_EQ(grad_op->Input("Out1_mult"), - std::vector({"out1_1", "out1_2"})); - EXPECT_EQ(grad_op->Input(f::GradVarName("Out1_mult")), - std::vector( - {f::GradVarName("out1_1"), f::GradVarName("out1_2")})); - EXPECT_EQ(grad_op->Input(f::GradVarName("Out2")), - std::vector({f::GradVarName("out2")})); - - ASSERT_EQ(grad_op->OutputNames().size(), 3UL); - EXPECT_EQ(grad_op->Output(f::GradVarName("In1")), - std::vector({f::GradVarName("in1")})); - EXPECT_EQ(grad_op->Output(f::GradVarName("In2_mult")), - std::vector( - {f::GradVarName("in2_1"), f::GradVarName("in2_2")})); - EXPECT_EQ(grad_op->Output(f::GradVarName("In3_mult")), - std::vector( - {f::GradVarName("in3_1"), f::GradVarName("in3_2")})); - delete forw_op; - delete grad_op; -} \ No newline at end of file diff --git a/paddle/framework/op_desc.h b/paddle/framework/op_desc.h index 851a305061..4b001fb964 100644 --- a/paddle/framework/op_desc.h +++ b/paddle/framework/op_desc.h @@ -74,6 +74,18 @@ class OpDescBind { return MapKeys(outputs_); } + void SetInput( + const std::unordered_map> &input) { + this->inputs_ = input; + this->need_update_ = true; + } + + void SetOutput( + const std::unordered_map> &output) { + this->outputs_ = output; + this->need_update_ = true; + } + private: template static std::vector MapKeys(const MapType &map) { diff --git a/paddle/framework/op_registry.cc b/paddle/framework/op_registry.cc index b0e85dd49f..0a2b6fd582 100644 --- a/paddle/framework/op_registry.cc +++ b/paddle/framework/op_registry.cc @@ -52,10 +52,5 @@ std::unique_ptr OpRegistry::CreateOp(const OpDesc& op_desc) { return CreateOp(op_desc.type(), inputs, outputs, attrs); } -std::unique_ptr OpRegistry::CreateGradOp(const OperatorBase& op) { - PADDLE_ENFORCE(!op.IsNetOp(), "Use framework::Backward to get backward ops"); - return std::unique_ptr(BuildGradOp(&op)); -} - } // namespace framework } // namespace paddle diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h index 7db095369e..0f377f34cb 100644 --- a/paddle/framework/op_registry.h +++ b/paddle/framework/op_registry.h @@ -79,8 +79,6 @@ class OpRegistry { AttributeMap attrs); static std::unique_ptr CreateOp(const OpDesc& op_desc); - - static std::unique_ptr CreateGradOp(const OperatorBase& op); }; template From 09500917eee2f3f991b1f92acbb4738d3ea5dba2 Mon Sep 17 00:00:00 2001 From: qijun Date: Sat, 30 Sep 2017 16:44:55 -0700 Subject: [PATCH 117/342] pass place to GetCUDADeviceContext --- paddle/framework/executor.cc | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc index 52963d20f0..74153f2449 100644 --- a/paddle/framework/executor.cc +++ b/paddle/framework/executor.cc @@ -86,16 +86,16 @@ std::unique_ptr make_unique(Args&&... args) { return std::unique_ptr(new T(std::forward(args)...)); } -platform::CPUDeviceContext* GetCPUDeviceContext() { +platform::CPUDeviceContext* GetCPUDeviceContext(platform::CPUPlace& place) { static std::unique_ptr g_cpu_device_context = - make_unique(platform::CPUPlace()); + make_unique(place); return g_cpu_device_context.get(); } #ifndef PADDLE_ONLY_CPU -platform::CUDADeviceContext* GetCUDADeviceContext() { +platform::CUDADeviceContext* GetCUDADeviceContext(platform::GPUPlace& place) { static std::unique_ptr g_cuda_device_context = - make_unique(platform::GPUPlace(0)); + make_unique(place); return g_cuda_device_context.get(); } #endif @@ -110,10 +110,12 @@ Executor* NewLocalExecutor(const platform::Place& place, const ProgramDesc& pdesc, bool is_linear) { platform::DeviceContext* device_context = nullptr; if (platform::is_cpu_place(place)) { - device_context = GetCPUDeviceContext(); + auto cpu_place = boost::get(place); + device_context = GetCPUDeviceContext(cpu_place); } else if (platform::is_gpu_place(place)) { #ifndef PADDLE_ONLY_CPU - device_context = GetCUDADeviceContext(); + auto gpu_place = boost::get(place); + device_context = GetCUDADeviceContext(gpu_place); } #else PADDLE_THROW("'GPUPlace' is not supported in CPU only device."); From 8fd845e0fab40acc9539c4109feb3ed411f4dc8b Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Sat, 30 Sep 2017 16:55:40 -0700 Subject: [PATCH 118/342] Unify Map in OpDescBind --- paddle/framework/op_desc.cc | 27 ++++++++++++++++++++++++++- paddle/framework/op_desc.h | 37 ++++++------------------------------- paddle/platform/enforce.h | 4 ++-- 3 files changed, 34 insertions(+), 34 deletions(-) diff --git a/paddle/framework/op_desc.cc b/paddle/framework/op_desc.cc index 0c12c55dc0..33a064890c 100644 --- a/paddle/framework/op_desc.cc +++ b/paddle/framework/op_desc.cc @@ -112,6 +112,30 @@ const std::unordered_map &OpDescBind::GetAttrMap() return attrs_; } +struct SetAttrDescVisitor : public boost::static_visitor { + explicit SetAttrDescVisitor(OpDesc::Attr *attr) : attr_(attr) {} + mutable OpDesc::Attr *attr_; + void operator()(int v) const { attr_->set_i(v); } + void operator()(float v) const { attr_->set_f(v); } + void operator()(const std::string &v) const { attr_->set_s(v); } + void operator()(bool b) const { attr_->set_b(b); } + + void operator()(const std::vector &v) const { + VectorToRepeated(v, attr_->mutable_ints()); + } + void operator()(const std::vector &v) const { + VectorToRepeated(v, attr_->mutable_floats()); + } + void operator()(const std::vector &v) const { + VectorToRepeated(v, attr_->mutable_strings()); + } + void operator()(const std::vector &v) const { + VectorToRepeated(v, attr_->mutable_bools()); + } + void operator()(BlockDesc *desc) const { attr_->set_block_idx(desc->idx()); } + void operator()(boost::blank) const { PADDLE_THROW("Unexpected branch"); } +}; + void OpDescBind::Sync() { if (need_update_) { this->op_desc_.mutable_inputs()->Clear(); @@ -134,7 +158,8 @@ void OpDescBind::Sync() { attr_desc->set_name(attr.first); attr_desc->set_type( static_cast(attr.second.which() - 1)); - boost::apply_visitor(SetAttrDescVisitor(attr_desc), attr.second); + SetAttrDescVisitor visitor(attr_desc); + boost::apply_visitor(visitor, attr.second); } need_update_ = false; diff --git a/paddle/framework/op_desc.h b/paddle/framework/op_desc.h index 0cf7d13971..e03b4d067f 100644 --- a/paddle/framework/op_desc.h +++ b/paddle/framework/op_desc.h @@ -17,6 +17,7 @@ limitations under the License. */ #include #include #include "paddle/framework/attribute.h" +#include "paddle/framework/op_info.h" #include "paddle/framework/var_desc.h" namespace paddle { @@ -61,48 +62,22 @@ class OpDescBind { void SetBlockAttr(const std::string &name, BlockDescBind &block); // Only be used in C++ - void SetAttrMap(const std::unordered_map &attr_map); + void SetAttrMap(const AttributeMap &attr_map); Attribute GetAttr(const std::string &name) const; int GetBlockAttr(const std::string &name) const; // Only be used in C++ - const std::unordered_map &GetAttrMap() const; + const AttributeMap &GetAttrMap() const; private: - struct SetAttrDescVisitor : public boost::static_visitor { - explicit SetAttrDescVisitor(OpDesc::Attr *attr) : attr_(attr) {} - mutable OpDesc::Attr *attr_; - void operator()(int v) const { attr_->set_i(v); } - void operator()(float v) const { attr_->set_f(v); } - void operator()(const std::string &v) const { attr_->set_s(v); } - void operator()(bool b) const { attr_->set_b(b); } - - void operator()(const std::vector &v) const { - VectorToRepeated(v, attr_->mutable_ints()); - } - void operator()(const std::vector &v) const { - VectorToRepeated(v, attr_->mutable_floats()); - } - void operator()(const std::vector &v) const { - VectorToRepeated(v, attr_->mutable_strings()); - } - void operator()(const std::vector &v) const { - VectorToRepeated(v, attr_->mutable_bools()); - } - void operator()(BlockDesc *desc) const { - attr_->set_block_idx(desc->idx()); - } - void operator()(boost::blank) const { PADDLE_THROW("Unexpected branch"); } - }; - void Sync(); OpDesc op_desc_; - std::unordered_map> inputs_; - std::unordered_map> outputs_; - std::unordered_map attrs_; + VariableNameMap inputs_; + VariableNameMap outputs_; + AttributeMap attrs_; // need_update_ indicate there some local changes not be synchronized. If // local changes should be synchronized, need_update_ should be set to true. diff --git a/paddle/platform/enforce.h b/paddle/platform/enforce.h index b523ef03c0..52bd23039b 100644 --- a/paddle/platform/enforce.h +++ b/paddle/platform/enforce.h @@ -185,7 +185,7 @@ inline void throw_on_error(T e) { std::make_exception_ptr( \ std::runtime_error(paddle::string::Sprintf(__VA_ARGS__))), \ __FILE__, __LINE__); \ - } while (0) + } while (false) #define PADDLE_ENFORCE(...) \ do { \ @@ -195,7 +195,7 @@ inline void throw_on_error(T e) { throw ::paddle::platform::EnforceNotMet(std::current_exception(), \ __FILE__, __LINE__); \ } \ - } while (0) + } while (false) /* * Some enforce helpers here, usage: From 87efa600df84db136ae743dabd294e54b2d1805a Mon Sep 17 00:00:00 2001 From: Qiao Longfei Date: Sat, 30 Sep 2017 23:16:11 -0700 Subject: [PATCH 119/342] add some check to operator.run (#4544) * fix cond_op_test and add some check to operator.run * tmp * optimize kernel check --- paddle/framework/operator.cc | 7 +++++++ paddle/framework/operator.h | 25 ++++++++++++++++++++++--- 2 files changed, 29 insertions(+), 3 deletions(-) diff --git a/paddle/framework/operator.cc b/paddle/framework/operator.cc index 8b5560ffa1..1012a30b0a 100644 --- a/paddle/framework/operator.cc +++ b/paddle/framework/operator.cc @@ -245,5 +245,12 @@ std::vector InferShapeContext::MultiOutput( return res; } +std::ostream& operator<<(std::ostream& os, + const OperatorWithKernel::OpKernelKey& kernel_key) { + os << "place[" << kernel_key.place_ << "]:data_type[" << kernel_key.data_type_ + << "]"; + return os; +} + } // namespace framework } // namespace paddle diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h index 0af527c88c..73e53a4176 100644 --- a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -478,9 +478,25 @@ class OperatorWithKernel : public OperatorBase { this->InferShape(&infer_shape_ctx); ExecutionContext ctx(*this, scope, dev_ctx); - auto& opKernel = AllOpKernels().at(type_).at( - OpKernelKey(IndicateDataType(ctx), dev_ctx)); - opKernel->Compute(ctx); + + // check if op[type] has kernel registered. + auto& all_op_kernels = AllOpKernels(); + auto kernels_iter = all_op_kernels.find(type_); + if (kernels_iter == all_op_kernels.end()) { + PADDLE_THROW("op[%s] has no kernel", type_); + } + + // check if op[type] have kernel for kernel_key + OpKernelMap& kernels = kernels_iter->second; + auto kernel_key = OpKernelKey(IndicateDataType(ctx), dev_ctx); + auto kernel_iter = kernels.find(kernel_key); + + if (kernel_iter == kernels.end()) { + PADDLE_THROW("op[%s] has no kernel with kernel_key[%s]", type_, + kernel_key); + } + + kernel_iter->second->Compute(ctx); } static std::unordered_map& @@ -529,5 +545,8 @@ class OperatorWithKernel : public OperatorBase { } }; +std::ostream& operator<<(std::ostream& os, + const OperatorWithKernel::OpKernelKey& kernel_key); + } // namespace framework } // namespace paddle From 3e99b166ba147b8d954332a9be882bee25ca6591 Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Sun, 1 Oct 2017 08:29:09 +0000 Subject: [PATCH 120/342] add generic add operator --- paddle/framework/backward.cc | 32 +++++++++++++++++++++++++++++--- 1 file changed, 29 insertions(+), 3 deletions(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index 0ec18de5b8..c625c0caf7 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -141,9 +141,35 @@ static std::unique_ptr BackwardRecursive( net->ops_[op_offset]->Rename(name, dup_outputs.back()); } // collect all the offset to append `add` op for each alias - insert_position.push_back( - {dup_op.back(), OpRegistry::CreateOp("add", {{"X", {dup_outputs}}}, - {{"Out", {name}}}, {})}); + // + // one variable is shared between multiple operators. + // insert add operator one by one, then add it to output + if (dup_outputs.size() == 2) { + insert_position.push_back( + {dup_op.back(), + OpRegistry::CreateOp( + "add", {{"X", {dup_outputs[0]}}, {"Y", {dup_outputs[1]}}}, + {{"Out", {name}}}, {})}); + } else { + for (size_t output_idx = 0; output_idx < dup_outputs.size() - 1; + ++output_idx) { + auto insert_add_x = dup_outputs[output_idx]; + auto insert_add_y = dup_outputs[output_idx]; + auto insert_add_out = name + "@SHARED@" + std::to_string(output_idx); + // first add op inserted + if (output_idx == dup_outputs.size() - 1) { + insert_add_out = name; + } + if (output_idx != 0) { + insert_add_y = name + "@SHARED@" + std::to_string(output_idx); + } + insert_position.push_back( + {dup_op.back(), + OpRegistry::CreateOp( + "add", {{"X", {insert_add_x}}, {"Y", {insert_add_y}}}, + {{"Out", {insert_add_out}}}, {})}); + } + } } // make sure the inserted `add` ops follow the BFS order. From c08635898f3a57cdf19c45f12b1aac28d864c73e Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Sun, 1 Oct 2017 08:36:04 +0000 Subject: [PATCH 121/342] fix typo --- paddle/framework/backward.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index c625c0caf7..b850939040 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -161,7 +161,7 @@ static std::unique_ptr BackwardRecursive( insert_add_out = name; } if (output_idx != 0) { - insert_add_y = name + "@SHARED@" + std::to_string(output_idx); + insert_add_y = name + "@SHARED@" + std::to_string(output_idx-1); } insert_position.push_back( {dup_op.back(), From e9a9dd6d4d5fb6428917cffb678cfa0b582fc018 Mon Sep 17 00:00:00 2001 From: dongzhihong Date: Sun, 1 Oct 2017 09:18:21 -0700 Subject: [PATCH 122/342] relauch ci --- paddle/framework/backward.cc | 36 ++++++++++++++---------------------- 1 file changed, 14 insertions(+), 22 deletions(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index b850939040..fbacfeed94 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -144,31 +144,23 @@ static std::unique_ptr BackwardRecursive( // // one variable is shared between multiple operators. // insert add operator one by one, then add it to output - if (dup_outputs.size() == 2) { + for (size_t output_idx = 0; output_idx < dup_outputs.size() - 1; + ++output_idx) { + auto insert_add_x = dup_outputs[output_idx]; + auto insert_add_y = dup_outputs[output_idx]; + auto insert_add_out = name + "@SHARED@" + std::to_string(output_idx); + // first add op inserted + if (output_idx == dup_outputs.size() - 2) { + insert_add_out = name; + } + if (output_idx != 0) { + insert_add_y = name + "@SHARED@" + std::to_string(output_idx - 1); + } insert_position.push_back( {dup_op.back(), OpRegistry::CreateOp( - "add", {{"X", {dup_outputs[0]}}, {"Y", {dup_outputs[1]}}}, - {{"Out", {name}}}, {})}); - } else { - for (size_t output_idx = 0; output_idx < dup_outputs.size() - 1; - ++output_idx) { - auto insert_add_x = dup_outputs[output_idx]; - auto insert_add_y = dup_outputs[output_idx]; - auto insert_add_out = name + "@SHARED@" + std::to_string(output_idx); - // first add op inserted - if (output_idx == dup_outputs.size() - 1) { - insert_add_out = name; - } - if (output_idx != 0) { - insert_add_y = name + "@SHARED@" + std::to_string(output_idx-1); - } - insert_position.push_back( - {dup_op.back(), - OpRegistry::CreateOp( - "add", {{"X", {insert_add_x}}, {"Y", {insert_add_y}}}, - {{"Out", {insert_add_out}}}, {})}); - } + "add", {{"X", {insert_add_x}}, {"Y", {insert_add_y}}}, + {{"Out", {insert_add_out}}}, {})}); } } From 7163dd0413d5b99261ff95e0fab28a09f8abb74a Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Sun, 1 Oct 2017 09:27:44 -0700 Subject: [PATCH 123/342] revert code --- paddle/operators/recurrent_op.cc | 41 +++++++++++++++++++ paddle/operators/recurrent_op.h | 19 +++++++++ .../v2/framework/tests/test_recurrent_op.py | 3 -- 3 files changed, 60 insertions(+), 3 deletions(-) diff --git a/paddle/operators/recurrent_op.cc b/paddle/operators/recurrent_op.cc index 80de229c33..b9fba3e135 100644 --- a/paddle/operators/recurrent_op.cc +++ b/paddle/operators/recurrent_op.cc @@ -28,6 +28,29 @@ using Variable = framework::Variable; using Tensor = framework::Tensor; using LoDTensor = framework::LoDTensor; +void RecurrentAlgorithm::InferShape(const Scope& scope) const { + auto* input0 = scope.FindVar(arg_->inlinks[0]); + PADDLE_ENFORCE_NOT_NULL(input0); + seq_len_ = input0->GetMutable()->dims()[0]; + PADDLE_ENFORCE_GT(seq_len_, 0); + + CreateScopes(scope); + auto& step_scopes = GetStepScopes(scope); + rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_, + true /*infer_shape_mode*/); + InitMemories(step_scopes[0], true /*infer_shape_mode*/); + + for (size_t i = 0; i < seq_len_; i++) { + if (i > 0) { + rnn::LinkMemories(step_scopes, arg_->memories, i, -1, + true /*infer_shape_mode*/); + } + (*stepnet_)->InferShape(*step_scopes[i]); + } + rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len_, + true /*infer_shape_mode*/); +} + void RecurrentAlgorithm::Run(const Scope& scope, const platform::DeviceContext& dev_ctx) const { auto step_scopes = GetStepScopes(scope); @@ -179,6 +202,24 @@ void RecurrentGradientAlgorithm::LinkBootMemoryGradients( } } +void RecurrentGradientAlgorithm::InferShape(const Scope& scope) const { + seq_len_ = + scope.FindVar(arg_->inlinks[0])->GetMutable()->dims()[0]; + auto step_scopes = GetStepScopes(scope); + rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_, + true /*infer_shape_mode*/); + for (int step_id = seq_len_ - 1; step_id >= 0; --step_id) { + if (static_cast(step_id) != seq_len_ - 1) { + rnn::LinkMemories(step_scopes, arg_->memories, step_id, 1, + true /*infer_shape_mode*/); + } + (*stepnet_)->InferShape(*step_scopes[step_id]); + } + rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len_, + true /*infer_shape_mode*/); + LinkBootMemoryGradients(step_scopes[0], true /*infer_shape_mode*/); +} + RecurrentGradientOp::RecurrentGradientOp( const std::string& type, const framework::VariableNameMap& inputs, const framework::VariableNameMap& outputs, diff --git a/paddle/operators/recurrent_op.h b/paddle/operators/recurrent_op.h index c6b9a5533e..18f8c53e18 100644 --- a/paddle/operators/recurrent_op.h +++ b/paddle/operators/recurrent_op.h @@ -41,6 +41,11 @@ class RecurrentAlgorithm { stepnet_ = stepnet; } + /** + * InferShape must be called before Run. + */ + void InferShape(const framework::Scope& scope) const; + protected: /* * The step scopes will be stored in the father scope as a variable. @@ -89,6 +94,11 @@ class RecurrentGradientAlgorithm { void LinkBootMemoryGradients(framework::Scope* step_scopes, bool infer_shape_mode) const; + /** + * InferShape must be called before Run. + */ + void InferShape(const framework::Scope& scope) const; + protected: inline const std::vector& GetStepScopes( const framework::Scope& scope) const { @@ -123,8 +133,13 @@ class RecurrentOp : public framework::OperatorBase { void set_stepnet(std::unique_ptr net) { stepnet_ = std::move(net); } + const OperatorBase& stepnet() const { return *stepnet_; } + void InferShape(const framework::Scope& scope) const { + alg_.InferShape(scope); + } + static const rnn::ArgumentName kArgName; private: @@ -147,6 +162,10 @@ class RecurrentGradientOp : public framework::OperatorBase { PADDLE_THROW("Not Implemented"); } + void InferShape(const framework::Scope& scope) const { + alg_.InferShape(scope); + } + void Run(const framework::Scope& scope, const platform::DeviceContext& dev_ctx) const override { alg_.Run(scope, dev_ctx); diff --git a/python/paddle/v2/framework/tests/test_recurrent_op.py b/python/paddle/v2/framework/tests/test_recurrent_op.py index 92161ae5dd..6b9e7a88ce 100644 --- a/python/paddle/v2/framework/tests/test_recurrent_op.py +++ b/python/paddle/v2/framework/tests/test_recurrent_op.py @@ -197,7 +197,4 @@ class RecurrentGradientOpTest(unittest.TestCase): if __name__ == '__main__': - exit( - 0 - ) # FIXME(yuyang18): InferShape has been removed, this unittest may error unittest.main() From d25766e571f0d08c3730c4a36090772c9342fdb7 Mon Sep 17 00:00:00 2001 From: kexinzhao <19hskevin87@gmail.com> Date: Sun, 1 Oct 2017 09:36:02 -0700 Subject: [PATCH 124/342] Fix typo in new_op_cn.md --- doc/howto/dev/new_op_cn.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/howto/dev/new_op_cn.md b/doc/howto/dev/new_op_cn.md index 264b998f50..9c5c90ca03 100644 --- a/doc/howto/dev/new_op_cn.md +++ b/doc/howto/dev/new_op_cn.md @@ -206,7 +206,7 @@ MulOp(const std::string &type, const framework::VariableNameMap &inputs, - `REGISTER_OP` : 注册`ops::MulOp`类,类型名为`mul`,该类的`ProtoMaker`为`ops::MulOpMaker`,注册`ops::MulOpGrad`,类型名为`mul_grad`。 - `REGISTER_OP_WITHOUT_GRADIENT` : 用于注册没有反向的Op。 - - `REGISTER_OP_CPU_KERNEL` :注册`ops::MulKernel`类,并特化模板参数为`paddle::platform::CPUPlace`和`float`类型,同理,注册`ops::MulKernel`类。 + - `REGISTER_OP_CPU_KERNEL` :注册`ops::MulKernel`类,并特化模板参数为`paddle::platform::CPUPlace`和`float`类型,同理,注册`ops::MulGradKernel`类。 - 在 `.cu`文件中注册GPU Kernel。 From 3851fa4d8b15d0876c40c3fadc3dd54b87b5e42b Mon Sep 17 00:00:00 2001 From: kexinzhao <19hskevin87@gmail.com> Date: Sun, 1 Oct 2017 09:40:49 -0700 Subject: [PATCH 125/342] Update new_op_en.md --- doc/howto/dev/new_op_en.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/howto/dev/new_op_en.md b/doc/howto/dev/new_op_en.md index 60681cdd71..56ed22bc34 100644 --- a/doc/howto/dev/new_op_en.md +++ b/doc/howto/dev/new_op_en.md @@ -205,7 +205,7 @@ The definition of its corresponding backward operator, if applicable, is similar - `REGISTER_OP` registers the `ops::MulOp` class, type named `mul`, its type `ProtoMaker` is `ops::MulOpMaker`, registering `ops::MulOpGrad` as `mul_grad`. - `REGISTER_OP_WITHOUT_GRADIENT` registers an operator without gradient. - - `REGISTER_OP_CPU_KERNEL` registers `ops::MulKernel` class and specialized template types `paddle::platform::CPUPlace` and `float`, which also registers `ops::MulKernel`. + - `REGISTER_OP_CPU_KERNEL` registers `ops::MulKernel` class and specialized template types `paddle::platform::CPUPlace` and `float`, which also registers `ops::MulGradKernel`. - Registering GPU Kernel in `.cu` files From 5423cb3e57949fc2885e39016422bf92b70b5260 Mon Sep 17 00:00:00 2001 From: dongzhihong Date: Sun, 1 Oct 2017 09:54:08 -0700 Subject: [PATCH 126/342] format --- paddle/framework/block_desc.h | 6 +++--- paddle/framework/op_info.h | 8 +++----- paddle/framework/program_desc.h | 6 +++--- paddle/framework/scope.h | 8 +++----- paddle/platform/macros.h | 10 ++++++---- 5 files changed, 18 insertions(+), 20 deletions(-) diff --git a/paddle/framework/block_desc.h b/paddle/framework/block_desc.h index 1a1135bab4..59513ede33 100644 --- a/paddle/framework/block_desc.h +++ b/paddle/framework/block_desc.h @@ -19,6 +19,7 @@ limitations under the License. */ #include #include "paddle/framework/op_desc.h" #include "paddle/framework/var_desc.h" +#include "paddle/platform/macros.h" namespace paddle { namespace framework { @@ -34,9 +35,6 @@ class BlockDescBind { BlockDescBind(ProgramDescBind *prog, BlockDesc *desc) : prog_(prog), desc_(desc), need_update_(false) {} - BlockDescBind(const BlockDescBind &o) = delete; - BlockDescBind &operator=(const BlockDescBind &o) = delete; - int32_t ID() const { return desc_->idx(); } int32_t Parent() const { return desc_->parent_idx(); } @@ -66,6 +64,8 @@ class BlockDescBind { std::deque> ops_; std::unordered_map> vars_; + + DISABLE_COPY_AND_ASSIGN(BlockDescBind); }; } // namespace framework } // namespace paddle diff --git a/paddle/framework/op_info.h b/paddle/framework/op_info.h index 6d1ee4dece..5df3093318 100644 --- a/paddle/framework/op_info.h +++ b/paddle/framework/op_info.h @@ -19,6 +19,7 @@ #include #include "paddle/framework/attribute.h" #include "paddle/framework/op_desc.h" +#include "paddle/platform/macros.h" namespace paddle { namespace framework { @@ -72,11 +73,6 @@ class OpInfoMap { public: static OpInfoMap& Instance(); - OpInfoMap(const OpInfoMap& o) = delete; - OpInfoMap(OpInfoMap&& o) = delete; - OpInfoMap& operator=(const OpInfoMap& o) = delete; - OpInfoMap& operator=(OpInfoMap&& o) = delete; - bool Has(const std::string& op_type) const { return map_.find(op_type) != map_.end(); } @@ -112,6 +108,8 @@ class OpInfoMap { private: OpInfoMap() = default; std::unordered_map map_; + + DISABLE_COPY_AND_ASSIGN(OpInfoMap); }; } // namespace framework diff --git a/paddle/framework/program_desc.h b/paddle/framework/program_desc.h index 06ffcd4b15..9b34a06aef 100644 --- a/paddle/framework/program_desc.h +++ b/paddle/framework/program_desc.h @@ -16,6 +16,7 @@ limitations under the License. */ #include #include "paddle/framework/framework.pb.h" +#include "paddle/platform/macros.h" namespace paddle { namespace framework { @@ -26,9 +27,6 @@ class ProgramDescBind { public: static ProgramDescBind &Instance(ProgramDesc *prog); - ProgramDescBind(const ProgramDescBind &o) = delete; - ProgramDescBind &operator=(const ProgramDescBind &o) = delete; - BlockDescBind *AppendBlock(const BlockDescBind &parent); BlockDescBind *Block(size_t idx) { return blocks_[idx].get(); } @@ -46,6 +44,8 @@ class ProgramDescBind { ProgramDesc *prog_; std::vector> blocks_; + + DISABLE_COPY_AND_ASSIGN(ProgramDescBind); }; } // namespace framework } // namespace paddle diff --git a/paddle/framework/scope.h b/paddle/framework/scope.h index c93b03e481..7047f0d55e 100644 --- a/paddle/framework/scope.h +++ b/paddle/framework/scope.h @@ -19,6 +19,7 @@ limitations under the License. */ #include #include "paddle/framework/variable.h" +#include "paddle/platform/macros.h" namespace paddle { namespace framework { @@ -38,11 +39,6 @@ class Scope { Scope() {} ~Scope(); - // Disable Copy, Assign, Move. - Scope(const Scope& other) = delete; - Scope& operator=(const Scope& other) = delete; - Scope(Scope&& other) = delete; - /// Create a sub-scope. Returns a reference other than a pointer so /// to prevent from manual deletion. /// Mark it to const because that new kid scope cannot change parent scope. @@ -73,6 +69,8 @@ class Scope { std::unordered_map vars_; mutable std::list kids_; Scope const* parent_{nullptr}; + + DISABLE_COPY_AND_ASSIGN(Scope); }; } // namespace framework diff --git a/paddle/platform/macros.h b/paddle/platform/macros.h index 4a04a38c0c..feae7bdd77 100644 --- a/paddle/platform/macros.h +++ b/paddle/platform/macros.h @@ -16,8 +16,10 @@ limitations under the License. */ // Disable the copy and assignment operator for a class. #ifndef DISABLE_COPY_AND_ASSIGN -#define DISABLE_COPY_AND_ASSIGN(classname) \ - private: \ - classname(const classname&) = delete; \ - classname& operator=(const classname&) = delete +#define DISABLE_COPY_AND_ASSIGN(classname) \ + private: \ + classname(const classname&) = delete; \ + classname(const classname&&) = delete; \ + classname& operator=(const classname&) = delete; \ + classname& operator=(const classname&&) = delete #endif From 8069a3cf0a0285febe1e4785a56236aac8987e64 Mon Sep 17 00:00:00 2001 From: kexinzhao <19hskevin87@gmail.com> Date: Sun, 1 Oct 2017 09:36:02 -0700 Subject: [PATCH 127/342] Fix typo in new_op_cn.md --- doc/howto/dev/new_op_cn.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/howto/dev/new_op_cn.md b/doc/howto/dev/new_op_cn.md index 9d3d02ffc3..c823d7e9fc 100644 --- a/doc/howto/dev/new_op_cn.md +++ b/doc/howto/dev/new_op_cn.md @@ -206,7 +206,7 @@ MulOp(const std::string &type, const framework::VariableNameMap &inputs, - `REGISTER_OP` : 注册`ops::MulOp`类,类型名为`mul`,该类的`ProtoMaker`为`ops::MulOpMaker`,注册`ops::MulOpGrad`,类型名为`mul_grad`。 - `REGISTER_OP_WITHOUT_GRADIENT` : 用于注册没有反向的Op。 - - `REGISTER_OP_CPU_KERNEL` :注册`ops::MulKernel`类,并特化模板参数为`paddle::platform::CPUPlace`和`float`类型,同理,注册`ops::MulKernel`类。 + - `REGISTER_OP_CPU_KERNEL` :注册`ops::MulKernel`类,并特化模板参数为`paddle::platform::CPUPlace`和`float`类型,同理,注册`ops::MulGradKernel`类。 - 在 `.cu`文件中注册GPU Kernel。 From 646cfbfe0426973b260239575aade9470adb916c Mon Sep 17 00:00:00 2001 From: kexinzhao <19hskevin87@gmail.com> Date: Sun, 1 Oct 2017 09:40:49 -0700 Subject: [PATCH 128/342] Update new_op_en.md --- doc/howto/dev/new_op_en.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/howto/dev/new_op_en.md b/doc/howto/dev/new_op_en.md index 57ff7caad1..1e88e1f5b4 100644 --- a/doc/howto/dev/new_op_en.md +++ b/doc/howto/dev/new_op_en.md @@ -205,7 +205,7 @@ The definition of its corresponding backward operator, if applicable, is similar - `REGISTER_OP` registers the `ops::MulOp` class, type named `mul`, its type `ProtoMaker` is `ops::MulOpMaker`, registering `ops::MulOpGrad` as `mul_grad`. - `REGISTER_OP_WITHOUT_GRADIENT` registers an operator without gradient. - - `REGISTER_OP_CPU_KERNEL` registers `ops::MulKernel` class and specialized template types `paddle::platform::CPUPlace` and `float`, which also registers `ops::MulKernel`. + - `REGISTER_OP_CPU_KERNEL` registers `ops::MulKernel` class and specialized template types `paddle::platform::CPUPlace` and `float`, which also registers `ops::MulGradKernel`. - Registering GPU Kernel in `.cu` files From ce4d14b4ed5384dc5fb9eb4e2c6d7f1c6b9bc6dd Mon Sep 17 00:00:00 2001 From: qijun Date: Sun, 1 Oct 2017 15:08:20 -0700 Subject: [PATCH 129/342] add struct Device --- paddle/framework/CMakeLists.txt | 2 +- paddle/framework/executor.cc | 73 ++++++++++++++++++++++----------- 2 files changed, 51 insertions(+), 24 deletions(-) diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt index 1168fc38af..129a0eb707 100644 --- a/paddle/framework/CMakeLists.txt +++ b/paddle/framework/CMakeLists.txt @@ -44,5 +44,5 @@ add_custom_command(TARGET framework_py_proto POST_BUILD cc_library(backward SRCS backward.cc DEPS net_op) cc_test(backward_test SRCS backward_test.cc DEPS backward recurrent_op device_context) -cc_library(executor SRCS executor.cc DEPS device_context scope framework_proto) +cc_library(executor SRCS executor.cc DEPS op_registry device_context scope framework_proto) cc_test(executor_test SRCS executor_test.cc DEPS executor) diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc index 74153f2449..559cbe125f 100644 --- a/paddle/framework/executor.cc +++ b/paddle/framework/executor.cc @@ -14,6 +14,8 @@ limitations under the License. */ #include "paddle/framework/executor.h" #include +#include "paddle/framework/op_registry.h" +#include "paddle/framework/operator.h" #include "paddle/framework/scope.h" #include "paddle/platform/device_context.h" @@ -34,6 +36,9 @@ class ProgramDescView { class LinearListView : public ProgramDescView { public: void Initialize(const ProgramDesc*) override; + + private: + std::vector> ops_; }; class GraphView : public ProgramDescView { @@ -49,20 +54,36 @@ ProgramDescView* ProgramDescView::Create(bool is_linear) { } } -void LinearListView::Initialize(const ProgramDesc*) { +void LinearListView::Initialize(const ProgramDesc* pdesc) { // get a LinearView of ProgramDesc + for (auto& block_desc : pdesc->blocks()) { + for (auto& op_desc : block_desc.ops()) { + ops_.emplace_back(OpRegistry::CreateOp(op_desc)); + } + } } -void GraphView::Initialize(const ProgramDesc*) { +void GraphView::Initialize(const ProgramDesc* pdesc) { // get a GraphView of ProgramDesc } +struct Device { + platform::CPUDeviceContext* cpu_device_context; +#ifndef PADDLE_ONLY_CPU + Device(platform::CPUDeviceContext* cpu, platform::CUDADeviceContext* gpu) + : cpu_device_context(cpu), cuda_device_context(gpu) {} + platform::CDUADeviceContext* cuda_device_context; +#else + explicit Device(platform::CPUDeviceContext* cpu) : cpu_device_context(cpu) {} +#endif +}; + class ExecutorImpl : public Executor { public: - ExecutorImpl(Scope* scope, const platform::DeviceContext* ctx, - const ProgramDesc* pdesc, bool is_linear) + ExecutorImpl(Scope* scope, const Device* device, const ProgramDesc* pdesc, + bool is_linear) : scope_(scope), - device_context_(ctx), + device_(device), program_desc_(pdesc), view_(ProgramDescView::Create(is_linear)) {} @@ -76,7 +97,7 @@ class ExecutorImpl : public Executor { private: Scope* scope_; - const platform::DeviceContext* device_context_; + const Device* device_; const ProgramDesc* program_desc_; ProgramDescView* view_; }; @@ -86,20 +107,36 @@ std::unique_ptr make_unique(Args&&... args) { return std::unique_ptr(new T(std::forward(args)...)); } -platform::CPUDeviceContext* GetCPUDeviceContext(platform::CPUPlace& place) { +platform::CPUDeviceContext* GetCPUDeviceContext( + const platform::CPUPlace& place) { static std::unique_ptr g_cpu_device_context = make_unique(place); return g_cpu_device_context.get(); } #ifndef PADDLE_ONLY_CPU -platform::CUDADeviceContext* GetCUDADeviceContext(platform::GPUPlace& place) { +platform::CUDADeviceContext* GetCUDADeviceContext( + const platform::GPUPlace& place) { static std::unique_ptr g_cuda_device_context = make_unique(place); return g_cuda_device_context.get(); } #endif +Device* GetDevice(const platform::Place& place) { + platform::CPUPlace cpu_place; +#ifndef PADDLE_ONLY_CPU + platform::GPUPlace gpu_place = boost::get(place); + static std::unique_ptr g_device = make_unique( + GetCPUDeviceContext(cpu_place), GetCUDADeviceContext(gpu_place)); + return g_device.get(); +#else + static std::unique_ptr g_device = + make_unique(GetCPUDeviceContext(cpu_place)); + return g_device.get(); +#endif +} + framework::Scope* GetScope() { static std::unique_ptr g_scope = make_unique(); @@ -108,26 +145,16 @@ framework::Scope* GetScope() { Executor* NewLocalExecutor(const platform::Place& place, const ProgramDesc& pdesc, bool is_linear) { - platform::DeviceContext* device_context = nullptr; - if (platform::is_cpu_place(place)) { - auto cpu_place = boost::get(place); - device_context = GetCPUDeviceContext(cpu_place); - } else if (platform::is_gpu_place(place)) { -#ifndef PADDLE_ONLY_CPU - auto gpu_place = boost::get(place); - device_context = GetCUDADeviceContext(gpu_place); - } -#else - PADDLE_THROW("'GPUPlace' is not supported in CPU only device."); - } -#endif - return new ExecutorImpl(GetScope(), device_context, &pdesc, is_linear); + return new ExecutorImpl(GetScope(), GetDevice(place), &pdesc, is_linear); } void ExecutorImpl::Run() { // operators running scope_->NewVar(); - device_context_->Wait(); + device_->cpu_device_context->Wait(); +#ifndef PADDLE_ONLY_CPU + device_->cuda_device_context->Wait(); +#endif } void ExecutorImpl::Initialize() { From f29a6b020f633e7c69ae487b7372146c28046597 Mon Sep 17 00:00:00 2001 From: qijun Date: Sun, 1 Oct 2017 15:24:18 -0700 Subject: [PATCH 130/342] fix gpu build error --- paddle/framework/executor.cc | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc index 559cbe125f..ebe3259bc0 100644 --- a/paddle/framework/executor.cc +++ b/paddle/framework/executor.cc @@ -69,10 +69,13 @@ void GraphView::Initialize(const ProgramDesc* pdesc) { struct Device { platform::CPUDeviceContext* cpu_device_context; +#ifndef PADDLE_ONLY_CPU + platform::CUDADeviceContext* cuda_device_context; +#endif + #ifndef PADDLE_ONLY_CPU Device(platform::CPUDeviceContext* cpu, platform::CUDADeviceContext* gpu) : cpu_device_context(cpu), cuda_device_context(gpu) {} - platform::CDUADeviceContext* cuda_device_context; #else explicit Device(platform::CPUDeviceContext* cpu) : cpu_device_context(cpu) {} #endif @@ -126,10 +129,16 @@ platform::CUDADeviceContext* GetCUDADeviceContext( Device* GetDevice(const platform::Place& place) { platform::CPUPlace cpu_place; #ifndef PADDLE_ONLY_CPU - platform::GPUPlace gpu_place = boost::get(place); - static std::unique_ptr g_device = make_unique( - GetCPUDeviceContext(cpu_place), GetCUDADeviceContext(gpu_place)); - return g_device.get(); + if (platform::is_gpu_place(place)) { + platform::GPUPlace gpu_place = boost::get(place); + static std::unique_ptr g_device = make_unique( + GetCPUDeviceContext(cpu_place), GetCUDADeviceContext(gpu_place)); + return g_device.get(); + } else { + static std::unique_ptr g_device = + make_unique(GetCPUDeviceContext(cpu_place), nullptr); + return g_device.get(); + } #else static std::unique_ptr g_device = make_unique(GetCPUDeviceContext(cpu_place)); @@ -153,7 +162,9 @@ void ExecutorImpl::Run() { scope_->NewVar(); device_->cpu_device_context->Wait(); #ifndef PADDLE_ONLY_CPU - device_->cuda_device_context->Wait(); + if (device_->cuda_device_context) { + device_->cuda_device_context->Wait(); + } #endif } From 2296d81cf9560437b368354229b7ceb22b67d234 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Mon, 2 Oct 2017 11:39:10 -0700 Subject: [PATCH 131/342] Use `type_defs.h` to resolve cyclic dependencies --- paddle/framework/attribute.h | 10 +--------- paddle/framework/op_desc.h | 2 +- paddle/framework/op_info.h | 7 +------ paddle/framework/type_defs.h | 38 ++++++++++++++++++++++++++++++++++++ 4 files changed, 41 insertions(+), 16 deletions(-) create mode 100644 paddle/framework/type_defs.h diff --git a/paddle/framework/attribute.h b/paddle/framework/attribute.h index c7559cefb6..d13530e340 100644 --- a/paddle/framework/attribute.h +++ b/paddle/framework/attribute.h @@ -21,20 +21,12 @@ limitations under the License. */ #include #include "paddle/framework/framework.pb.h" +#include "paddle/framework/type_defs.h" #include "paddle/platform/enforce.h" -#include "paddle/platform/variant.h" namespace paddle { namespace framework { -// The order should be as same as framework.proto -typedef boost::variant, - std::vector, std::vector, bool, - std::vector, BlockDesc*> - Attribute; - -typedef std::unordered_map AttributeMap; - ProgramDesc& GetProgramDesc(); template diff --git a/paddle/framework/op_desc.h b/paddle/framework/op_desc.h index e03b4d067f..0af4169715 100644 --- a/paddle/framework/op_desc.h +++ b/paddle/framework/op_desc.h @@ -17,7 +17,7 @@ limitations under the License. */ #include #include #include "paddle/framework/attribute.h" -#include "paddle/framework/op_info.h" +#include "paddle/framework/type_defs.h" #include "paddle/framework/var_desc.h" namespace paddle { diff --git a/paddle/framework/op_info.h b/paddle/framework/op_info.h index 6d1ee4dece..470336d367 100644 --- a/paddle/framework/op_info.h +++ b/paddle/framework/op_info.h @@ -19,15 +19,10 @@ #include #include "paddle/framework/attribute.h" #include "paddle/framework/op_desc.h" +#include "paddle/framework/type_defs.h" namespace paddle { namespace framework { -class OperatorBase; -using VariableNameMap = std::map>; - -using OpCreator = std::function; class GradOpDescMakerBase { public: diff --git a/paddle/framework/type_defs.h b/paddle/framework/type_defs.h new file mode 100644 index 0000000000..dec5066f1e --- /dev/null +++ b/paddle/framework/type_defs.h @@ -0,0 +1,38 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#pragma once +#include +#include +#include "paddle/platform/variant.h" + +namespace paddle { +namespace framework { +class OperatorBase; +using VariableNameMap = std::map>; + +// The order should be as same as framework.proto +using Attribute = + boost::variant, + std::vector, std::vector, bool, + std::vector, BlockDesc*>; + +using AttributeMap = std::unordered_map; + +using OpCreator = std::function; + +} // namespace framework +} // namespace paddle From 32f5c9dd934e7de15a93a8145bf6ee4499b3bc7d Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Mon, 2 Oct 2017 11:51:24 -0700 Subject: [PATCH 132/342] recurrent_op pass the unit test --- paddle/operators/recurrent_op.cc | 87 +++++-------------- paddle/operators/recurrent_op.h | 23 +---- paddle/operators/rnn/recurrent_op_utils.cc | 55 +++++------- paddle/operators/rnn/recurrent_op_utils.h | 6 +- paddle/operators/sum_op.cc | 5 +- .../v2/framework/tests/test_recurrent_op.py | 26 +++--- 6 files changed, 66 insertions(+), 136 deletions(-) diff --git a/paddle/operators/recurrent_op.cc b/paddle/operators/recurrent_op.cc index b9fba3e135..016e2043fd 100644 --- a/paddle/operators/recurrent_op.cc +++ b/paddle/operators/recurrent_op.cc @@ -28,7 +28,8 @@ using Variable = framework::Variable; using Tensor = framework::Tensor; using LoDTensor = framework::LoDTensor; -void RecurrentAlgorithm::InferShape(const Scope& scope) const { +void RecurrentAlgorithm::Run(const Scope& scope, + const platform::DeviceContext& dev_ctx) const { auto* input0 = scope.FindVar(arg_->inlinks[0]); PADDLE_ENFORCE_NOT_NULL(input0); seq_len_ = input0->GetMutable()->dims()[0]; @@ -36,38 +37,16 @@ void RecurrentAlgorithm::InferShape(const Scope& scope) const { CreateScopes(scope); auto& step_scopes = GetStepScopes(scope); - rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_, - true /*infer_shape_mode*/); - InitMemories(step_scopes[0], true /*infer_shape_mode*/); + rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_); + InitMemories(step_scopes[0]); for (size_t i = 0; i < seq_len_; i++) { if (i > 0) { - rnn::LinkMemories(step_scopes, arg_->memories, i, -1, - true /*infer_shape_mode*/); - } - (*stepnet_)->InferShape(*step_scopes[i]); - } - rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len_, - true /*infer_shape_mode*/); -} - -void RecurrentAlgorithm::Run(const Scope& scope, - const platform::DeviceContext& dev_ctx) const { - auto step_scopes = GetStepScopes(scope); - rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_, - false /*infer_shape_mode*/); - InitMemories(step_scopes[0], false /*infer_shape_mode*/); - - for (size_t step_id = 0; step_id < seq_len_; step_id++) { - // create output alias variables - if (step_id > 0) { - rnn::LinkMemories(step_scopes, arg_->memories, step_id, -1, - false /*infer_shape_mode*/); + rnn::LinkMemories(step_scopes, arg_->memories, i, -1); } - (*stepnet_)->Run(*step_scopes[step_id], dev_ctx); + (*stepnet_)->Run(*step_scopes[i], dev_ctx); } - rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len_, - false /*infer_shape_mode*/); + rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len_); } void RecurrentAlgorithm::CreateScopes(const Scope& scope) const { @@ -105,8 +84,7 @@ void RecurrentAlgorithm::CreateScopes(const Scope& scope) const { } } -void RecurrentAlgorithm::InitMemories(Scope* step_scope, - bool infer_shape_mode) const { +void RecurrentAlgorithm::InitMemories(Scope* step_scope) const { for (auto& attr : arg_->memories) { auto* pre_mem = step_scope->NewVar(attr.pre_var)->GetMutable(); PADDLE_ENFORCE(step_scope->FindVar(attr.boot_var) != nullptr, @@ -114,12 +92,9 @@ void RecurrentAlgorithm::InitMemories(Scope* step_scope, attr.boot_var); auto* boot_mem = step_scope->FindVar(attr.boot_var)->GetMutable(); - if (infer_shape_mode) { - pre_mem->Resize(boot_mem->dims()); - PADDLE_ENFORCE_EQ(pre_mem->dims().size(), 2); - } else { - pre_mem->ShareDataWith(*boot_mem); - } + pre_mem->Resize(boot_mem->dims()); + PADDLE_ENFORCE_EQ(pre_mem->dims().size(), 2); + pre_mem->ShareDataWith(*boot_mem); } } @@ -169,23 +144,22 @@ class RecurrentAlgorithmProtoAndCheckerMaker void RecurrentGradientAlgorithm::Run( const Scope& scope, const platform::DeviceContext& dev_ctx) const { + seq_len_ = + scope.FindVar(arg_->inlinks[0])->GetMutable()->dims()[0]; auto step_scopes = GetStepScopes(scope); - rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_, - false /*infer_shape_mode*/); + rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_); for (int step_id = seq_len_ - 1; step_id >= 0; --step_id) { if (static_cast(step_id) != seq_len_ - 1) { - rnn::LinkMemories(step_scopes, arg_->memories, step_id, 1, - false /*infer_shape_mode*/); + rnn::LinkMemories(step_scopes, arg_->memories, step_id, 1); } (*stepnet_)->Run(*step_scopes[step_id], dev_ctx); } - LinkBootMemoryGradients(step_scopes[0], false); - rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len_, - false /*infer_shape_mode*/); + rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len_); + LinkBootMemoryGradients(step_scopes[0]); } void RecurrentGradientAlgorithm::LinkBootMemoryGradients( - Scope* step_scope, bool infer_shape_mode) const { + Scope* step_scope) const { for (auto& attr : arg_->memories) { PADDLE_ENFORCE(step_scope->FindVar(attr.var) != nullptr, "memory variable [%s] does not exists", attr.var); @@ -194,30 +168,9 @@ void RecurrentGradientAlgorithm::LinkBootMemoryGradients( auto* mem_grad = step_scope->NewVar(attr.var)->GetMutable(); auto* boot_mem_grad = step_scope->NewVar(attr.boot_var)->GetMutable(); - if (infer_shape_mode) { - boot_mem_grad->Resize(mem_grad->dims()); - } else { - boot_mem_grad->ShareDataWith(*mem_grad); - } - } -} - -void RecurrentGradientAlgorithm::InferShape(const Scope& scope) const { - seq_len_ = - scope.FindVar(arg_->inlinks[0])->GetMutable()->dims()[0]; - auto step_scopes = GetStepScopes(scope); - rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_, - true /*infer_shape_mode*/); - for (int step_id = seq_len_ - 1; step_id >= 0; --step_id) { - if (static_cast(step_id) != seq_len_ - 1) { - rnn::LinkMemories(step_scopes, arg_->memories, step_id, 1, - true /*infer_shape_mode*/); - } - (*stepnet_)->InferShape(*step_scopes[step_id]); + boot_mem_grad->Resize(mem_grad->dims()); + boot_mem_grad->ShareDataWith(*mem_grad); } - rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len_, - true /*infer_shape_mode*/); - LinkBootMemoryGradients(step_scopes[0], true /*infer_shape_mode*/); } RecurrentGradientOp::RecurrentGradientOp( diff --git a/paddle/operators/recurrent_op.h b/paddle/operators/recurrent_op.h index 18f8c53e18..752025e42c 100644 --- a/paddle/operators/recurrent_op.h +++ b/paddle/operators/recurrent_op.h @@ -41,11 +41,6 @@ class RecurrentAlgorithm { stepnet_ = stepnet; } - /** - * InferShape must be called before Run. - */ - void InferShape(const framework::Scope& scope) const; - protected: /* * The step scopes will be stored in the father scope as a variable. @@ -61,7 +56,7 @@ class RecurrentAlgorithm { ->GetMutable>(); } - void InitMemories(framework::Scope* step_scopes, bool infer_shape_mode) const; + void InitMemories(framework::Scope* step_scopes) const; private: std::unique_ptr* stepnet_; @@ -91,13 +86,7 @@ class RecurrentGradientAlgorithm { void Run(const framework::Scope& scope, const platform::DeviceContext& dev_ctx) const; - void LinkBootMemoryGradients(framework::Scope* step_scopes, - bool infer_shape_mode) const; - - /** - * InferShape must be called before Run. - */ - void InferShape(const framework::Scope& scope) const; + void LinkBootMemoryGradients(framework::Scope* step_scopes) const; protected: inline const std::vector& GetStepScopes( @@ -136,10 +125,6 @@ class RecurrentOp : public framework::OperatorBase { const OperatorBase& stepnet() const { return *stepnet_; } - void InferShape(const framework::Scope& scope) const { - alg_.InferShape(scope); - } - static const rnn::ArgumentName kArgName; private: @@ -162,10 +147,6 @@ class RecurrentGradientOp : public framework::OperatorBase { PADDLE_THROW("Not Implemented"); } - void InferShape(const framework::Scope& scope) const { - alg_.InferShape(scope); - } - void Run(const framework::Scope& scope, const platform::DeviceContext& dev_ctx) const override { alg_.Run(scope, dev_ctx); diff --git a/paddle/operators/rnn/recurrent_op_utils.cc b/paddle/operators/rnn/recurrent_op_utils.cc index a767009d23..a02994f99d 100644 --- a/paddle/operators/rnn/recurrent_op_utils.cc +++ b/paddle/operators/rnn/recurrent_op_utils.cc @@ -25,7 +25,7 @@ using LoDTensor = framework::LoDTensor; void SegmentInputs(const std::vector& step_scopes, const std::vector& inlinks, - const size_t seq_len, bool infer_shape_mode) { + const size_t seq_len) { PADDLE_ENFORCE(!inlinks.empty(), "no in links are provided."); for (size_t i = 0; i < inlinks.size(); ++i) { // global inputs @@ -41,11 +41,9 @@ void SegmentInputs(const std::vector& step_scopes, for (size_t j = 0; j < seq_len; j++) { Tensor* step_input = step_scopes[j]->NewVar(inlinks[i])->GetMutable(); - if (!infer_shape_mode) { - // The input of operators of each step is Tensor here. - // Maybe need to modify Slice function. - *step_input = input->Slice(j, j + 1); - } + // The input of operators of each step is Tensor here. + // Maybe need to modify Slice function. + *step_input = input->Slice(j, j + 1); step_input->Resize(step_dims); } } @@ -53,39 +51,35 @@ void SegmentInputs(const std::vector& step_scopes, void ConcatOutputs(const std::vector& step_scopes, const std::vector& outlinks, - const size_t seq_len, bool infer_shape_mode) { + const size_t seq_len) { for (size_t i = 0; i < outlinks.size(); i++) { auto output_var = step_scopes[0]->parent().FindVar(outlinks[i]); PADDLE_ENFORCE_NOT_NULL(output_var, "output link [%s] is not in scope.", outlinks[i]); LoDTensor* output = output_var->GetMutable(); - if (infer_shape_mode) { - auto step_scope_var = step_scopes[0]->FindVar(outlinks[i]); - PADDLE_ENFORCE_NOT_NULL(step_scope_var, "%s not in scope", outlinks[i]); - f::DDim step_dims = - step_scope_var->template GetMutable()->dims(); - std::vector dims_vec = vectorize(step_dims); - dims_vec.insert(dims_vec.begin(), seq_len); - output->Resize(f::make_ddim(dims_vec)); - } else { - output->mutable_data(platform::CPUPlace()); - for (size_t j = 0; j < seq_len; j++) { - LoDTensor* step_output = - step_scopes[j]->FindVar(outlinks[i])->GetMutable(); - // TODO(luotao02) data type and platform::DeviceContext() should set - // correctly - (output->Slice(j, j + 1)) - .CopyFrom(*step_output, platform::CPUPlace()); - } + auto step_scope_var = step_scopes[0]->FindVar(outlinks[i]); + PADDLE_ENFORCE_NOT_NULL(step_scope_var, "%s not in scope", outlinks[i]); + f::DDim step_dims = + step_scope_var->template GetMutable()->dims(); + std::vector dims_vec = vectorize(step_dims); + dims_vec.insert(dims_vec.begin(), seq_len); + output->Resize(f::make_ddim(dims_vec)); + output->mutable_data(platform::CPUPlace()); + for (size_t j = 0; j < seq_len; j++) { + LoDTensor* step_output = + step_scopes[j]->FindVar(outlinks[i])->GetMutable(); + // TODO(luotao02) data type and platform::DeviceContext() should set + // correctly + (output->Slice(j, j + 1)) + .CopyFrom(*step_output, platform::CPUPlace()); } } } void LinkMemories(const std::vector& scopes, const std::vector& memories, - const size_t step_id, const int offset, - bool infer_shape_mode) { + const size_t step_id, const int offset) { PADDLE_ENFORCE_LT(step_id, scopes.size(), "step [%d] is out of range of step scopes' size [%d]", step_id, scopes.size()); @@ -100,11 +94,8 @@ void LinkMemories(const std::vector& scopes, for (auto& attr : memories) { auto mem = scope->FindVar(attr.pre_var)->GetMutable(); auto linked_mem = linked_scope->FindVar(attr.var)->GetMutable(); - if (infer_shape_mode) { - mem->Resize(linked_mem->dims()); - } else { - mem->ShareDataWith(*linked_mem); - } + mem->Resize(linked_mem->dims()); + mem->ShareDataWith(*linked_mem); } } diff --git a/paddle/operators/rnn/recurrent_op_utils.h b/paddle/operators/rnn/recurrent_op_utils.h index 9c777f1e90..fd17b9b889 100644 --- a/paddle/operators/rnn/recurrent_op_utils.h +++ b/paddle/operators/rnn/recurrent_op_utils.h @@ -64,18 +64,18 @@ struct ArgumentName { */ void SegmentInputs(const std::vector& step_scopes, const std::vector& inlinks, - const size_t seq_len, bool infer_shape_mode); + const size_t seq_len); /** * Process outputs of step nets and merge to variables. */ void ConcatOutputs(const std::vector& step_scopes, const std::vector& outlinks, - const size_t seq_len, bool infer_shape_mode); + const size_t seq_len); void LinkMemories(const std::vector& step_scopes, const std::vector& memories, const size_t step_id, - const int offset, bool infer_shape_mode); + const int offset); void InitArgument(const ArgumentName& name, Argument* arg, const framework::OperatorBase& op, bool is_grad = false); diff --git a/paddle/operators/sum_op.cc b/paddle/operators/sum_op.cc index 5d76313aeb..c54843faa6 100644 --- a/paddle/operators/sum_op.cc +++ b/paddle/operators/sum_op.cc @@ -22,14 +22,15 @@ class SumOp : public framework::OperatorWithKernel { protected: void InferShape(framework::InferShapeContextBase* ctx) const override { + PADDLE_ENFORCE(ctx->HasInputs("X"), "Inputs(X) should not be null"); auto x_dims = ctx->GetInputsDim("X"); - PADDLE_ENFORCE(!x_dims.empty(), "Input(X) of SumOp should not be null."); PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) of SumOp should not be null."); - auto in_dim = x_dims[0]; size_t N = x_dims.size(); PADDLE_ENFORCE_GT(N, 1, "Input tensors count should > 1."); + + auto in_dim = x_dims[0]; for (size_t i = 1; i < N; i++) { auto dim = x_dims[i]; PADDLE_ENFORCE(in_dim == dim, "Input tensors must have same shape"); diff --git a/python/paddle/v2/framework/tests/test_recurrent_op.py b/python/paddle/v2/framework/tests/test_recurrent_op.py index 6b9e7a88ce..1f114432c0 100644 --- a/python/paddle/v2/framework/tests/test_recurrent_op.py +++ b/python/paddle/v2/framework/tests/test_recurrent_op.py @@ -16,14 +16,17 @@ class PySimpleRNN(object): ''' def __init__(self, input_dim=30, batch_size=50, weight_dim=15, sent_len=11): - self.x = np.random.normal(size=(sent_len, batch_size, input_dim)) - self.W = np.random.normal(size=(input_dim, input_dim)) - self.U = np.random.normal(size=(input_dim, input_dim)) - self.h_boot = np.random.normal(size=(batch_size, input_dim)) + self.x = np.random.normal(size=(sent_len, batch_size, + input_dim)).astype("float32") + self.W = np.random.normal(size=(input_dim, input_dim)).astype("float32") + self.U = np.random.normal(size=(input_dim, input_dim)).astype("float32") + self.h_boot = np.random.normal(size=(batch_size, + input_dim)).astype("float32") # memories self.mems = [ - np.zeros(shape=(batch_size, input_dim)) for i in range(sent_len) + np.zeros(shape=(batch_size, input_dim)).astype("float32") + for i in range(sent_len) ] def forward(self): @@ -36,7 +39,7 @@ class PySimpleRNN(object): return [self.x[i] for i in range(self.x.shape[0])] def concat_outputs(self): - return np.array(self.mems) + return np.array(self.mems).astype("float32") def step(self, step_id, x): ''' @@ -47,8 +50,8 @@ class PySimpleRNN(object): pre_mem = self.mems[step_id - 1] else: pre_mem = self.h_boot - xW = np.matmul(x, self.W) - hU = np.matmul(pre_mem, self.U) + xW = np.matmul(x, self.W).astype("float32") + hU = np.matmul(pre_mem, self.U).astype("float32") sum = xW + hU self.mems[step_id] = py_sigmoid(sum) @@ -102,7 +105,8 @@ class RecurrentOpTest(unittest.TestCase): self.create_step_net() ctx = core.DeviceContext.create(core.CPUPlace()) self.rnnop.run(self.scope, ctx) - return np.array(self.scope.find_var("h@mem").get_tensor()) + return np.array(self.scope.find_var("h@mem").get_tensor()).astype( + "float32") def create_global_variables(self): # create inlink @@ -142,7 +146,7 @@ class RecurrentOpTest(unittest.TestCase): stepnet = core.Net.create() x_fc_op = Operator("mul", X="x", Y="W", Out="Wx") h_fc_op = Operator("mul", X="h@pre", Y="U", Out="Uh") - sum_op = Operator("add", X="Wx", Y="Uh", Out="sum") + sum_op = Operator("sum", X=["Wx", "Uh"], Out="sum") sig_op = Operator("sigmoid", X="sum", Y="h@mem") for op in [x_fc_op, h_fc_op, sum_op, sig_op]: @@ -179,7 +183,7 @@ class RecurrentGradientOpTest(unittest.TestCase): stepnet = core.Net.create() x_fc_op = Operator("mul", X="x@alias", Y="W", Out="Wx") h_fc_op = Operator("mul", X="h@pre", Y="U", Out="Uh") - sum_op = Operator("add", X="Wx", Y="Uh", Out="sum") + sum_op = Operator("sum", X=["Wx", "Uh"], Out="sum") sig_op = Operator("sigmoid", X="sum", Y="h@alias") for op in [x_fc_op, h_fc_op, sum_op, sig_op]: From 3723304da953cce7aa88d1fdbd684bff91412dae Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Mon, 2 Oct 2017 11:56:14 -0700 Subject: [PATCH 133/342] Add missing ctor --- paddle/framework/grad_op_desc_maker.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/paddle/framework/grad_op_desc_maker.h b/paddle/framework/grad_op_desc_maker.h index cb4d160bd0..b4b6d54bf3 100644 --- a/paddle/framework/grad_op_desc_maker.h +++ b/paddle/framework/grad_op_desc_maker.h @@ -79,6 +79,8 @@ class GradOpDescMakerBase { class SingleGradOpDescMaker : public GradOpDescMakerBase { public: + using GradOpDescMakerBase::GradOpDescMakerBase; + std::vector operator()() const { return {this->Apply()}; } protected: @@ -86,6 +88,9 @@ class SingleGradOpDescMaker : public GradOpDescMakerBase { }; class DefaultGradOpDescMaker : public SingleGradOpDescMaker { + public: + using SingleGradOpDescMaker::SingleGradOpDescMaker; + protected: virtual OpDescBind Apply() const { OpDescBind grad; From c705f065ba403606d39bc972d85f7eba1920f029 Mon Sep 17 00:00:00 2001 From: Yan Chunwei Date: Mon, 2 Oct 2017 16:14:48 -0400 Subject: [PATCH 134/342] add TensorArray (#4459) * add tensor array * update * set type --- paddle/framework/CMakeLists.txt | 3 + paddle/framework/tensor_array.cc | 283 ++++++++++++++++++++++++++ paddle/framework/tensor_array.h | 118 +++++++++++ paddle/framework/tensor_array_test.cc | 130 ++++++++++++ 4 files changed, 534 insertions(+) create mode 100644 paddle/framework/tensor_array.cc create mode 100644 paddle/framework/tensor_array.h create mode 100644 paddle/framework/tensor_array_test.cc diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt index 9140854a96..5d394132b7 100644 --- a/paddle/framework/CMakeLists.txt +++ b/paddle/framework/CMakeLists.txt @@ -43,3 +43,6 @@ add_custom_command(TARGET framework_py_proto POST_BUILD cc_library(backward SRCS backward.cc DEPS net_op) cc_test(backward_test SRCS backward_test.cc DEPS backward recurrent_op device_context) + +cc_library(tensor_array SRCS tensor_array.cc DEPS lod_tensor) +cc_test(tensor_array_test SRCS tensor_array_test.cc DEPS tensor_array place) diff --git a/paddle/framework/tensor_array.cc b/paddle/framework/tensor_array.cc new file mode 100644 index 0000000000..d54714c66c --- /dev/null +++ b/paddle/framework/tensor_array.cc @@ -0,0 +1,283 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + + + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#include "paddle/framework/tensor_array.h" + +#include +#include +#include + +namespace paddle { +namespace framework { + +namespace detail { + +/* + * Offer an iterator over the length-sorted lod-tensor's top level. The top + * level of a lod-tensor stores batch-size of sequences, each top-level sequence + * may contains several lower-level sequences, sort top-level lod by the numbers + * of lower-level sequences in descending order, so that during RNN's running, + * the batch-size will keep decreasing, the short sentences will end at the tail + * of each batch. + * + * Let's take a simple lod-tensor for example + * + * |(0) |(1) top-level has two instances + * ||| ||||| lower-level + * + * sort by lower-level's length + * + * |(1) |(0) + * ||||| ||| + * + * when RNN runs, it get 5 batches (equals the number of elements the longest + * sequence has) + * + * ||||| + * ||| + * + * the first three batches has two elements, the last two elements just has 1 + * element each. + */ +struct DynamicBatchUnpacker { + using value_type = float; + + DynamicBatchUnpacker(const LoDTensor& source, size_t level, + bool descend = true) + : source(&source), level(level) { + BuildLengthSortedMeta(descend); + } + + LoDTensor GetBatch(size_t index); + + std::vector meta; + + LoDTensor const* source; + size_t level; + + protected: + void BuildLengthSortedMeta(bool descend); +}; + +LoDTensor PackDynamicBatch(const std::vector& source, + const std::vector& meta, const LoD& lod, + size_t level); + +} // namespace detail + +const LoDTensor& TensorArray::Read(size_t index) const { + PADDLE_ENFORCE_LE(index, MAX_SIZE, "index[%d] too large", index); + if (index >= size()) { + values_.resize(index + 1); + } + return values_[index]; +} + +void TensorArray::Write(size_t index, const LoDTensor& value) { + PADDLE_ENFORCE_LE(index, MAX_SIZE, "index[%d] too large", index); + + if (index >= size()) { + values_.resize(index + 1); + } + + values_[index].Resize(value.dims()); + values_[index].mutable_data(platform::CPUPlace()); + values_[index].CopyFrom(value, platform::CPUPlace()); +} + +void TensorArray::WriteShared(size_t index, const LoDTensor& value) { + PADDLE_ENFORCE_LE(index, MAX_SIZE, "index[%d] too large", index); + if (index >= size()) { + values_.resize(index + 1); + } + + values_[index].ShareDataWith(value); +} + +LoDTensor TensorArray::Pack(size_t level, const std::vector& meta, + const LoD& lod) const { + return detail::PackDynamicBatch(values_, meta, lod, level); +} + +std::vector TensorArray::Unpack(const LoDTensor& source, int level, + bool length_desend) { + detail::DynamicBatchUnpacker unpacker(source, level, + length_desend /*descend*/); + + // find max length of all the sequences + size_t max_length = 0; + for (const auto& seq : unpacker.meta) { + max_length = std::max(max_length, seq.end - seq.begin); + } + + // write batches to values + for (size_t batch_id = 0; batch_id < max_length; batch_id++) { + Write(batch_id, unpacker.GetBatch(batch_id)); + } + + return unpacker.meta; +} + +LoDTensor TensorArray::Stack() const { + LoDTensor result; + if (size() == 0) return result; + + const auto& first_dims = values_.front().dims(); + // check all the values have the same shape + // TODO(superjom) check the same dtypes + for (size_t idx = 1; idx < size(); idx++) { + const auto& value_dims = values_[idx].dims(); + PADDLE_ENFORCE_EQ(first_dims, value_dims); + } + + // copy + auto result_dims = vectorize(first_dims); + result_dims.insert(result_dims.begin(), size()); + result.Resize(make_ddim(result_dims)); + result.mutable_data(platform::CPUPlace()); + + for (size_t idx = 0; idx < size(); idx++) { + result.Slice(idx, idx + 1) + .CopyFrom(Read(idx), platform::CPUPlace()); + } + return result; +} + +void TensorArray::Unstack(const LoDTensor& source) const { + Unstack(source, false /*data_shared*/); +} + +void TensorArray::UnstackShared(const LoDTensor& source) const { + Unstack(source, true /*data_shared*/); +} + +void TensorArray::Unstack(const LoDTensor& source, bool data_shared) const { + size_t first_dim = source.dims()[0]; + DDim value_dims = slice_ddim(source.dims(), 1, source.dims().size()); + PADDLE_ENFORCE_GT(first_dim, 0, + "source should have some data to be unstacked"); + + values_.resize(first_dim); + + for (size_t elem = 0; elem < first_dim; elem++) { + // create a new value + auto& value = values_[elem]; + if (data_shared) { + // share memory + value.ShareDataWith(source.Slice(elem, elem + 1)); + } else { + // copy + value.Resize(value_dims); + value.CopyFrom(source.Slice(elem, elem + 1), + platform::CPUPlace()); + } + } +} + +size_t TensorArray::size() const { return values_.size(); } + +namespace detail { + +void DynamicBatchUnpacker::BuildLengthSortedMeta(bool descend) { + PADDLE_ENFORCE(meta.empty(), "duplicate build meta"); + // collect meta for each sequence in some level + auto lod = SliceLevels(source->lod(), level, level + 1)[0]; + + for (size_t seq_id = 0; seq_id < lod.size() - 1; seq_id++) { + DySeqMeta seq_meta({lod[seq_id], lod[seq_id + 1], seq_id}); + meta.push_back(seq_meta); + } + + PADDLE_ENFORCE_GT(meta.size(), 0, "meta is empty"); + + // sort by length + sort(meta.begin(), meta.end(), + [descend](const DySeqMeta& a, const DySeqMeta& b) { + bool a_ge_b = (a.end - a.begin) > (b.end - b.begin); + return descend ? a_ge_b : !a_ge_b; + }); +} + +LoDTensor DynamicBatchUnpacker::GetBatch(size_t index) { + PADDLE_ENFORCE(!meta.empty(), "should build meta first"); + LoDTensor result; + + // collect indice need to copy to the batch + std::vector indice; + for (size_t seq_id = 0; seq_id < meta.size(); seq_id++) { + const auto& seq_meta = meta[seq_id]; + if (index >= seq_meta.end) break; + indice.push_back(seq_meta.begin + index); + } + + PADDLE_ENFORCE(!indice.empty(), "invalid batch at %d", index); + + // copy the indice of records in LoDTensor + auto record_dims = slice_ddim(source->dims(), 1, source->dims().size()); + auto record_dims_vec = vectorize(record_dims); + record_dims_vec.insert(record_dims_vec.begin(), indice.size()); + result.Resize(make_ddim(record_dims_vec)); + result.mutable_data(platform::CPUPlace()); + + for (size_t i = 0; i < indice.size() - 1; i++) { + auto index = indice[i]; + auto target = result.Slice(i, i + 1); + auto source_ = source->Slice(index, index + 1); + target.CopyFrom(source_, platform::CPUPlace()); + } + + return result; +} + +LoDTensor PackDynamicBatch(const std::vector& source, + const std::vector& meta, const LoD& lod, + size_t level) { + PADDLE_ENFORCE(!source.empty()); + PADDLE_ENFORCE(!meta.empty()); + PADDLE_ENFORCE(!lod.empty()); + + LoDTensor result; + + // init result space + auto record_dims = slice_ddim(source[0].dims(), 1, source[0].dims().size()); + auto record_dims_vec = vectorize(record_dims); + auto height = lod[level].back(); + record_dims_vec.insert(record_dims_vec.begin(), height); + result.Resize(make_ddim(record_dims_vec)); + result.mutable_data(platform::CPUPlace()); + + for (size_t batch_id = 0; batch_id < source.size(); batch_id++) { + for (size_t seq_id = 0; seq_id < meta.size(); seq_id++) { + const auto& seq_meta = meta[seq_id]; + // source is source[batch_id][seq_id] + // target is result[index] + auto index = seq_meta.begin + batch_id; + if (index >= seq_meta.end) break; + auto source_ = source[batch_id].Slice(seq_id, seq_id + 1); + auto target = result.Slice(index, index + 1); + target.CopyFrom(source_, platform::CPUPlace()); + } + } + + result.set_lod(lod); + + return result; +} + +} // namespace detail + +} // namespace framework +} // namespace paddle diff --git a/paddle/framework/tensor_array.h b/paddle/framework/tensor_array.h new file mode 100644 index 0000000000..e76f33d2c0 --- /dev/null +++ b/paddle/framework/tensor_array.h @@ -0,0 +1,118 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#pragma once +#include + +#include "paddle/framework/lod_tensor.h" + +namespace paddle { +namespace framework { + +/* + * DyBatchSeqPosition stores indices of the basic element in tensor. It is used + * after lod-tensor's re-assembling, its info can be used to recover the order + * in original lod-tensor. + */ +struct DySeqMeta { + size_t begin; + size_t end; // not included + size_t ori_idx; +}; + +/* + * TensorArray is a C-array-like array of tensors, it is meant to be used with + * dynamic iteration primitives such as while_loop. It is used to segment inputs + * and store states in all time steps. + * + * By providing some methods similar to a C++ array, the difinition of some + * state-based dynamic models such as RNN cound be more natural and highly + * flexible. + */ +class TensorArray { + public: + using value_type = float; + + // max number of values allowed to store. + const size_t MAX_SIZE{100000}; + + /* + * Inputs: + * - value_shared: share memory between tensors. + */ + explicit TensorArray(bool values_shared = true) + : values_shared_(values_shared) {} + + /* + * Read the value at location `index` in the `TensorArray`. + */ + const LoDTensor &Read(size_t index) const; + + /* + * Write value into the index of the TensorArray. + */ + void Write(size_t index, const LoDTensor &value); + + /* + * Write value into the index of the TensorArray, with memory shared. + */ + void WriteShared(size_t index, const LoDTensor &value); + + /* + * Recover the original LoD-arranged LoDTensor with the `values`, `level` and + * `indice_map`. + */ + LoDTensor Pack(size_t level, const std::vector &meta, + const LoD &lod) const; + + /* + * Split LoDTensor in some `level` and write the generated batches to + * `values`, if set `desend`, will sort by length in descending order else in + * ascending order. + */ + std::vector Unpack(const LoDTensor &source, int level, + bool length_desend); + + /* + * Pack the values into a tensor with rank one higher than each tensor in + * values. + */ + LoDTensor Stack() const; + + /* + * Unpacks the given division of a rank-`R` tensor into rank-`(R-1)` tensors. + */ + void Unstack(const LoDTensor &source) const; + + /* + * Unpacks the given division of a rank-`R` tensor into rank-`(R-1)` tensors, + * with memory of tensors shared. + */ + void UnstackShared(const LoDTensor &source) const; + + /* + * Return the number of values. + */ + size_t size() const; + + protected: + void Unstack(const LoDTensor &source, bool data_shared) const; + + private: + mutable std::vector values_; + bool values_shared_; +}; // class TensorArray + +} // namespace framework +} // namespace paddle diff --git a/paddle/framework/tensor_array_test.cc b/paddle/framework/tensor_array_test.cc new file mode 100644 index 0000000000..d9f52509cd --- /dev/null +++ b/paddle/framework/tensor_array_test.cc @@ -0,0 +1,130 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#include "paddle/framework/tensor_array.h" + +#include + +namespace paddle { +namespace framework { + +class TensorArrayTester : public ::testing::Test { + protected: + void SetUp() override { + LoDTensor source; + source.Resize(make_ddim({batch_size, dim})); + int* data = source.mutable_data(platform::CPUPlace()); + for (int i = 0; i < 16 * 32; i++) { + data[i] = i; + } + ta.Unstack(source); + } + + TensorArray ta; + const int batch_size = 16; + const int dim = 32; +}; + +TEST_F(TensorArrayTester, Read) { + for (int i = 0; i < batch_size; i++) { + const auto& tensor = ta.Read(i); + ASSERT_EQ(tensor.dims()[0], 1); + ASSERT_EQ(tensor.dims()[1], dim); + } +} + +TEST_F(TensorArrayTester, Write) { + LoDTensor source; + source.Resize(make_ddim({1, dim})); + for (int i = 0; i < dim; i++) { + *(source.mutable_data(platform::CPUPlace()) + i) = i; + } + + ta.Write(2, source); + + const auto& tensor = ta.Read(2); + for (int i = 0; i < dim; i++) { + EXPECT_EQ(*(tensor.data() + i), *(source.data() + i)); + } +} + +TEST_F(TensorArrayTester, WriteShared) { + LoDTensor source; + source.Resize(make_ddim({1, dim})); + for (int i = 0; i < dim; i++) { + *(source.mutable_data(platform::CPUPlace()) + i) = i; + } + + ta.WriteShared(2, source); + + const auto& tensor = ta.Read(2); + for (int i = 0; i < dim; i++) { + EXPECT_EQ(*(tensor.data() + i), *(source.data() + i)); + } + + EXPECT_EQ(source.data(), tensor.data()); +} + +class TensorArrayPackTester : public ::testing::Test { + protected: + virtual void SetUp() override { + lod.push_back(std::vector{0, 2, 9, 13}); + + source.set_lod(lod); + source.Resize(make_ddim({13, 128})); + source.mutable_data(platform::CPUPlace()); + + // content of each setence: 0 1 2 3 4 + const auto& level = lod.front(); + for (size_t i = 0; i < level.size() - 1; i++) { + size_t begin = level[i]; + size_t end = level[i + 1]; + for (size_t j = begin; j < end; j++) { + auto record = source.Slice(j, j + 1); + for (int dim = 0; dim < 128; dim++) { + record.mutable_data(platform::CPUPlace())[dim] = j - begin; + } + } + } + + // unpack + meta = ta.Unpack(source, 0, true); + } + + LoD lod; + TensorArray ta; + LoDTensor source; + std::vector meta; +}; + +TEST_F(TensorArrayPackTester, Unpack) { + ASSERT_EQ(ta.size(), 7UL); + + const auto& t0 = ta.Read(0); + const auto& t1 = ta.Read(1); + + ASSERT_EQ(t0.data()[0], int(0)); + ASSERT_EQ(t1.data()[0], int(1)); +} + +TEST_F(TensorArrayPackTester, Pack) { + LoDTensor packed = ta.Pack(0, meta, lod); +} + +TEST_F(TensorArrayTester, size) { + ASSERT_EQ(ta.size(), static_cast(batch_size)); +} + +} // namespace framework +} // namespace paddle From 9b54ad18f8c6c974cff39b9e128a3ef82dd57455 Mon Sep 17 00:00:00 2001 From: caoying03 Date: Sat, 30 Sep 2017 16:06:52 +0800 Subject: [PATCH 135/342] add configuration helper for resize layer. --- doc/api/v1/index_cn.rst | 2 +- doc/api/v2/config/layer.rst | 5 ++++ .../paddle/trainer_config_helpers/layers.py | 25 ++++++++++++++++- .../tests/configs/file_list.sh | 2 +- .../protostr/test_resize_layer.protostr | 27 +++++++++++++++++++ .../tests/configs/test_resize_layer.py | 6 +++++ 6 files changed, 64 insertions(+), 3 deletions(-) create mode 100644 python/paddle/trainer_config_helpers/tests/configs/protostr/test_resize_layer.protostr create mode 100644 python/paddle/trainer_config_helpers/tests/configs/test_resize_layer.py diff --git a/doc/api/v1/index_cn.rst b/doc/api/v1/index_cn.rst index 3718cd73a2..cf146dc088 100644 --- a/doc/api/v1/index_cn.rst +++ b/doc/api/v1/index_cn.rst @@ -21,7 +21,7 @@ Model Config API trainer_config_helpers/optimizers.rst trainer_config_helpers/data_sources.rst trainer_config_helpers/layers.rst - trainer_config_helpers/activations.rst + trainer_config_helpers/activations.rst trainer_config_helpers/poolings.rst trainer_config_helpers/networks.rst trainer_config_helpers/evaluators.rst diff --git a/doc/api/v2/config/layer.rst b/doc/api/v2/config/layer.rst index c94627a728..d4e9d53e5c 100644 --- a/doc/api/v2/config/layer.rst +++ b/doc/api/v2/config/layer.rst @@ -345,6 +345,11 @@ clip .. autoclass:: paddle.v2.layer.clip :noindex: +resize +------ +.. autoclass:: paddle.v2.layer.resize + :noindex: + slope_intercept --------------- .. autoclass:: paddle.v2.layer.slope_intercept diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py index 74025d2a7b..d37f29d2c4 100644 --- a/python/paddle/trainer_config_helpers/layers.py +++ b/python/paddle/trainer_config_helpers/layers.py @@ -142,6 +142,7 @@ __all__ = [ 'img_pool3d_layer', 'scale_shift_layer', 'img_conv3d_layer', + 'resize_layer', ] @@ -250,6 +251,8 @@ class LayerType(object): KMAX_SEQ_SCORE = 'kmax_seq_score' SCALE_SHIFT_LAYER = 'scale_shift' + RESIZE = 'resize' + @staticmethod def is_layer_type(type_name): """ @@ -6473,7 +6476,7 @@ def switch_order_layer(input, act=None, layer_attr=None): """ - This layer switch dimension order of image input. + This layer switch dimension order of image input. From order "batchSize, channels, height, width" to order "batchSize, height, width, channels". @@ -6932,3 +6935,23 @@ def scale_shift_layer(input, name=None, param_attr=None, bias_attr=None): bias=ParamAttr.to_bias(bias_attr)) return LayerOutput( name, LayerType.SCALE_SHIFT_LAYER, parents=[input], size=input.size) + + +@wrap_name_default("resize") +def resize_layer(input, size, name=None): + """ + The resize layer resizes the input matrix with a shape of [Height, Width] + into the output matrix with a shape of [Height x Width / size, size], + where size is the parameter of this layer indicating the output dimension. + + :param input: The input to this layer. + :type input: LayerOutput. + :param name: The name of this layer. It is optional. + :type name: basestring + :param size: The resized output dimesion of this layer. + :type size: int + :return: A LayerOutput object. + :rtype: LayerOutput + """ + Layer(name=name, type=LayerType.RESIZE, inputs=Input(input.name), size=size) + return LayerOutput(name, LayerType.RESIZE, parents=[input], size=input.size) diff --git a/python/paddle/trainer_config_helpers/tests/configs/file_list.sh b/python/paddle/trainer_config_helpers/tests/configs/file_list.sh index 8a204a96f3..6a4550c209 100755 --- a/python/paddle/trainer_config_helpers/tests/configs/file_list.sh +++ b/python/paddle/trainer_config_helpers/tests/configs/file_list.sh @@ -10,6 +10,6 @@ test_prelu_layer test_row_conv test_detection_output_layer test_multibox_loss_la test_recursive_topology test_gated_unit_layer test_clip_layer test_row_l2_norm_layer test_kmax_seq_socre_layer test_sub_nested_seq_select_layer test_scale_shift_layer test_seq_slice_layer test_cross_entropy_over_beam test_pooling3D_layer -test_conv3d_layer test_deconv3d_layer test_BatchNorm3D) +test_conv3d_layer test_deconv3d_layer test_BatchNorm3D test_resize_layer) export whole_configs=(test_split_datasource) diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/test_resize_layer.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_resize_layer.protostr new file mode 100644 index 0000000000..9399252b23 --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_resize_layer.protostr @@ -0,0 +1,27 @@ +type: "nn" +layers { + name: "input" + type: "data" + size: 300 + active_type: "" +} +layers { + name: "__resize_0__" + type: "resize" + size: 150 + active_type: "" + inputs { + input_layer_name: "input" + } +} +input_layer_names: "input" +output_layer_names: "__resize_0__" +sub_models { + name: "root" + layer_names: "input" + layer_names: "__resize_0__" + input_layer_names: "input" + output_layer_names: "__resize_0__" + is_recurrent_layer_group: false +} + diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_resize_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_resize_layer.py new file mode 100644 index 0000000000..09a6f50733 --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/test_resize_layer.py @@ -0,0 +1,6 @@ +from paddle.trainer_config_helpers import * + +data = data_layer(name='input', size=300) +resized = resize_layer(input=data, size=150) + +outputs(resized) From 04e604b7198179d2feedd76b2cf455656698b21f Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Sat, 30 Sep 2017 16:55:40 -0700 Subject: [PATCH 136/342] Unify Map in OpDescBind --- paddle/framework/op_desc.cc | 27 ++++++++++++++++++++++++++- paddle/framework/op_desc.h | 37 ++++++------------------------------- paddle/platform/enforce.h | 4 ++-- 3 files changed, 34 insertions(+), 34 deletions(-) diff --git a/paddle/framework/op_desc.cc b/paddle/framework/op_desc.cc index 0c12c55dc0..33a064890c 100644 --- a/paddle/framework/op_desc.cc +++ b/paddle/framework/op_desc.cc @@ -112,6 +112,30 @@ const std::unordered_map &OpDescBind::GetAttrMap() return attrs_; } +struct SetAttrDescVisitor : public boost::static_visitor { + explicit SetAttrDescVisitor(OpDesc::Attr *attr) : attr_(attr) {} + mutable OpDesc::Attr *attr_; + void operator()(int v) const { attr_->set_i(v); } + void operator()(float v) const { attr_->set_f(v); } + void operator()(const std::string &v) const { attr_->set_s(v); } + void operator()(bool b) const { attr_->set_b(b); } + + void operator()(const std::vector &v) const { + VectorToRepeated(v, attr_->mutable_ints()); + } + void operator()(const std::vector &v) const { + VectorToRepeated(v, attr_->mutable_floats()); + } + void operator()(const std::vector &v) const { + VectorToRepeated(v, attr_->mutable_strings()); + } + void operator()(const std::vector &v) const { + VectorToRepeated(v, attr_->mutable_bools()); + } + void operator()(BlockDesc *desc) const { attr_->set_block_idx(desc->idx()); } + void operator()(boost::blank) const { PADDLE_THROW("Unexpected branch"); } +}; + void OpDescBind::Sync() { if (need_update_) { this->op_desc_.mutable_inputs()->Clear(); @@ -134,7 +158,8 @@ void OpDescBind::Sync() { attr_desc->set_name(attr.first); attr_desc->set_type( static_cast(attr.second.which() - 1)); - boost::apply_visitor(SetAttrDescVisitor(attr_desc), attr.second); + SetAttrDescVisitor visitor(attr_desc); + boost::apply_visitor(visitor, attr.second); } need_update_ = false; diff --git a/paddle/framework/op_desc.h b/paddle/framework/op_desc.h index 0cf7d13971..e03b4d067f 100644 --- a/paddle/framework/op_desc.h +++ b/paddle/framework/op_desc.h @@ -17,6 +17,7 @@ limitations under the License. */ #include #include #include "paddle/framework/attribute.h" +#include "paddle/framework/op_info.h" #include "paddle/framework/var_desc.h" namespace paddle { @@ -61,48 +62,22 @@ class OpDescBind { void SetBlockAttr(const std::string &name, BlockDescBind &block); // Only be used in C++ - void SetAttrMap(const std::unordered_map &attr_map); + void SetAttrMap(const AttributeMap &attr_map); Attribute GetAttr(const std::string &name) const; int GetBlockAttr(const std::string &name) const; // Only be used in C++ - const std::unordered_map &GetAttrMap() const; + const AttributeMap &GetAttrMap() const; private: - struct SetAttrDescVisitor : public boost::static_visitor { - explicit SetAttrDescVisitor(OpDesc::Attr *attr) : attr_(attr) {} - mutable OpDesc::Attr *attr_; - void operator()(int v) const { attr_->set_i(v); } - void operator()(float v) const { attr_->set_f(v); } - void operator()(const std::string &v) const { attr_->set_s(v); } - void operator()(bool b) const { attr_->set_b(b); } - - void operator()(const std::vector &v) const { - VectorToRepeated(v, attr_->mutable_ints()); - } - void operator()(const std::vector &v) const { - VectorToRepeated(v, attr_->mutable_floats()); - } - void operator()(const std::vector &v) const { - VectorToRepeated(v, attr_->mutable_strings()); - } - void operator()(const std::vector &v) const { - VectorToRepeated(v, attr_->mutable_bools()); - } - void operator()(BlockDesc *desc) const { - attr_->set_block_idx(desc->idx()); - } - void operator()(boost::blank) const { PADDLE_THROW("Unexpected branch"); } - }; - void Sync(); OpDesc op_desc_; - std::unordered_map> inputs_; - std::unordered_map> outputs_; - std::unordered_map attrs_; + VariableNameMap inputs_; + VariableNameMap outputs_; + AttributeMap attrs_; // need_update_ indicate there some local changes not be synchronized. If // local changes should be synchronized, need_update_ should be set to true. diff --git a/paddle/platform/enforce.h b/paddle/platform/enforce.h index b523ef03c0..52bd23039b 100644 --- a/paddle/platform/enforce.h +++ b/paddle/platform/enforce.h @@ -185,7 +185,7 @@ inline void throw_on_error(T e) { std::make_exception_ptr( \ std::runtime_error(paddle::string::Sprintf(__VA_ARGS__))), \ __FILE__, __LINE__); \ - } while (0) + } while (false) #define PADDLE_ENFORCE(...) \ do { \ @@ -195,7 +195,7 @@ inline void throw_on_error(T e) { throw ::paddle::platform::EnforceNotMet(std::current_exception(), \ __FILE__, __LINE__); \ } \ - } while (0) + } while (false) /* * Some enforce helpers here, usage: From c7ae0aac6660cc6cad2e7977ae573359433d484c Mon Sep 17 00:00:00 2001 From: kexinzhao <19hskevin87@gmail.com> Date: Sun, 1 Oct 2017 09:36:02 -0700 Subject: [PATCH 137/342] Fix typo in new_op_cn.md --- doc/howto/dev/new_op_cn.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/howto/dev/new_op_cn.md b/doc/howto/dev/new_op_cn.md index 9d3d02ffc3..c823d7e9fc 100644 --- a/doc/howto/dev/new_op_cn.md +++ b/doc/howto/dev/new_op_cn.md @@ -206,7 +206,7 @@ MulOp(const std::string &type, const framework::VariableNameMap &inputs, - `REGISTER_OP` : 注册`ops::MulOp`类,类型名为`mul`,该类的`ProtoMaker`为`ops::MulOpMaker`,注册`ops::MulOpGrad`,类型名为`mul_grad`。 - `REGISTER_OP_WITHOUT_GRADIENT` : 用于注册没有反向的Op。 - - `REGISTER_OP_CPU_KERNEL` :注册`ops::MulKernel`类,并特化模板参数为`paddle::platform::CPUPlace`和`float`类型,同理,注册`ops::MulKernel`类。 + - `REGISTER_OP_CPU_KERNEL` :注册`ops::MulKernel`类,并特化模板参数为`paddle::platform::CPUPlace`和`float`类型,同理,注册`ops::MulGradKernel`类。 - 在 `.cu`文件中注册GPU Kernel。 From 83764d491cc8a835a315f29a04fe1addfd1a05ae Mon Sep 17 00:00:00 2001 From: kexinzhao <19hskevin87@gmail.com> Date: Sun, 1 Oct 2017 09:40:49 -0700 Subject: [PATCH 138/342] Update new_op_en.md --- doc/howto/dev/new_op_en.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/howto/dev/new_op_en.md b/doc/howto/dev/new_op_en.md index 57ff7caad1..1e88e1f5b4 100644 --- a/doc/howto/dev/new_op_en.md +++ b/doc/howto/dev/new_op_en.md @@ -205,7 +205,7 @@ The definition of its corresponding backward operator, if applicable, is similar - `REGISTER_OP` registers the `ops::MulOp` class, type named `mul`, its type `ProtoMaker` is `ops::MulOpMaker`, registering `ops::MulOpGrad` as `mul_grad`. - `REGISTER_OP_WITHOUT_GRADIENT` registers an operator without gradient. - - `REGISTER_OP_CPU_KERNEL` registers `ops::MulKernel` class and specialized template types `paddle::platform::CPUPlace` and `float`, which also registers `ops::MulKernel`. + - `REGISTER_OP_CPU_KERNEL` registers `ops::MulKernel` class and specialized template types `paddle::platform::CPUPlace` and `float`, which also registers `ops::MulGradKernel`. - Registering GPU Kernel in `.cu` files From 18799476c20743313eb8361efc6cd21886d95862 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Mon, 2 Oct 2017 11:39:10 -0700 Subject: [PATCH 139/342] Use `type_defs.h` to resolve cyclic dependencies --- paddle/framework/attribute.h | 10 +--------- paddle/framework/op_desc.h | 2 +- paddle/framework/op_info.h | 7 +------ paddle/framework/type_defs.h | 38 ++++++++++++++++++++++++++++++++++++ 4 files changed, 41 insertions(+), 16 deletions(-) create mode 100644 paddle/framework/type_defs.h diff --git a/paddle/framework/attribute.h b/paddle/framework/attribute.h index c7559cefb6..d13530e340 100644 --- a/paddle/framework/attribute.h +++ b/paddle/framework/attribute.h @@ -21,20 +21,12 @@ limitations under the License. */ #include #include "paddle/framework/framework.pb.h" +#include "paddle/framework/type_defs.h" #include "paddle/platform/enforce.h" -#include "paddle/platform/variant.h" namespace paddle { namespace framework { -// The order should be as same as framework.proto -typedef boost::variant, - std::vector, std::vector, bool, - std::vector, BlockDesc*> - Attribute; - -typedef std::unordered_map AttributeMap; - ProgramDesc& GetProgramDesc(); template diff --git a/paddle/framework/op_desc.h b/paddle/framework/op_desc.h index e03b4d067f..0af4169715 100644 --- a/paddle/framework/op_desc.h +++ b/paddle/framework/op_desc.h @@ -17,7 +17,7 @@ limitations under the License. */ #include #include #include "paddle/framework/attribute.h" -#include "paddle/framework/op_info.h" +#include "paddle/framework/type_defs.h" #include "paddle/framework/var_desc.h" namespace paddle { diff --git a/paddle/framework/op_info.h b/paddle/framework/op_info.h index 6d1ee4dece..470336d367 100644 --- a/paddle/framework/op_info.h +++ b/paddle/framework/op_info.h @@ -19,15 +19,10 @@ #include #include "paddle/framework/attribute.h" #include "paddle/framework/op_desc.h" +#include "paddle/framework/type_defs.h" namespace paddle { namespace framework { -class OperatorBase; -using VariableNameMap = std::map>; - -using OpCreator = std::function; class GradOpDescMakerBase { public: diff --git a/paddle/framework/type_defs.h b/paddle/framework/type_defs.h new file mode 100644 index 0000000000..dec5066f1e --- /dev/null +++ b/paddle/framework/type_defs.h @@ -0,0 +1,38 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#pragma once +#include +#include +#include "paddle/platform/variant.h" + +namespace paddle { +namespace framework { +class OperatorBase; +using VariableNameMap = std::map>; + +// The order should be as same as framework.proto +using Attribute = + boost::variant, + std::vector, std::vector, bool, + std::vector, BlockDesc*>; + +using AttributeMap = std::unordered_map; + +using OpCreator = std::function; + +} // namespace framework +} // namespace paddle From 31bdb3f3cc98a38a3ec4951f26dd06260cdc4695 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Mon, 2 Oct 2017 14:03:38 -0700 Subject: [PATCH 140/342] tmp --- paddle/framework/operator.h | 98 +++++++++++++++++++++++++++++++++++++ 1 file changed, 98 insertions(+) diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h index 73e53a4176..f807909650 100644 --- a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -317,6 +317,104 @@ class ExecutionContext : public InferShapeContext { const platform::DeviceContext& device_context_; }; +class CompileTimeInferShapeContext : public InferShapeContextBase { + public: + CompileTimeInferShapeContext(const OperatorBase& op, const Scope& scope) + : op_(op), scope_(scope) {} + + bool HasInput(const std::string& name) const { + auto ipt = op_.Input(name); + auto* var = ipt == kEmptyVarName ? nullptr : scope_.FindVar(ipt); + return var != nullptr; + } + + bool HasOutput(const std::string& name) const { + auto ipt = op_.Output(name); + auto* var = ipt == kEmptyVarName ? nullptr : scope_.FindVar(ipt); + return var != nullptr; + } + + bool HasInputs(const std::string& name) const { + auto inputs = op_.Inputs(name); + if (inputs.size() == 0UL) { + return false; + } + for (auto& input : inputs) { + if (scope_.FindVar(input) == nullptr) { + return false; + } + } + return true; + } + + bool HasOutputs(const std::string& name) const { + auto outputs = op_.Outputs(name); + if (outputs.size() == 0UL) { + return false; + } + for (auto& output : outputs) { + if (scope_.FindVar(output) == nullptr) { + return false; + } + } + return true; + } + + DDim GetInputDim(const std::string& name) const { + return GetDim(op_.Input(name)); + } + + void SetInputDim(const std::string& name, const DDim& dim) { + SetDim(op_.Input(name), dim); + } + + DDim GetOutputDim(const std::string& name) const { + return GetDim(op_.Output(name)); + } + + void SetOutputDim(const std::string& name, const DDim& dim) { + SetDim(op_.Output(name), dim); + } + + AttrReader Attrs() const { return AttrReader(op_.Attrs()); } + + const std::vector& Inputs(const std::string& name) const { + return op_.Inputs(name); + } + + const std::vector& Outputs(const std::string& name) const { + return op_.Outputs(name); + } + + private: + template + Tensor* GetTensor(const std::string& name) const { + Tensor* t = nullptr; + auto* var = scope_.FindVar(name); + if (!var->IsType() && !var->IsType()) { + if (Allocate) { + t = var->GetMutable(); + } else { + PADDLE_THROW("Variable(%s) should be tensor", name); + } + } else { + t = GetTensorFromVar(scope_.FindVar(name)); + } + return t; + } + + DDim GetDim(const std::string& name) const { + return GetTensor(name)->dims(); + } + + void SetDim(const std::string& name, const DDim& dim) { + GetTensor(name)->Resize(dim); + } + + const OperatorBase& op_; + const Scope& scope_; +}; + class RuntimeInferShapeContext : public InferShapeContextBase { public: RuntimeInferShapeContext(const OperatorBase& op, const Scope& scope) From 1ac654a69f3713cf12d47aff0b853b63de354803 Mon Sep 17 00:00:00 2001 From: Kexin Zhao Date: Mon, 2 Oct 2017 13:48:35 -0700 Subject: [PATCH 141/342] Implementing the Adagrad optimizer step operator --- paddle/operators/adagrad_op.cc | 85 +++++++++++++++++++ paddle/operators/adagrad_op.cu | 20 +++++ paddle/operators/adagrad_op.h | 53 ++++++++++++ .../v2/framework/tests/test_adagrad_op.py | 32 +++++++ 4 files changed, 190 insertions(+) create mode 100644 paddle/operators/adagrad_op.cc create mode 100644 paddle/operators/adagrad_op.cu create mode 100644 paddle/operators/adagrad_op.h create mode 100644 python/paddle/v2/framework/tests/test_adagrad_op.py diff --git a/paddle/operators/adagrad_op.cc b/paddle/operators/adagrad_op.cc new file mode 100644 index 0000000000..03e22cc600 --- /dev/null +++ b/paddle/operators/adagrad_op.cc @@ -0,0 +1,85 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/adagrad_op.h" + +namespace paddle { +namespace operators { + +class AdagradOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(framework::InferShapeContextBase *ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("param"), + "Input(param) of AdagradOp should not be null."); + PADDLE_ENFORCE(ctx->HasInput("grad"), + "Input(grad) of AdagradOp should not be null."); + PADDLE_ENFORCE(ctx->HasInput("moment"), + "Input(moment) of AdagradOp should not be null."); + + PADDLE_ENFORCE(ctx->HasOutput("param_out"), + "Output(param_out) of AdagradOp should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("moment_out"), + "Output(moment_out) of AdagradOp should not be null."); + + auto param_dim = ctx->GetInputDim("param"); + PADDLE_ENFORCE_EQ( + param_dim, ctx->GetInputDim("grad"), + "Param and grad input of AdagradOp should have the same dimension."); + PADDLE_ENFORCE_EQ( + param_dim, ctx->GetInputDim("moment"), + "Param and moment input of AdagradOp should have the same dimension."); + + ctx->SetOutputDim("param_out", param_dim); + ctx->SetOutputDim("moment_out", param_dim); + } +}; + +class AdagradOpMaker : public framework::OpProtoAndCheckerMaker { + public: + AdagradOpMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("param", "Input parameter"); + AddInput("grad", "Input gradient"); + AddInput("moment", "Second moment"); + + AddOutput("param_out", "Output parameter"); + AddOutput("moment_out", "Output second moment"); + + AddAttr("learning_rate", "Learning rate"); + AddAttr("epsilon", "Constant for numerical stability"); + AddComment(R"DOC( + +Adaptive Gradient Algorithm (Adagrad). + +moment_out = moment + grad * grad +param_out = param - learning_rate * grad / (sqrt(moment_out) + epsilon) + +The original paper(http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf) +does not have the epsilon attribute. It is added here for numerical stability +by avoiding division by zero. + +)DOC"); + } +}; +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP_WITHOUT_GRADIENT(adagrad, ops::AdagradOp, ops::AdagradOpMaker); +REGISTER_OP_CPU_KERNEL(adagrad, + ops::AdagradOpKernel); diff --git a/paddle/operators/adagrad_op.cu b/paddle/operators/adagrad_op.cu new file mode 100644 index 0000000000..be16973c54 --- /dev/null +++ b/paddle/operators/adagrad_op.cu @@ -0,0 +1,20 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#define EIGEN_USE_GPU +#include "paddle/operators/adagrad_op.h" + +namespace ops = paddle::operators; +REGISTER_OP_GPU_KERNEL(adagrad, + ops::AdagradOpKernel); \ No newline at end of file diff --git a/paddle/operators/adagrad_op.h b/paddle/operators/adagrad_op.h new file mode 100644 index 0000000000..ca1836c3fa --- /dev/null +++ b/paddle/operators/adagrad_op.h @@ -0,0 +1,53 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once +#include "paddle/framework/eigen.h" +#include "paddle/framework/op_registry.h" + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; +template +using EigenVector = framework::EigenVector; + +template +class AdagradOpKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto param_out = ctx.Output("param_out"); + auto moment_out = ctx.Output("moment_out"); + + param_out->mutable_data(ctx.GetPlace()); + moment_out->mutable_data(ctx.GetPlace()); + + float lr = ctx.Attr("learning_rate"); + float epsilon = ctx.Attr("epsilon"); + + auto p = EigenVector::Flatten(*ctx.Input("param")); + auto g = EigenVector::Flatten(*ctx.Input("grad")); + auto m = EigenVector::Flatten(*ctx.Input("moment")); + auto p_out = EigenVector::Flatten(*param_out); + auto m_out = EigenVector::Flatten(*moment_out); + auto place = ctx.GetEigenDevice(); + + m_out.device(place) = m + g * g; + p_out.device(place) = p - lr * g / (m_out.sqrt() + epsilon); + } +}; + +} // namespace operators +} // namespace paddle diff --git a/python/paddle/v2/framework/tests/test_adagrad_op.py b/python/paddle/v2/framework/tests/test_adagrad_op.py new file mode 100644 index 0000000000..b3f8b812e1 --- /dev/null +++ b/python/paddle/v2/framework/tests/test_adagrad_op.py @@ -0,0 +1,32 @@ +import unittest +import numpy as np +from op_test import OpTest + + +class TestAdagradOp(OpTest): + def setUp(self): + self.op_type = "adagrad" + + param = np.random.random((123, 321)).astype("float32") + grad = np.random.random((123, 321)).astype("float32") + moment = np.zeros((123, 321)).astype("float32") + + learning_rate = 0.01 + epsilon = 1e-6 + + self.inputs = {'param': param, 'grad': grad, 'moment': moment} + + self.attrs = {'learning_rate': learning_rate, 'epsilon': epsilon} + + moment_out = moment + grad * grad + param_out = param - learning_rate * grad / (np.sqrt(moment_out) + + epsilon) + + self.outputs = {'param_out': param_out, 'moment_out': moment_out} + + def test_check_output(self): + self.check_output() + + +if __name__ == "__main__": + unittest.main() From 37bbaabdf1abfe14e19cf7dcb7a842a10b36d1c8 Mon Sep 17 00:00:00 2001 From: dongzhihong Date: Mon, 2 Oct 2017 14:17:17 -0700 Subject: [PATCH 142/342] "fix conflict" --- paddle/framework/op_info.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/framework/op_info.h b/paddle/framework/op_info.h index 7940922b09..9672e540c8 100644 --- a/paddle/framework/op_info.h +++ b/paddle/framework/op_info.h @@ -20,7 +20,7 @@ #include "paddle/framework/attribute.h" #include "paddle/framework/op_desc.h" #include "paddle/framework/type_defs.h" - +#include "paddle/platform/macros.h" namespace paddle { namespace framework { From 6b051b651ae72305d9877fd3cd094028c21bdddb Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Mon, 2 Oct 2017 14:24:03 -0700 Subject: [PATCH 143/342] optimize code --- paddle/operators/recurrent_op.cc | 38 ++++++++++++---------- paddle/operators/recurrent_op.h | 4 +-- paddle/operators/rnn/recurrent_op_utils.cc | 8 ++--- 3 files changed, 25 insertions(+), 25 deletions(-) diff --git a/paddle/operators/recurrent_op.cc b/paddle/operators/recurrent_op.cc index 016e2043fd..bcd6a3410a 100644 --- a/paddle/operators/recurrent_op.cc +++ b/paddle/operators/recurrent_op.cc @@ -32,24 +32,25 @@ void RecurrentAlgorithm::Run(const Scope& scope, const platform::DeviceContext& dev_ctx) const { auto* input0 = scope.FindVar(arg_->inlinks[0]); PADDLE_ENFORCE_NOT_NULL(input0); - seq_len_ = input0->GetMutable()->dims()[0]; - PADDLE_ENFORCE_GT(seq_len_, 0); + size_t seq_len = input0->GetMutable()->dims()[0]; + PADDLE_ENFORCE_GT(seq_len, 0); - CreateScopes(scope); + CreateScopes(scope, seq_len); auto& step_scopes = GetStepScopes(scope); - rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_); + rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len); InitMemories(step_scopes[0]); - for (size_t i = 0; i < seq_len_; i++) { - if (i > 0) { - rnn::LinkMemories(step_scopes, arg_->memories, i, -1); + for (size_t step_id = 0; step_id < seq_len; step_id++) { + if (step_id > 0) { + rnn::LinkMemories(step_scopes, arg_->memories, step_id, -1); } - (*stepnet_)->Run(*step_scopes[i], dev_ctx); + (*stepnet_)->Run(*step_scopes[step_id], dev_ctx); } - rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len_); + rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len); } -void RecurrentAlgorithm::CreateScopes(const Scope& scope) const { +void RecurrentAlgorithm::CreateScopes(const Scope& scope, + size_t seq_len) const { // TODO(superjom) Only two scopes are needed for inference, this case will be // supported later. auto step_scopes_var = scope.FindVar(arg_->step_scopes); @@ -60,8 +61,8 @@ void RecurrentAlgorithm::CreateScopes(const Scope& scope) const { PADDLE_ENFORCE_NOT_NULL(stepnet_); PADDLE_ENFORCE(!(*stepnet_)->Outputs().empty(), "stepnet_ op has no outputs"); - if (seq_len_ > step_scopes->size()) { - for (size_t i = step_scopes->size(); i < seq_len_; ++i) { + if (seq_len > step_scopes->size()) { + for (size_t i = step_scopes->size(); i < seq_len; ++i) { auto& step_scope = scope.NewScope(); // create step net's temp inputs @@ -144,17 +145,18 @@ class RecurrentAlgorithmProtoAndCheckerMaker void RecurrentGradientAlgorithm::Run( const Scope& scope, const platform::DeviceContext& dev_ctx) const { - seq_len_ = - scope.FindVar(arg_->inlinks[0])->GetMutable()->dims()[0]; + auto* input0 = scope.FindVar(arg_->inlinks[0]); + PADDLE_ENFORCE_NOT_NULL(input0); + size_t seq_len = input0->GetMutable()->dims()[0]; auto step_scopes = GetStepScopes(scope); - rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_); - for (int step_id = seq_len_ - 1; step_id >= 0; --step_id) { - if (static_cast(step_id) != seq_len_ - 1) { + rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len); + for (int step_id = seq_len - 1; step_id >= 0; --step_id) { + if (step_id != seq_len - 1) { rnn::LinkMemories(step_scopes, arg_->memories, step_id, 1); } (*stepnet_)->Run(*step_scopes[step_id], dev_ctx); } - rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len_); + rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len); LinkBootMemoryGradients(step_scopes[0]); } diff --git a/paddle/operators/recurrent_op.h b/paddle/operators/recurrent_op.h index 752025e42c..253d7e3284 100644 --- a/paddle/operators/recurrent_op.h +++ b/paddle/operators/recurrent_op.h @@ -48,7 +48,7 @@ class RecurrentAlgorithm { * NOTE the scopes are reused in both the forward and backward, so just * create once and expand its size if more steps need. */ - void CreateScopes(const framework::Scope& scope) const; + void CreateScopes(const framework::Scope& scope, size_t seq_len) const; const std::vector& GetStepScopes( const framework::Scope& scope) const { @@ -61,7 +61,6 @@ class RecurrentAlgorithm { private: std::unique_ptr* stepnet_; rnn::Argument* arg_; - mutable size_t seq_len_; }; class RecurrentGradientAlgorithm { @@ -97,7 +96,6 @@ class RecurrentGradientAlgorithm { private: rnn::Argument* arg_; - mutable size_t seq_len_; std::unique_ptr* stepnet_; }; diff --git a/paddle/operators/rnn/recurrent_op_utils.cc b/paddle/operators/rnn/recurrent_op_utils.cc index a02994f99d..a37d21d480 100644 --- a/paddle/operators/rnn/recurrent_op_utils.cc +++ b/paddle/operators/rnn/recurrent_op_utils.cc @@ -53,12 +53,12 @@ void ConcatOutputs(const std::vector& step_scopes, const std::vector& outlinks, const size_t seq_len) { for (size_t i = 0; i < outlinks.size(); i++) { - auto output_var = step_scopes[0]->parent().FindVar(outlinks[i]); + auto* output_var = step_scopes[0]->parent().FindVar(outlinks[i]); PADDLE_ENFORCE_NOT_NULL(output_var, "output link [%s] is not in scope.", outlinks[i]); LoDTensor* output = output_var->GetMutable(); - auto step_scope_var = step_scopes[0]->FindVar(outlinks[i]); + auto* step_scope_var = step_scopes[0]->FindVar(outlinks[i]); PADDLE_ENFORCE_NOT_NULL(step_scope_var, "%s not in scope", outlinks[i]); f::DDim step_dims = step_scope_var->template GetMutable()->dims(); @@ -89,8 +89,8 @@ void LinkMemories(const std::vector& scopes, step_id + offset, scopes.size(), "offset [%d] is out of range, it must be less than (%d - %d)", offset, scopes.size(), step_id); - auto scope = scopes[step_id]; - auto linked_scope = scopes[step_id + offset]; + auto* scope = scopes[step_id]; + auto* linked_scope = scopes[step_id + offset]; for (auto& attr : memories) { auto mem = scope->FindVar(attr.pre_var)->GetMutable(); auto linked_mem = linked_scope->FindVar(attr.var)->GetMutable(); From cde542e6524d8fd084983e20e0051a3caf22f6b1 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Mon, 2 Oct 2017 14:51:15 -0700 Subject: [PATCH 144/342] optimize auto --- paddle/operators/recurrent_op.cc | 6 +++--- paddle/operators/rnn/recurrent_op_utils.cc | 10 +++++----- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/paddle/operators/recurrent_op.cc b/paddle/operators/recurrent_op.cc index bcd6a3410a..04c4c24951 100644 --- a/paddle/operators/recurrent_op.cc +++ b/paddle/operators/recurrent_op.cc @@ -53,9 +53,9 @@ void RecurrentAlgorithm::CreateScopes(const Scope& scope, size_t seq_len) const { // TODO(superjom) Only two scopes are needed for inference, this case will be // supported later. - auto step_scopes_var = scope.FindVar(arg_->step_scopes); + auto* step_scopes_var = scope.FindVar(arg_->step_scopes); PADDLE_ENFORCE(step_scopes_var != nullptr, ""); - auto step_scopes = step_scopes_var->GetMutable>(); + auto* step_scopes = step_scopes_var->GetMutable>(); // Now all variables in scope must be created outside of op. PADDLE_ENFORCE_NOT_NULL(stepnet_); @@ -148,7 +148,7 @@ void RecurrentGradientAlgorithm::Run( auto* input0 = scope.FindVar(arg_->inlinks[0]); PADDLE_ENFORCE_NOT_NULL(input0); size_t seq_len = input0->GetMutable()->dims()[0]; - auto step_scopes = GetStepScopes(scope); + auto& step_scopes = GetStepScopes(scope); rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len); for (int step_id = seq_len - 1; step_id >= 0; --step_id) { if (step_id != seq_len - 1) { diff --git a/paddle/operators/rnn/recurrent_op_utils.cc b/paddle/operators/rnn/recurrent_op_utils.cc index a37d21d480..ef317a71f1 100644 --- a/paddle/operators/rnn/recurrent_op_utils.cc +++ b/paddle/operators/rnn/recurrent_op_utils.cc @@ -92,8 +92,8 @@ void LinkMemories(const std::vector& scopes, auto* scope = scopes[step_id]; auto* linked_scope = scopes[step_id + offset]; for (auto& attr : memories) { - auto mem = scope->FindVar(attr.pre_var)->GetMutable(); - auto linked_mem = linked_scope->FindVar(attr.var)->GetMutable(); + auto* mem = scope->FindVar(attr.pre_var)->GetMutable(); + auto* linked_mem = linked_scope->FindVar(attr.var)->GetMutable(); mem->Resize(linked_mem->dims()); mem->ShareDataWith(*linked_mem); } @@ -106,11 +106,11 @@ void InitArgument(const ArgumentName& name, Argument* arg, arg->inlinks = op.Inputs(name.inlinks); arg->outlinks = op.Outputs(name.outlinks); - auto boot_memories = + auto& boot_memories = is_grad ? op.Outputs(name.boot_memories) : op.Inputs(name.boot_memories); // attributes - auto memories = op.Attr>(name.memories); - auto pre_memories = op.Attr>(name.pre_memories); + auto& memories = op.Attr>(name.memories); + auto& pre_memories = op.Attr>(name.pre_memories); PADDLE_ENFORCE(memories.size() == boot_memories.size(), "the size of memories, boot_memories don't match:%d,%d", From 05cbd4daacb699ec94d77f5b0229aaaa16c35af1 Mon Sep 17 00:00:00 2001 From: Kexin Zhao Date: Mon, 2 Oct 2017 15:00:41 -0700 Subject: [PATCH 145/342] fix format --- paddle/operators/adagrad_op.cu | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/operators/adagrad_op.cu b/paddle/operators/adagrad_op.cu index be16973c54..a5b7951121 100644 --- a/paddle/operators/adagrad_op.cu +++ b/paddle/operators/adagrad_op.cu @@ -17,4 +17,4 @@ namespace ops = paddle::operators; REGISTER_OP_GPU_KERNEL(adagrad, - ops::AdagradOpKernel); \ No newline at end of file + ops::AdagradOpKernel); From d2bd6f45cb82531eff7ce7e64360d75e351c643d Mon Sep 17 00:00:00 2001 From: dongzhihong Date: Mon, 2 Oct 2017 15:07:02 -0700 Subject: [PATCH 146/342] "replace add with sum" --- paddle/framework/backward.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index fbacfeed94..35759f8e78 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -159,7 +159,7 @@ static std::unique_ptr BackwardRecursive( insert_position.push_back( {dup_op.back(), OpRegistry::CreateOp( - "add", {{"X", {insert_add_x}}, {"Y", {insert_add_y}}}, + "add", {{"X", {insert_add_x}}, {"X", {insert_add_y}}}, {{"Out", {insert_add_out}}}, {})}); } } From 2bceab0fb446aa1c5370a613bf67041d4534f187 Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Sun, 1 Oct 2017 08:29:09 +0000 Subject: [PATCH 147/342] add generic add operator --- paddle/framework/backward.cc | 32 +++++++++++++++++++++++++++++--- 1 file changed, 29 insertions(+), 3 deletions(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index 0ec18de5b8..c625c0caf7 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -141,9 +141,35 @@ static std::unique_ptr BackwardRecursive( net->ops_[op_offset]->Rename(name, dup_outputs.back()); } // collect all the offset to append `add` op for each alias - insert_position.push_back( - {dup_op.back(), OpRegistry::CreateOp("add", {{"X", {dup_outputs}}}, - {{"Out", {name}}}, {})}); + // + // one variable is shared between multiple operators. + // insert add operator one by one, then add it to output + if (dup_outputs.size() == 2) { + insert_position.push_back( + {dup_op.back(), + OpRegistry::CreateOp( + "add", {{"X", {dup_outputs[0]}}, {"Y", {dup_outputs[1]}}}, + {{"Out", {name}}}, {})}); + } else { + for (size_t output_idx = 0; output_idx < dup_outputs.size() - 1; + ++output_idx) { + auto insert_add_x = dup_outputs[output_idx]; + auto insert_add_y = dup_outputs[output_idx]; + auto insert_add_out = name + "@SHARED@" + std::to_string(output_idx); + // first add op inserted + if (output_idx == dup_outputs.size() - 1) { + insert_add_out = name; + } + if (output_idx != 0) { + insert_add_y = name + "@SHARED@" + std::to_string(output_idx); + } + insert_position.push_back( + {dup_op.back(), + OpRegistry::CreateOp( + "add", {{"X", {insert_add_x}}, {"Y", {insert_add_y}}}, + {{"Out", {insert_add_out}}}, {})}); + } + } } // make sure the inserted `add` ops follow the BFS order. From 800085fe2d37e5ad3ef706701ffb008a2c668ee1 Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Sun, 1 Oct 2017 08:36:04 +0000 Subject: [PATCH 148/342] fix typo --- paddle/framework/backward.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index c625c0caf7..b850939040 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -161,7 +161,7 @@ static std::unique_ptr BackwardRecursive( insert_add_out = name; } if (output_idx != 0) { - insert_add_y = name + "@SHARED@" + std::to_string(output_idx); + insert_add_y = name + "@SHARED@" + std::to_string(output_idx-1); } insert_position.push_back( {dup_op.back(), From f6496272cfcbb4d2ad9eb8a272a065007059a004 Mon Sep 17 00:00:00 2001 From: dongzhihong Date: Sun, 1 Oct 2017 09:18:21 -0700 Subject: [PATCH 149/342] relauch ci --- paddle/framework/backward.cc | 36 ++++++++++++++---------------------- 1 file changed, 14 insertions(+), 22 deletions(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index b850939040..fbacfeed94 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -144,31 +144,23 @@ static std::unique_ptr BackwardRecursive( // // one variable is shared between multiple operators. // insert add operator one by one, then add it to output - if (dup_outputs.size() == 2) { + for (size_t output_idx = 0; output_idx < dup_outputs.size() - 1; + ++output_idx) { + auto insert_add_x = dup_outputs[output_idx]; + auto insert_add_y = dup_outputs[output_idx]; + auto insert_add_out = name + "@SHARED@" + std::to_string(output_idx); + // first add op inserted + if (output_idx == dup_outputs.size() - 2) { + insert_add_out = name; + } + if (output_idx != 0) { + insert_add_y = name + "@SHARED@" + std::to_string(output_idx - 1); + } insert_position.push_back( {dup_op.back(), OpRegistry::CreateOp( - "add", {{"X", {dup_outputs[0]}}, {"Y", {dup_outputs[1]}}}, - {{"Out", {name}}}, {})}); - } else { - for (size_t output_idx = 0; output_idx < dup_outputs.size() - 1; - ++output_idx) { - auto insert_add_x = dup_outputs[output_idx]; - auto insert_add_y = dup_outputs[output_idx]; - auto insert_add_out = name + "@SHARED@" + std::to_string(output_idx); - // first add op inserted - if (output_idx == dup_outputs.size() - 1) { - insert_add_out = name; - } - if (output_idx != 0) { - insert_add_y = name + "@SHARED@" + std::to_string(output_idx-1); - } - insert_position.push_back( - {dup_op.back(), - OpRegistry::CreateOp( - "add", {{"X", {insert_add_x}}, {"Y", {insert_add_y}}}, - {{"Out", {insert_add_out}}}, {})}); - } + "add", {{"X", {insert_add_x}}, {"Y", {insert_add_y}}}, + {{"Out", {insert_add_out}}}, {})}); } } From a1b935e356573266dfff08d5fd279815492c8843 Mon Sep 17 00:00:00 2001 From: dongzhihong Date: Mon, 2 Oct 2017 15:07:02 -0700 Subject: [PATCH 150/342] "replace add with sum" --- paddle/framework/backward.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index fbacfeed94..35759f8e78 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -159,7 +159,7 @@ static std::unique_ptr BackwardRecursive( insert_position.push_back( {dup_op.back(), OpRegistry::CreateOp( - "add", {{"X", {insert_add_x}}, {"Y", {insert_add_y}}}, + "add", {{"X", {insert_add_x}}, {"X", {insert_add_y}}}, {{"Out", {insert_add_out}}}, {})}); } } From 578a357b616ee188d692764843ae834a449e81c2 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Mon, 2 Oct 2017 15:12:20 -0700 Subject: [PATCH 151/342] Make compile pass --- paddle/framework/CMakeLists.txt | 4 +--- paddle/framework/backward.cc | 33 +++++++++++++++++++++++++++++---- paddle/framework/op_desc.h | 14 ++++++++------ paddle/framework/op_registry.cc | 6 ++++++ paddle/framework/op_registry.h | 8 +++++--- 5 files changed, 49 insertions(+), 16 deletions(-) diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt index 9140854a96..eb316b4c8c 100644 --- a/paddle/framework/CMakeLists.txt +++ b/paddle/framework/CMakeLists.txt @@ -26,10 +26,8 @@ cc_library(op_info SRCS op_info.cc DEPS attribute framework_proto proto_desc) cc_library(operator SRCS operator.cc DEPS op_info device_context tensor scope) cc_test(operator_test SRCS operator_test.cc DEPS operator op_registry) -cc_library(grad_op_builder SRCS grad_op_builder.cc DEPS operator proto_desc) -cc_library(op_registry SRCS op_registry.cc DEPS grad_op_builder op_proto_maker op_info) +cc_library(op_registry SRCS op_registry.cc DEPS op_proto_maker op_info operator) cc_test(op_registry_test SRCS op_registry_test.cc DEPS op_registry) -cc_test(grad_op_builder_test SRCS grad_op_builder_test.cc DEPS grad_op_builder op_registry sum_op) py_proto_compile(framework_py_proto SRCS framework.proto) # Generate an empty __init__.py to make framework_py_proto as a valid python module. diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index ab2567a25c..eb34bc3693 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -13,6 +13,7 @@ limitations under the License. */ #include "paddle/framework/backward.h" +#include "paddle/operators/net_op.h" #include #include @@ -24,6 +25,32 @@ namespace paddle { namespace framework { +static inline std::unique_ptr CreateGradOp( + const OperatorBase& op) { + OpDescBind op_desc; + op_desc.SetInputMap(op.Inputs()); + op_desc.SetOutputMap(op.Outputs()); + op_desc.SetType(op.Type()); + op_desc.SetAttrMap(op.Attrs()); + auto& info = OpInfoMap::Instance().Get(op.Type()); + auto grad_descs = info.grad_op_maker_(op_desc); + std::vector> grad_ops; + grad_ops.reserve(grad_descs.size()); + std::transform( + grad_descs.begin(), grad_descs.end(), std::back_inserter(grad_ops), + [](OpDescBind& grad_desc) { return OpRegistry::CreateOp(&grad_desc); }); + PADDLE_ENFORCE_GT(grad_ops.size(), 0); + if (grad_ops.size() == 1) { + return std::move(grad_ops[0]); + } else { + auto net_op = new operators::NetOp(); + for (auto& grad_op : grad_ops) { + net_op->AppendOp(std::move(grad_op)); + } + return std::unique_ptr(net_op); + } +} + template static void ForEachVarName(const Map& names, T callback) { for (auto& name : names) { @@ -154,10 +181,8 @@ static std::unique_ptr BackwardRecursive( net->InsertOp(pos.first + 1, std::move(pos.second)); } } else { - OpDescBind fwd_desc; - fwd_desc.SetInput(forwardOp.Inputs()); - - std::unique_ptr grad_op(OpRegistry::CreateGradOp(forwardOp)); + std::unique_ptr grad_op(CreateGradOp(forwardOp)); + PADDLE_ENFORCE(grad_op != nullptr); ForEachVarName(grad_op->Inputs(), [&no_grad_names, &net, &grad_op]( const std::string& grad_input) { diff --git a/paddle/framework/op_desc.h b/paddle/framework/op_desc.h index ec92d08768..72d7a0379b 100644 --- a/paddle/framework/op_desc.h +++ b/paddle/framework/op_desc.h @@ -76,18 +76,22 @@ class OpDescBind { return MapKeys(outputs_); } - void SetInput( - const std::unordered_map> &input) { + void SetInputMap(const VariableNameMap &input) { this->inputs_ = input; this->need_update_ = true; } - void SetOutput( - const std::unordered_map> &output) { + void SetOutputMap(const VariableNameMap &output) { this->outputs_ = output; this->need_update_ = true; } + void Sync(); + + const VariableNameMap &Inputs() const { return inputs_; } + + const VariableNameMap &Outputs() const { return outputs_; } + private: template static std::vector MapKeys(const MapType &map) { @@ -99,8 +103,6 @@ class OpDescBind { return ret_val; } - void Sync(); - OpDesc op_desc_; VariableNameMap inputs_; VariableNameMap outputs_; diff --git a/paddle/framework/op_registry.cc b/paddle/framework/op_registry.cc index 0a2b6fd582..35f280981b 100644 --- a/paddle/framework/op_registry.cc +++ b/paddle/framework/op_registry.cc @@ -52,5 +52,11 @@ std::unique_ptr OpRegistry::CreateOp(const OpDesc& op_desc) { return CreateOp(op_desc.type(), inputs, outputs, attrs); } +std::unique_ptr OpRegistry::CreateOp(OpDescBind* op_desc) { + op_desc->Sync(); + return CreateOp(op_desc->Type(), op_desc->Inputs(), op_desc->Outputs(), + op_desc->GetAttrMap()); +} + } // namespace framework } // namespace paddle diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h index 0f377f34cb..d14f70008b 100644 --- a/paddle/framework/op_registry.h +++ b/paddle/framework/op_registry.h @@ -23,8 +23,8 @@ limitations under the License. */ #include "paddle/framework/attribute.h" #include "paddle/framework/details/op_registry.h" #include "paddle/framework/framework.pb.h" -#include "paddle/framework/grad_op_builder.h" #include "paddle/framework/grad_op_desc_maker.h" +#include "paddle/framework/op_desc.h" #include "paddle/framework/operator.h" #include "paddle/framework/scope.h" @@ -46,15 +46,15 @@ class Registrar { template struct OperatorRegistrar : public Registrar { explicit OperatorRegistrar(const char* op_type) : op_type(op_type) { + std::cerr << "Reg operator " << op_type << std::endl; PADDLE_ENFORCE(!OpInfoMap::Instance().Has(op_type), "'%s' is registered more than once.", op_type); static_assert(sizeof...(ARGS) != 0, "OperatorRegistrar should be invoked at least by OpClass"); details::OperatorRegistrarRecursive<0, false, ARGS...>(op_type, &info); + OpInfoMap::Instance().Insert(op_type, info); } - ~OperatorRegistrar() { OpInfoMap::Instance().Insert(op_type, info); } - const char* op_type; OpInfo info; @@ -79,6 +79,8 @@ class OpRegistry { AttributeMap attrs); static std::unique_ptr CreateOp(const OpDesc& op_desc); + + static std::unique_ptr CreateOp(OpDescBind* op_desc); }; template From ff8766e910a4d9ba1e208458de2719708d6663d3 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Mon, 2 Oct 2017 15:50:00 -0700 Subject: [PATCH 152/342] Stash --- paddle/framework/backward_test.cc | 2 ++ paddle/framework/details/op_registry.h | 1 + paddle/framework/op_desc.h | 2 +- paddle/framework/op_info.h | 8 -------- paddle/framework/op_proto_maker.h | 5 ----- paddle/framework/op_registry.cc | 4 +++- 6 files changed, 7 insertions(+), 15 deletions(-) diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index 28fc6f9ced..85f1dd91ed 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -378,6 +378,8 @@ TEST(Backward, linear_net_intermediate_variable_has_no_grad) { + 1UL /* external output number*/ + 1UL /* number of gradient of external output*/ + 2U /* internal variable number*/); + std::cerr << grad_fc.DebugString() << std::endl; + EXPECT_EQ(grad_fc.Outputs(all).size(), 2UL /* input number of mul*/ + 2UL /* input number of rowwise_add diff --git a/paddle/framework/details/op_registry.h b/paddle/framework/details/op_registry.h index daa474e8c5..c805dae7d7 100644 --- a/paddle/framework/details/op_registry.h +++ b/paddle/framework/details/op_registry.h @@ -85,6 +85,7 @@ struct OpInfoFiller { info->proto_ = new OpProto; info->checker_ = new OpAttrChecker(); auto maker = T(info->proto_, info->checker_); + std::cerr << "Assign Maker " << op_type << std::endl; maker.Validate(); info->proto_->set_type(op_type); PADDLE_ENFORCE( diff --git a/paddle/framework/op_desc.h b/paddle/framework/op_desc.h index 72d7a0379b..4c1ada05f0 100644 --- a/paddle/framework/op_desc.h +++ b/paddle/framework/op_desc.h @@ -98,7 +98,7 @@ class OpDescBind { std::vector ret_val; ret_val.reserve(map.size()); std::transform( - map.begin(), map.end(), ret_val.begin(), + map.begin(), map.end(), std::back_inserter(ret_val), [](const typename MapType::value_type &pair) { return pair.first; }); return ret_val; } diff --git a/paddle/framework/op_info.h b/paddle/framework/op_info.h index 683476dfd4..ab13dad962 100644 --- a/paddle/framework/op_info.h +++ b/paddle/framework/op_info.h @@ -42,19 +42,11 @@ struct OpInfo { return *proto_; } - const OpAttrChecker& Checker() const { - PADDLE_ENFORCE_NOT_NULL(checker_, - "Operator Checker has not been registered"); - return *checker_; - } - const OpCreator& Creator() const { PADDLE_ENFORCE_NOT_NULL(creator_, "Operator Creator has not been registered"); return creator_; } - - bool HasGradientOp() const { return !grad_op_type_.empty(); } }; class OpInfoMap { diff --git a/paddle/framework/op_proto_maker.h b/paddle/framework/op_proto_maker.h index 4d55a37db9..a134befd90 100644 --- a/paddle/framework/op_proto_maker.h +++ b/paddle/framework/op_proto_maker.h @@ -44,11 +44,6 @@ class OpProtoAndCheckerMaker { var_->set_intermediate(true); return *this; } - - VariableBuilder& NotInGradient() { - var_->set_not_in_gradient(true); - return *this; - } }; VariableBuilder AddInput(const std::string& name, const std::string& comment); diff --git a/paddle/framework/op_registry.cc b/paddle/framework/op_registry.cc index 35f280981b..ac6aa8d28e 100644 --- a/paddle/framework/op_registry.cc +++ b/paddle/framework/op_registry.cc @@ -23,7 +23,9 @@ std::unique_ptr OpRegistry::CreateOp( const std::string& type, const VariableNameMap& inputs, const VariableNameMap& outputs, AttributeMap attrs) { auto& info = OpInfoMap::Instance().Get(type); - info.Checker().Check(attrs); + if (info.checker_ != nullptr) { + info.checker_->Check(attrs); + } auto op = info.Creator()(type, inputs, outputs, attrs); return std::unique_ptr(op); } From 84b8baf1967e327712269e7632235438d09759d9 Mon Sep 17 00:00:00 2001 From: zchen0211 Date: Mon, 2 Oct 2017 15:50:24 -0700 Subject: [PATCH 153/342] gather scatter with cuda streams --- paddle/operators/gather.cu.h | 13 ++++++++----- paddle/operators/gather_op.cu | 5 ++--- paddle/operators/scatter.cu.h | 10 ++++++---- paddle/operators/scatter_op.cu | 4 ++-- 4 files changed, 18 insertions(+), 14 deletions(-) diff --git a/paddle/operators/gather.cu.h b/paddle/operators/gather.cu.h index b400c10440..2ae11376a2 100644 --- a/paddle/operators/gather.cu.h +++ b/paddle/operators/gather.cu.h @@ -46,9 +46,9 @@ __global__ void GatherCUDAKernel(const T* params, const int* indices, T* output, * return: output tensor */ template -void GPUGather(const Place& place, const Tensor* src, const Tensor* index, - Tensor* output) { - PADDLE_ENFORCE(platform::is_gpu_place(place)); +void GPUGather(const platform::DeviceContext& ctx, const Tensor* src, + const Tensor* index, Tensor* output) { + // PADDLE_ENFORCE(platform::is_gpu_place(place)); // check index of shape 1-D PADDLE_ENFORCE(index->dims().size() == 1); int index_size = index->dims()[0]; @@ -68,8 +68,11 @@ void GPUGather(const Place& place, const Tensor* src, const Tensor* index, int block = 512; int n = slice_size * index_size; int grid = (n + block - 1) / block; - GatherCUDAKernel<<>>(p_src, p_index, p_output, index_size, - slice_size); + + GatherCUDAKernel<<< + grid, block, 0, + reinterpret_cast(ctx).stream()>>>( + p_src, p_index, p_output, index_size, slice_size); } } // namespace operators diff --git a/paddle/operators/gather_op.cu b/paddle/operators/gather_op.cu index 06004614b2..9937be5915 100644 --- a/paddle/operators/gather_op.cu +++ b/paddle/operators/gather_op.cu @@ -32,7 +32,7 @@ class GatherOpCUDAKernel : public framework::OpKernel { output->mutable_data(ctx.GetPlace()); - GPUGather(ctx.GetPlace(), x, index, output); + GPUGather(ctx.device_context(), x, index, output); } }; @@ -42,7 +42,6 @@ class GatherGradOpCUDAKernel : public framework::OpKernel { void Compute(const framework::ExecutionContext &ctx) const override { PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), "This kernel only runs on GPU device."); - LOG(INFO) << "Gather grad here"; auto *Index = ctx.Input("Index"); auto *dX = ctx.Output(framework::GradVarName("X")); auto *dO = ctx.Input(framework::GradVarName("Out")); @@ -53,7 +52,7 @@ class GatherGradOpCUDAKernel : public framework::OpKernel { auto place = ctx.GetEigenDevice(); dxt.device(place) = dxt.constant(static_cast(0)); - GPUScatterAssign(ctx.GetPlace(), dO, Index, dX); + GPUScatterAssign(ctx.device_context(), dO, Index, dX); } }; diff --git a/paddle/operators/scatter.cu.h b/paddle/operators/scatter.cu.h index add4791a79..f4a3965d94 100644 --- a/paddle/operators/scatter.cu.h +++ b/paddle/operators/scatter.cu.h @@ -45,11 +45,11 @@ __global__ void ScatterCUDAKernel(const T* params, const int* indices, * return: output tensor */ template -void GPUScatterAssign(const platform::Place& place, +void GPUScatterAssign(const platform::DeviceContext& ctx, const paddle::framework::Tensor* src, const paddle::framework::Tensor* index, paddle::framework::Tensor* output) { - PADDLE_ENFORCE(platform::is_gpu_place(place)); + // PADDLE_ENFORCE(platform::is_gpu_place(place)); // check index of shape 1-D PADDLE_ENFORCE(index->dims().size() == 1); int index_size = index->dims()[0]; @@ -70,8 +70,10 @@ void GPUScatterAssign(const platform::Place& place, int n = slice_size * index_size; int grid = (n + block - 1) / block; - ScatterCUDAKernel<<>>(p_src, p_index, p_output, index_size, - slice_size); + ScatterCUDAKernel<<< + grid, block, 0, + reinterpret_cast(ctx).stream()>>>( + p_src, p_index, p_output, index_size, slice_size); } } // namespace operators diff --git a/paddle/operators/scatter_op.cu b/paddle/operators/scatter_op.cu index 831eabdae4..6d13a876f9 100644 --- a/paddle/operators/scatter_op.cu +++ b/paddle/operators/scatter_op.cu @@ -32,7 +32,7 @@ class ScatterOpCUDAKernel : public framework::OpKernel { Out->ShareDataWith(*Ref); - GPUScatterAssign(ctx.GetPlace(), Updates, Index, Out); + GPUScatterAssign(ctx.device_context(), Updates, Index, Out); } }; @@ -51,7 +51,7 @@ class ScatterGradOpCUDAKernel : public framework::OpKernel { dRef->ShareDataWith(*dOut); dUpdates->mutable_data(ctx.GetPlace()); // Gradient by Gather: dUpdates = dO[Index] - GPUGather(ctx.GetPlace(), dOut, Index, dUpdates); + GPUGather(ctx.device_context(), dOut, Index, dUpdates); } }; From 9ff1fd41b2e8769d233e160975e036f539cda99f Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Mon, 2 Oct 2017 15:57:18 -0700 Subject: [PATCH 154/342] Fix MacOS compile error The private data `tensor_shared_` is not used. --- paddle/framework/tensor_array.h | 8 -------- 1 file changed, 8 deletions(-) diff --git a/paddle/framework/tensor_array.h b/paddle/framework/tensor_array.h index e76f33d2c0..22ae6a966f 100644 --- a/paddle/framework/tensor_array.h +++ b/paddle/framework/tensor_array.h @@ -47,13 +47,6 @@ class TensorArray { // max number of values allowed to store. const size_t MAX_SIZE{100000}; - /* - * Inputs: - * - value_shared: share memory between tensors. - */ - explicit TensorArray(bool values_shared = true) - : values_shared_(values_shared) {} - /* * Read the value at location `index` in the `TensorArray`. */ @@ -111,7 +104,6 @@ class TensorArray { private: mutable std::vector values_; - bool values_shared_; }; // class TensorArray } // namespace framework From adec0d30fe8454f84b6bc61cc8b0385f6483d0c3 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Mon, 2 Oct 2017 16:18:26 -0700 Subject: [PATCH 155/342] Simplify SumOp Kernel --- paddle/operators/CMakeLists.txt | 6 +++++- paddle/operators/sum_op.cc | 29 +++++++++++++++-------------- paddle/operators/sum_op.cu | 4 +--- paddle/operators/sum_op.h | 19 ------------------- 4 files changed, 21 insertions(+), 37 deletions(-) diff --git a/paddle/operators/CMakeLists.txt b/paddle/operators/CMakeLists.txt index 43eb4de2c1..0fa1fca2bc 100644 --- a/paddle/operators/CMakeLists.txt +++ b/paddle/operators/CMakeLists.txt @@ -103,12 +103,16 @@ set(DEPS_OPS recurrent_op cond_op cross_entropy_op - softmax_with_cross_entropy_op) + softmax_with_cross_entropy_op + sum_op) + + op_library(recurrent_op SRCS recurrent_op.cc rnn/recurrent_op_utils.cc DEPS framework_proto tensor net_op) op_library(cond_op SRCS cond_op.cc DEPS framework_proto tensor operator net_op) op_library(cross_entropy_op DEPS cross_entropy) op_library(softmax_with_cross_entropy_op DEPS cross_entropy softmax) +op_library(sum_op DEPS net_op) list(REMOVE_ITEM GENERAL_OPS ${DEPS_OPS}) foreach(src ${GENERAL_OPS}) diff --git a/paddle/operators/sum_op.cc b/paddle/operators/sum_op.cc index c54843faa6..7c422b4770 100644 --- a/paddle/operators/sum_op.cc +++ b/paddle/operators/sum_op.cc @@ -11,6 +11,7 @@ limitations under the License. */ #include "paddle/operators/sum_op.h" #include +#include "paddle/operators/net_op.h" namespace paddle { namespace operators { @@ -57,21 +58,23 @@ or not. But the output only shares the LoD with the first input. } }; -class SumGradOp : public framework::OperatorWithKernel { +class SumGradOp : public NetOp { public: - using framework::OperatorWithKernel::OperatorWithKernel; + SumGradOp(const std::string& type, const framework::VariableNameMap& inputs, + const framework::VariableNameMap& outputs, + const framework::AttributeMap& attrs) + : NetOp(type, inputs, outputs, attrs) { + auto& x_grad_names = Outputs(framework::GradVarName("X")); + auto out_grad_name = this->Input(framework::GradVarName("Out")); - protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { - auto out_grad_dims = ctx->GetInputDim(framework::GradVarName("Out")); - auto x_grad_names = ctx->Outputs(framework::GradVarName("X")); - size_t x_length = x_grad_names.size(); - std::vector x_grad_dims; - x_grad_dims.reserve(x_length); - for (size_t i = 0; i < x_length; ++i) { - x_grad_dims.push_back(out_grad_dims); + framework::AttributeMap grad_attrs; + grad_attrs["scale"] = 1.0f; + for (auto& x_grad_name : x_grad_names) { + AppendOp(framework::OpRegistry::CreateOp( + "scale", {{"X", {out_grad_name}}}, {{"Out", {x_grad_name}}}, + grad_attrs)); } - ctx->SetOutputsDim(framework::GradVarName("X"), x_grad_dims); + CompleteAddOp(false); } }; @@ -81,5 +84,3 @@ class SumGradOp : public framework::OperatorWithKernel { namespace ops = paddle::operators; REGISTER_OP(sum, ops::SumOp, ops::SumOpMaker, sum_grad, ops::SumGradOp); REGISTER_OP_CPU_KERNEL(sum, ops::SumKernel); -REGISTER_OP_CPU_KERNEL(sum_grad, - ops::SumGradKernel); diff --git a/paddle/operators/sum_op.cu b/paddle/operators/sum_op.cu index a465cf3659..7129e6bf62 100644 --- a/paddle/operators/sum_op.cu +++ b/paddle/operators/sum_op.cu @@ -13,6 +13,4 @@ limitations under the License. */ #include "paddle/operators/sum_op.h" namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL(sum, ops::SumKernel); -REGISTER_OP_GPU_KERNEL(sum_grad, - ops::SumGradKernel); +REGISTER_OP_GPU_KERNEL(sum, ops::SumKernel); \ No newline at end of file diff --git a/paddle/operators/sum_op.h b/paddle/operators/sum_op.h index 7e8fbb9e41..91e5da8b40 100644 --- a/paddle/operators/sum_op.h +++ b/paddle/operators/sum_op.h @@ -42,24 +42,5 @@ class SumKernel : public framework::OpKernel { } }; -template -class SumGradKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext& context) const override { - auto* input = context.Input(framework::GradVarName("Out")); - auto outs = context.MultiOutput(framework::GradVarName("X")); - for (auto out : outs) { - out->mutable_data(context.GetPlace()); - } - - auto place = context.GetEigenDevice(); - auto in = EigenVector::Flatten(*input); - for (auto out : outs) { - auto result = EigenVector::Flatten(*out); - result.device(place) = in; - } - } -}; - } // namespace operators } // namespace paddle From d550380ed92ed9c762ff0248780a3c28dbf27416 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Mon, 2 Oct 2017 16:52:11 -0700 Subject: [PATCH 156/342] add CompileTimeInferShapeContext --- paddle/framework/CMakeLists.txt | 2 +- paddle/framework/block_desc.cc | 5 ++ paddle/framework/block_desc.h | 2 + paddle/framework/operator.h | 84 +++++++++++++-------------------- 4 files changed, 41 insertions(+), 52 deletions(-) diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt index 5d394132b7..a2efcdb55c 100644 --- a/paddle/framework/CMakeLists.txt +++ b/paddle/framework/CMakeLists.txt @@ -23,7 +23,7 @@ cc_library(proto_desc SRCS var_desc.cc op_desc.cc block_desc.cc program_desc.cc cc_library(op_proto_maker SRCS op_proto_maker.cc DEPS framework_proto attribute) cc_test(op_proto_maker_test SRCS op_proto_maker_test.cc DEPS op_proto_maker) cc_library(op_info SRCS op_info.cc DEPS attribute framework_proto proto_desc) -cc_library(operator SRCS operator.cc DEPS op_info device_context tensor scope) +cc_library(operator SRCS operator.cc DEPS op_info device_context tensor scope proto_desc) cc_test(operator_test SRCS operator_test.cc DEPS operator op_registry) cc_library(grad_op_builder SRCS grad_op_builder.cc DEPS operator proto_desc) diff --git a/paddle/framework/block_desc.cc b/paddle/framework/block_desc.cc index 9570aedfdd..670533a3fe 100644 --- a/paddle/framework/block_desc.cc +++ b/paddle/framework/block_desc.cc @@ -34,6 +34,11 @@ VarDescBind *BlockDescBind::Var(const std::string &name) const { return it->second.get(); } +bool BlockDescBind::HasVar(const std::string &name) const { + auto it = vars_.find(name); + return it != vars_.end(); +} + std::vector BlockDescBind::AllVars() const { std::vector res; for (const auto &p : vars_) { diff --git a/paddle/framework/block_desc.h b/paddle/framework/block_desc.h index 1a1135bab4..41cf1dc385 100644 --- a/paddle/framework/block_desc.h +++ b/paddle/framework/block_desc.h @@ -45,6 +45,8 @@ class BlockDescBind { VarDescBind *Var(const std::string &name_bytes) const; + bool HasVar(const std::string &var_name) const; + std::vector AllVars() const; BlockDescBind *ParentBlock() const; diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h index f807909650..2874237b9c 100644 --- a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -319,100 +319,82 @@ class ExecutionContext : public InferShapeContext { class CompileTimeInferShapeContext : public InferShapeContextBase { public: - CompileTimeInferShapeContext(const OperatorBase& op, const Scope& scope) - : op_(op), scope_(scope) {} + CompileTimeInferShapeContext(const OpDescBind& op, const BlockDescBind& block) + : op_(op), block_(block) {} bool HasInput(const std::string& name) const { - auto ipt = op_.Input(name); - auto* var = ipt == kEmptyVarName ? nullptr : scope_.FindVar(ipt); - return var != nullptr; + const std::vector& input_names = op_.Input(name); + PADDLE_ENFORCE_EQ(input_names.size(), 1UL, "Inputs(%s) length is not 1", + name); + return block_.HasVar(input_names[0]); } bool HasOutput(const std::string& name) const { - auto ipt = op_.Output(name); - auto* var = ipt == kEmptyVarName ? nullptr : scope_.FindVar(ipt); - return var != nullptr; + const std::vector& output_names = op_.Output(name); + PADDLE_ENFORCE_EQ(output_names.size(), 1UL, "Outputs(%s) length is not 1", + name); + return block_.HasVar(output_names[0]); } bool HasInputs(const std::string& name) const { - auto inputs = op_.Inputs(name); - if (inputs.size() == 0UL) { - return false; - } - for (auto& input : inputs) { - if (scope_.FindVar(input) == nullptr) { - return false; - } + const std::vector& input_names = op_.Input(name); + PADDLE_ENFORCE_GT(input_names.size(), 0UL, "Inputs(%s) length is 0", name); + for (auto& input : input_names) { + if (!block_.HasVar(input)) return false; } return true; } bool HasOutputs(const std::string& name) const { - auto outputs = op_.Outputs(name); - if (outputs.size() == 0UL) { - return false; - } - for (auto& output : outputs) { - if (scope_.FindVar(output) == nullptr) { - return false; - } + const std::vector& output_names = op_.Output(name); + PADDLE_ENFORCE_GT(output_names.size(), 0UL, "Inputs(%s) length is 0", name); + for (auto& output : output_names) { + if (!block_.HasVar(name)) return false; } return true; } DDim GetInputDim(const std::string& name) const { - return GetDim(op_.Input(name)); + std::vector ddims = GetInputsDim(name); + PADDLE_ENFORCE_EQ(ddims.size(), 1UL, "Inputs(%s) length is not 1", name); + return ddims[0]; } void SetInputDim(const std::string& name, const DDim& dim) { - SetDim(op_.Input(name), dim); + SetInputsDim(name, {dim}); } DDim GetOutputDim(const std::string& name) const { - return GetDim(op_.Output(name)); + std::vector ddims = GetOutputsDim(name); + PADDLE_ENFORCE_EQ(ddims.size(), 1UL, "Outputs(%s) length is not 1", name); + return ddims[0]; } void SetOutputDim(const std::string& name, const DDim& dim) { - SetDim(op_.Output(name), dim); + SetOutputsDim(name, {dim}); } - AttrReader Attrs() const { return AttrReader(op_.Attrs()); } + AttrReader Attrs() const { return AttrReader(op_.GetAttrMap()); } const std::vector& Inputs(const std::string& name) const { - return op_.Inputs(name); + return op_.Input(name); } const std::vector& Outputs(const std::string& name) const { - return op_.Outputs(name); + return op_.Output(name); } private: - template - Tensor* GetTensor(const std::string& name) const { - Tensor* t = nullptr; - auto* var = scope_.FindVar(name); - if (!var->IsType() && !var->IsType()) { - if (Allocate) { - t = var->GetMutable(); - } else { - PADDLE_THROW("Variable(%s) should be tensor", name); - } - } else { - t = GetTensorFromVar(scope_.FindVar(name)); - } - return t; - } - DDim GetDim(const std::string& name) const { - return GetTensor(name)->dims(); + return framework::make_ddim(block_.Var(name)->Shape()); } void SetDim(const std::string& name, const DDim& dim) { - GetTensor(name)->Resize(dim); + block_.Var(name)->SetShape(framework::vectorize(dim)); } - const OperatorBase& op_; - const Scope& scope_; + const OpDescBind& op_; + const BlockDescBind& block_; }; class RuntimeInferShapeContext : public InferShapeContextBase { From ff1bfdedc97eb0834745e812abd619581fde7950 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Mon, 2 Oct 2017 17:09:25 -0700 Subject: [PATCH 157/342] Fix CRLF in sum_op.cu --- paddle/operators/sum_op.cu | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/operators/sum_op.cu b/paddle/operators/sum_op.cu index 7129e6bf62..b1896d3cd8 100644 --- a/paddle/operators/sum_op.cu +++ b/paddle/operators/sum_op.cu @@ -13,4 +13,4 @@ limitations under the License. */ #include "paddle/operators/sum_op.h" namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL(sum, ops::SumKernel); \ No newline at end of file +REGISTER_OP_GPU_KERNEL(sum, ops::SumKernel); From 61c03f9d59969d698faca1547115d80e8faa3e9d Mon Sep 17 00:00:00 2001 From: Kavya Srinet Date: Mon, 2 Oct 2017 17:52:56 -0700 Subject: [PATCH 158/342] Adding the implementation for rmsprop operator --- paddle/operators/rmsprop_op.cc | 87 +++++++++++++++++++ paddle/operators/rmsprop_op.cu | 20 +++++ paddle/operators/rmsprop_op.h | 54 ++++++++++++ .../v2/framework/tests/test_rmsprop_op.py | 37 ++++++++ 4 files changed, 198 insertions(+) create mode 100644 paddle/operators/rmsprop_op.cc create mode 100644 paddle/operators/rmsprop_op.cu create mode 100644 paddle/operators/rmsprop_op.h create mode 100644 python/paddle/v2/framework/tests/test_rmsprop_op.py diff --git a/paddle/operators/rmsprop_op.cc b/paddle/operators/rmsprop_op.cc new file mode 100644 index 0000000000..dcf3599f4d --- /dev/null +++ b/paddle/operators/rmsprop_op.cc @@ -0,0 +1,87 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/rmsprop_op.h" + +namespace paddle { +namespace operators { + +class RmspropOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(framework::InferShapeContextBase *ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("Param"), + "Input(param) of RmspropOp should not be null."); + PADDLE_ENFORCE(ctx->HasInput("Grad"), + "Input(grad) of RmspropOp should not be null."); + PADDLE_ENFORCE(ctx->HasInput("Moment"), + "Input(moment) of RmspropOp should not be null."); + + PADDLE_ENFORCE(ctx->HasOutput("ParamOut"), + "Output(param_out) of RmspropOp should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("MomentOut"), + "Output(moment_out) of RmspropOp should not be null."); + + auto param_dim = ctx->GetInputDim("Param"); + PADDLE_ENFORCE_EQ( + param_dim, ctx->GetInputDim("Grad"), + "Param and grad input of RmspropOp should have the same dimension."); + PADDLE_ENFORCE_EQ( + param_dim, ctx->GetInputDim("Moment"), + "Param and moment input of RmspropOp should have the same dimension."); + + ctx->SetOutputDim("ParamOut", param_dim); + ctx->SetOutputDim("MomentOut", param_dim); + } +}; + +class RmspropOpMaker : public framework::OpProtoAndCheckerMaker { + public: + RmspropOpMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("Param", "Input parameter"); + AddInput("Grad", "Input gradient"); + AddInput("Moment", "Second moment"); + + AddOutput("ParamOut", "Output parameter"); + AddOutput("MomentOut", "Output second moment"); + + AddAttr("learningRate", "Learning rate"); + AddAttr("epsilon", "Constant for numerical stability"); + AddAttr("decayRate", "Decay rate for moving average of gradients"); + AddComment(R"DOC( + +RMSprop + +MomentOut = decayRate * Moment + (1 - decayRate) * Grad * Grad +ParamOut = Param - learningRate * Grad / (sqrt(MomentOut) + epsilon) + +The original slide(Slide 29 of +http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf) +does not have the epsilon attribute. It is added here for numerical stability +to avoid division by zero. + +)DOC"); + } +}; +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP_WITHOUT_GRADIENT(rmsprop, ops::RmspropOp, ops::RmspropOpMaker); +REGISTER_OP_CPU_KERNEL(rmsprop, + ops::RmspropOpKernel); diff --git a/paddle/operators/rmsprop_op.cu b/paddle/operators/rmsprop_op.cu new file mode 100644 index 0000000000..52634a5481 --- /dev/null +++ b/paddle/operators/rmsprop_op.cu @@ -0,0 +1,20 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#define EIGEN_USE_GPU +#include "paddle/operators/rmsprop_op.h" + +namespace ops = paddle::operators; +REGISTER_OP_GPU_KERNEL(rmsprop, + ops::RmspropOpKernel); diff --git a/paddle/operators/rmsprop_op.h b/paddle/operators/rmsprop_op.h new file mode 100644 index 0000000000..c94c24bddd --- /dev/null +++ b/paddle/operators/rmsprop_op.h @@ -0,0 +1,54 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once +#include "paddle/framework/eigen.h" +#include "paddle/framework/op_registry.h" + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; +template +using EigenVector = framework::EigenVector; + +template +class RmspropOpKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto param_out = ctx.Output("ParamOut"); + auto moment_out = ctx.Output("MomentOut"); + + param_out->mutable_data(ctx.GetPlace()); + moment_out->mutable_data(ctx.GetPlace()); + + float lr = ctx.Attr("learningRate"); + float epsilon = ctx.Attr("epsilon"); + float decay = ctx.Attr("decayRate"); + + auto p = EigenVector::Flatten(*ctx.Input("Param")); + auto g = EigenVector::Flatten(*ctx.Input("Grad")); + auto m = EigenVector::Flatten(*ctx.Input("Moment")); + auto p_out = EigenVector::Flatten(*param_out); + auto m_out = EigenVector::Flatten(*moment_out); + auto place = ctx.GetEigenDevice(); + + m_out.device(place) = decay * m + (1 - decay) * g * g; + p_out.device(place) = p - lr * g / (m_out.sqrt() + epsilon); + } +}; + +} // namespace operators +} // namespace paddle diff --git a/python/paddle/v2/framework/tests/test_rmsprop_op.py b/python/paddle/v2/framework/tests/test_rmsprop_op.py new file mode 100644 index 0000000000..1fc59a0f11 --- /dev/null +++ b/python/paddle/v2/framework/tests/test_rmsprop_op.py @@ -0,0 +1,37 @@ +import unittest +import numpy as np +from op_test import OpTest + + +class TestRmspropOp(OpTest): + def setUp(self): + self.op_type = "rmsprop" + + param = np.random.random((123, 321)).astype("float32") + grad = np.random.random((123, 321)).astype("float32") + moment = np.zeros((123, 321)).astype("float32") + + learning_rate = 0.01 + epsilon = 1e-6 + decay_rate = 0.9 + + self.inputs = {'Param': param, 'Grad': grad, 'Moment': moment} + + self.attrs = { + 'learningRate': learning_rate, + 'epsilon': epsilon, + 'decayRate': decay_rate + } + + moment_out = decay_rate * moment + (1 - decay_rate) * grad * grad + param_out = param - learning_rate * grad / (np.sqrt(moment_out) + + epsilon) + + self.outputs = {'ParamOut': param_out, 'MomentOut': moment_out} + + def test_check_output(self): + self.check_output() + + +if __name__ == "__main__": + unittest.main() From 455436e5148ad0a84cae89e46931e4785c57870d Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Mon, 2 Oct 2017 18:04:00 -0700 Subject: [PATCH 159/342] fix compile problem --- paddle/framework/operator.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h index 2874237b9c..5bb5c8e2f9 100644 --- a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -22,6 +22,7 @@ limitations under the License. */ #include "op_info.h" #include "paddle/framework/attribute.h" +#include "paddle/framework/block_desc.h" #include "paddle/framework/data_type.h" #include "paddle/framework/framework.pb.h" #include "paddle/framework/lod_tensor.h" @@ -349,7 +350,7 @@ class CompileTimeInferShapeContext : public InferShapeContextBase { const std::vector& output_names = op_.Output(name); PADDLE_ENFORCE_GT(output_names.size(), 0UL, "Inputs(%s) length is 0", name); for (auto& output : output_names) { - if (!block_.HasVar(name)) return false; + if (!block_.HasVar(output)) return false; } return true; } From 46c551b2997537a70ea82fd55067fd57cc4c59d5 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Mon, 2 Oct 2017 18:27:21 -0700 Subject: [PATCH 160/342] Complete Register Gradient in compile time --- paddle/framework/backward_test.cc | 32 ++++++++----- paddle/framework/details/op_registry.h | 1 - paddle/framework/framework.proto | 1 - paddle/framework/op_info.h | 3 ++ paddle/framework/op_registry.h | 1 - paddle/operators/mean_op.cc | 21 ++++++++- paddle/operators/minus_op.cc | 46 +++++++++---------- paddle/operators/pad_op.cc | 22 +++++++-- paddle/operators/scale_op.cc | 33 ++++++------- .../softmax_with_cross_entropy_op.cc | 45 ++++++++++++------ paddle/operators/sum_op.cc | 41 +++++++++-------- 11 files changed, 152 insertions(+), 94 deletions(-) diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index 85f1dd91ed..93688c383b 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -21,24 +21,34 @@ namespace paddle { namespace framework { -using OperatorBase = framework::OperatorBase; -using OpProtoAndCheckerMaker = framework::OpProtoAndCheckerMaker; -using OpProto = framework::OpProto; -using OpAttrChecker = framework::OpAttrChecker; -using Scope = framework::Scope; using DeviceContext = platform::DeviceContext; class RowWiseAddOpMaker : public OpProtoAndCheckerMaker { public: RowWiseAddOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "Input X of Add").NotInGradient(); - AddInput("b", "Bias of Add").NotInGradient(); - AddOutput("Out", "Out of Add").NotInGradient(); + AddInput("X", "Input X of Add"); + AddInput("b", "Bias of Add"); + AddOutput("Out", "Out of Add"); AddComment("Add Op"); } }; +class RowWiseAddGradMaker : public SingleGradOpDescMaker { + public: + using SingleGradOpDescMaker::SingleGradOpDescMaker; + + protected: + OpDescBind Apply() const override { + OpDescBind grad_op; + grad_op.SetInput(GradVarName("Out"), OutputGrad("Out")); + grad_op.SetOutput(GradVarName("X"), InputGrad("X")); + grad_op.SetOutput(GradVarName("b"), InputGrad("b")); + grad_op.SetType("rowwise_add_grad"); + return grad_op; + } +}; + class MulOpMaker : public OpProtoAndCheckerMaker { public: MulOpMaker(OpProto *proto, OpAttrChecker *op_checker) @@ -148,8 +158,9 @@ class AddOpMaker : public OpProtoAndCheckerMaker { namespace f = paddle::framework; namespace ops = paddle::operators; using EnforceNotMet = paddle::platform::EnforceNotMet; -REGISTER_OP(rowwise_add, f::NOP, f::RowWiseAddOpMaker, rowwise_add_grad, - f::NOP); +REGISTER_OPERATOR(rowwise_add, f::NOP, f::RowWiseAddOpMaker, + f::RowWiseAddGradMaker); +REGISTER_OPERATOR(rowwise_add_grad, f::NOP); REGISTER_OP(mul, f::NOP, f::MulOpMaker, mul_grad, f::NOP); REGISTER_OP(sigmoid, f::NOP, f::SigmoidOpMaker, sigmoid_grad, f::NOP); REGISTER_OP_WITHOUT_GRADIENT(nograd, f::NOP, f::NoGradOpMaker); @@ -378,7 +389,6 @@ TEST(Backward, linear_net_intermediate_variable_has_no_grad) { + 1UL /* external output number*/ + 1UL /* number of gradient of external output*/ + 2U /* internal variable number*/); - std::cerr << grad_fc.DebugString() << std::endl; EXPECT_EQ(grad_fc.Outputs(all).size(), 2UL /* input number of mul*/ diff --git a/paddle/framework/details/op_registry.h b/paddle/framework/details/op_registry.h index c805dae7d7..daa474e8c5 100644 --- a/paddle/framework/details/op_registry.h +++ b/paddle/framework/details/op_registry.h @@ -85,7 +85,6 @@ struct OpInfoFiller { info->proto_ = new OpProto; info->checker_ = new OpAttrChecker(); auto maker = T(info->proto_, info->checker_); - std::cerr << "Assign Maker " << op_type << std::endl; maker.Validate(); info->proto_->set_type(op_type); PADDLE_ENFORCE( diff --git a/paddle/framework/framework.proto b/paddle/framework/framework.proto index 951c7afbc1..e90a816afa 100644 --- a/paddle/framework/framework.proto +++ b/paddle/framework/framework.proto @@ -66,7 +66,6 @@ message OpProto { optional bool duplicable = 3 [ default = false ]; optional bool intermediate = 4 [ default = false ]; - optional bool not_in_gradient = 5 [ default = false ]; } // AttrProto describes the C++ type Attribute. diff --git a/paddle/framework/op_info.h b/paddle/framework/op_info.h index 2d9568c320..8c2a9178a7 100644 --- a/paddle/framework/op_info.h +++ b/paddle/framework/op_info.h @@ -17,11 +17,14 @@ #include #include #include + #include "paddle/framework/attribute.h" #include "paddle/framework/op_desc.h" #include "paddle/framework/type_defs.h" #include "paddle/platform/macros.h" +#include "glog/logging.h" + namespace paddle { namespace framework { diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h index d14f70008b..da112fa488 100644 --- a/paddle/framework/op_registry.h +++ b/paddle/framework/op_registry.h @@ -46,7 +46,6 @@ class Registrar { template struct OperatorRegistrar : public Registrar { explicit OperatorRegistrar(const char* op_type) : op_type(op_type) { - std::cerr << "Reg operator " << op_type << std::endl; PADDLE_ENFORCE(!OpInfoMap::Instance().Has(op_type), "'%s' is registered more than once.", op_type); static_assert(sizeof...(ARGS) != 0, diff --git a/paddle/operators/mean_op.cc b/paddle/operators/mean_op.cc index d799239d4e..0c84cbb5a7 100644 --- a/paddle/operators/mean_op.cc +++ b/paddle/operators/mean_op.cc @@ -36,7 +36,7 @@ class MeanOpMaker : public framework::OpProtoAndCheckerMaker { MeanOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "The input of mean op"); - AddOutput("Out", "The output of mean op").NotInGradient(); + AddOutput("Out", "The output of mean op"); AddComment(R"DOC( Mean Operator )DOC"); } @@ -52,11 +52,28 @@ class MeanGradOp : public framework::OperatorWithKernel { } }; +class MeanGradMaker : public framework::SingleGradOpDescMaker { + public: + using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; + + protected: + framework::OpDescBind Apply() const override { + framework::OpDescBind grad_op; + grad_op.SetType("mean_grad"); + grad_op.SetInput("X", Input("X")); + grad_op.SetInput(framework::GradVarName("Out"), OutputGrad("Out")); + grad_op.SetOutput(framework::GradVarName("X"), InputGrad("X")); + return grad_op; + } +}; + } // namespace operators } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP(mean, ops::MeanOp, ops::MeanOpMaker, mean_grad, ops::MeanGradOp); + +REGISTER_OPERATOR(mean, ops::MeanOp, ops::MeanOpMaker, ops::MeanGradMaker); +REGISTER_OPERATOR(mean_grad, ops::MeanGradOp); REGISTER_OP_CPU_KERNEL(mean, ops::MeanKernel); REGISTER_OP_CPU_KERNEL(mean_grad, diff --git a/paddle/operators/minus_op.cc b/paddle/operators/minus_op.cc index ce049d4d7b..1b3ae9a9a6 100644 --- a/paddle/operators/minus_op.cc +++ b/paddle/operators/minus_op.cc @@ -49,9 +49,9 @@ class MinusOpMaker : public framework::OpProtoAndCheckerMaker { public: MinusOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "The left tensor of minus operator.").NotInGradient(); - AddInput("Y", "The right tensor of minus operator.").NotInGradient(); - AddOutput("Out", "The output tensor of minus operator.").NotInGradient(); + AddInput("X", "The left tensor of minus operator."); + AddInput("Y", "The right tensor of minus operator."); + AddOutput("Out", "The output tensor of minus operator."); AddComment(R"DOC(Minus Operator @@ -64,26 +64,25 @@ or not. But the output only shares the LoD with input `X`. )DOC"); } }; -template -class MinusGradOp : public NetOp { + +class MinusGradMaker : public framework::GradOpDescMakerBase { public: - MinusGradOp(const std::string &type, const framework::VariableNameMap &inputs, - const framework::VariableNameMap &outputs, - const framework::AttributeMap &attrs) - : NetOp(type, inputs, outputs, attrs) { - auto out_grad = Input(framework::GradVarName("Out")); - auto x_grad = Output(framework::GradVarName("X")); - auto y_grad = Output(framework::GradVarName("Y")); - - // x_grad = out_grad - AppendOp(framework::OpRegistry::CreateOp("identity", {{"X", {out_grad}}}, - {{"Y", {x_grad}}}, {})); - - framework::AttributeMap scale_attr; - scale_attr["scale"] = static_cast(-1); - AppendOp(framework::OpRegistry::CreateOp("scale", {{"X", {out_grad}}}, - {{"Out", {y_grad}}}, scale_attr)); - CompleteAddOp(false); + using framework::GradOpDescMakerBase::GradOpDescMakerBase; + + std::vector operator()() const override { + std::vector ops; + ops.resize(2); + + ops[0].SetType("scale"); + ops[0].SetInput("X", OutputGrad("Out")); + ops[0].SetOutput("Out", InputGrad("X")); + ops[0].SetAttr("scale", 1.0f); + + ops[1].SetType("scale"); + ops[1].SetInput("X", OutputGrad("Out")); + ops[1].SetOutput("Out", InputGrad("Y")); + ops[1].SetAttr("scale", -1.0f); + return ops; } }; @@ -91,7 +90,6 @@ class MinusGradOp : public NetOp { } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP(minus, ops::MinusOp, ops::MinusOpMaker, minus_grad, - ops::MinusGradOp); +REGISTER_OPERATOR(minus, ops::MinusOp, ops::MinusOpMaker, ops::MinusGradMaker); REGISTER_OP_CPU_KERNEL(minus, ops::MinusKernel); diff --git a/paddle/operators/pad_op.cc b/paddle/operators/pad_op.cc index 04ebb14f6e..4bd25fa46a 100644 --- a/paddle/operators/pad_op.cc +++ b/paddle/operators/pad_op.cc @@ -56,8 +56,7 @@ class PadOpMaker : public framework::OpProtoAndCheckerMaker { "The input should be a k-D tensor(k > 0 and k < 7)"); AddOutput("Out", "The output of pad op." - "A tensor with the same shape as X.") - .NotInGradient(); + "A tensor with the same shape as X."); AddComment(R"DOC( Pad input into output, as specified by paddings and pad_value. The input should be a k-D tensor(k > 0 and k < 7). As an example: @@ -111,11 +110,28 @@ class PadOpGrad : public framework::OperatorWithKernel { } }; +class PadOpGradMaker : public framework::SingleGradOpDescMaker { + protected: + framework::OpDescBind Apply() const override { + framework::OpDescBind bind; + bind.SetInput("X", Input("X")); + bind.SetInput(framework::GradVarName("Out"), OutputGrad("Out")); + bind.SetOutput(framework::GradVarName("X"), InputGrad("X")); + bind.SetAttrMap(Attrs()); + return bind; + } + + public: + using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; +}; + } // namespace operators } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP(pad, ops::PadOp, ops::PadOpMaker, pad_grad, ops::PadOpGrad); + +REGISTER_OPERATOR(pad, ops::PadOp, ops::PadOpMaker, ops::PadOpGradMaker); +REGISTER_OPERATOR(pad_grad, ops::PadOpGrad); REGISTER_OP_CPU_KERNEL(pad, ops::PadKernel); REGISTER_OP_CPU_KERNEL(pad_grad, ops::PadGradKernel); diff --git a/paddle/operators/scale_op.cc b/paddle/operators/scale_op.cc index e92501e128..40f0960923 100644 --- a/paddle/operators/scale_op.cc +++ b/paddle/operators/scale_op.cc @@ -41,8 +41,8 @@ class ScaleOpMaker : public framework::OpProtoAndCheckerMaker { public: ScaleOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "The input tensor of scale operator.").NotInGradient(); - AddOutput("Out", "The output tensor of scale operator.").NotInGradient(); + AddInput("X", "The input tensor of scale operator."); + AddOutput("Out", "The output tensor of scale operator."); AddComment(R"DOC(Scale operator The equation is: Out = scale*X @@ -52,21 +52,18 @@ The equation is: Out = scale*X } }; -// The operator to calculate gradients of a scale operator is just the scale -// operator itself. -// Grad(Out=scale(X)) => Grad(X) = scale(Grad(Out)) -template -class ScaleGradOp : public NetOp { +class ScaleGradMaker : public framework::SingleGradOpDescMaker { public: - ScaleGradOp(const std::string &type, const framework::VariableNameMap &inputs, - const framework::VariableNameMap &outputs, - const framework::AttributeMap &attrs) - : NetOp(type, inputs, outputs, attrs) { - AppendOp(framework::OpRegistry::CreateOp( - "scale", {{"X", {Input(framework::GradVarName("Out"))}}}, - {{"Out", {Output(framework::GradVarName("X"))}}}, - {{"scale", Attr("scale")}})); - CompleteAddOp(false); + using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; + + protected: + framework::OpDescBind Apply() const override { + framework::OpDescBind grad_op; + grad_op.SetType("scale"); + grad_op.SetInput("X", OutputGrad("Out")); + grad_op.SetOutput("Out", InputGrad("X")); + grad_op.SetAttr("scale", GetAttr("scale")); + return grad_op; } }; @@ -75,7 +72,7 @@ class ScaleGradOp : public NetOp { namespace ops = paddle::operators; -REGISTER_OP(scale, ops::ScaleOp, ops::ScaleOpMaker, scale_grad, - ops::ScaleGradOp); +REGISTER_OPERATOR(scale, ops::ScaleOp, ops::ScaleOpMaker, + ops::ScaleGradMaker); REGISTER_OP_CPU_KERNEL(scale, ops::ScaleKernel); diff --git a/paddle/operators/softmax_with_cross_entropy_op.cc b/paddle/operators/softmax_with_cross_entropy_op.cc index a76489871f..87dcc3f240 100644 --- a/paddle/operators/softmax_with_cross_entropy_op.cc +++ b/paddle/operators/softmax_with_cross_entropy_op.cc @@ -27,15 +27,14 @@ class SoftmaxWithCrossEntropyOpMaker AddInput("Logits", "(Tensor, default: Tensor), The unscaled log probabilities " "which is a 2-D tensor with shape [N x K]. N is the batch_size, " - "and K is the class number.") - .NotInGradient(); - AddInput( - "Label", - "(Tensor, default: Tensor), The ground truth which is a 2-D " - "tensor. " - "If softLable is set to 0, Label is a Tensor with shape [N x 1]. " - "If softLable is set to 1, Label is a Tensor " - "with shape [N x K]."); + "and K is the class number."); + AddInput("Label", + "(Tensor, default: Tensor), The ground truth which is a 2-D " + "tensor. " + "If softLable is set to 0, Label is a Tensor with shape [N x " + "1]. " + "If softLable is set to 1, Label is a Tensor " + "with shape [N x K]."); AddOutput( "Softmax", "(Tensor, default: Tensor), A 2-D tensor with shape [N x K]. " @@ -163,15 +162,35 @@ class SoftmaxWithCrossEntropyOpGrad : public framework::OperatorWithKernel { } }; +class SoftmaxGradMaker : public framework::SingleGradOpDescMaker { + public: + using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; + + protected: + framework::OpDescBind Apply() const override { + framework::OpDescBind grad_op; + grad_op.SetType("softmax_with_cross_entropy_grad"); + grad_op.SetInput("Label", Input("Label")); + grad_op.SetInput("Softmax", Output("Softmax")); + grad_op.SetInput("Loss", Output("Loss")); + grad_op.SetInput(framework::GradVarName("Softmax"), OutputGrad("Softmax")); + grad_op.SetInput(framework::GradVarName("Loss"), OutputGrad("Loss")); + grad_op.SetOutput(framework::GradVarName("Logits"), InputGrad("Logits")); + grad_op.SetAttrMap(Attrs()); + return grad_op; + } +}; + } // namespace operators } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP(softmax_with_cross_entropy, ops::SoftmaxWithCrossEntropyOp, - ops::SoftmaxWithCrossEntropyOpMaker, - softmax_with_cross_entropy_grad, - ops::SoftmaxWithCrossEntropyOpGrad); +REGISTER_OPERATOR(softmax_with_cross_entropy, ops::SoftmaxWithCrossEntropyOp, + ops::SoftmaxWithCrossEntropyOpMaker, + ops::SoftmaxWithCrossEntropyOpMaker); +REGISTER_OPERATOR(softmax_with_cross_entropy_grad, + ops::SoftmaxWithCrossEntropyOpGrad); REGISTER_OP_CPU_KERNEL(softmax_with_cross_entropy, ops::SoftmaxWithCrossEntropyKernel); REGISTER_OP_CPU_KERNEL(softmax_with_cross_entropy_grad, diff --git a/paddle/operators/sum_op.cc b/paddle/operators/sum_op.cc index 7c422b4770..5ae13492b3 100644 --- a/paddle/operators/sum_op.cc +++ b/paddle/operators/sum_op.cc @@ -45,10 +45,8 @@ class SumOpMaker : public framework::OpProtoAndCheckerMaker { public: SumOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "the input tensors of sum operator.") - .AsDuplicable() - .NotInGradient(); - AddOutput("Out", "the output tensor of sum operator.").NotInGradient(); + AddInput("X", "the input tensors of sum operator.").AsDuplicable(); + AddOutput("Out", "the output tensor of sum operator."); AddComment(R"DOC( Sum the input tensors. @@ -58,23 +56,25 @@ or not. But the output only shares the LoD with the first input. } }; -class SumGradOp : public NetOp { +class SumGradMaker : public framework::GradOpDescMakerBase { public: - SumGradOp(const std::string& type, const framework::VariableNameMap& inputs, - const framework::VariableNameMap& outputs, - const framework::AttributeMap& attrs) - : NetOp(type, inputs, outputs, attrs) { - auto& x_grad_names = Outputs(framework::GradVarName("X")); - auto out_grad_name = this->Input(framework::GradVarName("Out")); + using framework::GradOpDescMakerBase::GradOpDescMakerBase; - framework::AttributeMap grad_attrs; - grad_attrs["scale"] = 1.0f; - for (auto& x_grad_name : x_grad_names) { - AppendOp(framework::OpRegistry::CreateOp( - "scale", {{"X", {out_grad_name}}}, {{"Out", {x_grad_name}}}, - grad_attrs)); - } - CompleteAddOp(false); + std::vector operator()() const override { + auto x_grads = InputGrad("X"); + std::vector grad_ops; + grad_ops.reserve(x_grads.size()); + auto og = OutputGrad("Out"); + std::transform(x_grads.begin(), x_grads.end(), std::back_inserter(grad_ops), + [&og](const std::string& x_grad) { + framework::OpDescBind grad_op; + grad_op.SetType("scale"); + grad_op.SetInput("X", og); + grad_op.SetOutput("Out", {x_grad}); + grad_op.SetAttr("scale", 1.0f); + return grad_op; + }); + return grad_ops; } }; @@ -82,5 +82,6 @@ class SumGradOp : public NetOp { } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP(sum, ops::SumOp, ops::SumOpMaker, sum_grad, ops::SumGradOp); + +REGISTER_OPERATOR(sum, ops::SumOp, ops::SumOpMaker, ops::SumGradMaker); REGISTER_OP_CPU_KERNEL(sum, ops::SumKernel); From 2ccaec4f57afe94f36ee4781bae6e0eec78b29a8 Mon Sep 17 00:00:00 2001 From: zchen0211 Date: Mon, 2 Oct 2017 18:31:55 -0700 Subject: [PATCH 161/342] gather scatter cond --- paddle/operators/cond_op.cc | 5 ++--- paddle/operators/gather.h | 4 ++-- paddle/operators/gather_op.h | 4 ++-- paddle/operators/gather_test.cc | 4 +++- paddle/operators/scatter.h | 4 ++-- paddle/operators/scatter_op.h | 4 ++-- paddle/operators/scatter_test.cc | 4 +++- 7 files changed, 16 insertions(+), 13 deletions(-) diff --git a/paddle/operators/cond_op.cc b/paddle/operators/cond_op.cc index 55822827d9..7d7f1ba3b1 100644 --- a/paddle/operators/cond_op.cc +++ b/paddle/operators/cond_op.cc @@ -126,8 +126,7 @@ void CondOp::PrepareDataForSubnet( dim[0] = index_tensors[i].dims()[0]; tensor_child->mutable_data(dim, platform::CPUPlace()); - CPUGather(dev_ctx.GetPlace(), tensor_parent, &index_tensors[i], - tensor_child); + CPUGather(dev_ctx, tensor_parent, &index_tensors[i], tensor_child); } } @@ -188,7 +187,7 @@ void CondOp::MergeDataFromSubnet(const framework::Scope& scope, Variable* var_child = sub_scopes[i]->FindVar(output); PADDLE_ENFORCE_NOT_NULL(var_child); auto* tensor_child = &var_child->Get(); - ScatterAssign(dev_ctx.GetPlace(), tensor_child, &index_tensors[i], + ScatterAssign(dev_ctx, tensor_child, &index_tensors[i], tensor_parent); } } diff --git a/paddle/operators/gather.h b/paddle/operators/gather.h index cb635f6825..1e39a6da27 100644 --- a/paddle/operators/gather.h +++ b/paddle/operators/gather.h @@ -32,11 +32,11 @@ namespace operators { * return: output tensor */ template -void CPUGather(const platform::Place& place, +void CPUGather(const platform::DeviceContext& ctx, const paddle::framework::Tensor* src, const paddle::framework::Tensor* index, paddle::framework::Tensor* output) { - PADDLE_ENFORCE(platform::is_cpu_place(place)); + PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace())); // check index of shape 1-D PADDLE_ENFORCE(index->dims().size() == 1); int index_size = index->dims()[0]; diff --git a/paddle/operators/gather_op.h b/paddle/operators/gather_op.h index fb065b8da7..5bd2c36f7b 100644 --- a/paddle/operators/gather_op.h +++ b/paddle/operators/gather_op.h @@ -36,7 +36,7 @@ class GatherOpKernel : public framework::OpKernel { output->mutable_data(ctx.GetPlace()); - CPUGather(ctx.GetPlace(), x, index, output); + CPUGather(ctx.device_context(), x, index, output); } }; @@ -56,7 +56,7 @@ class GatherGradientOpKernel : public framework::OpKernel { auto place = ctx.GetEigenDevice(); dxt.device(place) = dxt.constant(static_cast(0)); - ScatterAssign(ctx.GetPlace(), dO, Index, dX); + ScatterAssign(ctx.device_context(), dO, Index, dX); } }; diff --git a/paddle/operators/gather_test.cc b/paddle/operators/gather_test.cc index 3c1d06ccd1..d8bf8dd9a4 100644 --- a/paddle/operators/gather_test.cc +++ b/paddle/operators/gather_test.cc @@ -41,7 +41,9 @@ TEST(Gather, GatherData) { int* p_output = output->mutable_data(make_ddim({2, 4}), CPUPlace()); - CPUGather(CPUPlace(), src, index, output); + auto* cpu_place = new paddle::platform::CPUPlace(); + paddle::platform::CPUDeviceContext ctx(*cpu_place); + CPUGather(ctx, src, index, output); for (int i = 0; i < 4; ++i) EXPECT_EQ(p_output[i], i + 4); for (int i = 4; i < 8; ++i) EXPECT_EQ(p_output[i], i - 4); diff --git a/paddle/operators/scatter.h b/paddle/operators/scatter.h index f895f22e28..0d174d3b5b 100644 --- a/paddle/operators/scatter.h +++ b/paddle/operators/scatter.h @@ -33,11 +33,11 @@ using Tensor = framework::Tensor; * return: output tensor */ template -void ScatterAssign(const platform::Place& place, +void ScatterAssign(const platform::DeviceContext& ctx, const paddle::framework::Tensor* src, const paddle::framework::Tensor* index, paddle::framework::Tensor* output) { - PADDLE_ENFORCE(platform::is_cpu_place(place)); + PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace())); // check index of shape 1-D PADDLE_ENFORCE(index->dims().size() == 1); int index_size = index->dims()[0]; diff --git a/paddle/operators/scatter_op.h b/paddle/operators/scatter_op.h index 771a1f2ddb..ac04968549 100644 --- a/paddle/operators/scatter_op.h +++ b/paddle/operators/scatter_op.h @@ -37,7 +37,7 @@ class ScatterOpKernel : public framework::OpKernel { // In place output: Out = Ref, Out[Index] += Updates Out->ShareDataWith(*Ref); // Apply ScatterUpdate: Out[index] += Updates[:] - ScatterAssign(ctx.GetPlace(), Updates, Index, Out); + ScatterAssign(ctx.device_context(), Updates, Index, Out); } }; @@ -56,7 +56,7 @@ class ScatterGradientOpKernel : public framework::OpKernel { dRef->ShareDataWith(*dOut); dUpdates->mutable_data(ctx.GetPlace()); // Gradient by Gather: dUpdates += dO[Index] - CPUGather(ctx.GetPlace(), dOut, Index, dUpdates); + CPUGather(ctx.device_context(), dOut, Index, dUpdates); } }; diff --git a/paddle/operators/scatter_test.cc b/paddle/operators/scatter_test.cc index bace6419d0..321bba3dad 100644 --- a/paddle/operators/scatter_test.cc +++ b/paddle/operators/scatter_test.cc @@ -40,7 +40,9 @@ TEST(scatter, ScatterUpdate) { float* p_output = output->mutable_data(make_ddim({4, 4}), CPUPlace()); - ScatterAssign(CPUPlace(), src, index, output); + auto* cpu_place = new paddle::platform::CPUPlace(); + paddle::platform::CPUDeviceContext ctx(*cpu_place); + ScatterAssign(ctx, src, index, output); for (size_t i = 0; i < 4; ++i) EXPECT_EQ(p_output[i], float(0)); for (size_t i = 0; i < 4; ++i) EXPECT_EQ(output->data()[i], float(0)); From b3e479da1c9cdb580e4577ebdafc5ec451ca4ed2 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Mon, 2 Oct 2017 18:38:49 -0700 Subject: [PATCH 162/342] Fix CI --- paddle/framework/grad_op_builder_test.cc | 22 ---------------------- 1 file changed, 22 deletions(-) diff --git a/paddle/framework/grad_op_builder_test.cc b/paddle/framework/grad_op_builder_test.cc index 55c5fa420e..2dbc2e6620 100644 --- a/paddle/framework/grad_op_builder_test.cc +++ b/paddle/framework/grad_op_builder_test.cc @@ -39,28 +39,6 @@ class IOIgnoredOpMaker : public OpProtoAndCheckerMaker { namespace f = paddle::framework; -TEST(GradOpBuilder, AddTwo) { - std::shared_ptr add_op(f::OpRegistry::CreateOp( - "sum", {{"X", {"x", "y"}}}, {{"Out", {"out"}}}, {})); - std::shared_ptr grad_add_op = - f::OpRegistry::CreateGradOp(*add_op); - - EXPECT_EQ(grad_add_op->Inputs().size(), 1UL); - EXPECT_EQ(grad_add_op->Outputs().size(), 1UL); - EXPECT_EQ(grad_add_op->Input(f::GradVarName("Out")), f::GradVarName("out")); - auto &outputs = grad_add_op->Outputs(f::GradVarName("X")); - EXPECT_EQ(2UL, outputs.size()); - auto in_output = [&outputs](const std::string &name) { - for (auto &output_name : outputs) { - if (output_name == name) return true; - } - return false; - }; - - EXPECT_TRUE(in_output(f::GradVarName("x"))); - EXPECT_TRUE(in_output(f::GradVarName("y"))); -} - REGISTER_OP(mult_io, f::NOP, f::MutiInOutOpMaker, mult_io_grad, f::NOP); REGISTER_OP(io_ignored, f::NOP, f::IOIgnoredOpMaker, io_ignored_grad, f::NOP); From d1de7ec6304af675a666539c7991bf4bf242f738 Mon Sep 17 00:00:00 2001 From: Kexin Zhao Date: Mon, 2 Oct 2017 18:55:46 -0700 Subject: [PATCH 163/342] Change learning rate from attribute to input tensor --- paddle/operators/adagrad_op.cc | 7 ++++++- paddle/operators/adagrad_op.h | 9 ++++++++- python/paddle/v2/framework/tests/test_adagrad_op.py | 5 ++--- 3 files changed, 16 insertions(+), 5 deletions(-) diff --git a/paddle/operators/adagrad_op.cc b/paddle/operators/adagrad_op.cc index 03e22cc600..56a5fbcb86 100644 --- a/paddle/operators/adagrad_op.cc +++ b/paddle/operators/adagrad_op.cc @@ -29,12 +29,17 @@ class AdagradOp : public framework::OperatorWithKernel { "Input(grad) of AdagradOp should not be null."); PADDLE_ENFORCE(ctx->HasInput("moment"), "Input(moment) of AdagradOp should not be null."); + PADDLE_ENFORCE(ctx->HasInput("learning_rate"), + "Input(learning_rate) of AdagradOp should not be null."); PADDLE_ENFORCE(ctx->HasOutput("param_out"), "Output(param_out) of AdagradOp should not be null."); PADDLE_ENFORCE(ctx->HasOutput("moment_out"), "Output(moment_out) of AdagradOp should not be null."); + auto lr_dims = ctx->GetInputDim("learning_rate"); + PADDLE_ENFORCE_EQ(framework::product(lr_dims), 1, + "learning_rate should have one element"); auto param_dim = ctx->GetInputDim("param"); PADDLE_ENFORCE_EQ( param_dim, ctx->GetInputDim("grad"), @@ -56,11 +61,11 @@ class AdagradOpMaker : public framework::OpProtoAndCheckerMaker { AddInput("param", "Input parameter"); AddInput("grad", "Input gradient"); AddInput("moment", "Second moment"); + AddInput("learning_rate", "learning rate of adagrad"); AddOutput("param_out", "Output parameter"); AddOutput("moment_out", "Output second moment"); - AddAttr("learning_rate", "Learning rate"); AddAttr("epsilon", "Constant for numerical stability"); AddComment(R"DOC( diff --git a/paddle/operators/adagrad_op.h b/paddle/operators/adagrad_op.h index ca1836c3fa..73833d4a3f 100644 --- a/paddle/operators/adagrad_op.h +++ b/paddle/operators/adagrad_op.h @@ -20,6 +20,11 @@ namespace paddle { namespace operators { using Tensor = framework::Tensor; + +template +using EigenScalar = framework::EigenScalar; + template using EigenVector = framework::EigenVector; @@ -34,12 +39,14 @@ class AdagradOpKernel : public framework::OpKernel { param_out->mutable_data(ctx.GetPlace()); moment_out->mutable_data(ctx.GetPlace()); - float lr = ctx.Attr("learning_rate"); + float lr = ctx.Input("learning_rate")->data()[0]; float epsilon = ctx.Attr("epsilon"); auto p = EigenVector::Flatten(*ctx.Input("param")); auto g = EigenVector::Flatten(*ctx.Input("grad")); auto m = EigenVector::Flatten(*ctx.Input("moment")); + auto lr = EigenScalar::From(*ctx.Input("learning_rate")); + auto p_out = EigenVector::Flatten(*param_out); auto m_out = EigenVector::Flatten(*moment_out); auto place = ctx.GetEigenDevice(); diff --git a/python/paddle/v2/framework/tests/test_adagrad_op.py b/python/paddle/v2/framework/tests/test_adagrad_op.py index b3f8b812e1..2ee38ea37c 100644 --- a/python/paddle/v2/framework/tests/test_adagrad_op.py +++ b/python/paddle/v2/framework/tests/test_adagrad_op.py @@ -11,7 +11,7 @@ class TestAdagradOp(OpTest): grad = np.random.random((123, 321)).astype("float32") moment = np.zeros((123, 321)).astype("float32") - learning_rate = 0.01 + lr = np.array([0.01]).astype("float32") epsilon = 1e-6 self.inputs = {'param': param, 'grad': grad, 'moment': moment} @@ -19,8 +19,7 @@ class TestAdagradOp(OpTest): self.attrs = {'learning_rate': learning_rate, 'epsilon': epsilon} moment_out = moment + grad * grad - param_out = param - learning_rate * grad / (np.sqrt(moment_out) + - epsilon) + param_out = param - lr * grad / (np.sqrt(moment_out) + epsilon) self.outputs = {'param_out': param_out, 'moment_out': moment_out} From 163d28714349d51596be1cb165f93be2b8290bda Mon Sep 17 00:00:00 2001 From: Kavya Srinet Date: Mon, 2 Oct 2017 19:23:05 -0700 Subject: [PATCH 164/342] Made learning rate the input --- paddle/operators/rmsprop_op.cc | 16 +++++++++++----- paddle/operators/rmsprop_op.h | 2 +- .../paddle/v2/framework/tests/test_rmsprop_op.py | 15 ++++++++------- 3 files changed, 20 insertions(+), 13 deletions(-) diff --git a/paddle/operators/rmsprop_op.cc b/paddle/operators/rmsprop_op.cc index dcf3599f4d..602efab3db 100644 --- a/paddle/operators/rmsprop_op.cc +++ b/paddle/operators/rmsprop_op.cc @@ -24,11 +24,13 @@ class RmspropOp : public framework::OperatorWithKernel { protected: void InferShape(framework::InferShapeContextBase *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("Param"), - "Input(param) of RmspropOp should not be null."); + "Input(Param) of RmspropOp should not be null."); PADDLE_ENFORCE(ctx->HasInput("Grad"), - "Input(grad) of RmspropOp should not be null."); + "Input(Grad) of RmspropOp should not be null."); PADDLE_ENFORCE(ctx->HasInput("Moment"), - "Input(moment) of RmspropOp should not be null."); + "Input(Moment) of RmspropOp should not be null."); + PADDLE_ENFORCE(ctx->HasInput("LearningRate"), + "Input(LearningRate) of RmspropOp should not be null."); PADDLE_ENFORCE(ctx->HasOutput("ParamOut"), "Output(param_out) of RmspropOp should not be null."); @@ -43,6 +45,10 @@ class RmspropOp : public framework::OperatorWithKernel { param_dim, ctx->GetInputDim("Moment"), "Param and moment input of RmspropOp should have the same dimension."); + auto lr_dim = ctx->GetInputDim("LearningRate"); + PADDLE_ENFORCE_EQ(framework::product(lr_dim), 1, + "Learning Rate should be a scalar."); + ctx->SetOutputDim("ParamOut", param_dim); ctx->SetOutputDim("MomentOut", param_dim); } @@ -56,11 +62,11 @@ class RmspropOpMaker : public framework::OpProtoAndCheckerMaker { AddInput("Param", "Input parameter"); AddInput("Grad", "Input gradient"); AddInput("Moment", "Second moment"); + AddInput("LearningRate", "Learning Rate"); AddOutput("ParamOut", "Output parameter"); AddOutput("MomentOut", "Output second moment"); - AddAttr("learningRate", "Learning rate"); AddAttr("epsilon", "Constant for numerical stability"); AddAttr("decayRate", "Decay rate for moving average of gradients"); AddComment(R"DOC( @@ -68,7 +74,7 @@ class RmspropOpMaker : public framework::OpProtoAndCheckerMaker { RMSprop MomentOut = decayRate * Moment + (1 - decayRate) * Grad * Grad -ParamOut = Param - learningRate * Grad / (sqrt(MomentOut) + epsilon) +ParamOut = Param - LearningRate * Grad / (sqrt(MomentOut) + epsilon) The original slide(Slide 29 of http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf) diff --git a/paddle/operators/rmsprop_op.h b/paddle/operators/rmsprop_op.h index c94c24bddd..65b9edd35b 100644 --- a/paddle/operators/rmsprop_op.h +++ b/paddle/operators/rmsprop_op.h @@ -34,13 +34,13 @@ class RmspropOpKernel : public framework::OpKernel { param_out->mutable_data(ctx.GetPlace()); moment_out->mutable_data(ctx.GetPlace()); - float lr = ctx.Attr("learningRate"); float epsilon = ctx.Attr("epsilon"); float decay = ctx.Attr("decayRate"); auto p = EigenVector::Flatten(*ctx.Input("Param")); auto g = EigenVector::Flatten(*ctx.Input("Grad")); auto m = EigenVector::Flatten(*ctx.Input("Moment")); + float lr = ctx.Input("LearningRate")->data()[0]; auto p_out = EigenVector::Flatten(*param_out); auto m_out = EigenVector::Flatten(*moment_out); auto place = ctx.GetEigenDevice(); diff --git a/python/paddle/v2/framework/tests/test_rmsprop_op.py b/python/paddle/v2/framework/tests/test_rmsprop_op.py index 1fc59a0f11..64ca5da48e 100644 --- a/python/paddle/v2/framework/tests/test_rmsprop_op.py +++ b/python/paddle/v2/framework/tests/test_rmsprop_op.py @@ -10,19 +10,20 @@ class TestRmspropOp(OpTest): param = np.random.random((123, 321)).astype("float32") grad = np.random.random((123, 321)).astype("float32") moment = np.zeros((123, 321)).astype("float32") + learning_rate = np.array([0.01]).astype("float32") - learning_rate = 0.01 epsilon = 1e-6 decay_rate = 0.9 - self.inputs = {'Param': param, 'Grad': grad, 'Moment': moment} - - self.attrs = { - 'learningRate': learning_rate, - 'epsilon': epsilon, - 'decayRate': decay_rate + self.inputs = { + 'Param': param, + 'Grad': grad, + 'Moment': moment, + 'LearningRate': learning_rate } + self.attrs = {'epsilon': epsilon, 'decayRate': decay_rate} + moment_out = decay_rate * moment + (1 - decay_rate) * grad * grad param_out = param - learning_rate * grad / (np.sqrt(moment_out) + epsilon) From 42e7fe05a23067677fe7cf552e9534e329886fbb Mon Sep 17 00:00:00 2001 From: Abhinav Arora Date: Mon, 2 Oct 2017 20:08:06 -0700 Subject: [PATCH 165/342] Changing learning rate from attribute to input(float) (#4568) * Changing learning rate from attribute to input(float) * Removing obsolete code --- paddle/operators/sgd_op.cc | 4 +++- paddle/operators/sgd_op.h | 2 +- paddle/pybind/pybind.cc | 7 +++++++ python/paddle/v2/framework/tests/op_test.py | 17 +++++++++++------ python/paddle/v2/framework/tests/test_sgd_op.py | 3 +-- 5 files changed, 23 insertions(+), 10 deletions(-) diff --git a/paddle/operators/sgd_op.cc b/paddle/operators/sgd_op.cc index 3bce95535c..8f9eae4186 100644 --- a/paddle/operators/sgd_op.cc +++ b/paddle/operators/sgd_op.cc @@ -27,6 +27,8 @@ class SGDOp : public framework::OperatorWithKernel { "Input(param) of SGDOp should not be null."); PADDLE_ENFORCE(ctx->HasInput("grad"), "Input(grad) of SGDOp should not be null."); + PADDLE_ENFORCE(ctx->HasInput("learning_rate"), + "Input(learning_rate) of SGDOp should not be null."); PADDLE_ENFORCE(ctx->HasOutput("param_out"), "Output(param_out) of SGDOp should not be null."); @@ -42,9 +44,9 @@ class SGDOpMaker : public framework::OpProtoAndCheckerMaker { SGDOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("param", "input parameter"); + AddInput("learning_rate", "learning rate of sgd"); AddInput("grad", "input gradient"); AddOutput("param_out", "output parameter"); - AddAttr("learning_rate", "learning rate of sgd"); AddComment(R"DOC( Simplest sgd algorithm. diff --git a/paddle/operators/sgd_op.h b/paddle/operators/sgd_op.h index a3fe330894..977d201ced 100644 --- a/paddle/operators/sgd_op.h +++ b/paddle/operators/sgd_op.h @@ -31,7 +31,7 @@ class SGDOpKernel : public framework::OpKernel { auto param = ctx.Input("param"); auto grad = ctx.Input("grad"); auto param_out = ctx.Output("param_out"); - float lr = ctx.Attr("learning_rate"); + float lr = *ctx.Input("learning_rate"); param_out->mutable_data(ctx.GetPlace()); diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc index f4121e9d71..d480427f59 100644 --- a/paddle/pybind/pybind.cc +++ b/paddle/pybind/pybind.cc @@ -143,6 +143,13 @@ All parameter, weight, gradient are variables in Paddle. .def("set_int", [](Variable &var, int val) -> void { *var.GetMutable() = val; }) .def("get_int", [](const Variable &var) -> int { return var.Get(); }) + .def("is_float", [](const Variable &var) { return var.IsType(); }) + .def("set_float", + [](Variable &var, float val) -> void { + *var.GetMutable() = val; + }) + .def("get_float", + [](const Variable &var) -> float { return var.Get(); }) .def("get_tensor", [](Variable &self) -> LoDTensor * { return self.GetMutable(); diff --git a/python/paddle/v2/framework/tests/op_test.py b/python/paddle/v2/framework/tests/op_test.py index 75df2eeddf..81067f38bb 100644 --- a/python/paddle/v2/framework/tests/op_test.py +++ b/python/paddle/v2/framework/tests/op_test.py @@ -46,12 +46,17 @@ def create_op(scope, op_type, inputs, outputs, attrs): def set_input(scope, op, inputs, place): def __set_input__(var_name, var): - tensor = scope.find_var(var_name).get_tensor() - if isinstance(var, tuple): - tensor.set_lod(var[1]) - var = var[0] - tensor.set_dims(var.shape) - tensor.set(var, place) + if isinstance(var, tuple) or isinstance(var, np.ndarray): + tensor = scope.find_var(var_name).get_tensor() + if isinstance(var, tuple): + tensor.set_lod(var[1]) + var = var[0] + tensor.set_dims(var.shape) + tensor.set(var, place) + elif isinstance(var, float): + scope.find_var(var_name).set_float(var) + elif isinstance(var, int): + scope.find_var(var_name).set_int(var) for in_name, in_dup in Operator.get_op_inputs(op.type()): if in_name in inputs: diff --git a/python/paddle/v2/framework/tests/test_sgd_op.py b/python/paddle/v2/framework/tests/test_sgd_op.py index 64e54d1500..f1125f4edb 100644 --- a/python/paddle/v2/framework/tests/test_sgd_op.py +++ b/python/paddle/v2/framework/tests/test_sgd_op.py @@ -10,8 +10,7 @@ class TestSGDOp(OpTest): g = np.random.random((102, 105)).astype("float32") lr = 0.1 - self.inputs = {'param': w, 'grad': g} - self.attrs = {'learning_rate': lr} + self.inputs = {'param': w, 'grad': g, 'learning_rate': lr} self.outputs = {'param_out': w - lr * g} def test_check_output(self): From b5dbe88b5ab504f88c6e7eaaa8b27d3965701478 Mon Sep 17 00:00:00 2001 From: qijun Date: Mon, 2 Oct 2017 20:26:17 -0700 Subject: [PATCH 166/342] follow comments --- paddle/framework/CMakeLists.txt | 2 +- paddle/framework/executor.cc | 159 +++--------------------------- paddle/framework/executor.h | 14 ++- paddle/framework/executor_test.cc | 12 ++- paddle/platform/CMakeLists.txt | 2 + paddle/platform/device.cc | 59 +++++++++++ paddle/platform/device.h | 45 +++++++++ 7 files changed, 139 insertions(+), 154 deletions(-) create mode 100644 paddle/platform/device.cc create mode 100644 paddle/platform/device.h diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt index 984fc62aa3..506d0f9833 100644 --- a/paddle/framework/CMakeLists.txt +++ b/paddle/framework/CMakeLists.txt @@ -44,5 +44,5 @@ add_custom_command(TARGET framework_py_proto POST_BUILD cc_library(backward SRCS backward.cc DEPS net_op) cc_test(backward_test SRCS backward_test.cc DEPS backward recurrent_op device_context) -cc_library(executor SRCS executor.cc DEPS op_registry device_context scope framework_proto) +cc_library(executor SRCS executor.cc DEPS op_registry device scope framework_proto) cc_test(executor_test SRCS executor_test.cc DEPS executor) diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc index ebe3259bc0..57e177bb0a 100644 --- a/paddle/framework/executor.cc +++ b/paddle/framework/executor.cc @@ -15,162 +15,31 @@ limitations under the License. */ #include "paddle/framework/executor.h" #include #include "paddle/framework/op_registry.h" -#include "paddle/framework/operator.h" #include "paddle/framework/scope.h" -#include "paddle/platform/device_context.h" namespace paddle { namespace framework { -class LinearListView; -class GraphView; - -// Immutable view of a ProgramDesc organized for efficient execution. -class ProgramDescView { - public: - virtual ~ProgramDescView() {} - virtual void Initialize(const ProgramDesc*) = 0; - static ProgramDescView* Create(bool is_linear); -}; - -class LinearListView : public ProgramDescView { - public: - void Initialize(const ProgramDesc*) override; - - private: - std::vector> ops_; -}; - -class GraphView : public ProgramDescView { - public: - void Initialize(const ProgramDesc*) override; -}; - -ProgramDescView* ProgramDescView::Create(bool is_linear) { - if (is_linear) { - return new LinearListView(); - } else { - return new GraphView(); - } -} - -void LinearListView::Initialize(const ProgramDesc* pdesc) { - // get a LinearView of ProgramDesc - for (auto& block_desc : pdesc->blocks()) { - for (auto& op_desc : block_desc.ops()) { - ops_.emplace_back(OpRegistry::CreateOp(op_desc)); - } +Executor::Executor(const std::vector& places) { + devices_.resize(places.size()); + for (size_t i = 0; i < places.size(); i++) { + devices_[i] = platform::GetDevice(places[i]); } } -void GraphView::Initialize(const ProgramDesc* pdesc) { - // get a GraphView of ProgramDesc -} - -struct Device { - platform::CPUDeviceContext* cpu_device_context; -#ifndef PADDLE_ONLY_CPU - platform::CUDADeviceContext* cuda_device_context; -#endif - -#ifndef PADDLE_ONLY_CPU - Device(platform::CPUDeviceContext* cpu, platform::CUDADeviceContext* gpu) - : cpu_device_context(cpu), cuda_device_context(gpu) {} -#else - explicit Device(platform::CPUDeviceContext* cpu) : cpu_device_context(cpu) {} -#endif -}; - -class ExecutorImpl : public Executor { - public: - ExecutorImpl(Scope* scope, const Device* device, const ProgramDesc* pdesc, - bool is_linear) - : scope_(scope), - device_(device), - program_desc_(pdesc), - view_(ProgramDescView::Create(is_linear)) {} - - virtual ~ExecutorImpl() { - if (view_) delete view_; - } - - void Run() override; - - void Initialize(); - - private: - Scope* scope_; - const Device* device_; - const ProgramDesc* program_desc_; - ProgramDescView* view_; -}; - -template -std::unique_ptr make_unique(Args&&... args) { - return std::unique_ptr(new T(std::forward(args)...)); -} - -platform::CPUDeviceContext* GetCPUDeviceContext( - const platform::CPUPlace& place) { - static std::unique_ptr g_cpu_device_context = - make_unique(place); - return g_cpu_device_context.get(); -} - -#ifndef PADDLE_ONLY_CPU -platform::CUDADeviceContext* GetCUDADeviceContext( - const platform::GPUPlace& place) { - static std::unique_ptr g_cuda_device_context = - make_unique(place); - return g_cuda_device_context.get(); -} -#endif - -Device* GetDevice(const platform::Place& place) { - platform::CPUPlace cpu_place; -#ifndef PADDLE_ONLY_CPU - if (platform::is_gpu_place(place)) { - platform::GPUPlace gpu_place = boost::get(place); - static std::unique_ptr g_device = make_unique( - GetCPUDeviceContext(cpu_place), GetCUDADeviceContext(gpu_place)); - return g_device.get(); - } else { - static std::unique_ptr g_device = - make_unique(GetCPUDeviceContext(cpu_place), nullptr); - return g_device.get(); - } -#else - static std::unique_ptr g_device = - make_unique(GetCPUDeviceContext(cpu_place)); - return g_device.get(); -#endif -} - -framework::Scope* GetScope() { - static std::unique_ptr g_scope = - make_unique(); - return g_scope.get(); -} - -Executor* NewLocalExecutor(const platform::Place& place, - const ProgramDesc& pdesc, bool is_linear) { - return new ExecutorImpl(GetScope(), GetDevice(place), &pdesc, is_linear); -} - -void ExecutorImpl::Run() { +void Executor::Run(const ProgramDesc& pdesc, Scope* scope, + std::vector* outputs) { // operators running - scope_->NewVar(); - device_->cpu_device_context->Wait(); + Scope& local_scope = scope->NewScope(); + local_scope.NewVar(); + for (auto device : devices_) { + device->cpu_device_context->Wait(); #ifndef PADDLE_ONLY_CPU - if (device_->cuda_device_context) { - device_->cuda_device_context->Wait(); - } + if (device->cuda_device_context) { + device->cuda_device_context->Wait(); + } #endif -} - -void ExecutorImpl::Initialize() { - // Initialize the ProgramDescView - view_->Initialize(program_desc_); + } } } // namespace framework diff --git a/paddle/framework/executor.h b/paddle/framework/executor.h index 25ef2d4d48..5d6d7f37a6 100644 --- a/paddle/framework/executor.h +++ b/paddle/framework/executor.h @@ -15,18 +15,22 @@ limitations under the License. */ #pragma once #include "paddle/framework/framework.pb.h" -#include "paddle/platform/place.h" +#include "paddle/framework/scope.h" +#include "paddle/framework/tensor.h" +#include "paddle/platform/device.h" namespace paddle { namespace framework { class Executor { public: - virtual ~Executor() {} - virtual void Run() = 0; -}; + explicit Executor(const std::vector& places); + ~Executor() {} + void Run(const ProgramDesc&, Scope*, std::vector*); -Executor* NewLocalExecutor(const platform::Place&, const ProgramDesc&, bool); + private: + std::vector devices_; +}; } // namespace framework } // namespace paddle diff --git a/paddle/framework/executor_test.cc b/paddle/framework/executor_test.cc index 6f8ca38768..51d2dfc1c3 100644 --- a/paddle/framework/executor_test.cc +++ b/paddle/framework/executor_test.cc @@ -19,9 +19,15 @@ using namespace paddle::platform; using namespace paddle::framework; TEST(Executor, Init) { + CPUPlace cpu_place1, cpu_place2; + std::vector places; + places.push_back(cpu_place1); + places.push_back(cpu_place2); + Executor* executor = new Executor(places); + ProgramDesc pdesc; - CPUPlace cpu_place; - Executor* executor = NewLocalExecutor(cpu_place, pdesc, true); - executor->Run(); + Scope s; + std::vector* outputs{nullptr}; + executor->Run(pdesc, &s, outputs); delete executor; } \ No newline at end of file diff --git a/paddle/platform/CMakeLists.txt b/paddle/platform/CMakeLists.txt index daf519b91d..b581937393 100644 --- a/paddle/platform/CMakeLists.txt +++ b/paddle/platform/CMakeLists.txt @@ -23,5 +23,7 @@ cc_library(device_context SRCS device_context.cc DEPS memory buddy_allocator system_allocator memory_block meta_data meta_cache place eigen3 ${GPU_CTX_DEPS}) nv_test(device_context_test SRCS device_context_test.cc DEPS device_context gpu_info) +cc_library(device SRCS device.cc DEPS device_context) + nv_test(cudnn_helper_test SRCS cudnn_helper_test.cc DEPS dynload_cuda) nv_test(transform_test SRCS transform_test.cu DEPS paddle_memory place device_context) diff --git a/paddle/platform/device.cc b/paddle/platform/device.cc new file mode 100644 index 0000000000..7acd87c8c3 --- /dev/null +++ b/paddle/platform/device.cc @@ -0,0 +1,59 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/platform/device.h" + +namespace paddle { +namespace platform { + +template +std::unique_ptr make_unique(Args&&... args) { + return std::unique_ptr(new T(std::forward(args)...)); +} + +CPUDeviceContext* GetCPUDeviceContext(const CPUPlace& place) { + static std::unique_ptr g_cpu_device_context = + make_unique(place); + return g_cpu_device_context.get(); +} + +#ifndef PADDLE_ONLY_CPU +CUDADeviceContext* GetCUDADeviceContext(const GPUPlace& place) { + static std::unique_ptr g_cuda_device_context = + make_unique(place); + return g_cuda_device_context.get(); +} +#endif + +Device* GetDevice(const Place& place) { + CPUPlace cpu_place; +#ifndef PADDLE_ONLY_CPU + if (is_gpu_place(place)) { + GPUPlace gpu_place = boost::get(place); + static std::unique_ptr g_device = make_unique( + GetCPUDeviceContext(cpu_place), GetCUDADeviceContext(gpu_place)); + return g_device.get(); + } else { + static std::unique_ptr g_device = + make_unique(GetCPUDeviceContext(cpu_place), nullptr); + return g_device.get(); + } +#else + static std::unique_ptr g_device = + make_unique(GetCPUDeviceContext(cpu_place)); + return g_device.get(); +#endif +} +} // namespace platform +} // namespace paddle diff --git a/paddle/platform/device.h b/paddle/platform/device.h new file mode 100644 index 0000000000..b1bb8073cf --- /dev/null +++ b/paddle/platform/device.h @@ -0,0 +1,45 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include "paddle/platform/device_context.h" +#include "paddle/platform/place.h" + +namespace paddle { +namespace platform { + +struct Device { + CPUDeviceContext* cpu_device_context; +#ifndef PADDLE_ONLY_CPU + CUDADeviceContext* cuda_device_context; +#endif + +#ifndef PADDLE_ONLY_CPU + Device(CPUDeviceContext* cpu, CUDADeviceContext* gpu) + : cpu_device_context(cpu), cuda_device_context(gpu) {} +#else + explicit Device(CPUDeviceContext* cpu) : cpu_device_context(cpu) {} +#endif +}; + +CPUDeviceContext* GetCPUDeviceContext(const platform::CPUPlace& place); + +#ifndef PADDLE_ONLY_CPU +CUDADeviceContext* GetCUDADeviceContext(const platform::GPUPlace& place); +#endif + +Device* GetDevice(const platform::Place& place); +} // namespace platform +} // namespace paddle From ff7fdb7d705a34e224561cb53933b5477fef644b Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Tue, 3 Oct 2017 10:28:52 -0700 Subject: [PATCH 167/342] Add `CreateBackwardOp` function --- paddle/framework/backward.cc | 56 +++++++++++++++++++++++++++++++++ paddle/framework/backward.h | 5 +++ paddle/framework/op_desc.cc | 20 ++++++++++++ paddle/framework/op_desc.h | 7 +++++ paddle/framework/op_registry.cc | 5 +++ paddle/framework/op_registry.h | 2 ++ 6 files changed, 95 insertions(+) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index 0ec18de5b8..1b4c5c025e 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -222,5 +222,61 @@ std::unique_ptr Backward( return BackwardRecursive(forwardOp, no_grad_names, uid); } +// ==================================== // + +static bool AllGradInSet(const std::vector& names, + const std::unordered_set& set) { + for (const std::string& name : names) { + if (!set.count(GradVarName(name))) { + return false; + } + } + return true; +} + +std::vector CreatBackwardOps( + const OpDescBind& op_desc, unordered_map& no_grad_vars) { + std::vector grad_op_descs; + // All input gradients of forwarding operator do not need to calculat. + if (AllGradInSet(op_desc_.InputNames(), kGradVarSuffix, no_grad_vars)) { + return grad_op_descs; // empty vector + } + // All output gradients of forwarding operator do not need to calculate. + const std::vector& outputs = op_desc_.OutputNames(); + if (AllGradInSet(outputs, kGradVarSuffix, no_grad_vars)) { + for (const std::string& name : outputs) { + no_grad_vars.insert(GradVarName(name)); + } + return grad_op_descs; // empty vector + } + + grad_op_descs = OpRegistry::CreateGradOpDescs(op_desc); + + std::vector fill_zeros_ops; + for (OpDescBind& desc : grad_op_descs) { + for (const std::string& in_name : desc.InputNames()) { + if (no_grad_vars.count(in_name)) { + std::string prefix = in_name.substr( + 0, in_name.size() - sizeof(kGradVarSuffix) / sizeof(char) + 1); + std::string new_name = prefix + kZeroVarSuffix; + desc.Rename(in_name, new_name); + OpDescBind op_desc_bind( + {"fill_zeros_like", {{"X", {prefix}}}, {{"Y", {new_name}}}, {}}); + fill_zeros_ops.push_back(op_desc_bind); + } + } + for (const std::string& out_name : desc.OutputName()) { + if (no_grad_vars.count(out_name)) { + desc.Rename(out_name, kEmptyVarName); + } + } + } + grad_op_descs.insert(grad_op_descs.begin(), fill_zeros_ops.begin(), + fill_zeros_ops.end()); + + // TODO (fengjiayi): RNN op + return grad_op_descs; +} + } // namespace framework } // namespace paddle diff --git a/paddle/framework/backward.h b/paddle/framework/backward.h index 1ecf69881b..6aeddafb41 100644 --- a/paddle/framework/backward.h +++ b/paddle/framework/backward.h @@ -23,5 +23,10 @@ namespace framework { extern std::unique_ptr Backward( const OperatorBase& forwardOp, const std::unordered_set& no_grad_vars); + +extern void AppendBackwardOps( + BlockDescBind& block_desc, + const std::unordered_set& no_grad_vars); + } // namespace framework } // namespace paddle diff --git a/paddle/framework/op_desc.cc b/paddle/framework/op_desc.cc index 0c12c55dc0..e98f8f1154 100644 --- a/paddle/framework/op_desc.cc +++ b/paddle/framework/op_desc.cc @@ -18,6 +18,15 @@ limitations under the License. */ namespace paddle { namespace framework { +OpDescBind::OpDescBind(const std::string &type, const VariableNameMap &inputs, + const VariableNameMap &outputs, + const AttributeMap &attrs) { + op_desc_.set_type(type); + inputs_ = inputs; + outputs_ = outputs; + attrs_ = attrs; +} + OpDesc *OpDescBind::Proto() { Sync(); return &op_desc_; @@ -112,6 +121,17 @@ const std::unordered_map &OpDescBind::GetAttrMap() return attrs_; } +void Rename(const std::string &old_name, const std::string &new_name) { + for (std : string &input : inputs_) { + std::replace(input.second.begin(), input.second.end(), old_name, new_name); + } + for (std::string &output : outputs_) { + std::repalce(output.second.begin(), output.second.end(), old_name, + new_name); + } + need_update_ = true; +} + void OpDescBind::Sync() { if (need_update_) { this->op_desc_.mutable_inputs()->Clear(); diff --git a/paddle/framework/op_desc.h b/paddle/framework/op_desc.h index 0cf7d13971..a32e6d03f7 100644 --- a/paddle/framework/op_desc.h +++ b/paddle/framework/op_desc.h @@ -26,6 +26,11 @@ class BlockDescBind; class OpDescBind { public: + OpDescBind() {} + + OpDescBind(const std::string &type, const VariableNameMap &inputs, + const VariableNameMap &outputs, const AttributeMap &attrs); + OpDesc *Proto(); std::string Type() const { return op_desc_.type(); } @@ -67,6 +72,8 @@ class OpDescBind { int GetBlockAttr(const std::string &name) const; + void Rename(const std::string &old_name, const std::string &new_name); + // Only be used in C++ const std::unordered_map &GetAttrMap() const; diff --git a/paddle/framework/op_registry.cc b/paddle/framework/op_registry.cc index b0e85dd49f..fe3228ce5b 100644 --- a/paddle/framework/op_registry.cc +++ b/paddle/framework/op_registry.cc @@ -57,5 +57,10 @@ std::unique_ptr OpRegistry::CreateGradOp(const OperatorBase& op) { return std::unique_ptr(BuildGradOp(&op)); } +static std::vector CreateGradOpDescs(const OpDescBind& op_desc) { + auto& info = OpInfoMap::Instance().Get(op_desc.Type()); + return info.grad_op_maker_(op_desc); +} + } // namespace framework } // namespace paddle diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h index 4ee2c7d275..c80b6e9630 100644 --- a/paddle/framework/op_registry.h +++ b/paddle/framework/op_registry.h @@ -68,6 +68,8 @@ class OpRegistry { static std::unique_ptr CreateOp(const OpDesc& op_desc); static std::unique_ptr CreateGradOp(const OperatorBase& op); + + static std::vector CreateGradOpDescs(const OpDescBind& op_desc); }; class Registrar { From 494b3bda7d784315433b85826c9cbd18cac5723a Mon Sep 17 00:00:00 2001 From: dongzhihong Date: Tue, 3 Oct 2017 10:28:57 -0700 Subject: [PATCH 168/342] fix backward test case --- paddle/framework/backward.cc | 2 +- paddle/framework/backward_test.cc | 15 +++++++++------ 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index 35759f8e78..2c13ddd8d0 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -159,7 +159,7 @@ static std::unique_ptr BackwardRecursive( insert_position.push_back( {dup_op.back(), OpRegistry::CreateOp( - "add", {{"X", {insert_add_x}}, {"X", {insert_add_y}}}, + "sum", {{"X", {insert_add_x}}, {"X", {insert_add_y}}}, {{"Out", {insert_add_out}}}, {})}); } } diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index 6932f5b989..a36e7bde8c 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -133,15 +133,18 @@ class FillZeroOpMaker : public OpProtoAndCheckerMaker { } }; -class AddOpMaker : public OpProtoAndCheckerMaker { +class SumOpMaker : public framework::OpProtoAndCheckerMaker { public: - AddOpMaker(OpProto *proto, OpAttrChecker *op_checker) + SumOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "x").AsDuplicable(); - AddOutput("Out", "out"); + AddInput("X", "the input tensors of sum operator.") + .AsDuplicable() + .NotInGradient(); + AddOutput("Out", "the output tensor of sum operator.").NotInGradient(); AddComment(""); } }; + } // namespace framework } // namespace paddle @@ -154,7 +157,7 @@ REGISTER_OP(mul, f::NOP, f::MulOpMaker, mul_grad, f::NOP); REGISTER_OP(sigmoid, f::NOP, f::SigmoidOpMaker, sigmoid_grad, f::NOP); REGISTER_OP_WITHOUT_GRADIENT(nograd, f::NOP, f::NoGradOpMaker); REGISTER_OP_WITHOUT_GRADIENT(fill_zeros_like, f::NOP, f::FillZeroOpMaker); -REGISTER_OP(add, f::NOP, f::AddOpMaker, add_grad, f::NOP); +REGISTER_OP(sum, f::NOP, f::SumOpMaker, sum_grad, f::NOP); REGISTER_OP_WITHOUT_GRADIENT(fc, f::FcOp, f::FcOpMaker); REGISTER_OP(many_output_op, f::NOP, f::ManyOutputOpMaker, many_output_op_grad, f::NOP); @@ -283,7 +286,7 @@ TEST(Backward, net_shared_weight) { ASSERT_TRUE(bwd->IsNetOp()); auto bwd_net = static_cast(bwd.get()); ASSERT_EQ(3UL, bwd_net->ops_.size()); - ASSERT_EQ("add", bwd_net->ops_[2]->Type()); + ASSERT_EQ("sum", bwd_net->ops_[2]->Type()); } TEST(Backward, op_register_grad_not_for_network) { From 6e2f96841a5d3e64dc1c4eabb85b7984099b1d0e Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Tue, 3 Oct 2017 17:36:29 +0000 Subject: [PATCH 169/342] simple test --- paddle/framework/executor.cc | 30 ++++++++++++++++++------ paddle/framework/executor_test.cc | 39 ++++++++++++++++++++++++++++++- 2 files changed, 61 insertions(+), 8 deletions(-) diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc index ebe3259bc0..9e7f6f88df 100644 --- a/paddle/framework/executor.cc +++ b/paddle/framework/executor.cc @@ -14,6 +14,7 @@ limitations under the License. */ #include "paddle/framework/executor.h" #include +#include #include "paddle/framework/op_registry.h" #include "paddle/framework/operator.h" #include "paddle/framework/scope.h" @@ -22,6 +23,8 @@ limitations under the License. */ namespace paddle { namespace framework { +// using std::unique_ptr op_ptr; + class LinearListView; class GraphView; @@ -158,14 +161,27 @@ Executor* NewLocalExecutor(const platform::Place& place, } void ExecutorImpl::Run() { - // operators running - scope_->NewVar(); - device_->cpu_device_context->Wait(); -#ifndef PADDLE_ONLY_CPU - if (device_->cuda_device_context) { - device_->cuda_device_context->Wait(); + // TODO(tonyyang-svail): only runs the first block + auto& block = program_desc_->blocks(0); + + for (auto& var : block.vars()) { + scope_->NewVar(var.name()); } -#endif + + // std::vector ops; + for (auto& op_desc : block.ops()) { + auto op = framework::OpRegistry::CreateOp(op_desc); + op->InferShape(device_->cpu_device_context); + op->Compute(); + } + + // TODO(tonyyang-svail): need to test gpu device + // device_->cpu_device_context->Wait(); + // #ifndef PADDLE_ONLY_CPU + // if (device_->cuda_device_context) { + // device_->cuda_device_context->Wait(); + // } + // #endif } void ExecutorImpl::Initialize() { diff --git a/paddle/framework/executor_test.cc b/paddle/framework/executor_test.cc index 6f8ca38768..9ab1b65803 100644 --- a/paddle/framework/executor_test.cc +++ b/paddle/framework/executor_test.cc @@ -13,6 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/framework/executor.h" +#include "paddle/framework/attribute.h" + #include "gtest/gtest.h" using namespace paddle::platform; @@ -20,8 +22,43 @@ using namespace paddle::framework; TEST(Executor, Init) { ProgramDesc pdesc; + + auto root_block = pdesc.add_blocks(); + root_block->set_idx(0); + root_block->set_parent_idx(-1); + + auto a = root_block->add_vars(); + a->set_name("a"); + auto a_lt = a->mutable_lod_tensor(); + a_lt->set_data_type(paddle::framework::DataType::FP32); + a_lt->add_dims(640); + a_lt->add_dims(640); + + auto b = root_block->add_vars(); + b->set_name("b"); + auto b_lt = b->mutable_lod_tensor(); + b_lt->set_data_type(paddle::framework::DataType::FP32); + b_lt->add_dims(640); + b_lt->add_dims(640); + + auto c = root_block->add_vars(); + c->set_name("c"); + auto c_lt = c->mutable_lod_tensor(); + c_lt->set_data_type(paddle::framework::DataType::FP32); + c_lt->add_dims(640); + c_lt->add_dims(640); + + auto op1 = root_block->add_ops(); + op1->set_type("elementwise_add"); + auto X = op1->add_inputs(); + X->set_parameter("X"); + X->add_arguments("a"); + auto Y = op1->add_inputs(); + Y->set_parameter("Y"); + Y->add_arguments("b"); + CPUPlace cpu_place; Executor* executor = NewLocalExecutor(cpu_place, pdesc, true); executor->Run(); delete executor; -} \ No newline at end of file +} From 2d876b864395513de8db52db944ee5e8150d2730 Mon Sep 17 00:00:00 2001 From: zchen0211 Date: Tue, 3 Oct 2017 10:54:22 -0700 Subject: [PATCH 170/342] gather scatter fix according to google style --- paddle/operators/cond_op.cc | 4 ++-- paddle/operators/gather.cu.h | 14 +++++++------- paddle/operators/gather.h | 18 +++++++++--------- paddle/operators/gather_op.cu | 4 ++-- paddle/operators/gather_op.h | 4 ++-- paddle/operators/gather_test.cc | 2 +- paddle/operators/scatter.cu.h | 18 +++++++++--------- paddle/operators/scatter.h | 16 +++++++--------- paddle/operators/scatter_op.cu | 4 ++-- paddle/operators/scatter_op.h | 4 ++-- paddle/operators/scatter_test.cc | 2 +- 11 files changed, 44 insertions(+), 46 deletions(-) diff --git a/paddle/operators/cond_op.cc b/paddle/operators/cond_op.cc index 7d7f1ba3b1..2737104a20 100644 --- a/paddle/operators/cond_op.cc +++ b/paddle/operators/cond_op.cc @@ -126,7 +126,7 @@ void CondOp::PrepareDataForSubnet( dim[0] = index_tensors[i].dims()[0]; tensor_child->mutable_data(dim, platform::CPUPlace()); - CPUGather(dev_ctx, tensor_parent, &index_tensors[i], tensor_child); + CPUGather(dev_ctx, *tensor_parent, index_tensors[i], tensor_child); } } @@ -187,7 +187,7 @@ void CondOp::MergeDataFromSubnet(const framework::Scope& scope, Variable* var_child = sub_scopes[i]->FindVar(output); PADDLE_ENFORCE_NOT_NULL(var_child); auto* tensor_child = &var_child->Get(); - ScatterAssign(dev_ctx, tensor_child, &index_tensors[i], + ScatterAssign(dev_ctx, *tensor_child, index_tensors[i], tensor_parent); } } diff --git a/paddle/operators/gather.cu.h b/paddle/operators/gather.cu.h index 2ae11376a2..8d04ecd284 100644 --- a/paddle/operators/gather.cu.h +++ b/paddle/operators/gather.cu.h @@ -46,14 +46,14 @@ __global__ void GatherCUDAKernel(const T* params, const int* indices, T* output, * return: output tensor */ template -void GPUGather(const platform::DeviceContext& ctx, const Tensor* src, - const Tensor* index, Tensor* output) { +void GPUGather(const platform::DeviceContext& ctx, const Tensor& src, + const Tensor& index, Tensor* output) { // PADDLE_ENFORCE(platform::is_gpu_place(place)); // check index of shape 1-D - PADDLE_ENFORCE(index->dims().size() == 1); - int index_size = index->dims()[0]; + PADDLE_ENFORCE(index.dims().size() == 1); + int index_size = index.dims()[0]; - auto src_dims = src->dims(); + auto src_dims = src.dims(); framework::DDim output_dims(src_dims); output_dims[0] = index_size; @@ -61,8 +61,8 @@ void GPUGather(const platform::DeviceContext& ctx, const Tensor* src, int slice_size = 1; for (int i = 1; i < src_dims.size(); ++i) slice_size *= src_dims[i]; - const T* p_src = src->data(); - const int* p_index = index->data(); + const T* p_src = src.data(); + const int* p_index = index.data(); T* p_output = output->data(); int block = 512; diff --git a/paddle/operators/gather.h b/paddle/operators/gather.h index 1e39a6da27..052db49cb3 100644 --- a/paddle/operators/gather.h +++ b/paddle/operators/gather.h @@ -24,6 +24,8 @@ limitations under the License. */ namespace paddle { namespace operators { +using framework::Tensor; + /** * A thin wrapper for gathering on cpu tensor * Return a new tensor from source tensor, gathered according to index @@ -32,21 +34,19 @@ namespace operators { * return: output tensor */ template -void CPUGather(const platform::DeviceContext& ctx, - const paddle::framework::Tensor* src, - const paddle::framework::Tensor* index, - paddle::framework::Tensor* output) { +void CPUGather(const platform::DeviceContext& ctx, const Tensor& src, + const Tensor& index, Tensor* output) { PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace())); // check index of shape 1-D - PADDLE_ENFORCE(index->dims().size() == 1); - int index_size = index->dims()[0]; + PADDLE_ENFORCE(index.dims().size() == 1); + int index_size = index.dims()[0]; - auto src_dims = src->dims(); + auto src_dims = src.dims(); framework::DDim output_dims(src_dims); output_dims[0] = index_size; - const T* p_src = src->data(); - const int* p_index = index->data(); + const T* p_src = src.data(); + const int* p_index = index.data(); T* p_output = output->data(); // slice size diff --git a/paddle/operators/gather_op.cu b/paddle/operators/gather_op.cu index 9937be5915..92219d6a43 100644 --- a/paddle/operators/gather_op.cu +++ b/paddle/operators/gather_op.cu @@ -32,7 +32,7 @@ class GatherOpCUDAKernel : public framework::OpKernel { output->mutable_data(ctx.GetPlace()); - GPUGather(ctx.device_context(), x, index, output); + GPUGather(ctx.device_context(), *x, *index, output); } }; @@ -52,7 +52,7 @@ class GatherGradOpCUDAKernel : public framework::OpKernel { auto place = ctx.GetEigenDevice(); dxt.device(place) = dxt.constant(static_cast(0)); - GPUScatterAssign(ctx.device_context(), dO, Index, dX); + GPUScatterAssign(ctx.device_context(), *dO, *Index, dX); } }; diff --git a/paddle/operators/gather_op.h b/paddle/operators/gather_op.h index 5bd2c36f7b..8276ed0d3d 100644 --- a/paddle/operators/gather_op.h +++ b/paddle/operators/gather_op.h @@ -36,7 +36,7 @@ class GatherOpKernel : public framework::OpKernel { output->mutable_data(ctx.GetPlace()); - CPUGather(ctx.device_context(), x, index, output); + CPUGather(ctx.device_context(), *x, *index, output); } }; @@ -56,7 +56,7 @@ class GatherGradientOpKernel : public framework::OpKernel { auto place = ctx.GetEigenDevice(); dxt.device(place) = dxt.constant(static_cast(0)); - ScatterAssign(ctx.device_context(), dO, Index, dX); + ScatterAssign(ctx.device_context(), *dO, *Index, dX); } }; diff --git a/paddle/operators/gather_test.cc b/paddle/operators/gather_test.cc index d8bf8dd9a4..cbd86b8796 100644 --- a/paddle/operators/gather_test.cc +++ b/paddle/operators/gather_test.cc @@ -43,7 +43,7 @@ TEST(Gather, GatherData) { auto* cpu_place = new paddle::platform::CPUPlace(); paddle::platform::CPUDeviceContext ctx(*cpu_place); - CPUGather(ctx, src, index, output); + CPUGather(ctx, *src, *index, output); for (int i = 0; i < 4; ++i) EXPECT_EQ(p_output[i], i + 4); for (int i = 4; i < 8; ++i) EXPECT_EQ(p_output[i], i - 4); diff --git a/paddle/operators/scatter.cu.h b/paddle/operators/scatter.cu.h index f4a3965d94..d95436be4f 100644 --- a/paddle/operators/scatter.cu.h +++ b/paddle/operators/scatter.cu.h @@ -19,6 +19,8 @@ namespace paddle { namespace operators { +using Tensor = framework::Tensor; + #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ i += blockDim.x * gridDim.x) @@ -45,16 +47,14 @@ __global__ void ScatterCUDAKernel(const T* params, const int* indices, * return: output tensor */ template -void GPUScatterAssign(const platform::DeviceContext& ctx, - const paddle::framework::Tensor* src, - const paddle::framework::Tensor* index, - paddle::framework::Tensor* output) { +void GPUScatterAssign(const platform::DeviceContext& ctx, const Tensor& src, + const Tensor& index, Tensor* output) { // PADDLE_ENFORCE(platform::is_gpu_place(place)); // check index of shape 1-D - PADDLE_ENFORCE(index->dims().size() == 1); - int index_size = index->dims()[0]; + PADDLE_ENFORCE(index.dims().size() == 1); + int index_size = index.dims()[0]; - auto src_dims = src->dims(); + auto src_dims = src.dims(); framework::DDim output_dims(src_dims); output_dims[0] = index_size; @@ -62,8 +62,8 @@ void GPUScatterAssign(const platform::DeviceContext& ctx, int slice_size = 1; for (int i = 1; i < src_dims.size(); ++i) slice_size *= src_dims[i]; - const T* p_src = src->data(); - const int* p_index = index->data(); + const T* p_src = src.data(); + const int* p_index = index.data(); T* p_output = output->data(); int block = 512; diff --git a/paddle/operators/scatter.h b/paddle/operators/scatter.h index 0d174d3b5b..c1fb844ebd 100644 --- a/paddle/operators/scatter.h +++ b/paddle/operators/scatter.h @@ -33,20 +33,18 @@ using Tensor = framework::Tensor; * return: output tensor */ template -void ScatterAssign(const platform::DeviceContext& ctx, - const paddle::framework::Tensor* src, - const paddle::framework::Tensor* index, - paddle::framework::Tensor* output) { +void ScatterAssign(const platform::DeviceContext& ctx, const Tensor& src, + const Tensor& index, Tensor* output) { PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace())); // check index of shape 1-D - PADDLE_ENFORCE(index->dims().size() == 1); - int index_size = index->dims()[0]; + PADDLE_ENFORCE(index.dims().size() == 1); + int index_size = index.dims()[0]; - auto src_dims = src->dims(); + auto src_dims = src.dims(); auto dst_dims = output->dims(); - const T* p_src = src->data(); - const int* p_index = index->data(); + const T* p_src = src.data(); + const int* p_index = index.data(); T* p_output = output->data(); // check src shape and dst shape should match diff --git a/paddle/operators/scatter_op.cu b/paddle/operators/scatter_op.cu index 6d13a876f9..06f4d75944 100644 --- a/paddle/operators/scatter_op.cu +++ b/paddle/operators/scatter_op.cu @@ -32,7 +32,7 @@ class ScatterOpCUDAKernel : public framework::OpKernel { Out->ShareDataWith(*Ref); - GPUScatterAssign(ctx.device_context(), Updates, Index, Out); + GPUScatterAssign(ctx.device_context(), *Updates, *Index, Out); } }; @@ -51,7 +51,7 @@ class ScatterGradOpCUDAKernel : public framework::OpKernel { dRef->ShareDataWith(*dOut); dUpdates->mutable_data(ctx.GetPlace()); // Gradient by Gather: dUpdates = dO[Index] - GPUGather(ctx.device_context(), dOut, Index, dUpdates); + GPUGather(ctx.device_context(), *dOut, *Index, dUpdates); } }; diff --git a/paddle/operators/scatter_op.h b/paddle/operators/scatter_op.h index ac04968549..6101219006 100644 --- a/paddle/operators/scatter_op.h +++ b/paddle/operators/scatter_op.h @@ -37,7 +37,7 @@ class ScatterOpKernel : public framework::OpKernel { // In place output: Out = Ref, Out[Index] += Updates Out->ShareDataWith(*Ref); // Apply ScatterUpdate: Out[index] += Updates[:] - ScatterAssign(ctx.device_context(), Updates, Index, Out); + ScatterAssign(ctx.device_context(), *Updates, *Index, Out); } }; @@ -56,7 +56,7 @@ class ScatterGradientOpKernel : public framework::OpKernel { dRef->ShareDataWith(*dOut); dUpdates->mutable_data(ctx.GetPlace()); // Gradient by Gather: dUpdates += dO[Index] - CPUGather(ctx.device_context(), dOut, Index, dUpdates); + CPUGather(ctx.device_context(), *dOut, *Index, dUpdates); } }; diff --git a/paddle/operators/scatter_test.cc b/paddle/operators/scatter_test.cc index 321bba3dad..00dbdacbfe 100644 --- a/paddle/operators/scatter_test.cc +++ b/paddle/operators/scatter_test.cc @@ -42,7 +42,7 @@ TEST(scatter, ScatterUpdate) { auto* cpu_place = new paddle::platform::CPUPlace(); paddle::platform::CPUDeviceContext ctx(*cpu_place); - ScatterAssign(ctx, src, index, output); + ScatterAssign(ctx, *src, *index, output); for (size_t i = 0; i < 4; ++i) EXPECT_EQ(p_output[i], float(0)); for (size_t i = 0; i < 4; ++i) EXPECT_EQ(output->data()[i], float(0)); From e946fc15192e7a05df42aeea0b4bf1b87fb77472 Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Tue, 3 Oct 2017 19:42:18 +0000 Subject: [PATCH 171/342] add elementwise_add --- paddle/framework/CMakeLists.txt | 2 +- paddle/framework/executor.cc | 25 +++++++++++++++++++++++++ paddle/framework/executor.h | 1 + paddle/framework/executor_test.cc | 8 +++++++- 4 files changed, 34 insertions(+), 2 deletions(-) diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt index cbd39dd095..58e78e9a6a 100644 --- a/paddle/framework/CMakeLists.txt +++ b/paddle/framework/CMakeLists.txt @@ -44,7 +44,7 @@ add_custom_command(TARGET framework_py_proto POST_BUILD cc_library(backward SRCS backward.cc DEPS net_op) cc_test(backward_test SRCS backward_test.cc DEPS backward recurrent_op device_context) -cc_library(executor SRCS executor.cc DEPS op_registry device scope framework_proto) +cc_library(executor SRCS executor.cc DEPS op_registry device scope framework_proto ${GLOB_OP_LIB}) cc_test(executor_test SRCS executor_test.cc DEPS executor) cc_library(tensor_array SRCS tensor_array.cc DEPS lod_tensor) diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc index a61f0f7162..94b9b3b350 100644 --- a/paddle/framework/executor.cc +++ b/paddle/framework/executor.cc @@ -31,6 +31,31 @@ Executor::Executor(const std::vector& places) { void Executor::Run(const ProgramDesc& pdesc, Scope* scope, std::vector* outputs) { // operators running + // TODO(tonyyang-svail): + // - only runs the first block + // - only runs on the first device + auto& block = pdesc.blocks(0); + auto& device = devices_[0]; + + for (auto& var : block.vars()) { + scope->NewVar(var.name()); + } + + // std::vector ops; + for (auto& op_desc : block.ops()) { + auto op = framework::OpRegistry::CreateOp(op_desc); + // op->InferShape(*scope); + op->Run(*scope, *device->cpu_device_context); + } + + // TODO(tonyyang-svail): need to test gpu device + // device_->cpu_device_context->Wait(); + // #ifndef PADDLE_ONLY_CPU + // if (device_->cuda_device_context) { + // device_->cuda_device_context->Wait(); + // } + // #endif + Scope& local_scope = scope->NewScope(); local_scope.NewVar(); for (auto device : devices_) { diff --git a/paddle/framework/executor.h b/paddle/framework/executor.h index 5d6d7f37a6..cdb80bc104 100644 --- a/paddle/framework/executor.h +++ b/paddle/framework/executor.h @@ -15,6 +15,7 @@ limitations under the License. */ #pragma once #include "paddle/framework/framework.pb.h" +#include "paddle/framework/op_info.h" #include "paddle/framework/scope.h" #include "paddle/framework/tensor.h" #include "paddle/platform/device.h" diff --git a/paddle/framework/executor_test.cc b/paddle/framework/executor_test.cc index 4560d6c503..11255af808 100644 --- a/paddle/framework/executor_test.cc +++ b/paddle/framework/executor_test.cc @@ -13,9 +13,15 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/framework/executor.h" +#include "gtest/gtest.h" #include "paddle/framework/attribute.h" -#include "gtest/gtest.h" +#include +#include "paddle/framework/grad_op_builder.h" +#include "paddle/framework/op_registry.h" +#include "paddle/framework/operator.h" + +USE_OP(elementwise_add); using namespace paddle::platform; using namespace paddle::framework; From 3395bf7ad0f8dd6443bb2075e67331d3152eef43 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Tue, 3 Oct 2017 12:48:03 -0700 Subject: [PATCH 172/342] Remove duplicated method in OpDesc --- paddle/framework/grad_op_desc_maker.h | 12 ++++++------ paddle/framework/op_desc.cc | 18 ------------------ paddle/framework/op_desc.h | 10 ++-------- 3 files changed, 8 insertions(+), 32 deletions(-) diff --git a/paddle/framework/grad_op_desc_maker.h b/paddle/framework/grad_op_desc_maker.h index b4b6d54bf3..e6d63e4b8a 100644 --- a/paddle/framework/grad_op_desc_maker.h +++ b/paddle/framework/grad_op_desc_maker.h @@ -44,12 +44,12 @@ class GradOpDescMakerBase { return ToGradNames(fwd_op_.Output(name)); } - std::vector InputParamNames() const { - return this->fwd_op_.InputParamNames(); + std::vector InputNames() const { + return this->fwd_op_.InputNames(); } - std::vector OutputParamNames() const { - return this->fwd_op_.OutputParamNames(); + std::vector OutputNames() const { + return this->fwd_op_.OutputNames(); } std::vector Input(const std::string& name) const { @@ -96,12 +96,12 @@ class DefaultGradOpDescMaker : public SingleGradOpDescMaker { OpDescBind grad; grad.SetType(this->GradOpType()); - for (auto& input_param : this->InputParamNames()) { + for (auto& input_param : this->InputNames()) { grad.SetInput(input_param, this->Input(input_param)); grad.SetOutput(GradVarName(input_param), this->InputGrad(input_param)); } - for (auto& output_param : this->OutputParamNames()) { + for (auto& output_param : this->OutputNames()) { grad.SetInput(output_param, this->Output(output_param)); grad.SetInput(GradVarName(output_param), this->OutputGrad(output_param)); } diff --git a/paddle/framework/op_desc.cc b/paddle/framework/op_desc.cc index 33a064890c..852f0f1eb8 100644 --- a/paddle/framework/op_desc.cc +++ b/paddle/framework/op_desc.cc @@ -31,15 +31,6 @@ const std::vector &OpDescBind::Input( return it->second; } -std::vector OpDescBind::InputNames() const { - std::vector retv; - retv.reserve(this->inputs_.size()); - for (auto &ipt : this->inputs_) { - retv.push_back(ipt.first); - } - return retv; -} - void OpDescBind::SetInput(const std::string ¶m_name, const std::vector &args) { need_update_ = true; @@ -54,15 +45,6 @@ const std::vector &OpDescBind::Output( return it->second; } -std::vector OpDescBind::OutputNames() const { - std::vector retv; - retv.reserve(this->outputs_.size()); - for (auto &ipt : this->outputs_) { - retv.push_back(ipt.first); - } - return retv; -} - void OpDescBind::SetOutput(const std::string ¶m_name, const std::vector &args) { need_update_ = true; diff --git a/paddle/framework/op_desc.h b/paddle/framework/op_desc.h index 12706b9d71..508bcaa67e 100644 --- a/paddle/framework/op_desc.h +++ b/paddle/framework/op_desc.h @@ -35,15 +35,11 @@ class OpDescBind { const std::vector &Input(const std::string &name) const; - std::vector InputNames() const; - void SetInput(const std::string ¶m_name, const std::vector &args); const std::vector &Output(const std::string &name) const; - std::vector OutputNames() const; - void SetOutput(const std::string ¶m_name, const std::vector &args); @@ -71,10 +67,8 @@ class OpDescBind { // Only be used in C++ void SetAttrMap(const AttributeMap &attr_map); - std::vector InputParamNames() const { return MapKeys(inputs_); } - std::vector OutputParamNames() const { - return MapKeys(outputs_); - } + std::vector InputNames() const { return MapKeys(inputs_); } + std::vector OutputNames() const { return MapKeys(outputs_); } private: template From 495a80a73645bcabe9e392a6b1a2878845f7a234 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Tue, 3 Oct 2017 13:17:10 -0700 Subject: [PATCH 173/342] Update design doc --- doc/design/register_grad_op.md | 29 ++++++++++++++++++++++++++--- 1 file changed, 26 insertions(+), 3 deletions(-) diff --git a/doc/design/register_grad_op.md b/doc/design/register_grad_op.md index 12b04fb271..cdb7a8435b 100644 --- a/doc/design/register_grad_op.md +++ b/doc/design/register_grad_op.md @@ -33,22 +33,45 @@ The mapping relationship between an operator and its gradient operators is a fun ```cpp // (OpDesc) --> vector -using GradOpDescMaker = std::function(const OpDesc&)>; +std::function(const OpDescBind&)>; ``` -The function take a `OpDesc` of the forward operator and return one or many gradient operator descriptions. +The function takes an `OpDescBind` of the forward operator and returns one or many gradient operator descriptions. `OpDescBind` is a C++ wrapper for protobuf message `OpDesc` to manipulate `OpDesc` fast. The `GradOpDescMaker` will be registered in `OpInfo`, to replace `grad_op_type_` field. The `OpInfo` should be ```cpp struct OpInfo { - GradOpDescMaker grad_op_maker_; + std::function(const OpDescBind&)> grad_op_maker_; ... }; ``` The `grad_op_maker_ ` is `nullptr` if the operator does not have associated gradient operators. +We propose a base class called `GradOpDescMakerBase` to let operator developers generate `Gradient Operators` easily. The public interface of that class is + +```cpp +class GradOpDescMakerBase { +public: + GradOpDescMakerBase(const OpDescBind& ); + virtual std::vector operator()()const = 0; +}; +``` + +We can convert `GradOpDescMakerBase` to `std::function(const OpDescBind&)>` by + +```cpp +using GradOpMaker = ...; +std::function(const OpDescBind&)> func; +func = [] (const OpDescBind& fwd_op) { + GradOpMaker maker(fwd_op); + return maker(); +}; +``` + +We can write many helper functions since the `GradOpDescMakerBase` is a class now. The basic helper functions get the variables of `Input`, `Output`, `InputGradient` and `OutputGradient` in the forwarding operator. + We should chagne register macros at the same time. In the current solution, there is no difference between forwarding operators and backward operators. So `REGISTER_OP` just register one operator. If the `REGISTER_OPERATOR ` contains `OpProtoAndCheckerMaker` and `GradOpDescMaker`, we just list them in the same macro. It can be done by a macro contains `__VA_ARGS__`. The user interface should be From 6c4d1f551d96dda505be54c9a705d5a6784dd062 Mon Sep 17 00:00:00 2001 From: qijun Date: Tue, 3 Oct 2017 13:43:25 -0700 Subject: [PATCH 174/342] refine codes --- paddle/framework/CMakeLists.txt | 8 +- paddle/framework/executor.cc | 44 ++++---- paddle/framework/executor.h | 4 +- paddle/framework/executor_test.cc | 103 ++++++++++-------- paddle/platform/CMakeLists.txt | 2 +- paddle/platform/device.cc | 59 ---------- paddle/platform/device_context_manager.cc | 68 ++++++++++++ .../{device.h => device_context_manager.h} | 45 +++++--- 8 files changed, 188 insertions(+), 145 deletions(-) delete mode 100644 paddle/platform/device.cc create mode 100644 paddle/platform/device_context_manager.cc rename paddle/platform/{device.h => device_context_manager.h} (52%) diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt index 58e78e9a6a..898b3a990d 100644 --- a/paddle/framework/CMakeLists.txt +++ b/paddle/framework/CMakeLists.txt @@ -44,8 +44,12 @@ add_custom_command(TARGET framework_py_proto POST_BUILD cc_library(backward SRCS backward.cc DEPS net_op) cc_test(backward_test SRCS backward_test.cc DEPS backward recurrent_op device_context) -cc_library(executor SRCS executor.cc DEPS op_registry device scope framework_proto ${GLOB_OP_LIB}) -cc_test(executor_test SRCS executor_test.cc DEPS executor) +cc_library(executor SRCS executor.cc DEPS op_registry device_context_manager scope framework_proto ${GLOB_OP_LIB}) +if(WITH_GPU) + nv_test(executor_test SRCS executor_test.cc DEPS executor) +else() + cc_test(executor_test SRCS executor_test.cc DEPS executor) +endif() cc_library(tensor_array SRCS tensor_array.cc DEPS lod_tensor) cc_test(tensor_array_test SRCS tensor_array_test.cc DEPS tensor_array place) diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc index 94b9b3b350..717f9bf81a 100644 --- a/paddle/framework/executor.cc +++ b/paddle/framework/executor.cc @@ -22,9 +22,21 @@ namespace paddle { namespace framework { Executor::Executor(const std::vector& places) { - devices_.resize(places.size()); + device_contexts_.resize(places.size()); for (size_t i = 0; i < places.size(); i++) { - devices_[i] = platform::GetDevice(places[i]); + if (platform::is_cpu_place(places[i])) { + device_contexts_[i] = platform::DeviceContextManager::Get() + ->GetDeviceContext( + boost::get(places[i])); + } else { +#ifndef PADDLE_ONLY_CPU + device_contexts_[i] = platform::DeviceContextManager::Get() + ->GetDeviceContext( + boost::get(places[i])); +#else + PADDLE_THROW("'GPUPlace' is not supported in CPU only device."); +#endif + } } } @@ -34,37 +46,25 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope, // TODO(tonyyang-svail): // - only runs the first block // - only runs on the first device + Scope& local_scope = scope->NewScope(); + auto& block = pdesc.blocks(0); - auto& device = devices_[0]; + auto& device_context = device_contexts_[0]; for (auto& var : block.vars()) { - scope->NewVar(var.name()); + local_scope.NewVar(var.name()); } // std::vector ops; for (auto& op_desc : block.ops()) { auto op = framework::OpRegistry::CreateOp(op_desc); - // op->InferShape(*scope); - op->Run(*scope, *device->cpu_device_context); + // InferShape is now doing inside Run method. + op->Run(local_scope, *device_context); } // TODO(tonyyang-svail): need to test gpu device - // device_->cpu_device_context->Wait(); - // #ifndef PADDLE_ONLY_CPU - // if (device_->cuda_device_context) { - // device_->cuda_device_context->Wait(); - // } - // #endif - - Scope& local_scope = scope->NewScope(); - local_scope.NewVar(); - for (auto device : devices_) { - device->cpu_device_context->Wait(); -#ifndef PADDLE_ONLY_CPU - if (device->cuda_device_context) { - device->cuda_device_context->Wait(); - } -#endif + for (auto device_context : device_contexts_) { + device_context->Wait(); } } diff --git a/paddle/framework/executor.h b/paddle/framework/executor.h index cdb80bc104..795b8ffdab 100644 --- a/paddle/framework/executor.h +++ b/paddle/framework/executor.h @@ -18,7 +18,7 @@ limitations under the License. */ #include "paddle/framework/op_info.h" #include "paddle/framework/scope.h" #include "paddle/framework/tensor.h" -#include "paddle/platform/device.h" +#include "paddle/platform/device_context_manager.h" namespace paddle { namespace framework { @@ -30,7 +30,7 @@ class Executor { void Run(const ProgramDesc&, Scope*, std::vector*); private: - std::vector devices_; + std::vector device_contexts_; }; } // namespace framework diff --git a/paddle/framework/executor_test.cc b/paddle/framework/executor_test.cc index 11255af808..810ff2a512 100644 --- a/paddle/framework/executor_test.cc +++ b/paddle/framework/executor_test.cc @@ -15,8 +15,6 @@ limitations under the License. */ #include "paddle/framework/executor.h" #include "gtest/gtest.h" #include "paddle/framework/attribute.h" - -#include #include "paddle/framework/grad_op_builder.h" #include "paddle/framework/op_registry.h" #include "paddle/framework/operator.h" @@ -26,52 +24,71 @@ USE_OP(elementwise_add); using namespace paddle::platform; using namespace paddle::framework; -TEST(Executor, Init) { - ProgramDesc pdesc; - - auto root_block = pdesc.add_blocks(); - root_block->set_idx(0); - root_block->set_parent_idx(-1); - - auto a = root_block->add_vars(); - a->set_name("a"); - auto a_lt = a->mutable_lod_tensor(); - a_lt->set_data_type(paddle::framework::DataType::FP32); - a_lt->add_dims(640); - a_lt->add_dims(640); - - auto b = root_block->add_vars(); - b->set_name("b"); - auto b_lt = b->mutable_lod_tensor(); - b_lt->set_data_type(paddle::framework::DataType::FP32); - b_lt->add_dims(640); - b_lt->add_dims(640); - - auto c = root_block->add_vars(); - c->set_name("c"); - auto c_lt = c->mutable_lod_tensor(); - c_lt->set_data_type(paddle::framework::DataType::FP32); - c_lt->add_dims(640); - c_lt->add_dims(640); - - auto op1 = root_block->add_ops(); - op1->set_type("elementwise_add"); - auto X = op1->add_inputs(); - X->set_parameter("X"); - X->add_arguments("a"); - auto Y = op1->add_inputs(); - Y->set_parameter("Y"); - Y->add_arguments("b"); - - CPUPlace cpu_place1, cpu_place2; +class ExecutorTester : public ::testing::Test { + public: + virtual void SetUp() override { + auto root_block = pdesc_.add_blocks(); + root_block->set_idx(0); + root_block->set_parent_idx(-1); + + auto a = root_block->add_vars(); + a->set_name("a"); + auto a_lt = a->mutable_lod_tensor(); + a_lt->set_data_type(paddle::framework::DataType::FP32); + a_lt->add_dims(640); + a_lt->add_dims(640); + + auto b = root_block->add_vars(); + b->set_name("b"); + auto b_lt = b->mutable_lod_tensor(); + b_lt->set_data_type(paddle::framework::DataType::FP32); + b_lt->add_dims(640); + b_lt->add_dims(640); + + auto c = root_block->add_vars(); + c->set_name("c"); + auto c_lt = c->mutable_lod_tensor(); + c_lt->set_data_type(paddle::framework::DataType::FP32); + c_lt->add_dims(640); + c_lt->add_dims(640); + + auto op1 = root_block->add_ops(); + op1->set_type("elementwise_add"); + auto X = op1->add_inputs(); + X->set_parameter("X"); + X->add_arguments("a"); + auto Y = op1->add_inputs(); + Y->set_parameter("Y"); + Y->add_arguments("b"); + } + + protected: + std::vector* outputs_{nullptr}; + ProgramDesc pdesc_; + Scope scope_; +}; + +TEST_F(ExecutorTester, InitCPU) { std::vector places; + CPUPlace cpu_place1, cpu_place2; places.push_back(cpu_place1); places.push_back(cpu_place2); Executor* executor = new Executor(places); - Scope s; - std::vector* outputs{nullptr}; - executor->Run(pdesc, &s, outputs); + executor->Run(pdesc_, &scope_, outputs_); + delete executor; +} + +#ifndef PADDLE_ONLY_CPU +TEST_F(ExecutorTester, InitGPU) { + std::vector places; + GPUPlace gpu_place0(0); + GPUPlace gpu_place1(1); + places.push_back(gpu_place0); + places.push_back(gpu_place1); + Executor* executor = new Executor(places); + executor->Run(pdesc_, &scope_, outputs_); delete executor; } +#endif diff --git a/paddle/platform/CMakeLists.txt b/paddle/platform/CMakeLists.txt index b581937393..b4ddf721dd 100644 --- a/paddle/platform/CMakeLists.txt +++ b/paddle/platform/CMakeLists.txt @@ -23,7 +23,7 @@ cc_library(device_context SRCS device_context.cc DEPS memory buddy_allocator system_allocator memory_block meta_data meta_cache place eigen3 ${GPU_CTX_DEPS}) nv_test(device_context_test SRCS device_context_test.cc DEPS device_context gpu_info) -cc_library(device SRCS device.cc DEPS device_context) +cc_library(device_context_manager SRCS device_context_manager.cc DEPS device_context) nv_test(cudnn_helper_test SRCS cudnn_helper_test.cc DEPS dynload_cuda) nv_test(transform_test SRCS transform_test.cu DEPS paddle_memory place device_context) diff --git a/paddle/platform/device.cc b/paddle/platform/device.cc deleted file mode 100644 index 7acd87c8c3..0000000000 --- a/paddle/platform/device.cc +++ /dev/null @@ -1,59 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#include "paddle/platform/device.h" - -namespace paddle { -namespace platform { - -template -std::unique_ptr make_unique(Args&&... args) { - return std::unique_ptr(new T(std::forward(args)...)); -} - -CPUDeviceContext* GetCPUDeviceContext(const CPUPlace& place) { - static std::unique_ptr g_cpu_device_context = - make_unique(place); - return g_cpu_device_context.get(); -} - -#ifndef PADDLE_ONLY_CPU -CUDADeviceContext* GetCUDADeviceContext(const GPUPlace& place) { - static std::unique_ptr g_cuda_device_context = - make_unique(place); - return g_cuda_device_context.get(); -} -#endif - -Device* GetDevice(const Place& place) { - CPUPlace cpu_place; -#ifndef PADDLE_ONLY_CPU - if (is_gpu_place(place)) { - GPUPlace gpu_place = boost::get(place); - static std::unique_ptr g_device = make_unique( - GetCPUDeviceContext(cpu_place), GetCUDADeviceContext(gpu_place)); - return g_device.get(); - } else { - static std::unique_ptr g_device = - make_unique(GetCPUDeviceContext(cpu_place), nullptr); - return g_device.get(); - } -#else - static std::unique_ptr g_device = - make_unique(GetCPUDeviceContext(cpu_place)); - return g_device.get(); -#endif -} -} // namespace platform -} // namespace paddle diff --git a/paddle/platform/device_context_manager.cc b/paddle/platform/device_context_manager.cc new file mode 100644 index 0000000000..156d317c8a --- /dev/null +++ b/paddle/platform/device_context_manager.cc @@ -0,0 +1,68 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/platform/device_context_manager.h" + +namespace paddle { +namespace platform { + +DeviceContextManager::DeviceContextManager() { +#ifndef PADDLE_ONLY_CPU + device_count_ = GetDeviceCount(); + cuda_contexts_.reserve(device_count_); + for (int i = 0; i < device_count_; i++) { + cuda_contexts_[i] = nullptr; + } +#endif +} + +template <> +CPUDeviceContext* DeviceContextManager::GetDeviceContext< + CPUPlace, CPUDeviceContext>(const CPUPlace& place) { + if (!cpu_context_) { + cpu_context_ = new CPUDeviceContext(place); + } + return cpu_context_; +} + +#ifndef PADDLE_ONLY_CPU +template <> +CUDADeviceContext* DeviceContextManager::GetDeviceContext< + GPUPlace, CUDADeviceContext>(const GPUPlace& place) { + int gpu_id = place.device; + PADDLE_ENFORCE(gpu_id < device_count_, + "GPU device id must less than device count"); + SetDeviceId(gpu_id); + if (!cuda_contexts_[gpu_id]) { + cuda_contexts_[gpu_id] = new CUDADeviceContext(place); + } + return cuda_contexts_[gpu_id]; +} +#endif + +DeviceContextManager::~DeviceContextManager() { + if (cpu_context_) { + delete cpu_context_; + } +#ifndef PADDLE_ONLY_CPU + for (int i = 0; i < device_count_; i++) { + if (cuda_contexts_[i]) { + delete cuda_contexts_[i]; + } + } +#endif +} + +} // namespace platform +} // namespace paddle diff --git a/paddle/platform/device.h b/paddle/platform/device_context_manager.h similarity index 52% rename from paddle/platform/device.h rename to paddle/platform/device_context_manager.h index b1bb8073cf..da15808a60 100644 --- a/paddle/platform/device.h +++ b/paddle/platform/device_context_manager.h @@ -13,33 +13,46 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once - #include "paddle/platform/device_context.h" -#include "paddle/platform/place.h" namespace paddle { namespace platform { -struct Device { - CPUDeviceContext* cpu_device_context; -#ifndef PADDLE_ONLY_CPU - CUDADeviceContext* cuda_device_context; -#endif +template +struct Converter; + +template <> +struct Converter { + using DeviceContextType = CPUDeviceContext; +}; #ifndef PADDLE_ONLY_CPU - Device(CPUDeviceContext* cpu, CUDADeviceContext* gpu) - : cpu_device_context(cpu), cuda_device_context(gpu) {} -#else - explicit Device(CPUDeviceContext* cpu) : cpu_device_context(cpu) {} -#endif +template <> +struct Converter { + using DeviceContextType = CUDADeviceContext; }; +#endif + +class DeviceContextManager { + public: + DeviceContextManager(); + ~DeviceContextManager(); + + template ::DeviceContextType> + DeviceType* GetDeviceContext(const PlaceType& place); -CPUDeviceContext* GetCPUDeviceContext(const platform::CPUPlace& place); + static DeviceContextManager* Get() { + static DeviceContextManager inst; + return &inst; + } + private: + CPUDeviceContext* cpu_context_; #ifndef PADDLE_ONLY_CPU -CUDADeviceContext* GetCUDADeviceContext(const platform::GPUPlace& place); + int device_count_; + std::vector cuda_contexts_; #endif - -Device* GetDevice(const platform::Place& place); +}; } // namespace platform } // namespace paddle From 62de57e1ee28bdb349148028d079b4e3192ecb46 Mon Sep 17 00:00:00 2001 From: Yi Wang Date: Tue, 3 Oct 2017 14:01:22 -0700 Subject: [PATCH 175/342] Update lod_tensor.md --- paddle/framework/lod_tensor.md | 168 +++++++++++++++++++-------------- 1 file changed, 97 insertions(+), 71 deletions(-) diff --git a/paddle/framework/lod_tensor.md b/paddle/framework/lod_tensor.md index 07bbdf9416..0fa14f3470 100644 --- a/paddle/framework/lod_tensor.md +++ b/paddle/framework/lod_tensor.md @@ -1,147 +1,173 @@ # Design Doc: LoD (Level-of-Detail) Tensor -PaddlePaddle's RNN doesn't require that all instances have the same length. To do so, we introduce an extension to Tensor, namely, LoD Tensor. +As other deep learning systems, PaddlePaddle supports training models from sequence data. Also, like other systems, PaddlePaddle represent a mini-batch of sequences as a Tensor. What is different is that PaddlePaddle doesn't require that all sequences in a mini-batch are of the same length. Thus no need for padding zeros. -## Challenge of Variable-length Inputs +| | TensorFlow | PaddlePaddle | +|-----------------------|------------|--------------| +| RNN | Support | Support | +| recursive RNN | Support | Support | +| padding zeros | Must | No need | +| blob data type | Tensor | LoDTensor | -People usually represent a mini-batch by a Tensor. For example, a mini-batch of 10 images, each of size 32x32, is a 10x32x32 Tensor. So a transformation, T, of all images can be a matrix multiplication of the 10xOx32-dimensional tensor T and the 10x32x32 Tensor. +PaddlePaddle achieves this flexibility by passing through a new data type, *LoD Tensor*, which is a Tensor attached with segmentation index known as *LoD*, between operators. The LoD index doesn't only segments a tensor, but also recursively segments sub-sequences. This document presents the design of LoD and LoDTensor. -Another example is that each mini-batch contains 32 sentences, where each word is a D-dimensional one-hot vector. If all sentences have the same length L, we can represent this mini-batch by a 32xLxD tensor. However, in most cases, sentences have variable lengths, and we will need an index data structure to record these variable lengths. -## LoD as a Solution +## The Challenge: Variable-length Sequences -### Mini-Batch of variable-length sentences +Most deep learning systems represent a mini-batch as a Tensor. For example, a mini-batch of 10 images, each of size 32x32, is a 10x32x32 Tensor. Another example is that each mini-batch contains N sentences, where each word is a D-dimensional one-hot vector. Suppose that all sentences have the same length L, we can represent this mini-batch by a NxLxD tensor. -Let's imagine a mini-batch of 3 variable lengths sentences, containing 3, 1, and 2 words respectively. We can represent it by a (3+1+2)xD tensor plus some index information: +Both examples show that the elements of sequences are usually of the same size. In the first example, all images are 32x32, and in the second one, all words are D-dimensional vectors. It doesn't make sense to allow variable-sized images, as that would require transformations like convolution represented by variable-sized Tensors. + +The real challenge is that in most cases, sentences have variable lengths, and we will need an index data structure to segment the tensor into sequences. Also, sequences might consist of sub-sequences. + +## A Solution: The LoD Index + +Let is visit this challenge from examples. + +### A Mini-Batch of Sentences + +Let's imagine a mini-batch of 3 variable lengths sentences composed by 3, 1, and 2 words respectively. We can represent it by a (3+1+2)xD tensor plus some index information: ``` - 3 3 1 2 ||| | || ``` -Each `|` represents a D-dimensional word vectors. The number 3 on top indicate 3 sentences, and numbers 3, 1, and 2 on the second level represent the number of words in each sentence. +where each `|` represents a D-dimensional word vector. The numbers, 3, 1, and 2, form a 1-level LoD. + +### Recursive Sequences + +Let check another example of a 2-level LoD Tensor. Consider a mini-batch of three articles with 3, 1, and 2 sentences, and each sentence consists of words: + +``` +3 1 2 +3 2 4 1 2 3 +||| || |||| | || ||| +``` -### Mini-Batch of variable-length videos +### A Mini-Batch of Videos -This approach generalizes to the case where elements are not words, but higher dimensional objects, like images. Suppose that a mini-batch contains videos of the same frame size 640x480. If a mini-batch contains 3 videos of 3, 1, and 2 frames respectively. The underlying tensor is of size (3+1+2)x640x480. The index information illustrates as: +LoD Tensor generalizes to the case where elements are higher dimensional objects, like images. Suppose that a mini-batch contains videos of the same frame size 640x480. Here is a mini-batch of 3 videos with 3, 1, and 2 frames respectively. ``` - 3 3 1 2 口口口 口 口口 ``` -where each `口` represents an image. +The underlying tensor is of size (3+1+2)x640x480, and each `口` represents a 640x480 image. -### Mini-Batch of fixed-size images +### A Mini-Batch of Images -Let's get back to a typical example, image classification, where each mini-batch has M fixed-sized images. The LoD Tensor representation is +In traditional cases like a mini-batch with N fixed-sized images, the LoD Tensor representation is as ``` - M 1 1 1 1 1 口口口口 ... 口 ``` -The many 1's on the second level seem duplicated. For this particular case of 2 levels and the second level always have length 1, we can ignore the LoD index. - -### Design and summarization - -In summary, as long as that the essential elements (words or images) have the same size, we can represent mini-batches by a LoD Tensor: +It doesn't loss anything to ignore the many 1's in the index and to consider this LoD Tensor a usual Tensor: -- The underlying tensor has size LxD1xD2x..., where D1xD2... is the size of the essential elements, and -- The first dimension size L has an additonal property -- a LoD index as a nested vector: +``` +口口口口 ... 口 +``` - ```c++ - typedef std::vector> LoD; - ``` +### Model Parameters -- The LoD index is not necessary when there are only two levels and all elements of the second level have length 1. +A model parameter is just a usual Tensor, which, just like the above example, is a **0-level LoD Tensor**. -## Slicing of LoD Tensor +## The LoD Tensor -Consider that we have a network with three levels of RNN: the top level one handles articles, the second level one handles sentences, and the basic level one handles words. This network requires that mini-batches represented by 3 level LoD Tensor, for example, +Let us revisit above example of the 2-level LoD Tensor ``` - 3 3 1 2 3 2 4 1 2 3 ||| || |||| | || ||| ``` -To allow each level of RNN to handle its input, we define **the slicing of a LoD Tensor is defined as getting the j-th sequence on level i, or the -slice** +It is indeed a tree, where leaves are elementary sequences identified by **branches**. + +For example, the third sentence in above example is identified by branch <0,2>, where 0 indicates the first article with length 3, and 2 indicates the third sentence in this article with length 4. + +### The LoD Index -For example, the <2,1>-slice of above slice is +We can save the LoD index in above example ``` -2 -|| +3 1 2 +3 2 4 1 2 3 ``` -and the <1,2>-slice of above example is +in a not-full 2D matrix: +```c++ +typedef std::vector > LoD; ``` -2 -2 3 -|| ||| -``` -Let's go on slicing this slice. Its <1,1>-slice is +where + +- `LoD.size()` is the number of levels, or the maximum length of branches, +- `LoD[i][j]` is the length of the j-th segment at the i-th level. + +## The Offset Representation + +To quickly access elementary sequences, we adopt an offset representation -- instead of saving the lengths, we save the beginning and ending elements of sequences. + +In the above example, we accumulate the length of elementary sequences: ``` -1 -1 -| +3 2 4 1 2 3 ``` -### The Slicing Algorithm +into offsets -The algorithm, with over-simplified data structure, is defined as +``` +0 3 5 9 10 12 15 + = = = = = = + 3 2+3 4+5 1+9 2+10 3+12 +``` -```c++ -typedef std::vector> LoD; +so we know that the first sentence is from word 0 to word 3, and the second sentence from work 3 to word 5. -struct LoDTensor { - LoD lod_; - float* tensor_; -}; +Similarly, lengths in the top level LoD -LoDTensor Slice(const LoDTensor& lodt, int level, int sequence); +``` +3 1 2 ``` -Let us revisit the example above +is transformed into offsets of elements/words: ``` - 3 -3 1 2 -3 2 4 1 2 3 -||| || |||| | || ||| +0 9 10 15 + = = = + 3+2+4 1+9 2+3+10 ``` -Suppose that we want to retrieve the <1,2>-slice +so we can tell that the first article is from word 0 to word 9, and the second article is from word 9 to word 10. + +The complete offset representation is as follows: ``` -2 -2 3 -|| ||| +0 9 10 15 +0 3 5 9 10 12 15 +||| || |||| | || ||| ``` -we will need to find out the starting position of this slice by summing over all leaf nodes in `LoD` to the left of the slice, i.e., 3 + 2 + 4 + 1 = 10. +## Slicing of LoD Tensors + +When we use the above 2-level LoD Tensor as the input to a nested-RNN, we need to retrieve certain sequences. Here we define the sequence identified by branch as the **-slice**. -To avoid the traversal of the LoD tree at slicing time, we can do it at the construction time -- instead of saving the lengths of the next level in the LoD tree, we can save the starting offset of the next level. For example, above LoD Tensor can be transformed into +For example, the <2>-slice of above example is ``` - 0 -0 9 10 -0 3 5 9 10 12 -||| || |||| | || ||| +10 15 +10 12 15 + || ||| ``` -We don't really need the 0 on top, so the LoD Tensor could be +and the <2,0>-slice of above slice is ``` -0 9 10 -0 3 5 9 10 12 -||| || |||| | || ||| +10 12 + || ``` From 9935fdd3dd92cf9930f88b070090925d2909ed1a Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Tue, 3 Oct 2017 14:14:42 -0700 Subject: [PATCH 176/342] Update --- paddle/framework/backward.cc | 57 ++++++++++++++++++++++++++++++++--- paddle/framework/block_desc.h | 4 +++ paddle/framework/op_desc.cc | 16 ++++++++++ paddle/framework/op_desc.h | 4 +++ 4 files changed, 77 insertions(+), 4 deletions(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index 1b4c5c025e..0f65478ef8 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -235,14 +235,17 @@ static bool AllGradInSet(const std::vector& names, } std::vector CreatBackwardOps( - const OpDescBind& op_desc, unordered_map& no_grad_vars) { + const std::unique_ptr& op_desc_ptr, + unordered_map& no_grad_vars) { + const OpDescBind& op_desc = *op_desc_ptr; std::vector grad_op_descs; // All input gradients of forwarding operator do not need to calculat. - if (AllGradInSet(op_desc_.InputNames(), kGradVarSuffix, no_grad_vars)) { + if (AllGradInSet(op_desc_.InputArgumentNames(), kGradVarSuffix, + no_grad_vars)) { return grad_op_descs; // empty vector } // All output gradients of forwarding operator do not need to calculate. - const std::vector& outputs = op_desc_.OutputNames(); + const std::vector& outputs = op_desc_.OutputArugumentNames(); if (AllGradInSet(outputs, kGradVarSuffix, no_grad_vars)) { for (const std::string& name : outputs) { no_grad_vars.insert(GradVarName(name)); @@ -254,7 +257,7 @@ std::vector CreatBackwardOps( std::vector fill_zeros_ops; for (OpDescBind& desc : grad_op_descs) { - for (const std::string& in_name : desc.InputNames()) { + for (const std::string& in_name : desc.InputArgumentNames()) { if (no_grad_vars.count(in_name)) { std::string prefix = in_name.substr( 0, in_name.size() - sizeof(kGradVarSuffix) / sizeof(char) + 1); @@ -278,5 +281,51 @@ std::vector CreatBackwardOps( return grad_op_descs; } +void AppendBackwardOps(BlockDescBind& block_desc, + const std::unordered_set& no_grad_vars) { + std::unordered_map> dup_out_ops; + size_t grad_desc_idx = 0; + std::deque> op_descs = block_desc.ops_; + std::vector> grad_op_descs; + for (auto it = op_descs.rbegin(); it != op_descs.rend(); ++it) { + std::vector op_grads = CreatBackwardOps(*it, no_grad_vars); + for (const OpDescBind& desc : op_grads) { + for (const std::string& out_name : desc.OutputArugumentNames()) { + dup_out_ops[out_name].emplace_back(grad_desc_idx); + } + ++grad_desc_idx; + } + grad_op_descs.insert(grad_op_descs.end(), op_grads.begin(), op_grads.end()); + } + // Check whether some variables are written more than once + std::list> pending_sum_ops; + for (const auto& dup : dup_out_ops) { + const std::string& out_name = dup.first; + const std::vector dup_op = dup.second; + if (out_name != kEmptyVarName && dup_op.size() > 1) { + std::vector sum_op_inputs; + for (size_t i = 0; i < dup_op.size(); ++i) { + std::string new_name = out_name + "@RENAME@" + std::to_string(i); + grad_op_descs[dup_op[i]].Rename(out_name, new_name); + sum_op_inputs.emplace_back(new_name); + } + pending_sum_ops.push_back( + {dup_op.back(), + OpDescBind( + {"sum", {{"X", {sum_op_inputs}}}, {{"Out", {out_name}}}, {}})}); + } + } + pending_sum_ops.sort( + [](const std::pair& a, + const std::pair& b) { return a.first > b.first; }); + for (auto& p : pending_sum_ops) { + grad_op_descs.insert(grad_op_descs.begin() + p.first + 1, + std::move(p.second)); + } + // Append grad_op_descs to BlockDescBind::ops_ + for () { + } +} + } // namespace framework } // namespace paddle diff --git a/paddle/framework/block_desc.h b/paddle/framework/block_desc.h index 59513ede33..a171dfef30 100644 --- a/paddle/framework/block_desc.h +++ b/paddle/framework/block_desc.h @@ -32,6 +32,10 @@ class ProgramDescBind; class BlockDescBind { public: + friend void AppendBackwardOps( + BlockDescBind &block_desc, + const std::unordered_set &no_grad_vars); + BlockDescBind(ProgramDescBind *prog, BlockDesc *desc) : prog_(prog), desc_(desc), need_update_(false) {} diff --git a/paddle/framework/op_desc.cc b/paddle/framework/op_desc.cc index f2e0c14fbd..e6c0cdacd9 100644 --- a/paddle/framework/op_desc.cc +++ b/paddle/framework/op_desc.cc @@ -49,6 +49,14 @@ std::vector OpDescBind::InputNames() const { return retv; } +std::vector InputArgumentNames() const { + std::vector retv; + for (auto &ipt : this->inputs_) { + retv.insert(retv.end(), ipt.second.begin(), ipt.second.end()); + } + return retv; +} + void OpDescBind::SetInput(const std::string ¶m_name, const std::vector &args) { need_update_ = true; @@ -72,6 +80,14 @@ std::vector OpDescBind::OutputNames() const { return retv; } +std::vector OutputArgumentNames() const { + std::vector retv; + for (auto &ipt : this->outputs_) { + retv.insert(retv.end(), ipt.second.begin(), ipt.second.end()); + } + return retv; +} + void OpDescBind::SetOutput(const std::string ¶m_name, const std::vector &args) { need_update_ = true; diff --git a/paddle/framework/op_desc.h b/paddle/framework/op_desc.h index 2280654812..e30c58632e 100644 --- a/paddle/framework/op_desc.h +++ b/paddle/framework/op_desc.h @@ -42,6 +42,8 @@ class OpDescBind { std::vector InputNames() const; + std::vector InputArgumentNames() const; + void SetInput(const std::string ¶m_name, const std::vector &args); @@ -49,6 +51,8 @@ class OpDescBind { std::vector OutputNames() const; + std::vector OutputArgumentNames() const; + void SetOutput(const std::string ¶m_name, const std::vector &args); From 48a9ab4a0896b3102637fb7606b27bbf6b097bc3 Mon Sep 17 00:00:00 2001 From: Markus Kliegl Date: Tue, 3 Oct 2017 14:21:42 -0700 Subject: [PATCH 177/342] minor language fixes --- paddle/framework/lod_tensor.md | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/paddle/framework/lod_tensor.md b/paddle/framework/lod_tensor.md index 0fa14f3470..597bc48cf3 100644 --- a/paddle/framework/lod_tensor.md +++ b/paddle/framework/lod_tensor.md @@ -1,6 +1,6 @@ # Design Doc: LoD (Level-of-Detail) Tensor -As other deep learning systems, PaddlePaddle supports training models from sequence data. Also, like other systems, PaddlePaddle represent a mini-batch of sequences as a Tensor. What is different is that PaddlePaddle doesn't require that all sequences in a mini-batch are of the same length. Thus no need for padding zeros. +Like other deep learning systems, PaddlePaddle supports training models from sequence data. Also, like other systems, PaddlePaddle represent a mini-batch of sequences as a Tensor. What is different is that PaddlePaddle doesn't require all sequences in a mini-batch to be of the same length. Thus no need for padding zeros. | | TensorFlow | PaddlePaddle | |-----------------------|------------|--------------| @@ -9,24 +9,24 @@ As other deep learning systems, PaddlePaddle supports training models from seque | padding zeros | Must | No need | | blob data type | Tensor | LoDTensor | -PaddlePaddle achieves this flexibility by passing through a new data type, *LoD Tensor*, which is a Tensor attached with segmentation index known as *LoD*, between operators. The LoD index doesn't only segments a tensor, but also recursively segments sub-sequences. This document presents the design of LoD and LoDTensor. +PaddlePaddle achieves this flexibility by passing through a new data type, *LoD Tensor*, which is a Tensor attached with segmentation index known as *LoD*, between operators. The LoD index doesn't only segment a tensor, but also recursively segments sub-sequences. This document presents the design of LoD and LoDTensor. ## The Challenge: Variable-length Sequences Most deep learning systems represent a mini-batch as a Tensor. For example, a mini-batch of 10 images, each of size 32x32, is a 10x32x32 Tensor. Another example is that each mini-batch contains N sentences, where each word is a D-dimensional one-hot vector. Suppose that all sentences have the same length L, we can represent this mini-batch by a NxLxD tensor. -Both examples show that the elements of sequences are usually of the same size. In the first example, all images are 32x32, and in the second one, all words are D-dimensional vectors. It doesn't make sense to allow variable-sized images, as that would require transformations like convolution represented by variable-sized Tensors. +Both examples show that the elements of sequences are usually of the same size. In the first example, all images are 32x32, and in the second one, all words are D-dimensional vectors. It doesn't make sense to allow variable-sized images, as that would require transformations like convolution to handle variable-sized Tensors. The real challenge is that in most cases, sentences have variable lengths, and we will need an index data structure to segment the tensor into sequences. Also, sequences might consist of sub-sequences. ## A Solution: The LoD Index -Let is visit this challenge from examples. +To understand our solution, it is best to look at some examples. ### A Mini-Batch of Sentences -Let's imagine a mini-batch of 3 variable lengths sentences composed by 3, 1, and 2 words respectively. We can represent it by a (3+1+2)xD tensor plus some index information: +Let's imagine a mini-batch of 3 variable lengths sentences composed of 3, 1, and 2 words, respectively. We can represent the mini-batch by a (3+1+2)xD tensor plus some index information: ``` 3 1 2 @@ -37,7 +37,7 @@ where each `|` represents a D-dimensional word vector. The numbers, 3, 1, and 2 ### Recursive Sequences -Let check another example of a 2-level LoD Tensor. Consider a mini-batch of three articles with 3, 1, and 2 sentences, and each sentence consists of words: +Let check another example of a 2-level LoD Tensor. Consider a mini-batch of three articles with 3, 1, and 2 sentences, and each sentence consists of a variable number of words: ``` 3 1 2 @@ -47,7 +47,7 @@ Let check another example of a 2-level LoD Tensor. Consider a mini-batch of thr ### A Mini-Batch of Videos -LoD Tensor generalizes to the case where elements are higher dimensional objects, like images. Suppose that a mini-batch contains videos of the same frame size 640x480. Here is a mini-batch of 3 videos with 3, 1, and 2 frames respectively. +LoD tensors generalize to the case where elements are higher dimensional objects, like images. Suppose that a mini-batch contains videos of the same frame size 640x480. Here is a mini-batch of 3 videos with 3, 1, and 2 frames, respectively. ``` 3 1 2 @@ -65,7 +65,7 @@ In traditional cases like a mini-batch with N fixed-sized images, the LoD Tenso 口口口口 ... 口 ``` -It doesn't loss anything to ignore the many 1's in the index and to consider this LoD Tensor a usual Tensor: +In this case, we don't lose any information by ignoring the many 1's in the index and simply considering this LoD Tensor as a usual Tensor: ``` 口口口口 ... 口 @@ -91,7 +91,7 @@ For example, the third sentence in above example is identified by branch <0,2>, ### The LoD Index -We can save the LoD index in above example +We can save the LoD index in the above example ``` 3 1 2 @@ -129,13 +129,13 @@ into offsets so we know that the first sentence is from word 0 to word 3, and the second sentence from work 3 to word 5. -Similarly, lengths in the top level LoD +Similarly, the lengths in the top level LoD ``` 3 1 2 ``` -is transformed into offsets of elements/words: +are transformed into offsets of elements/words as follows: ``` 0 9 10 15 @@ -148,9 +148,9 @@ so we can tell that the first article is from word 0 to word 9, and the second a The complete offset representation is as follows: ``` -0 9 10 15 -0 3 5 9 10 12 15 -||| || |||| | || ||| +0 9 10 15 +0 3 5 9 10 12 15 + ||| || |||| | || ||| ``` ## Slicing of LoD Tensors From b2806135a53cbe85fbc764375d9cecc2596ab4be Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Tue, 3 Oct 2017 14:41:21 -0700 Subject: [PATCH 178/342] Change Interface to unique_ptr --- doc/design/register_grad_op.md | 6 +++--- paddle/framework/grad_op_desc_maker.h | 28 +++++++++++++++------------ paddle/framework/op_info.h | 2 +- paddle/framework/type_defs.h | 4 ++++ 4 files changed, 24 insertions(+), 16 deletions(-) diff --git a/doc/design/register_grad_op.md b/doc/design/register_grad_op.md index cdb7a8435b..3cf8a59446 100644 --- a/doc/design/register_grad_op.md +++ b/doc/design/register_grad_op.md @@ -42,7 +42,7 @@ The `GradOpDescMaker` will be registered in `OpInfo`, to replace `grad_op_type_` ```cpp struct OpInfo { - std::function(const OpDescBind&)> grad_op_maker_; + std::function>(const OpDescBind&)> grad_op_maker_; ... }; ``` @@ -55,11 +55,11 @@ We propose a base class called `GradOpDescMakerBase` to let operator developers class GradOpDescMakerBase { public: GradOpDescMakerBase(const OpDescBind& ); - virtual std::vector operator()()const = 0; + virtual std::vector> operator()()const = 0; }; ``` -We can convert `GradOpDescMakerBase` to `std::function(const OpDescBind&)>` by +We can convert `GradOpDescMakerBase` to `std::function>(const OpDescBind&)>` by ```cpp using GradOpMaker = ...; diff --git a/paddle/framework/grad_op_desc_maker.h b/paddle/framework/grad_op_desc_maker.h index e6d63e4b8a..e9ae6e2206 100644 --- a/paddle/framework/grad_op_desc_maker.h +++ b/paddle/framework/grad_op_desc_maker.h @@ -24,7 +24,7 @@ class GradOpDescMakerBase { explicit GradOpDescMakerBase(const OpDescBind& fwd_op) : fwd_op_(fwd_op) {} virtual ~GradOpDescMakerBase() = default; - virtual std::vector operator()() const = 0; + virtual std::vector> operator()() const = 0; protected: static std::vector ToGradNames( @@ -81,10 +81,14 @@ class SingleGradOpDescMaker : public GradOpDescMakerBase { public: using GradOpDescMakerBase::GradOpDescMakerBase; - std::vector operator()() const { return {this->Apply()}; } + std::vector> operator()() const { + std::vector> retv; + retv.emplace_back(this->Apply()); + return retv; + } protected: - virtual OpDescBind Apply() const = 0; + virtual std::unique_ptr Apply() const = 0; }; class DefaultGradOpDescMaker : public SingleGradOpDescMaker { @@ -92,23 +96,23 @@ class DefaultGradOpDescMaker : public SingleGradOpDescMaker { using SingleGradOpDescMaker::SingleGradOpDescMaker; protected: - virtual OpDescBind Apply() const { - OpDescBind grad; - grad.SetType(this->GradOpType()); + virtual std::unique_ptr Apply() const { + auto* grad = new OpDescBind(); + grad->SetType(this->GradOpType()); for (auto& input_param : this->InputNames()) { - grad.SetInput(input_param, this->Input(input_param)); - grad.SetOutput(GradVarName(input_param), this->InputGrad(input_param)); + grad->SetInput(input_param, this->Input(input_param)); + grad->SetOutput(GradVarName(input_param), this->InputGrad(input_param)); } for (auto& output_param : this->OutputNames()) { - grad.SetInput(output_param, this->Output(output_param)); - grad.SetInput(GradVarName(output_param), this->OutputGrad(output_param)); + grad->SetInput(output_param, this->Output(output_param)); + grad->SetInput(GradVarName(output_param), this->OutputGrad(output_param)); } - grad.SetAttrMap(this->Attrs()); + grad->SetAttrMap(this->Attrs()); - return grad; + return std::unique_ptr(grad); } virtual std::string GradOpType() const { diff --git a/paddle/framework/op_info.h b/paddle/framework/op_info.h index 806a960018..8b7882485f 100644 --- a/paddle/framework/op_info.h +++ b/paddle/framework/op_info.h @@ -28,7 +28,7 @@ namespace framework { struct OpInfo { OpCreator creator_; std::string grad_op_type_; - std::function(const OpDescBind&)> grad_op_maker_; + GradOpMakerFN grad_op_maker_; OpProto* proto_{nullptr}; OpAttrChecker* checker_{nullptr}; diff --git a/paddle/framework/type_defs.h b/paddle/framework/type_defs.h index dec5066f1e..a5b9472213 100644 --- a/paddle/framework/type_defs.h +++ b/paddle/framework/type_defs.h @@ -20,6 +20,7 @@ namespace paddle { namespace framework { class OperatorBase; +class OpDescBind; using VariableNameMap = std::map>; // The order should be as same as framework.proto @@ -34,5 +35,8 @@ using OpCreator = std::function; +using GradOpMakerFN = + std::function>(const OpDescBind&)>; + } // namespace framework } // namespace paddle From 71dff503ce6934fd78508879545debdbf8776c51 Mon Sep 17 00:00:00 2001 From: zchen0211 Date: Tue, 3 Oct 2017 15:28:44 -0700 Subject: [PATCH 179/342] API of GAN --- doc/design/gan_api.md | 134 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 134 insertions(+) create mode 100644 doc/design/gan_api.md diff --git a/doc/design/gan_api.md b/doc/design/gan_api.md new file mode 100644 index 0000000000..65ca49410a --- /dev/null +++ b/doc/design/gan_api.md @@ -0,0 +1,134 @@ +''' +GAN implementation, just a demo. +''' +# pd for short, should be more concise. +from paddle.v2 as pd +import numpy as np +import logging + +X = pd.data(pd.float_vector(784)) + +# Conditional-GAN should be a class. +### Class member function: the initializer. +class DCGAN(object): + def __init__(self, y_dim=None): + + # hyper parameters + self.y_dim = y_dim # conditional gan or not + self.batch_size = 100 + self.z_dim = z_dim # input noise dimension + + # define parameters of discriminators + self.D_W1 = pd.Variable(shape=[784, 128], data=pd.gaussian_normal_randomizer()) + self.D_b1 = pd.Variable(np.zeros(128)) # variable also support initialization using a numpy data + self.D_W2 = pd.Varialble(np.random.rand(128, 1)) + self.D_b2 = pd.Variable(np.zeros(128)) + self.theta_D = [D_W1, D_b1, D_W2, D_b2] + + # define parameters of generators + self.G_W1 = pd.Variable(shape=[784, 128], data=pd.gaussian_normal_randomizer()) + self.G_b1 = pd.Variable(np.zeros(128)) # variable also support initialization using a numpy data + self.G_W2 = pd.Varialble(np.random.rand(128, 1)) + self.G_b2 = pd.Variable(np.zeros(128)) + self.theta_G = [D_W1, D_b1, D_W2, D_b2] + + self.build_model() + +### Class member function: Generator Net +def generator(self, z, y = None): + + # Generator Net + if not self.y_dim: + z = pd.concat(1, [z, y]) + + G_h0 = pd.fc(z, self.G_w0, self.G_b0) + G_h0_bn = pd.batch_norm(G_h0) + G_h0_relu = pd.relu(G_h0_bn) + + G_h1 = pd.fc(G_h0_relu, self.G_w1, self.G_b1) + G_h1_bn = pd.batch_norm(G_h1) + G_h1_relu = pd.relu(G_h1_bn) + + G_h2 = pd.deconv(G_h1_relu, self.G_W2, self.G_b2)) + G_im = pd.tanh(G_im) + return G_im + +### Class member function: Discriminator Net +def discriminator(self, image): + + # Discriminator Net + D_h0 = pd.conv2d(image, self.D_w0, self.D_b0) + D_h0_bn = pd.batchnorm(h0) + D_h0_relu = pd.lrelu(h0_bn) + + D_h1 = pd.conv2d(D_h0_relu, self.D_w1, self.D_b1) + D_h1_bn = pd.batchnorm(D_h1) + D_h1_relu = pd.lrelu(D_h1_bn) + + D_h2 = pd.fc(D_h1_relu, self.D_w2, self.D_b2) + return D_h2 + +### Class member function: Build the model +def build_model(self): + + # input data + if self.y_dim: + self.y = pd.data(pd.float32, [self.batch_size, self.y_dim]) + self.images = pd.data(pd.float32, [self.batch_size, self.im_size, self.im_size]) + self.faked_images = pd.data(pd.float32, [self.batch_size, self.im_size, self.im_size]) + self.z = pd.data(tf.float32, [None, self.z_size]) + + # if conditional GAN + if self.y_dim: + self.G = self.generator(self.z, self.y) + self.D_t = self.discriminator(self.images) + # generated fake images + self.sampled = self.sampler(self.z, self.y) + self.D_f = self.discriminator(self.images) + else: # original version of GAN + self.G = self.generator(self.z) + self.D_t = self.discriminator(self.images) + # generate fake images + self.sampled = self.sampler(self.z) + self.D_f = self.discriminator(self.images) + + self.d_loss_real = pd.reduce_mean(pd.cross_entropy(self.D_t, np.ones(self.batch_size)) + self.d_loss_fake = pd.reduce_mean(pd.cross_entropy(self.D_f, np.zeros(self.batch_size)) + self.d_loss = self.d_loss_real + self.d_loss_fake + + self.g_loss = pd.reduce_mean(pd.cross_entropy(self.D_f, np.ones(self.batch_szie)) + +# Main function for the demo: +if __name__ == "__main__": + + # dcgan + dcgan = DCGAN() + dcgan.build_model() + + # load mnist data + data_X, data_y = self.load_mnist() + + # Two subgraphs required!!! + d_optim = pd.train.Adam(lr = .001, beta= .1).minimize(self.d_loss) + g_optim = pd.train.Adam(lr = .001, beta= .1).minimize(self.g_loss) + + # executor + sess = pd.executor() + + # training + for epoch in xrange(10000): + for batch_id in range(N / batch_size): + idx = ... + # sample a batch + batch_im, batch_label = data_X[idx:idx+batch_size], data_y[idx:idx+batch_size] + # sample z + batch_z = np.random.uniform(-1., 1., [batch_size, z_dim]) + + if batch_id % 2 == 0: + sess.run(d_optim, + feed_dict = {dcgan.images: batch_im, + dcgan.y: batch_label, + dcgan.z: batch_z}) + else: + sess.run(g_optim, + feed_dict = {dcgan.z: batch_z}) From e47770bd27a20b2fa9bf2754d16b0e71008185e5 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Tue, 3 Oct 2017 15:30:21 -0700 Subject: [PATCH 180/342] Update --- paddle/framework/backward.cc | 85 +++++++++++++++++---------------- paddle/framework/backward.h | 2 +- paddle/framework/op_desc.cc | 13 ++--- paddle/framework/op_registry.cc | 3 +- paddle/framework/op_registry.h | 3 +- 5 files changed, 57 insertions(+), 49 deletions(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index 0f65478ef8..b4eb89e2d7 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -234,18 +234,17 @@ static bool AllGradInSet(const std::vector& names, return true; } -std::vector CreatBackwardOps( - const std::unique_ptr& op_desc_ptr, - unordered_map& no_grad_vars) { - const OpDescBind& op_desc = *op_desc_ptr; - std::vector grad_op_descs; +std::vector> MakeGradOpDescs( + const std::unique_ptr& op_desc, + unordered_set& no_grad_vars) { + std::vector> grad_op_descs; // All input gradients of forwarding operator do not need to calculat. - if (AllGradInSet(op_desc_.InputArgumentNames(), kGradVarSuffix, + if (AllGradInSet(op_desc->InputArgumentNames(), kGradVarSuffix, no_grad_vars)) { return grad_op_descs; // empty vector } // All output gradients of forwarding operator do not need to calculate. - const std::vector& outputs = op_desc_.OutputArugumentNames(); + const std::vector& outputs = op_desc->OutputArugumentNames(); if (AllGradInSet(outputs, kGradVarSuffix, no_grad_vars)) { for (const std::string& name : outputs) { no_grad_vars.insert(GradVarName(name)); @@ -255,50 +254,54 @@ std::vector CreatBackwardOps( grad_op_descs = OpRegistry::CreateGradOpDescs(op_desc); - std::vector fill_zeros_ops; - for (OpDescBind& desc : grad_op_descs) { - for (const std::string& in_name : desc.InputArgumentNames()) { + std::list> pending_fill_zeros_ops; + for (auto& desc : grad_op_descs) { + for (const std::string& in_name : desc->InputArgumentNames()) { if (no_grad_vars.count(in_name)) { std::string prefix = in_name.substr( 0, in_name.size() - sizeof(kGradVarSuffix) / sizeof(char) + 1); std::string new_name = prefix + kZeroVarSuffix; - desc.Rename(in_name, new_name); - OpDescBind op_desc_bind( - {"fill_zeros_like", {{"X", {prefix}}}, {{"Y", {new_name}}}, {}}); - fill_zeros_ops.push_back(op_desc_bind); + desc->Rename(in_name, new_name); + OpDescBind* fill_zeros_op = new OpDescBind( + "fill_zeros_like", {{"X", {prefix}}}, {{"Y", {new_name}}}, {}); + pending_fill_zeros_ops.push_back({fill_zeros_op}); } } - for (const std::string& out_name : desc.OutputName()) { + for (const std::string& out_name : desc->OutputArgumentName()) { if (no_grad_vars.count(out_name)) { - desc.Rename(out_name, kEmptyVarName); + desc->Rename(out_name, kEmptyVarName); } } } - grad_op_descs.insert(grad_op_descs.begin(), fill_zeros_ops.begin(), - fill_zeros_ops.end()); + grad_op_descs.insert(std::begin(grad_op_descs), + std::begin(pending_fill_zeros_ops), + std::end(pending_fill_zeros_ops)); // TODO (fengjiayi): RNN op return grad_op_descs; } -void AppendBackwardOps(BlockDescBind& block_desc, - const std::unordered_set& no_grad_vars) { +void AppendBackwardOpDescs( + BlockDescBind& block_desc, + const std::unordered_set& no_grad_vars) { std::unordered_map> dup_out_ops; size_t grad_desc_idx = 0; - std::deque> op_descs = block_desc.ops_; - std::vector> grad_op_descs; - for (auto it = op_descs.rbegin(); it != op_descs.rend(); ++it) { - std::vector op_grads = CreatBackwardOps(*it, no_grad_vars); - for (const OpDescBind& desc : op_grads) { - for (const std::string& out_name : desc.OutputArugumentNames()) { + std::deque> block_op_descs = block_desc.ops_; + std::vector> backward_descs; + for (auto it = block_op_descs.rbegin(); it != block_op_descs.rend(); ++it) { + std::vector> op_grads = + MakeGradOpDescs(*it, no_grad_vars); + for (const auto& desc : op_grads) { + for (const std::string& out_name : desc->OutputArugumentNames()) { dup_out_ops[out_name].emplace_back(grad_desc_idx); } ++grad_desc_idx; } - grad_op_descs.insert(grad_op_descs.end(), op_grads.begin(), op_grads.end()); + backward_descs.insert(backward_descs.end(), op_grads.begin(), + op_grads.end()); } // Check whether some variables are written more than once - std::list> pending_sum_ops; + std::list>> pending_sum_ops; for (const auto& dup : dup_out_ops) { const std::string& out_name = dup.first; const std::vector dup_op = dup.second; @@ -306,25 +309,27 @@ void AppendBackwardOps(BlockDescBind& block_desc, std::vector sum_op_inputs; for (size_t i = 0; i < dup_op.size(); ++i) { std::string new_name = out_name + "@RENAME@" + std::to_string(i); - grad_op_descs[dup_op[i]].Rename(out_name, new_name); + backward_descs[dup_op[i]]->Rename(out_name, new_name); sum_op_inputs.emplace_back(new_name); } - pending_sum_ops.push_back( - {dup_op.back(), - OpDescBind( - {"sum", {{"X", {sum_op_inputs}}}, {{"Out", {out_name}}}, {}})}); + OpDescBind* sum_op = new OpDescBind("sum", {{"X", sum_op_inputs}}, + {{"Out", {out_name}}}, {}); + pending_sum_ops.push_back({dup_op.back(), {sum_op}}); } } pending_sum_ops.sort( - [](const std::pair& a, - const std::pair& b) { return a.first > b.first; }); + [](const std::pair>& a, + const std::pair>& b) { + return a.first > b.first; + }); for (auto& p : pending_sum_ops) { - grad_op_descs.insert(grad_op_descs.begin() + p.first + 1, - std::move(p.second)); - } - // Append grad_op_descs to BlockDescBind::ops_ - for () { + backward_descs.insert(backward_descs.begin() + p.first + 1, + std::move(p.second)); } + // Append backward_descs to BlockDescBind::ops_ + block_op_descs.insert(std::end(block_op_descs), std::begin(backward_descs), + std::end(backward_descs)); + return; } } // namespace framework diff --git a/paddle/framework/backward.h b/paddle/framework/backward.h index 6aeddafb41..fb496c34c7 100644 --- a/paddle/framework/backward.h +++ b/paddle/framework/backward.h @@ -24,7 +24,7 @@ extern std::unique_ptr Backward( const OperatorBase& forwardOp, const std::unordered_set& no_grad_vars); -extern void AppendBackwardOps( +extern void AppendBackwardOpDescs( BlockDescBind& block_desc, const std::unordered_set& no_grad_vars); diff --git a/paddle/framework/op_desc.cc b/paddle/framework/op_desc.cc index e6c0cdacd9..2c6aec717b 100644 --- a/paddle/framework/op_desc.cc +++ b/paddle/framework/op_desc.cc @@ -49,7 +49,7 @@ std::vector OpDescBind::InputNames() const { return retv; } -std::vector InputArgumentNames() const { +std::vector OpDescBind::InputArgumentNames() const { std::vector retv; for (auto &ipt : this->inputs_) { retv.insert(retv.end(), ipt.second.begin(), ipt.second.end()); @@ -80,7 +80,7 @@ std::vector OpDescBind::OutputNames() const { return retv; } -std::vector OutputArgumentNames() const { +std::vector OpDescBind::OutputArgumentNames() const { std::vector retv; for (auto &ipt : this->outputs_) { retv.insert(retv.end(), ipt.second.begin(), ipt.second.end()); @@ -137,12 +137,13 @@ const std::unordered_map &OpDescBind::GetAttrMap() return attrs_; } -void Rename(const std::string &old_name, const std::string &new_name) { - for (std : string &input : inputs_) { +void OpDescBind::Rename(const std::string &old_name, + const std::string &new_name) { + for (auto &input : inputs_) { std::replace(input.second.begin(), input.second.end(), old_name, new_name); } - for (std::string &output : outputs_) { - std::repalce(output.second.begin(), output.second.end(), old_name, + for (auto &output : outputs_) { + std::replace(output.second.begin(), output.second.end(), old_name, new_name); } need_update_ = true; diff --git a/paddle/framework/op_registry.cc b/paddle/framework/op_registry.cc index fe3228ce5b..d8851a8b42 100644 --- a/paddle/framework/op_registry.cc +++ b/paddle/framework/op_registry.cc @@ -57,7 +57,8 @@ std::unique_ptr OpRegistry::CreateGradOp(const OperatorBase& op) { return std::unique_ptr(BuildGradOp(&op)); } -static std::vector CreateGradOpDescs(const OpDescBind& op_desc) { +static std::vector> OpRegistry::CreateGradOpDescs( + const OpDescBind& op_desc) { auto& info = OpInfoMap::Instance().Get(op_desc.Type()); return info.grad_op_maker_(op_desc); } diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h index c80b6e9630..e334cd592a 100644 --- a/paddle/framework/op_registry.h +++ b/paddle/framework/op_registry.h @@ -69,7 +69,8 @@ class OpRegistry { static std::unique_ptr CreateGradOp(const OperatorBase& op); - static std::vector CreateGradOpDescs(const OpDescBind& op_desc); + static std::vector> CreateGradOpDescs( + const OpDescBind& op_desc); }; class Registrar { From e08367c80678804fc388004ba2ab72f754bc1143 Mon Sep 17 00:00:00 2001 From: Yi Wang Date: Tue, 3 Oct 2017 15:53:42 -0700 Subject: [PATCH 181/342] Add few blank lines --- paddle/framework/lod_tensor.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/paddle/framework/lod_tensor.md b/paddle/framework/lod_tensor.md index 597bc48cf3..d147f1c425 100644 --- a/paddle/framework/lod_tensor.md +++ b/paddle/framework/lod_tensor.md @@ -20,6 +20,7 @@ Both examples show that the elements of sequences are usually of the same size. The real challenge is that in most cases, sentences have variable lengths, and we will need an index data structure to segment the tensor into sequences. Also, sequences might consist of sub-sequences. + ## A Solution: The LoD Index To understand our solution, it is best to look at some examples. @@ -75,6 +76,7 @@ In this case, we don't lose any information by ignoring the many 1's in the inde A model parameter is just a usual Tensor, which, just like the above example, is a **0-level LoD Tensor**. + ## The LoD Tensor Let us revisit above example of the 2-level LoD Tensor From e21dcc5bdaacbd9dbab5be134b71ba8c57eda717 Mon Sep 17 00:00:00 2001 From: zchen0211 Date: Tue, 3 Oct 2017 15:59:25 -0700 Subject: [PATCH 182/342] gan api --- doc/design/gan_api.md | 31 ++++++++++++++++++++++--------- 1 file changed, 22 insertions(+), 9 deletions(-) diff --git a/doc/design/gan_api.md b/doc/design/gan_api.md index 65ca49410a..b5f37051c6 100644 --- a/doc/design/gan_api.md +++ b/doc/design/gan_api.md @@ -1,15 +1,17 @@ ''' GAN implementation, just a demo. ''' +```python # pd for short, should be more concise. from paddle.v2 as pd import numpy as np import logging X = pd.data(pd.float_vector(784)) - +``` # Conditional-GAN should be a class. ### Class member function: the initializer. +```python class DCGAN(object): def __init__(self, y_dim=None): @@ -19,22 +21,26 @@ class DCGAN(object): self.z_dim = z_dim # input noise dimension # define parameters of discriminators + self.D_W0 = pd.Variable(shape=[784, 128], data=pd.gaussian_normal_randomizer()) + self.D_b0 = pd.Variable(np.zeros(128)) # variable also support initialization using a numpy data self.D_W1 = pd.Variable(shape=[784, 128], data=pd.gaussian_normal_randomizer()) self.D_b1 = pd.Variable(np.zeros(128)) # variable also support initialization using a numpy data self.D_W2 = pd.Varialble(np.random.rand(128, 1)) self.D_b2 = pd.Variable(np.zeros(128)) - self.theta_D = [D_W1, D_b1, D_W2, D_b2] + self.theta_D = [self.D_W0, self.D_b0, self.D_W1, self.D_b1, self.D_W2, self.D_b2] # define parameters of generators + self.G_W0 = pd.Variable(shape=[784, 128], data=pd.gaussian_normal_randomizer()) + self.G_b0 = pd.Variable(np.zeros(128)) # variable also support initialization using a numpy data self.G_W1 = pd.Variable(shape=[784, 128], data=pd.gaussian_normal_randomizer()) self.G_b1 = pd.Variable(np.zeros(128)) # variable also support initialization using a numpy data self.G_W2 = pd.Varialble(np.random.rand(128, 1)) self.G_b2 = pd.Variable(np.zeros(128)) - self.theta_G = [D_W1, D_b1, D_W2, D_b2] - - self.build_model() + self.theta_G = [self.G_W0, self.G_b0, self.G_W1, self.G_b1, self.G_W2, self.G_b2] +``` ### Class member function: Generator Net +```python def generator(self, z, y = None): # Generator Net @@ -52,8 +58,10 @@ def generator(self, z, y = None): G_h2 = pd.deconv(G_h1_relu, self.G_W2, self.G_b2)) G_im = pd.tanh(G_im) return G_im - +``` + ### Class member function: Discriminator Net +```python def discriminator(self, image): # Discriminator Net @@ -67,8 +75,10 @@ def discriminator(self, image): D_h2 = pd.fc(D_h1_relu, self.D_w2, self.D_b2) return D_h2 +``` ### Class member function: Build the model +```python def build_model(self): # input data @@ -97,8 +107,10 @@ def build_model(self): self.d_loss = self.d_loss_real + self.d_loss_fake self.g_loss = pd.reduce_mean(pd.cross_entropy(self.D_f, np.ones(self.batch_szie)) +``` # Main function for the demo: +```python if __name__ == "__main__": # dcgan @@ -109,7 +121,7 @@ if __name__ == "__main__": data_X, data_y = self.load_mnist() # Two subgraphs required!!! - d_optim = pd.train.Adam(lr = .001, beta= .1).minimize(self.d_loss) + d_optim = pd.train.Adam(lr = .001, beta= .1).minimize(self.d_loss, ) g_optim = pd.train.Adam(lr = .001, beta= .1).minimize(self.g_loss) # executor @@ -125,10 +137,11 @@ if __name__ == "__main__": batch_z = np.random.uniform(-1., 1., [batch_size, z_dim]) if batch_id % 2 == 0: - sess.run(d_optim, + sess.eval(d_optim, feed_dict = {dcgan.images: batch_im, dcgan.y: batch_label, dcgan.z: batch_z}) else: - sess.run(g_optim, + sess.eval(g_optim, feed_dict = {dcgan.z: batch_z}) +``` From 7e4648338adbaa6c36f43706771901fc9158447a Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Tue, 3 Oct 2017 16:13:35 -0700 Subject: [PATCH 183/342] Fix bug --- paddle/framework/op_registry.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/framework/op_registry.cc b/paddle/framework/op_registry.cc index d8851a8b42..b28eb64af5 100644 --- a/paddle/framework/op_registry.cc +++ b/paddle/framework/op_registry.cc @@ -57,7 +57,7 @@ std::unique_ptr OpRegistry::CreateGradOp(const OperatorBase& op) { return std::unique_ptr(BuildGradOp(&op)); } -static std::vector> OpRegistry::CreateGradOpDescs( +std::vector> OpRegistry::CreateGradOpDescs( const OpDescBind& op_desc) { auto& info = OpInfoMap::Instance().Get(op_desc.Type()); return info.grad_op_maker_(op_desc); From 703321e2be91d8e70e6578fb3c91f76607f2e587 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Tue, 3 Oct 2017 16:17:49 -0700 Subject: [PATCH 184/342] Fix CI --- paddle/framework/op_desc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/framework/op_desc.h b/paddle/framework/op_desc.h index 508bcaa67e..397393f796 100644 --- a/paddle/framework/op_desc.h +++ b/paddle/framework/op_desc.h @@ -76,7 +76,7 @@ class OpDescBind { std::vector ret_val; ret_val.reserve(map.size()); std::transform( - map.begin(), map.end(), ret_val.begin(), + map.begin(), map.end(), std::back_inserter(ret_val), [](const typename MapType::value_type &pair) { return pair.first; }); return ret_val; } From b0d2235834cd1a94c39c1e937f95c58bd7319abc Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Tue, 3 Oct 2017 16:24:24 -0700 Subject: [PATCH 185/342] Bug fix --- paddle/framework/backward.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index 2c13ddd8d0..89583ade95 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -147,7 +147,7 @@ static std::unique_ptr BackwardRecursive( for (size_t output_idx = 0; output_idx < dup_outputs.size() - 1; ++output_idx) { auto insert_add_x = dup_outputs[output_idx]; - auto insert_add_y = dup_outputs[output_idx]; + auto insert_add_y = dup_outputs[output_idx + 1]; auto insert_add_out = name + "@SHARED@" + std::to_string(output_idx); // first add op inserted if (output_idx == dup_outputs.size() - 2) { From f5e73f4c7e526e10ec8efe4afc4487b8f60e743d Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Tue, 3 Oct 2017 23:29:03 +0000 Subject: [PATCH 186/342] pass simple elementwise_add op --- paddle/framework/executor.cc | 36 ++++++++---------- paddle/framework/executor_test.cc | 63 +++++++++++++++++++++---------- 2 files changed, 58 insertions(+), 41 deletions(-) diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc index 94b9b3b350..da387b47ba 100644 --- a/paddle/framework/executor.cc +++ b/paddle/framework/executor.cc @@ -13,8 +13,10 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/framework/executor.h" +#include #include #include +#include "paddle/framework/lod_tensor.h" #include "paddle/framework/op_registry.h" #include "paddle/framework/scope.h" @@ -30,41 +32,33 @@ Executor::Executor(const std::vector& places) { void Executor::Run(const ProgramDesc& pdesc, Scope* scope, std::vector* outputs) { - // operators running // TODO(tonyyang-svail): // - only runs the first block // - only runs on the first device + // - test on gpu auto& block = pdesc.blocks(0); auto& device = devices_[0]; + // TODO(tonyyang-svail): + // - runs on a new local scope + // Scope& local_scope = scope->NewScope(); + for (auto& var : block.vars()) { scope->NewVar(var.name()); } - // std::vector ops; for (auto& op_desc : block.ops()) { - auto op = framework::OpRegistry::CreateOp(op_desc); - // op->InferShape(*scope); + auto op = paddle::framework::OpRegistry::CreateOp(op_desc); op->Run(*scope, *device->cpu_device_context); } - // TODO(tonyyang-svail): need to test gpu device - // device_->cpu_device_context->Wait(); - // #ifndef PADDLE_ONLY_CPU - // if (device_->cuda_device_context) { - // device_->cuda_device_context->Wait(); - // } - // #endif - - Scope& local_scope = scope->NewScope(); - local_scope.NewVar(); - for (auto device : devices_) { - device->cpu_device_context->Wait(); -#ifndef PADDLE_ONLY_CPU - if (device->cuda_device_context) { - device->cuda_device_context->Wait(); - } -#endif + // print tensor value + for (auto& var : block.vars()) { + std::cout << var.name() << std::endl; + auto v = scope->FindVar(var.name()); + const LoDTensor& t = v->Get(); + for (int i = 0; i < t.numel(); ++i) std::cout << t.data()[i] << " "; + std::cout << std::endl; } } diff --git a/paddle/framework/executor_test.cc b/paddle/framework/executor_test.cc index 11255af808..300de36b87 100644 --- a/paddle/framework/executor_test.cc +++ b/paddle/framework/executor_test.cc @@ -16,16 +16,49 @@ limitations under the License. */ #include "gtest/gtest.h" #include "paddle/framework/attribute.h" -#include #include "paddle/framework/grad_op_builder.h" #include "paddle/framework/op_registry.h" #include "paddle/framework/operator.h" +#include + USE_OP(elementwise_add); +USE_OP(gaussian_random); using namespace paddle::platform; using namespace paddle::framework; +typedef paddle::framework::BlockDesc proto_block; +typedef paddle::framework::OpDesc proto_op; + +using std::string; + +void add_gaussian_random_op(string var_name, proto_block* block) { + std::vector dim{2, 3}; + + // insert variable + auto a = block->add_vars(); + a->set_name(var_name); + auto a_lt = a->mutable_lod_tensor(); + a_lt->set_data_type(paddle::framework::DataType::FP32); + for (int i : dim) { + a_lt->add_dims(i); + } + + // insert operation + auto op = block->add_ops(); + op->set_type("gaussian_random"); + auto dims = op->add_attrs(); + dims->set_name("dims"); + dims->set_type(paddle::framework::AttrType::INTS); + for (int i : dim) { + dims->add_ints(i); + } + auto Out = op->add_outputs(); + Out->set_parameter("Out"); + Out->add_arguments(var_name); +} + TEST(Executor, Init) { ProgramDesc pdesc; @@ -33,35 +66,25 @@ TEST(Executor, Init) { root_block->set_idx(0); root_block->set_parent_idx(-1); - auto a = root_block->add_vars(); - a->set_name("a"); - auto a_lt = a->mutable_lod_tensor(); - a_lt->set_data_type(paddle::framework::DataType::FP32); - a_lt->add_dims(640); - a_lt->add_dims(640); - - auto b = root_block->add_vars(); - b->set_name("b"); - auto b_lt = b->mutable_lod_tensor(); - b_lt->set_data_type(paddle::framework::DataType::FP32); - b_lt->add_dims(640); - b_lt->add_dims(640); + add_gaussian_random_op("a", root_block); + add_gaussian_random_op("b", root_block); auto c = root_block->add_vars(); c->set_name("c"); auto c_lt = c->mutable_lod_tensor(); c_lt->set_data_type(paddle::framework::DataType::FP32); - c_lt->add_dims(640); - c_lt->add_dims(640); - auto op1 = root_block->add_ops(); - op1->set_type("elementwise_add"); - auto X = op1->add_inputs(); + auto op = root_block->add_ops(); + op->set_type("elementwise_add"); + auto X = op->add_inputs(); X->set_parameter("X"); X->add_arguments("a"); - auto Y = op1->add_inputs(); + auto Y = op->add_inputs(); Y->set_parameter("Y"); Y->add_arguments("b"); + auto Out = op->add_outputs(); + Out->set_parameter("Out"); + Out->add_arguments("c"); CPUPlace cpu_place1, cpu_place2; std::vector places; From f4491fa46d1583caa7f007a581995435a32f8dab Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Tue, 3 Oct 2017 16:34:21 -0700 Subject: [PATCH 187/342] Fix bug --- paddle/framework/backward.cc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index 89583ade95..c0188c0e55 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -158,9 +158,8 @@ static std::unique_ptr BackwardRecursive( } insert_position.push_back( {dup_op.back(), - OpRegistry::CreateOp( - "sum", {{"X", {insert_add_x}}, {"X", {insert_add_y}}}, - {{"Out", {insert_add_out}}}, {})}); + OpRegistry::CreateOp("sum", {{"X", {insert_add_x, insert_add_y}}}, + {{"Out", {insert_add_out}}}, {})}); } } @@ -200,7 +199,8 @@ static std::unique_ptr BackwardRecursive( // process recurrent gradient op as a special operator. if (forwardOp.Type() == "recurrent") { - // NOTE clean up cycle call somewhere (RNN's stepnet constains itself), or + // NOTE clean up cycle call somewhere (RNN's stepnet constains itself), + // or // this will result in infinite loop. const auto& rnnop = *static_cast(&forwardOp); From 2a7f59e73ef0350c92ec3174ed1aa97efe266f52 Mon Sep 17 00:00:00 2001 From: zchen0211 Date: Tue, 3 Oct 2017 16:41:35 -0700 Subject: [PATCH 188/342] more gan --- doc/design/dcgan.png | Bin 0 -> 57995 bytes doc/design/gan_api.md | 12 ++++++++---- 2 files changed, 8 insertions(+), 4 deletions(-) create mode 100644 doc/design/dcgan.png diff --git a/doc/design/dcgan.png b/doc/design/dcgan.png new file mode 100644 index 0000000000000000000000000000000000000000..15e8e290a111ff43900934341365cb4360d87d28 GIT binary patch literal 57995 zcmdR0^+QzO)3!lEkOpax?rx-eDe01qrMppLK{}*My1QFky1SN;?(Tjs`uYA5@BM`p z?%sROoH@_TJo6mCD}W@B5%3V6J$r^MB`K=(?Ac4^XV0D+z`p=~BW2f~2mE;HAT0G6 z9v&XHEdK}iC$6KIx}&mZ_2~2Fc8sV$ zB_j>K5i_sw^wy_dLW#evM6pU8>~_O(u{sR#>;JjiR1DR}c?FE(*)#QkDxJR{o_&>l z^Yq~h9C6I^rw`>QEVxfUpWXhy{l#A7`ADO=4$qs$daj0B2`_<7m$1bB;3QllmZyo$ zq$ag-ggs9Gp3W^DZgsc1Eh{T~I6iIh!7SJLd`ddY@O0C&KSG{m)?jbg2s2xd%<|pL z5U6>!kOvti#?IJd^mIcxFMff8cD94YVy9L2u+jb!&Ec8wFg#Ihh=uNm%iE`0cvYNN z%LaEj33(iJwh!8~=3LF%aUl9le=Ytsuu5}4<+Uj#GVX`F<=-|C7yYIblZh@e%BQbt zX3@e;H{W&^bMHDCPqbm~}X6&;D^g|?2o^0{uz>yot>T6 zd5qerbOCm;X&DEf)$t zxs(!3Y^9QX|5xURr$?1DIypHBXB>UYWvQAi9yKFJd#k0w&iS94kYR=7)Ye`cc3*o6 zM0A6%-8GR0nx1yyR|0A(sswg0*5K+WG8&GdaSFzXns}%gbRj*IY&0oFhJX0 zn=>)^-`?g;A?9)1rg3uSu6K?-Xjk80@w#zL1$A2ogu(q=-jZiRkpwF?-IYxu4{xJG zL9$)=KdMXy$IRIFRA(gr4QGT0l^ho%ofvpbwX+j2`T|16MIwy?{YwpU zPW~jtx(|Bw_gZi_uhXhlH>$t+Z&GMN(d{}-Aq25L#Trg!HKDnN(MoI>d6S(i|J=~5 z1l|{ZdrBl<0bjT7=78prT&|lu&w|Q&H`w^UIR`JldOwxLsYk)v?EQDeQ!+ES`_+og z=vkRb{sxDiPEcSyO^MP(qZ{-W77`OM8vvDNeNRRWLm_2ACL4^s=AYl0u;x|iv-t9ynx^?d(X zk7LD}4j4<=1Ly@AH4Vi_xncBybit*xI-08cBa>v|U|*n#8cbk@3K;(9Gu7-|;epPFA3syQD`}}q zp%aP*mLT-AetJdd)k+BCOz9Jrnjs=NbtbEpsg7C`h6}^q7wI|~4Ns-szSE)3cXT{O zwUhFXl+wH1eAwr`By9AonrL4T8OTvDnLGHbs%o;i!F_$jal*$H&Rg#>p`xnI>(xh3 z{56vso+O1h#nV$dI9T48FgQG%LaLHl+8S>J^b{i4@C+NA?O{RB{T8pc?&Ty-t%=ws z?>zPp4*`p3p4bPAcuHQbWL0naI2 z2tepuJ_jEz74mhGMh&L1c9%l#ME-73yuoaN^2J|!%q5m|BP`u+#78VX9>Iuwj+oQ#_J65hA=;cuM(;$9$o*8nU$oVu z3z3szJqbA79Ii`$&GK*4hKYX{e>^Q3{T`)#vKXeaHSKW<5mQXFM-t%*U&V-*7{zZ@ z_>^H0i$`L_G6hEV*CsJ3J8=G%o&U$*S~&6U=!PA+JFYV~7u;~y8G;S#wXgi^uwTWY zwgTya3qDAB4et=BzZsfXP2LqKf3$nq7~mGoNF1;u15{DG$+wAsZf=yiDP0RT~=fwTjQi{L^J~gfs*;U=G zaWTM^>(0$BVluc6#ss1ZUEDwPGQ$e3xPkdJ>!NcR9Jn01^B!5j*`=`HO-@?86z(w? zo66sPjS)Y|0>h5Z7H&FUlga^rX9`?X>xc35ylI20K9wT%P-7e@lcmmwC%^s>T39)| zt8;B*I!)1+w9>{aIi=}nP+%OtUuVgiam&IUG;G6#e2?puYqX9;vYF_aliXZH%olnu%}xXug0M%H0OVi=$viL zf?L^HdG8^|V|CdM#p_^!7NqiK_wbmUnPn)<%(Jt-`j-lf5CTzUlrw7On1!>njTb#8 zliSYx-r{{)RMhD2{@#DF>=SRms;KCaQE>m@eEq?JtZZ;DZ_c^$@>=vq8BP1skAI=> z(`jLTez`iGrP_1CciqQ99Y(?3{n~9)Kb!c`W3snF=Jz}B7VW;T--sYuNh173C^KDF z)1l&m0{te0BT24si8Hy(SHDHTY9FeB;C6$Em&9v2loI#-A4q@QFs{*@Wq8i_JRE(w z<? z+?!;^(F$twH?8HbHU9?IPqZfNg{qHtWi#R5TD_n1>m`(5mXZ386-}(N=j-vS!xlV%L7PbcA2IfwRQw~Qvt*u<*_qu zA%||3%4-nvvu8gW?i-GM9_!B5hEF-W>rbcd4_kchcZ;mrhD*z+GX3-f%6GgFNc>TI z(aHQLxIM(>uGjCmaj6Mn+XnAFw+l<&KjJcy&(JTGG9xJ~E014!6_n|<4KWQgIBZ^d zdXvd_t{(r8HYSsQK^T#pKXS0ZNYpzpUtfCrCqw%i>cS#A>;6P30g#=fvYDgB!{}i8 z?MW5So{a>rr_9+kEH>yJnjwf_I}a0GjP}LeUaB$TJ}wp-+&E0V}G|j+$a}@-d%RY4=+%~U3dH3 zZFe)1KJTA|)n#UqMoFG^eXwCW;lqK9vZY2wj$B1OG`SqMI`3;H4;x*4&EEP`x=OvlSnqzh#Cde^smXYA9nIKI`4)DQ6A_)ZyfG zne0*5Zz++l`E0~Dp2ADThIu-EI*rRY`|>KnbSUwCc=}^^n>Up*G*}}-SzUc=brovG z_Abyc#5hbawyDWOQSm)YFyX>Cb33whp@CF!qss{ya;Ka+u9q(~Q@P$hY2h@k`~E3C zs{vTgaad8@P7y}!IFr@X+g7+i~YUPS1gF6jsi|QkUGrl&wp#Po?yqBL#SP`o7AeD6RW3g@S zcd>RurTM>vz^s$*W=BsjvAJo&YRGq+Us&4?z?vWFF{yecS{WE*W_Tj-B*h|Ueo(!Z;u@CbHC8vfH=9t$K;qbDz{v7eZ{t#&te19P0ZA0RuyW!eq)=bEMBWw5rRkoayN*Q>Lfz0@5%O`c5~hlhuIdjrmxm+#?U+1c4y zs*N5*Vqnf?VGm;CYqm|F;Z#YW(WXv1swwTB%*Tl6q%K#Ceg$_{w=d8a}n=sz+ zmX+Z&odc}qOr_5QC)ucxlsz+snR~F?=KGj5-~D1mP_TC>}34aJ{Y#Ic`%y) z8SO>#UDt=OL+UE6i=;6H!xnw_mqdS!bRh*Ez(BTUisgzu2oj5aosT z_T=K^mLiK9JnhT~*Liik+Av&mjrLUZeXZS|s{!NU;sT4)D20#Di|l&*s}NJA^P*CT z7RWoLsQt6X%i10ap9RG@mI`8a6Cbf8tU5^)Z;Y74(>m9k+03k<#56@%XmLj;a5VUA1Tro+FW)f3JzN?7f;!H zzl#61)6Q17>Tn4M_b^oci$S#og@wWUXmc-+pyc7Lk z)Ak=gsES_b)TYyVKKCjPrkvs*+)&ZdYBC$moSU1&YGsg$1{___>s$p8wm2_{nG#NxI zX?xz!Fu-N-vN8|vEwtE?7Sn5CVUgy+{Z+Mn6iP;wv76|1D{Z~_r6~>u)Yc$bj-vqF{cteVu~0rm1=zaZmm|k(sgck*EgLm+*LkhEuAd<@{tvuZv7E@ul^Ot zN%2`(h8|jz;{bI6awfG<4Nt38uk8*85>g3Dirt1gpzizx)+l=Ul)WTP_cONbZJJ{H zwQCIkjt&!1#%)~CoH{>iUAU(xj~Af>>wF0n1WFo4h`u9F>ZFBnQ)HYt z>Z|yEt5akXz*XU$hUZ#X8Q2$kVS)ShsEz3TLwgG`6_&I06^8m0Cg8u^%#$HB%fASf z6ozlqhU8{7nFZq=pY-eM>h2TAxqVr1PnoGGIYqorcaMg6NC8)0sUSRnWNnhVr^{CR$1kFy7d{zDzO zcaC7OOUg=&7j8P>9d~*Z*uk*Uny4C=C_t|$JXB7`RTgYV>=OVAS5vwoi4pPu)Qi? zR*}y9w>>O~{^Iic#VEzwIL4B)QiGYfLW5;#GZ& zK)P^jxE<&Nd+4U0obEcHsKmvamfUqu^cteXT4)qEtNgN?&olJ5ahy*J?R46wj&loi zLfT$%xtMnYF3agLv}_Q_?jCMmuc5iHK4@Rl8Xy5ExG{SPa^=(Cl^8oC0-*ngFr-nN zUP-bZ!|lU2eH`JnBl$ejySuc&v&CwmR=%}Ol@DF#Z%L{9njA7wRNbrQ1`O6qUPD%O z^$G3`RxV8F%mpiupiE^W;RNpIsZC#mtgRVf>ukJ8P$LkiV*4JYL`&JOC=$TXDl3(6 zjBp@(qpgmcr38Z2R}Z8bnwW-*JeMw)kcdN0%ip5v_z(#N4G_jmv7X<;VlT1IiQ4Qs3>{)G|X!&UHN*y56rFzR}9= z{`(>2q?<3jLKAe)0Uj{j&0|bjX87thFTV?-1Dlo@a~%-iVU3h*`gY39Nzp0SH#qGr zTwYBr^`D;3macR7W*bB+)1l3#a67Q!A;Z?c-adu>d9t_idQB9DD?fF;pMT2|ykZfH zYuoQRmq1>qOPEOZ@p?QjtMzGFxzyM3I<9RU6B@7VWJ%9YxbJ8Eyl+B50T??&|GAC_ zoh4_*K>Gq&w#P6!Z(Z#AW`$0_)hns4+-%%{mDh;ag_9P*+%EsVvYqK8_og!eFBtYR zr~;2&yklLU`8ZZsiVh?Ao@LN$)_W%ie?Uu~iOy7NbP55wN zb@d+><`n51&W`p*a$p5IynTBn!F~X!H2_nOo0y&kwsJl4U;lB~PUM6{Bs3cIN09}Z zK3tB|@!6Y2JnQ<&-18*rI@iXCGqd2mK)H? zloo5Xz;YQ`4)CcHae}4E_4Ms_^R#ROWm*wWiyD)dAT3|^^Qu(C!#Qq!RtD)KH4O*5 zT>mn|U{8Kg5%{1#(W#Kthj6aO^iXd)SoYHDzVGuQU)y5@`dscJ%9Ifkj#DF~P1aek z>Uo#Xvu6jtm?8)bIp~Nt>9p(>41ZjS(rH{97Y7b#+vzXwJuc8qvPfUv8MgU&*sHa1 z3drOlr26{GTPOLP3>Cq(ak>acV<(^2fp8$VVq#>F-hb7WQVu!{v*s0kcZ&#=sbfH^ z{wQo(@AAarHXT1F9$Q#;YH^`Liu$5bg6{0>`f|U#r6Xs%I<7; z`vOC)x+~S+9mSL~p7U6wbJI3=M4Yk51@6slTy>yksnMK2a31uqGyep)k z89tcBO^q(KbpX%xvtw>O^#CsPxeka3R`-Z(jZVAfPFQOD%}H>=JzS7b_GUdc_PFb; z7b;^0gOXxn>gs&7J*O6RE?C#nv$qp|RP*?1R@fV#IvTLtx>yFudju%!xgPvBt^@h$ zc@*#lpbAgzN^lGbD%@}18aBxDbdIwo%hnj@Ojm+%op>^|y=gZKd(BotIy|FhX1lvt z^P$^1u)^RFB8N9tR#Mi^7lXWQ(li#yrHzzdoq;%4j))U}+ugx}#M9_zI4$G{lLF&H z^ab>ngY-=X6URZC{VB~Y%t*#S!WExXdR83*(h7d!04kji)hq1Rw-mEPNfl$#bmM(m zw}OpV1tV0zyG4B#-|O0vxf!Ts^`cIp!!kFI4i?o~=*3H2>Lol*UfY8oj2k$Sc~XB* zhB3y~!v${b?AU|S5rV&?(_-L2sQFt^e^=JpeD~1*Ip4XSk=M7#e7>n+%I_76BMh(q z@Ny}EDcfU}*7Gl&>Pn+pq*9`)RU*faip};_zurf-I41ouO6~(}IMN3R>Io%#*?5`6 z?_$ksPOt3l2ImaoYA{T`r=ye^b+nXjL?DYVQy-mRWv1uJip{vkE?FoCAGXQ|zPdZP ze?(B(39D)?LBYFo*EBK&4KjsJ8VJ=sH)^vfEGj+=3Evm66#b-48J)|F6vM;K>*}W0 z%VVwuj|yeV$;*>Bo9Qh{2KPx5-RO@`PTrD(ZcMHV597*itgrj95RKrU0ET+ov2n6U z4O~-RV0`-9ZeEaQYIaG`Vf}-7W_zwuTF5DDK@Qgx?4+tOLD|T~~-fPiSZGqVXG7Ojb@J#J(QysiDd83++>SHC`Lb~rP_q>9a$EW zKd>Ecr;=k9=I7_`W>_RphRf5u-r1y4b2?ygQbu^_;-K^79Wn-agQUhS%92LcD>|xW=atfQ~@9KY4fT> zO>cLmXqfdi3#^>=ndRYF%=!#t`nA@5RDoYermY2x|X84?Iu(lf{b%NM$5?6 zE9%)`nm$UFIFIf&NzWFJHP`z~L_N%jcbc*6k3DPKPkC|3=L$h>x*+1{ zSQ_P^?M|ujIsLZEUEjm(^yiNPV`t0Z`_S#HTK2rcRLU?pcOx*1nFE2{zymV~r-4p(J*2-w$ehkYT(7Chr?r*VA;M`GU)}o%*YdNKH)*Tf0>~ z(o2ds>PEbi3CVA@zN7s(u@ualtb)1D-YEvSYy8(wzM1|AaB`{AsA!}=B&`5u=#`JP zHnyNyP)r>POCbyA14X!0Fp{Gfyg5xssQ>5R;EW5vXa5MPG-y1!Z{GWD4)MC&S|-rP zFz520KAvtc!s!t*_t?AXEtQ6F5YA!m3Y&Uft;9F&1R_hZq`{(iydHjA!Ib<`tthDn z=2*OclUt3fsgVGrm!jh6+4i_xZXUsGA89msAQEn+J`_>|p03pCk6Gq=IA}Vo{RZO> zUu}J_2%q7#PZxI$ks$Ur8BXSOTM?C)mj_~=GYfWn!r8-Zyy+P(x8{K*QECDPW^%N1 z)qhp?*%9?DH>%@ zW(w_#?>`u$wD}%P)jfUbDT7cNeedFq@sr=fM+DG$<=6%`mL> zG9i&kvGDV^s5nv*aM}JDNMyAljv?`9!oPeQAqu~*6&J;2V#)j67zgrXBS>Q1m-dcn zUGQl#qdpD=P)&jMU2OLY(?lewv~_?Al*Iq3oh~&v*HZM_AW^rBk&&@(-6o`S1s4lz zlrxdI*EHBbyk*gn?WEJ7s@JGSJ=K%%W4N?$CSKYrC{wFwFlyt6ekLSkcUR&!##Xod zwM)MaY`j--8aQ{?FfrRd9Q9VuT!o9uAcOs7-w^&_I@)J=RdbxDcR*M`*H4>Ip2-7e zh(pTdJ5isne=D!SErM;i)VYkvgaoC7@E~t&5sSi;6zQL)*jP8byCbf!xMOsmS`?>M z{QP;MK$()E?yXvdweG9qav`FN?(Xi*2*HOhO~Dj?=>Eg|Q3Rnbcetw(|qx~7Ym_^3y>7S9hayJ6$K=FL^vrV3I%DnJgwLn{x%pVoP zOOwN1S4f9Lf{O57xF=JK_5QS~Qnx5ZnAzW&P*3pk(>V$vg)QS1?wx{i)@ArRf@OatXH`TaR&o5kku=#(72vazkmOZ zj*iY&+Gb5{H{e)mv~K_mpqqu+yABlaET-5=-OvNq0WdQXdf!@*(5yzDui>g?#Xv9g zC5fZSX86{GBevk9cuUJB&`{^Yg}jGnj)M`XhRzzbZ^Obqn#gsgFfQr1MI|tA=6#a* zma%hlxkQh5wee`sLXOQ?u4cEJF=?SC)33ZMJnpDmCN?C1Pw7dTg(`fjYIH`xo@=hv zUo3u}xW8L#HLqAOrbTBW7FeHQ(~S9k%RlnG>o_Yo$d$TGKPzCphw;$Jz?`UM{BZHp zhb~XEAs>_-k%z;U<%l*cLv|g(O5@49P_0;=R=>v(o0cWshp02X# zJZ9w4&!mB7$eaQiO>@bdkmxEntf>@i{hjH;oUqa6^2fTHYr#^=_%6 zLiai_PR{F8XNgR!`6>cm+^eZ3R3E5|Pftv6HYSGlQ^#(&=j7>R4qfcKYjb2+yTU%) z7FWm?O>S&#r180p8L@=nFbk8TeWavZ<$Cxb6s(V9m0Gx?s(*aGE1o(?e1_uUK$hSsJuH)kl1p z4i~&>T^QE%0H3#J`RS$xk&IU|F7jC%R2xj09*n`rkc4lW8;!w9RwVJe5*vD>TawP`d>zg?j@xXlKXM?Qu5R5s}XwK&)6dQfoch9nY@i$rjx6dmNjIJTT!ewZz<6qpAO zzn1AKb_4DE5Ua{K+%0DXwVUT8Mbn@0?F^v=C5#Z781T5=G9fwGPlwn4VB?)F1i%ew zD}xSZ;sCNVVKghCc9}N9xI!HP5pg(`*JXc>oiGBMfPer4!>Hduepc)Yb8<-8CdQ^V z`T4iRemn7{=Q3Y;$67ciQs zDxEoFsyB^qJ==MdyNw`bE{QJ+EQ%>|P^bJToPT?yFB7k`_c#St@x0RNnj0=0kHYK9 zL{j1ral8!ki(;r$jq#4pFfLE){jq}KcBcl>AQ@T{snO!aD(`K-@S^J>x~5w6@npRF zoc>)T$b~h2B5(W)5s)L|fO{JZor1T%bW<*+S*0exGoND7c^a=i=p!SyRq= zS2S5Tov(t4X#fWYshCnzb%#KDQ?$8W2pQHomzUs!S@7sgScRg+Q%iH}gM>mmEjPY* zkirFOP22PN`d*PT|BmcvSKL{#cfK6j`oZQV^Q7zg^W4?3Pr*FpUBdr zKK9t!x>eXu8M9Q}RKK|!@i^=kvUuvKw;GdTD&h_snM6Yl zZ~Y?`wHS{sI1+Fsh+{McZxDIohLRiTx9`ngDwC$Qg7CZVNlD)evziRvbZgl()Yn&E z^D|qK4FXBXC^h@S-&LSwC|T-Fy);M!<m<8?Pqa{)c#IGPr( zBAH-Z$TGq!coO_>yPMW>svo`Yg@F}V2MxiKAa$pE;jIUw6qOe+toEwJT;Cu z==ECfHU^WmGs*{-)HKr~yFc)ng!+lZAl13UcNt+^Iq7kn_@U@-IbQxOIGJFib@1k| zCTITiExNI*Md1&{=GJZ!I7R`f`{fD`nLpl#{(;7jZWiXv@{vz!(!|H_A{#3_hvgqRFsre#;=5WW=J}S?f<@pJeQHNbVO47vGvU0cV)i>vZ> zNG4);xJgzAwyn^YbO8`Ab_ueZ4LtTZf%-`iv+LE!w+ffQ&`E7U9-Fzjv*FEGjrN6o z0AJ?${bl?qQb>*=&Ib1TPW43GDw7s|W9g)!Hz$XGGN(ipD7ZNA z*l1#Pv+PiIelf`o6`;SV(cD;BnPk?cia8cD@B?`mE#Ud1KJ0}czcscG>D5-z)C7F~ z!^z>+fqsqp!=06KypRZapUK`cpi`5!vHd(O+||ZMVF-f|+nwrSbRU1x zqJ!19-bU&VSABXzvUk~zvKFl&^T-a$>BfcF9iZ~Md5c%WVQaj+q41!0_Yhw1X5|uo z`#2PggP}+Fi;HVme+!Q2@n*g55JhiuivTx${*W_*ABFi}6?VVdD^BLe^p%VC^CUJXQXxX&R5*pwNDKdN$ z5(?A3Uv6MVGGs}lp`~@Ox0j|VBH(NBTUoU^SY(LrPdC{Ihjd0KCc15p<(!<@`}jN( z^1EN%-U_ZOuoiVHDQhaZ?W%e?<>cmenK8jl?~K zW({y5@5=PX9cJL8?h@l;)+M#!nr{ccx;$Py8}f0wm{PN9BPw4XQp~)}lkosbN_&L; z)4@gomvjL(_%_yGk8Bx_+>ENqN!2YBkmE~*{;cu%bny}A1?MY!IS8uN^9xajO`A* z@tZXxhaa=3362HvE$>Sqg$uMcYz}esIuX5Q!8OCE(Of8o=EL4b3BONG@^ zKJtCmhzaQw0a*lI?tVQU&@`43@WG==)P|sX<^)kqRF9OF;e^9=Id`Ete_3aF+)8jh zr)GIS0gwfP4_B)uJ9*Z1I1pwWBzO{rcrX(VysxCZJob0dh(||efJhA0EU86?l@%Au zfH8YLR|Z7X?W)t-;u>>Ha(fbrln>&zajQu%rfwgl#`wZ_#U$K!eo=ojPPIz2nid ztI<%fbt-S?cj;2;DpA*UojTWVTR!4aWdJ!lU%&P=Z<;|J7vI@d@ji72baFU1i9+wwqXFqz4Dk>`EJu`ZZc)ahfDh#^|i;4^l4FTqDWqtj81j&>oF*qay z-~$rV)0c|W7;S89vPRVbViC|CVPVzY8mYRDpG#`eW2F!eP$+vD=j7Hxh^dqMb6ojK z6+}<3-JSS_C5_;S2!RZf+q_0IOdO!ERsa#7Grg6+0!4_N&+=KgRgk`A3GdGW*8m7J zyj(i->OQq{(oU_8H~nenn*Q|U#+>xzVm!2MBZ_E1;Nn4MkyVvGjq92|qHM=j>0$?N z2|4U^bmFtXu_fl~Ys`Sxa!Lu}IV-(u!_3u*Iv9bgA7I-JFcJn9pY(6P0DY4>FoNyv z@A~SBii$cq3oqquU0i^r7tcBdicJ(gaxw zM#awV_$A{HN%GRtQuvHyP9PA}9gs?m!C=Xm`8zv1m6erny7>xJKcsVJErEMXa40A# zDLFZ(M@N~dsHmu^w-2&AyYqp+6c!@D!+(N*|BBi3$`%DBBGCF(T1@}FtrrOsVszh|Ax zB>oLzxew0eb)%N;w!YA1RIc1>7#Oz;h&no2s;XL=N{R}ou)_c?wK8p9r{A>X6Sb?l zhJWBS&fH@fW3AeC-A={Dbgk!QL<$0tZfYOcAN>wZ_g7D?P?l45?}xL$-)lB@m?~;& zx}R++24~vh0}`ULf&#ZcZ*d?}IZ9chuBRJ=aD>s)Ij@9fSTwxuTiPTJ0IcUck^O^R?EyZ0Hh_agviOsU%h&zfH9dPgAL0P&#EYYx|ldQ>lz!~S316v zMg?W&2L%N^fB7azl+>3P&)V(Uli&aeYWBJ2Tt$Ti8=FB-K}FlqdYGZ~lNmGaYx{-} zpFa!xy>=aba7Z}6oJlU*nPkahByzIT7OD`1=)g*~AX!MokXF6zivQcNLx*lV-^U}lOQv9G z4k2@5@mkmUyo2cu*uoW%@|H$6)nAE}lj~wf+D{Y)(%WOc|d-mIm7To;ie)aScGWQ^BtJ%(^>Pvj)&JUcuNJ~Rm z_&ILL0njlcT3@ZK=wbC&P1eistG3j^!1+FdimMDpw&X$hj94@%`RX2VNO zNda`(qN4tJd3jdJGDJk_U6-Z#U<(=>%n22h$sZWI>=tPX9oRA*2yh(|ET-{kWuY89 zP7<8OG2Gna^QuRR4Mpt6EPm|@8X5)T+Y=TwMMXtQ1#F435)p(Y`T4E`MU76o8!IQ4 z%Eel;fE3Kt)fLE%XBpP6KYsjJq4V3JZ*Xv-*^L#Z!uJZIPuf6A!+k!Vwg7LS5d$vT!R#H;EC0cb1dac^JO2s8bt0A2)3fRh(n-xllse^y! zLzbr3ORdFI55_98GT!POfX*!hZGE>QN6vSSvrKC(!w()wy_VKUsu~xG9hKob-GS)G+8B^O9GW>M@I_32Ntxvr>6(bKtoI{K)1}r!2xQgO^gckR05&P z+Vjn=KY#vN=?nq3gSEA_VRv|^-Td${BOjkOIhtLAwpQ|zLp;FjS3A)-$%QrMYL+R& zbvSPvT^9zQH1UHE<0QykUGBOR?4Cz3S z?CkD_@w7d1IZD3Ag}QZ2_w7=KH#9WNT2T}g7W%fUTLPkDWZu_%!Sr!r6nQCt7n2O| zVnFUVagn;cc*Th!8qrbA)&mK7+O~HZ`%7()^9>H}ExL4AP4)G!-n_w5Q-Rewyr;z< zA?vEr$H}#IkU(;{zDg%aMR$!@SZI(Vm1Lo6-JM+R91m84xp2k^hwtWA zVwF7W)*@Cvj(?>*;pHBx?C6=pXJeMF$;ru{p{`~@ep8~Q52m2>#Y*mM`IQAi_3vus zB5y~xa}VT0E1^-C(WhxiDb%X$nK$%gtsh^{zD%r5j5gry? z;h(wf=l^K|9C8PhBj+^ITdqerGuc@e7kCfH ziM*8`ukMjt!Y#V;4%=?C40a#WD3AD0_Jcb42+Qj0xd5YgubS7)0p%(nZ3m9S&VL^Q zYJ|zjSn}TDV!Eu+{OR4pF7~9{mX;Q7e2|W}wYPU#MQ!Qwl~;}uASR{`=CNDx-%qKn zFoP#~7r45+D-&%5V6BQu?5w4ALL-n_i;_moS_*jI)dQ!#kx%RRJJ#)AzIXvN&KY$Y zMWeF=0|P}#?QCroXkz&Yhn>l~?6hfO=e$qP1Q7sgRJ90fVUev=AW90T&oyf-Vtb8N zPOqhN0B^|#MA&HGv-9)eN!BJFZlW~hD8MlgWhYHIQE@td2Q*jQL|)*osY6M;>dC`*BX zkiK1e_UHP%u~4h-Gv;~{4<9zNCa(@7zRCkYjTC#%@JiZWDJILwfNIB(3Tax&VI0(q#fVc@*8OQ>nB-d6~$MfWgOifMwQRT+<^@lm`eJeD>-uNjNO@i5- zVljbS%bU4^G*Mei&1H87v#OWe#SWd9YebttY8>d*7G_Hl$E|H@3&Hb)>A)}HgHPV= zCU_isPHiAF-{0IN9-n?R?8*NwV&-r@G`s8jre)=54+;!4;(+}X$!S=KX>jm~swH=< zz>%OmLn@a+<^Ed}o%1L|P9A3h*wvxMi*LW$+m{bUsDKWxzDbov)+o@Q$3P%DM@jT{ z3}lDslS>I$jC2~lF^nWP@3-;^T)i^Xr9%7e>E#7GXV9rf8EUNK1)9*tfZU$oZ;GhiMO^K=J=X5`W^#IZsopLN_@56xe(bFMsApu9 zEu3L%YwJ2S{1(c@lR9L?GCM~(F*(`ovt-GfT#SV%<&X^;dPhizPe?>UuENuKzU$+B zXdMlG&xV5+qMz`I0{WKp(Sk!*a{J)8I5nY!KuQr1^SNO&rYyF$!H%Y{ZZAt7E(BFY zZ=F7{+s_|~pV9AN`CLCt$%y%b&-~Le)>4TI6_p!IZhUHE>j$qla=Ki!lVmAcbeC+8 z=|A|j+wM-$qoAP7rVf_urzlWW)Bhh$UmZ{9`~QC!W_HBnnPz8dOb)};`vYnP>Bed9_Q|42BULi$1&cE9f&P-sJ}%WDB0bvX}Z0Q zEpIX!3cRzi&PZmr*fMk76UMB6j8Rjk7Ds2MO^VfXbbNeFWIUgCE!|;sfmnazq!^ zxKFSSHq4L0!F+esZOWYj<)c{mLv~UFBhhbd0#1L5ce9^PwOM`sjT06U`c&OV4{b9| z+}*a#9{5&R&0FI#mKOiXxbM303QMRW{uq639>D$$wDC!-`uAR+kiyjV4i1dW%v_|A zwRG&M-_TM0Z32Hk4!->aqazM?Am<^CB#rFO9;XfOax_bE0RA~LpB$y)eH`>E8qD^I zg0jgSDh&Z*n$FcD>qT};D=Vr^`j?pPwHXC!B{d)YnHd>HFabKhyV#mVjR|x`fo<7p zB|yal{3GV6|FoDfX==3g*46-e)CE^XV!(N!kIfd=8 zOQH<_NoJs={qO1KM)fkCw~lL4H=!nAm!-({BKdDe`bvcJOqvmBn>aA*FXwOCE&GY1 zJ#+TbuIJ0Eu%{=mJ^v7mWF)|;u0>16uYYAM9d%I$SyK?>viu8WHHLLt)(9>({&;81 zMe4$*3!aFkW=fmE50^a96J}>qdw6?`??)boZ*;V`XPTb8)PD6!<73y zAgpA{RkyC`#`fT>Zg1XYzGY{>VW5Hd$Dy){O4w)G<}bqdqh54lH*@#*4hGOT(WiL! zEq;ejoc_Ia+W#}hIpmRfZ~~lOVJ0?L8@uw^zP+S@BKSdx4O(G_Tzoj`j0-WKwH#q^ z(0h}%N`ORuGpcAVFH3W|(M@bJ>{K7Z4?}2O6zXdw`kFX=5znieuQp!3$nt3?_`sLF zk)(fftN8D_@ZoF#wSP(lL8Ixdsv6QAa_Z^Bl(4&Jo7h$FhM8Mv@?6qKn#i)l|-UMPlbBm<@s=>@%tr{_HE#a0sxij!Hhat=ehSBlE zoQpJ63{B+l=58x#w%UR~2}3;oHa(rZWPE013Pu@rRP@VqXS#-{rU}{^b&L=?<8{1j25H4xi}b_GK`e?5AfZv@jUKR62PaU{-ON`0x?sR<{-W6{}kGoIk>VTaiXi;}9UqA}EnVXBuN zKa5!>fI44EDJ%*843fQl*csYRc`u6u7=Po6+|>4W_>+ZuLt| z$b0li5n32w`oh7mm^*LetF2jJ8-#`n1{D}86ygn_hGM)=)rlt0i_Pa!Rb2-e3_s23 zgSbk`RQNtaPmY8A-m9nr8VO+BqK&`p&5K1m|5|gW$VqS09Ax zcUB28lo1Q9cZZKZ^Ll{V#&1ZyTZxWkCr;NC5B~Nl7+tc?Zod9LVMl#lB>I5kR|zh! zh-|CF)qUNf_jQUHrT3D5toe^545{O^4G#e5KdXBojUP(%JW}GLY64!Q^WL<0x}|Sr zW4%NE+g0G>Y!rl9oOX1tzQE2fw-uc+;|jT07~ zpZ4uRPXvZSL~{V32Qq5-GUWEQHW+{vQd)HRJ>iNdO^7`&`4fyZ++6~XO!-a|MSP{+ zcB^j<|EKV5B#eWs&sCH9)BS0$0e;^eGjDvWy$=K0q!E>O|5tLh4nMPX3QJO^v_%-iHM1D?iEGI>T($ZsDCieI^9mi+NAOG^Miw^*O-Zc0o=)i zgoL+m--hMFpur}6!bP>ba$!p}M#=15u&(W|D@ zrQnw;ABe>frqvPz1C_ke+br}q*{r^YV-QH^(Z(P8v2VQ^gb-l`xLgc)$r4Z@c7@?|tV*mYZxs`y?h7fFQOgseJ&rg`1nT7qx;9i(jnv`CBuYG}C#ggC-Xm94B{nvOKqoAHTN3C#X5F zv9Xy4g5!MYxF$W;?Rbt$^$8=cLxIJyvAv_mqZS-oVq-vm(1JwX-Q9(Xp+eY_gR`=- z^1J| z?PVv8{Hmv>qjUTFMRdezaY8~QcBz2GWrqZhdthCHEwO59difU zrrsc%SV0ElGx!h@xhR;1K;0pN_up5+&2nwjO`jL^Yij*lq+22l`9;J z98bC(6}GDx&~)i3J1d7|XG(q<_z@$<=_#0{zv=a_Twn;dSmO|QSzUN7_bhW z*#{J*`n{iz+S7ix`P1%oFw|g@L|J85?=qA|mg(^?#p|Hijc~m?92?pJSg(#{dw^y` zATB47T}Ble(04R2Ok}7SvRvMbYs;BWGcmS|5h6_3H8G*a^+btB92NWbkH$u_D<0zT zFS!hggk$2Wy~g;@@zMNKUp@{L$=rBMh6;(H89LcA^#=dloxu<0@Td_`KKy1;CM>9` zA`r!-sn#lmOM}XxnvM!f8Q>RVTf?fCcfQ69{f=H<{2bM@d%RISt4Bab8{Nan#+Hpl z>XlELb0Og9s3^t^(xXcy!qVNRLrQjbtU)m*)3vqW(Ee$^EGjlH!($VOH)CDf6c%=K z(CR0PGl77M(>`lm%!=p{*t1CoVRK|+nL-tAZcM~+Vl+h2G6F;Nafn|eAJcFkn>K>6 zVO@mcYGR6O9$|*mA_ly4{hy6AC8eK8D*+>mSIKln$Bzzpd80g6@(C8#wPEtS`fG4P z?TU9z1jL=XiYMaQYWR61IiG*V15|QdUESoeeYTcN>mpTnS6EmWpb|RR!g8T5&?Hy(G&<2PW>_{|?Ca^c{PoGuW^R;=WpsFph{6*fJA@jPk zR_J*Id2rhr@=9O(WT4w$tZm8zshMT%jY8ksd-88`Z>sjJ-i)}c9+x7j>ZkE%7`M|E z%MEwS)tuANqSMg)r8i_iHgH641*rYgIF>J~7O~^HC>TX&cBL@v(cFD%5Jk z0ih;}ezQ0Gskc{Txy%?&`xG^J^>1S(ZZvt)tH|Uowm%>&!fk)f zqHfmZKkL1C?yWfxfG9S26_k7)|BO3&x@5)yk~p z?Usj(x=g?2FJAkUkLqNfiTd5_2M$dl>dJp|doD%3jJ{p(Dor!}IrxDv6W$Bp-uS<~ zg@V(X5jdF3K~D$(=3Z+%-vHhm2xR-3In2nf#5E?PvUVwCh)74m$ z@@14u0c4&K5XGid1^k#~&|o{C9qmxD+}74su!)tGy(YvWA|jgiJ{jPR@&i>ETw1FX zn27=!2z7SFMKQ6}vU#%e%xf7atKu(X7HBw->{Z9-kk9EH;tX9_cdbyqSE zomeDJEuj~hbaEDTF2y6U@5#V-neA{*6KBmRNX3wLKFVGXIN6GieQBP7Ba|ij`fJs< zq5Jcr&pZr_OnIEHo%&B|Hwis(3Xw9j1B?s}iC z@2D+gCd4&&QcF3`tQ3TBw>G-YH;lV2cq}NoP3T=WANcH_Eb29%)H99@e!6|5e>qpV z61#5{*ZUW!N@VFC@BDs`DUA@DW+$`~p-@jpN58Z$9(T@IQiy?v_Th(o=`wmAS|vVq zDuq~tKJeCggd2nI32pEg z5XgoKflGg&ivzVbMXo~iE}$~O-rMB>n06`Zzl#%n+s;~fp|EheSHD@(nvc(Qcl6KQ zTs59zvE9z;Qe#T19q^vu$fzcy7+bKPlvH7e98{d4N1O5B+nNR2y%(}=%NeH7U*Bs6DKBts=A-|I@+nrH$^Sb0cvg^v<;{e)3H#f5%@(KaC zqF-mM&0q~Wufb**W(2sgZorLwO?U&K>LFeDp8oG!3p*bhOii^{FNgWf#5i} z=h{@1jW;chgQyu7DTQ2Qn=s&SnhiCE4)V2Kd$u!Jn>P=3tJfln=AE`;n@?EjR0=i@ z(^@rVkE5~uhIKT&ra!7Nv^z?RMI{BObQIMu15khk#lAqO2c*mod+PaDW(pp_t*F*44?g|Gg*^)7Au z?U0T{qKQCDy~WnpfM#>jtP=J%>Jv;N(IkL67~d;)`G=MfNF>T-&`b zp+HjFvcaSfgyIeiPOn(vg3T|V2TSRS7LH*LP=HSdN`}`$9rmXs3aW2nrQ8KFRhWpv zyRL6;!o$LhHEk^{o}8Uet&I5lqvYR;AvX%nxrjMfQdO1ccvMUgcp?0SsZ(Tg=dFRR$?F%D4oFS^ ze=`Cu_Sbm_Eo*?y4s0ojCXypLU2G8}Td)iqh~#xF5kTV*hkXqMTjfz4SdYD!FkTl<6{nx{@I zZzL9W+7a;gZOcg#iS0~XVF8aY8;|#iy3+?8GgD$(B3}?Q53ZS+)TE@I%I{-aVvWhk zvfZ=Vd#?N*AG&?w1)Zf{`c37GBrqwi|CI_Je3r@*AcCK?Vaba%Ad@!|fv_jV5@j+f0)@upXaZscoO)vo!>=%Bmhr0tmWkT_>>H$;jddHU^$0{_KQ)EZE z0KqPt6yxUlnv|5Z)^2g|#QQ(0gZg>6_?@6yPb~Dk5}^cM z$ATN4XqX{G@Cfb<29c(Oe77{mv(VF;>iiY<061fC&YPK)?dKRJ2~`@Fwt5vCXAd87 zpll7##;aIg0@~#ZZHg={N)4)EIctym86~m0~E>}h?zqkTn7hBgG&K|#w z`4W7@FRG{LLme^M_1ilnS?kYQ_BNl%TRjbeby7oq!7;(%-rRyBr^t7)Cj#eEMJVSO zTx9C9rLB?Tz=CIA(Q^hH>U4;W4Yoe44aN70tiT|@G)UW?8cj=wm?90ONzrlrESg4pD_qz1pwPcG7X(bg|4Ei%Z;AKi-$*BFU&2D ze#d$C;kOYRb+LBCKT1q?O#e^E0maI@JvenwA;zk;<;=redyYVgrYwk<$m*q z!rVd`UVJh&@^vVB8P-)9p=MS+JMgw%qVZ8pCB=G_Oxy*_K6yC*V@Ng=R+cQBHdYKC z&^h%&@Sx(Nu8KS-G?2C*!SKz9J$ThSBZI<4lI69Vug6;m z9>l+u|ie?Ao6F6JVYKym#ZG*YVqa?9-aR5fZs0vW6N}(|* zqB)>8?Aj1W%jYb4lhLK`YOg*j1~9lcr?}jZVuPKF=JTTk6h#4+Mv^G!l%=#!0JaobB6MEWZ^IufwIIuk&X}oQ>JryExyNcv=+r(8i@*Zi&_-=E&4p?c_ z$o8MIxS4knji2^)FTmpXI=N$EwP#yYM@)zZM~4iPLaWtwY=*3%eiaUbf+^)k9jw0h zWRf@6EUJemxTPSRDSID4jyHa%*FFyyPF4~zO9?uyf8F0Cyml6Jl~jryaV zX)X3UA6wnmn;R`1%UYVd4la1Oxsk{Q>uC<7>TLAdW0{BOqRQyoAZ9^5VPhxF#MvnF z4`MG24InbL! zo450E;9gG@@sR#ku0~L)9QR&FkV@F~vi_co#R`r?lSa)(z(X}mdZH-8FV1-5Du51p zAoY!msw(_K9uIBwl_cX|=#xjydab4dq<@u~f$jqox4x~d-@jX*ZIAOt+0E9n;D-W~ zS`04B!5nNTHReYsAD&+X76uYIqo;1h$~2gG27UUW2YvBjGOzjLWc-FlREyuAo;w5I zBX@%tAjMY!%<8&x)asY_iEyHhPMYgKpU~6c!AZMH>#KI<^YwK!cLgl1N1T&`eT~Mg z8*uTodD|0e3bAzfN#Cn>N_noO0~^aQj9WBuO-ILa%^#Tbn6VouXt3a{i5D(GDcEXh zZI=CvX<`-D+2xa(I$>+WaK$E6@5*o2-`0gLZ;*F>-nYMcB~@A;g#3KAzS-UGhv2fJ zBG+VVd{4@)CjVTGuKM}hjtP03lw7Dkf_%My0TM1wS;iN~o^8I5cWXJkz?N(nfrg=4 z*=jm%H1&RRR@{J|5e^R30+~7pZ-9JGvu&_2KIh>+I1gbph}6_M5?*Bgn@S@36AVUC z*Y_FUIV0wG3M)&obgb`HUUQ3^&*9OI-n+Ixu9+mW=FVr$%IOco>Mve~k8ypM=)jS0 z200V#jw@b-|DKEB$@P2{V0%|M>D0kFB!vLsY*!36U5sI*;$aiEWJm6KK*y(5y6w2* ztkZMEDr2;hsjn+myndydlQcBPabVxB1Stmn8E&+rnN>nwrloy$(;u zIF;j-Bn{Q zBm+m@xCm4f(nc*AnZH>qlZXPD-La$)f4Nb|uRH&X;qF4;*J|-w-Ma<%tB<(qtR!;{ z{ls+qR|K^E&sv8T=P6E4!;6j8-@hvvi!@AD#z%t~_Ak8q^k&)MH)4$`jcYGaJH+)w znW)}kBiK~_K380s1`Q${e#H^D!VcwpoBmBX4S__8a->9vEA~}hE#05zNty?blYW$F z)U~VGvf++gVCysUH=J6#db(ZPI_Yw&p(nncT=X~d@!&Lln$zqfCy4v{k|asD*DP+x z5)Ts{h}efC{|(CC@2%GkBWJGv_SMtRf8Rlx-5uge(mqjitK=kfPLwOl(SdvxzFEi; zIyN4>ee=ow%|wix>c23)KHuFvDB+HyXoT{y1;g}Ug@;O33f2ASl!P$ldMo~X93 zVCT}!kl@<`|BxwVO{v#@$7ejBIPIF7;qr|KT|$Cqf#qpg-uJ01W60N|1*vdp-<#P* z3<$vO0PS;se;*ti3;^yqKxZ*Pz_9@IzA@bYq+X>6`gr~aUD++hrwM^Q8`0|yk8r&m z{#v72F{r={;3td&!&mc~4Oo7H!+wlb?8`LX-^X!zg!mHSDvsTyfyUl}! zfdjoV6Ze@}&osH9>zkG3n2}|!Mkh!5gt|#e)9RV@(NNGYM5xO2?00(#$J7&%y!54! zQR*ne#j({8^6C1(F~*|2(knc&JH%4I6MU4i3o;EzKQ_#Myh`8c+t5jhkmwM!`+Kls z`#t(#{+-7Hco%J%33ov`Sj`XR_L6W*kF%YQvdI8@p^6&z1s@5;Ct*BdT6RTRXlmSs z3!;j^sFMooi$`-L|CWU#oi0;6BHwxC(W~P6oP3OYR2%)q8UF?IIOIO}4pgIO>O`vZ2rcE}| zQ6o+Y4II!@cyqRt_>ng#eT3^Ws1OP2o(UUYvi4mb8?GYZFJVo~BM<5vPJ`8H>IS@- z+Rc~u+g%W0%(h+r32Uv^nwpCB)D=!bgUz#RfHQ(}jj&ua3w;|~UtRrTkpIBFSufFK z-uB||VsXfUeCPx&jnjy$z?>Fk%NH(_-t`g!(L@-v2{Yq;#Xx1CCwq!`J7jKqa}P7u&+9Vx6B$1+QNCoWbUjIX=AX{%6gA%+DYLlC^@F^} zF}x^~?H_s!&?L7GQ2e<~P`JpGwrsSCcfl@= zVIrcK`O4&as)Q(=jA8KX&VD9Z#&E_bO70Rwfe0*tb0K|X-Td`&otknXE>Js9OiW0@ za~Z-G55Qb7o)lRU)iG~p&z|Ye|1*mLkq|1R(ssIsVAE@MZjK!5i&~iuFa`~n-mK9J zZMimr)R;*KQxIscw2qH&{(Vb7{y;lha3rhXPfB^ny*rOi zo$NWrLUF_G+C6UL7iPUEnri6SIXi|%U<-F z`E|Zhhi@^WB6(^?7EhgMF+;wS0)$;Upk(yyV9d;-`uMYJ&%1H=hJn2q0W0~`h^|Xb2Vzj>@EjeH}hglDCTHX|2 ztA&oLvgXfb3lj%fnw5OlFcxb`0}a7*hCqd_;&l$*c4+VfMqJ8-=o>BeR+rV#IU{~CmZ%i_o zE|5vm+g~?!2(`Zjk<~rXJa68B#|}7n;Hv}SxY>*U6lX>wQ8fua`3uvMPCOsr6&dBr zsH@|lj3l+;t}e}jhknvfdv_V}83Ln-s!Y|4e0+|AiM{sgsnCPdM(E8`UF*H>je_=_ zZ*q)H*;h1TI2TJ32S8#5w4cQ)NU*AagQ=jPYj$?bDVM!9S4paXFPiET;O4--NGh3I zDvT)|Cc66x@+;^$r_a0jo5i^p&SD)aHdF{-6s1Fg+%S}$+RbQD&ukjrd}b-*v6>XN z@}j?Ta%XRQd#2UTZ$)K19FDnPDV=a)3qURUuT@QFY~-w|O7nVNKevWantP$IBS>IJ zSCFWSH{3eE;buXD2D<`!br%UPW42dlVYeay%^Mfxeu!Gw0AJD91EKR8mxrs#^sQDd zoov{(3991=b<`vBjX$P?))LWiP^Wn$e{P#_=Y}%j%c;c;icO&kySl0$uy-Pgsx&E0 zMOI|sfwCno2=bWCH}I$pW#m7*$@FmDIdk)#&vARLtJQoST6mChM8q_5aXNGMRQ+!L zF!rE;-1kOb$gB3;?BUw3{!=o776>&!J*k=aA4gT(-0ZX39SJ;m{H=7uN%)|(Pxuho zt~0puAq0smO4Fu|P3APifn=AK4j8b_0Sw8s3S_gKydb1&Hr&i0UyaE;-g2*{ zGNS(Pb5jqatGrQKD5PzN(su31=F@H;XPg&U&T{+QlF|MJ#k zJujSP`uNza?C069ZXU;1bXM3B(4HpU_b8`dm zs&UH#1}5E8ErGN2G&HGP*trM6@>gc*y`Jb;UfW9kKXQ{E8^$V{!NNpwp(mU|UTbp6 zn>%~cS94X9rSLYFYfRFF zkNIZ0{#wW-R~236mMy3IRQf#(Q9R5|Z5`M={~Gty)Gn2GW1`3RCU9K;X4>D*+w9(j zHMVl)yVhwBt2gS4-~MJ??G7v_LB@jX#{yOv2|11#>x1^M44W*=S;Olk{63+#7jc)P>H-pcXxna z0<&WD8NG?M>B0BKvX~ zLUWnJ3=f3)>K)xr8go!^%{X>!Tr+y}mrqFpQ9RV~L}ldmPt$CtA125=5|TYZm(_49n3Pz%GY$07@Uw`{142xZxl2m5a|*BP`fE6amZLOr$!SIOB5t+hl&vE!kO#f*#L zEW6fXtgj>U{0ntSgCmw@$TfEig>1eJgxgzwv#i~TAwkDIT`Cg|i_#Hc)JlWvtSp_$ zv7H3=zU9F;mW+_kg6048S(_KecT^o_GSUh^30>b(ulk-&*ZKbOgR_b`ocGJXA|&$O zdog8UE8vm`PA*^a0^v*i&}@lFfuV!NR(*W5uw2{LyYsm@ zol>>5`}mwEbyQHG;G zdRAUna{n{=gD81cr9s7nc|=ISh(XNyPtZP6PT!X0MpT@S_n`R;8-NCNZ*Nav2-s=t zdFei;s%U8e;n|im&zyb7+ktPt9hqSfUr-IB>4ztKHl4?kSVHhpb!_wIbJ$b=yyj_0 z3cS0kR+Cgxk^Iz*_4yr-TRH(QW*8H)=^uc#FXnmP+4m6m-E}V%MkM1s(iyoEYSw=r zxpStU!@Ge!pQvov|NNTG_^ph^^wtx8OwDGOT;F7FZ@j`2nQp~6UBVMs!PXW72!e$C zM-hMk3ui#BYKH8FVOYAsFwwy5091d72wp)!0f2xFlL4_Ns`~c+-_Gl#aDIYUI*Rcc z%>OQK9%EntNiJNv;bOsyCg{jrfQRQ|#;Sb%KgHEEG>6@(3fa|VOnRy5h!OJ|IwwD~ z9$_~UxfrH}kmYIC*|+aMah*?4g}}HyMn;d+;QrfJ*xX)&ozh6;hg|9>v;&op&tmWy zO*a*MG);UH=`i%$`N%WE491t}kgp*DK9OhsWhZZq^%xb1#0AHqSo$4#$y+IQFpZl3!er%t_*J@h=(iL!H5l4Je>P;E2DYqqLZXs^Lq#dI}1{|49M2wzhiNs;8}s z0$o0R%5(G#e-~^Bz(5d6Fjg1bx0)0YU>@dNJbMOBqm0{ zp96^b<_1V6R8@Dt={`G469kO!tgPjgm55c|Yn7R=xeU_3Rnn=TiiFbtaTVZ91b4oY znE=fYgusF8>C+K--e&370BSgX?ytfXcA5l2d}+-@XH!myFg}{R3}+BTSQg(M@r^M! z0j*7w&!6%F+VAkyK%Z4)tl0jiZU494@Wcq7I%aH48tD;>?qn9B#ndEv8QLrS^M0#v6Sd{0x(_@p9g-9lRs!Xscj;8nbg6lb585_Qe z>c=jlP;)pj^s<3(b@{7TdndwT98LP4sw|yF>iE|0Wr0;ge}bUij2#=h9rL0jF-0>D z!wp)qIN44bxz1o`xyGU&>+5xs6!TZNRg;T12C$YX2Q1P^W})TFHTR5PfOYW`wdGpd7m?^@Ln3Bunq`8x8RT< zcc2-%xVXq$5PWYVuDH9sjgUW}ZLefDLc#eaTi{Rn7j)ae7!37yj9P^SD-wdZaI+~v zA<&t55goaw!==R)!9rU-x?BQIW<37RjT7uQvaOF8GAg{Uz>f?jM*%TEX`}=#&@9h_ zC$hMxuclTDgj>)KuzEpKDkv}745%?_OU1u4d;o_jGqDUD|HyrA=d9a^J-R25w)|7>~NxHerzjXt_z?^I(SfBh3dA%T`?;?Uj~|h9=3_U58s-dxzN}8N^h1JTs2SUZEwC}-oI3Y0=OeKPpAgu55l?coNqUNacNwvZ4bQds z{5Kh_$Ds(-6NI&Z%$mZn3}wv-B+zdLz@@`-(L^@wY~;Fg@j9F(;@*^JyCaFl8;p3P z__U*WqjSFsP(=rsD6XxBW*6eVNk|eC0a{5ye!29|ct09A54bHbi{U{Uzjof;^gkyM zhKA!nclB=C(;Fw*W{%z;s5J_?-@CQW*xyXHHZTt%4@WW{?pE`Jb{^KLS_;FdZf47~ zu14*s2CBZj(p4eCH+gF3qL^~_yKUj0Ud?U)wnQEsZontz=+mD2va9 z$@4-q2X6%js8glj2%!8CFD@)^O*_m1} zPS-@3eZ17-5j@IDL}}AySB?x92J@Ijs~|{P^Dcp62vR*I&o! zx7@__OGD!u_I*wEa0Oy3>;zKJdS4S>0ClRyM{ztc6O)%wt3^&)iAk=ZC^X-Tig-cB zPZR||utG{IsxT$sqtU>io&uCvLnPs1))g_i3}ow(z@s%qwjM<(tUQE;~NMh&IEC3u&dmKKgW zN+>>Vni@oqMOXG&#LPzpc1G05aA!KLX=M) zfvzg2h@z|VJU~?5i|q5Se}m6A<~T4#Y(2zj@u%zV3sMu@*czM^v|7xqXtAumJt5Qc zUZIL2vrV?BNWtz7NSP!-eW}V><#|)l+0L(4|9!VC*oSJ@qVcTdhF;#DYDQ;rmqDSk zbJp*6xy*dLP|(FG*J+XS!KSbN8RX02X(84A;aX|S1*O2m)##!hWk~c~Z4qsA1u6YN zv)^cM>&xE4@Zq5blNQ&_w zsK^lXEHg->3XjJD`u|@G09&yJ-?QaR8i_HoMkM}RAZV3$)cU=8tRe9BKM#&Ic?*M@ zas?JLV7=v%66RfHija6Ajp)z=Q_dOCpJROyQ~dK2xWd6t@sXRsX7)gY);Z9-J?{7c zwuTP{3SXMQt}!sM>>c!E-oI?`!Ch&i?&a7r2wd1d z%~BsY>k399E13a@_Mf)`s;aJ@HtDX42 z-T~-PIH5qTLNAxj5Vq15_?WXOo$KRJ3TFuQR{-dNE8?w;jLg%gPsJ7In!VgGtO8bl zZjBWZpt0|&f*ix2^S086TASHKb`W+`D4UYY(3kT*q<#4ZHD)fuwsQk07t;nXAivml z(*q1EndyOjo^}LW3k!m5Y|-ET0PF53>vu-s=n4d zS&BavXzUl^Xh_ecO-y9Zo_S@b<+v)KBTaz{R!a9{oPA53+q530Lakqad$A7$7ax5l z|L)}_Gdm#kU@09+@HybBil^nK`=*~nZMe{n4vDjS_5fOF;%gdt3nUwr1l+}b)u&gCPGG(OO-R)f*A|CtECZS4VA0}TZtysr-Ar#E+j z`Z%J=ram6c3(lGlUo4j)&&2_8*NMv~17&F~9#OmI$7=p#C+{h${7kjAFbnb6$M(sz3DonhXs+@4$7t19=Bq<_2(9rw#nqjiktKB47Foop)RDM&YJ8QV@sb{;rJp z8yy??N*dquQ?s5Ou=P{p6)j$&bmk}R0IVh>6m4G$b&8-gDd%mVh z<|&E*DKA^v!1D;%0BZM4T#P_y2M$p=4o0BVq^CK)zUuh>y|1_;-j(AaQ04;D2`sp` zQovbG8WHnfFv{TxW`J-oNi#*{veegSP9z!&$RW(6vgvaTA?*IGf3d+}sR{!H z#c^XUESgi7+5^9{^=Y?xpYtwkk|!`Z`Se!Hm8Jx5<|&VrDgQdX%lU_^V_bdj*MS`0 zhU&2Ix1~h62s=)3z&Q$;1MV+K%(2%eZ~Ij$zlIDzKzIxM`WfbqJ{-ubs%h=7GVutVzMdJTFp&4>@ehQ9npRQO3g%l1&6yi zMZHt&?BA8l(v=L|FAPIT#Qf#OY5C%wK&TJB{j9=~MZM-h0%OY`6jdQ8y!^-0ut%bw z4Iw?$zZ_qcG(?X-3GG}z>m>|5;Pu3-QYT!4nGXsWUe7&=lfh|q8as-z50+1K*bz?8 zmr98~0s5JuP$|r0NWqv=#7UbZRhrr^1j0~>MlCp{pql?}K>mKYT$}3hY|%{hC+jFi zIw-5iG!ozJoMqM$@BOvJq`6L^x<&h+ODxt5GN_fz6<&cFwF;a7H8nLrpPcd5$rL#C zUgGPO9{_EtSb}DM66+9%sNUMTx>kNX^7{1vx>!al~1X!(Zds(hmA6k#c z%IjIQvis;4Jc`u^UJa}o(8pYl+&n+v=^P7PU@FqJ6aC zIR>!*wuca0Dm7J?{Ju>nbm3(gPWIFluu@k6(N5!h=7$3CqJR)8upZc8peb7cgVn{Q zjWs9sTyO5~LGdxn6^SDS9=~BS80M^5I`MQj`8$$H0m!43Ml=L~yoWZ_ymQUDAx!3r zvE!swIbA!=Us>tA$r5Dl$kwo4yt-N$XL18+6o0tPu!{eFnS zptN*%r*wl#cXxMQy7S$BfA9aS8P_a_b-Cx9y`TNWC!V@RZ+v%9I|9)j_UK-be1n+F zx%zDL`reL@cAVZ5mEXk+yV2-dk&+s^!QjsIY;*&O-1po>fbIPKui)ITyu5GZKyqPhb~e#y?`so~*c!b6 z4KND{jRkwK!61GBK6`@3pV8XN*QTE-lMfnTtPDgJvJi-3x>qpCz-y3+WviOyj_1_+123Q0^xz?Rvig+Gv95=~5Be}%hk_q#vn2NGPnN40gBh{z## z|MI3EmaMmiEg3kt`Y1p8l_!Dq3l~5K&3v(Kd|0V%Dgj1*-vqFsRdge-;;Pn0PG0s$ zPRs=&Qz@Sfmpv(8F6QoIeGP{~GoL+QgHu)VYB;LZ-7z>m+wfk4$k*aq;@?7n$3qRG z5lnL4W+n2&y|lHJYk9}bmmn!$xXK`mSC1rFgOsb)Gr!L_P!>%Xzo|AIp&AS!z&9$ zRuhEYGoya}FY-0>;1fa!#M`1ISn#PO{u^YGjv>=~{SD2%7Ax$(VQPRh8L`K$X>1<9 z*~xwF`Z$y~o3lD~klJzkcTR{)nQhMfH)vM}12TWd}ZEcH&FB&*Nu8%xw}g(R_5 zdeM;I`)8Zs<4VEsQ5lnICSSfExPeT^(a{l%V6Qy*OQa=pi_jhC9z6c3(it-00JXqB z#GOozJ3}xKzPqhhP+YUg;YIYQ(){-pw%mw04FYMDkx3Wtrqpme&nz2^@75#*92FlR zU=RXW4yy`$Mr=Bu2>HIm;ula>XzRuWx8Ez7>>fAnV8&H}6)5q_UkE6&Q&V%N?nbd! zl$1?=2u%_+&2T-ba_%rfK=K2sHSTZ39BPfc>X)F-$C3JZQYXZ6p?{>R~d?S7k!tiBA)m;0yArO^I&onK#cQ*sknPHyL>!yjI^{Y zzEC+Z*Q z9W636IeE7kQhUvRX>*T0Vg#3z6>o-#^D#tyX@_5VfD<2wM=3%Gz`?@<>TRDte+Hpo z^Oo}Fm3$0&)IXvE(1?L$l!>CkeAlF_ucPsBx-Ob5y93sbaTPi`IuP@eRe(A+=?Y|4 zB6E+8k0&b`Hre#b|I`E$z(4yj7idYmhK(0gn?1~1re&*&>B=d?=8Un^UPI`l&G24T@uO^=?JCl{v z?1E`o*DTzX>T-)IKE8s1d_7@yqCfxZjLMWn>uE8&qT*Pm#fMFy#RW?Es4GRu+s0hy=WVdn_$hn8l*V0!yF-5Z9)m)uN(6|FK7&-Lp#OVWH2E$XP-2sYOBs zQw_u@VqAP-w1razidgp3_}7fOxiF9jtIdhyKy9KOcuychlsY`~h3$$T(*mTU1o+@= z7UwP9-r5@T$K4&x1iY2PKq7@GTjFAG&Wh*tojjXw5{xJlmq7wx_ zBc8ed<_i3{o`06GC`GJvW)08`eohbW85@fp-JPADW^XqF`4<>PhvtCRIUefj&`@}{ zF&J5MjNoM9%@Dng+yr$O2q5*mKuQmITzWA`G9&^iepj4LLF2CP1iY>*3S1}X3qcG` z-#3xoya+*l$F}rC`T16-gNW_mX1i8~F`+!4A9qQwD%4gZp7>70hIkOyTNJ$Q;I$Rl zZX*M+tB;;*r&8J^%~$JO3VF)I$X*E7CEwdB9`{%w>9dg0T|0BF*(% z$MSQdQd9AkLp#^m>@)p^76cSy>*#@jtE%bN40O@)c=-wltV^E*^b86L3Q%wXbS_X* z3Om1!>iwTC=J;zYgMM1FRSTeP2XsSI7Csg7w4*O5Ru#7mYc8fHC9$!CPLmreGUzC! zRwqo4k7wr|>;C|eftI%M737cKG-9SF)an#X75-r%!K3NYZg@=}egLpI*U0=BfLUI9e-f`K0!W zlG7z}knly(PM;7H$Ie@!jN|@vsYp)2esKrliR)rnFyv$L3U&$iZEL3d%-sGY{e|90nc?# zm|0w%*IF(UKcOAn5|S9eYg=e`=rC%zqv4^0PEx=ki^7U5`oAcZ$#?MtfnK1U^zCf3 z!Fn+vJ>dTK76O@=m>2@>HdD{Rz#OXdGf>)h89N9`c04#w8yOj0>ca_F*VLS>w3RAS z?G5}>0|fmsAOe%EI5jf^c1D2P(+~ip;PJW)9WS>`GW^&0&8fhAkO{tXTre7TsxuD{ z?s2w(3GTN!z=e{mcR;#VY%{)f3WKd|LJoW*R#KS`B;u^WW9hO_F2!f-Lt{^4P7 zl~}~S7I7^-rCeh=t*XD%B18~B48BgRbh+m_zmzgLMU8WN+bQ!i_Z?1_5iK+!a&);A zCZoxn;UWIr6mpMneY9kYi3aK24|=cu@D+4mDw~gh0wl@ z?oO8GM9jVc6WJ#^9iaM7%oK)lZOnv26JO$ghYRsbU<5Q%@|YeNn@f`hSqdvJt8DC# zdr3__Fl|`o1>SmJR{ky5m`Q3G1uuh|)_Q z7gwuU)8d>Pxbl7=&d@aPH_ zi#L_p@_IyvfT8Bw-yPawRG9L~eD0eCa_$Hlhnu3xamvL*{ryVR@c?L+bZ(GtYG`N( zWFnP|0j^+bZK(DmATR=%R$(d}sBZz#bcvU#ss5IpU4Ls1H$CJW>69y~J*~tkU z^LUFD6N7P1avSg@zYzk)vI&6}bj0SwtylYxA3sF0fh_(Pw09C{-(q7iLUXjVv?A~T zIOO2qKqg;#VYN;GNNcXF5O)3oYT@w6N|@}dte{f@zF350c4_ISm*u^HuFFPXlf?wL zH9MWo=MLK8Dapym1U#4!MP=oHO)fn;1y6V|o>x(s1Ro48py>#@R^j1DL_|cu&>%vh z#-tZT^nVwael(--(4Y52;?@B4>_Z}kZf!M^IoIpgDwut(8Q+N!43;LU*DziSzZurd zH!PdKoD^w-uemyCcU&+vdA2$b<$M*n87HHrr(H7XMBT693LeDJ5O_DhCj+P@E*STB z*kDk^&daL8=cz^aAF3+P7cb+v@X=1!d?n&?`>x~3pA>XCBP2I-5RJ+}Fy}7E;d|eA zQg~9_Au1_p4t&LZAMeajVCi(1K-Hg{YjOrnB@hvDLef7In_@$r#^ZrF1T zvmw2EXIWsx8Wb217?`@Sy|y-{I`uK6(?a)DCk(7cVDqkmoQfNin5d!B!i6u-`Zm5c zBoC`jS@<)58YVLcLeR_gyNg|r5bgUBg2e?|-pxy2Ri;yvFhMd)PfP20dun8COiHi} z2Cfg_pYyv%aMHCsTran{t#DI)>o+E$3SDn>v`2ub?nGEYAejEbtn@G=<3y~kcS%hr zJJzH>i!>T4dPll=zd2#Gu6t3^4+6QgkqrJpu$S|jfuL}_%+=+-O1TMQuu(!n0-Ufa zXfe&qs9|F%tGx)w0a>N7nknE$fjp6>MVAm1^g7rGh*?EYrUGfcx+{^7LlX{M_&l|= zf`T0m=LMjrCX5Eo)4RJn^BUHmXf@e}zcJwz05s0je?uLnx0;duHJ3vGXyZ3+L zikuxyIs~(uC_;V^bX=h;PkBXizk#WHiE_CzvnCw|-r`cNs)gEr0mS$OFyb+mz(qnC zU_aG4YJCIQ0wtE;nN_2~on2o`pl$UPbM00b*)<~p!OM&ceIU`TkSUN3T#&ZV)KPv#$P$oe2u8}F}%NL0?53;3F$IUst< zQUC)p21Z6-pv?w=SgJI`jD4ehYh(LdPL8)|i?BrpC8jK|%V%)mQhSYPrcQVFp#CXk z?ENhS4JO2%pE{Z@DL9#@+`t-Bd~7I&i%F(DefYJUE*H=yq<1OBJvZvgTNvnMtUq3d^BN z?4)O4h>VB;V)fvEG&eVojEoTQy5v1A?iJTz9n**d1d{hsohD(%a-dIKkl7!8LxQIFgxwxMZXyCP1iJ-2#sCMiN=J=^o}g6vREj6ZdUm+>&m0p z_aMTnFD9lR)NAG$VrhO9328S zlmun=q*4Y15+w4TfIv?}!~DyaFW`s)*cYDNlw4ICnfLC4~kv6pzz(3Ux?Xnp{dN z+4~J6LWn}+V#*w~_?ouC2+EqL}Z;EMR&rBq%{&PMmNb8zqz9WahiTLyB6{!%P# zY)+1jxl{+>CXkPuLAIEgNt8OwKt;|E}l=H8o_$cFQCZeU(WVq+>%olc@I~01Es1gA8dY#s!K$D{<=I7 z^){UT7;VCJ@Ve^WE=dK+{Xe2$3X__7(6$GPxL|h|k`-7G!9e^Z@&{VIs){X#&A1i( zcSD$?XxJQ{zp8(euR%2d$vn{3q@N~}LKNmr<>ld-umdXyXo&+_m8InYKo-&elmg27 z+s<{*uR^}0q5Hot5Fbuj6*z%CRnfNS>)-^eaAsn4P&ipJI+%7%=4@9RH z8yKHbMt7|YCdmn(2S*0O642+&z9y({6FvpO~un{IvoGvQQY; zGiC%q=1|7`4=W7VUjY3RWfheV-17rVg&_ZH^SH8%*8FW;wa{Q!8F32-0q#YU%Oz@th^O5#0p$XNDR5^yXjC<~{=Dvg)fWiU$CU>L9> zAI)G(6Y+a=#T}LhRLCa=(bP#v zO`ZId0FEs=dHMQZzp^l__Hx=5>K8mzfKdgwfOCPMpdbL^f{75&NyX)|&k76_6&LRY zLk)U5I$&Smvi945>vl53h=0ZD{bZe^h99w3!^=3k&G=v4Xw*1V(O41$5e_9r z^MwU-g-k4S!Txq{73x?eObBL>NJ~LO5Its!PQ6GgA2bx5-QzOy>B0LA(mID;Jo*IR zf?X)E2AWaj;N#WcEbg6nT~LR$XVt?L@&I+Ax5CDo)J$hy$Iy9=oo4WH`w|Kv%4_|x()2a|(txCVx0 zS!?K5)(LNT?;gV{>4@O|&z+3jAqez?J(g?_C?*V}+SCL$uTb0yYs z3iGj!V%v)^wj0>UgX+6oGjU$*5ef9{loHp_g#3pNe{8$EH-Cn%keWcwf1C)65K{$%Z5iPx zr4t-UpG!qUah)VND$q(nlOwY7QTsFS$)| zLKLYnaRQYDZ;`~A!F~Aoy_v;;fpwBIT6XFy^-n>9wLnhVQThq<;G<{5Q5=tVOq*WeDdNe)z-B@_Zlh z{KqSHT)2sk#6@bA2JJ04Hqi{WzqzwB=kk{ojB2+zJ=0 ztg7#t6jtpyqU^ShmGm`F=^oyOuJc6DXbBT5#tIXc`xQKnbTO*S&wCnMM-x!f8!$Wn z&QX)Re&$KIA|lMZ^LhqEwb+=LIGO(-zbZO;dj^(WmjM=x% zPR#d)v*V<7o^vw)|C`v*>Je=k>+Mhb%}mT>@5I96$Sz(1Q}%Klfr$VI&e)TtsOTGr zxPWKM&t%3cl+$&~%~tmFH#aBS@iPb!bbPH|5J>N6x+}sy-I#Zc@^hAoZBTi4T>+#CxjfgR@5Dv1a{wLmX*aX;j7hWSCyw*B1qq z+4Lb6jy0?uY^Ti2L06?^s!$$xl0O1mm;==u2$7pWhn5ZtR5&SIPJ3tNsq(@k_d8ff z0Yx6_x|dtb`6Mt|ntU0eLTCz96GhXQ(#DK5bR<**8|O#7@S5%}Qf+R++Ilv|C&ZP- z)T4tn$7|D%`4>c{kTj#6tqIasLA9{|TSJh6nq?3B-DYQ<-BDgYMa>YUu=Y@-PNSt0 zNxA)49i5kUL63ns$}su1ISFZ&$l*mVN*)q{qPE-dZ8FJY4Jvj_n|9xGuK-ag^TzM< zp8@nL6B+mG=~(t9ZDJZvP}0JD>cTwW^(a&2#aI@L1#BWggq$s;L5F}Vka~)9jG1lg z^bRg$kQjmD&sRjK2jjZedZ27}wvts8`~-#Hzqmp`x71R;vI06>;|0WsL@N;4iK?!EQ1bj(8S1~Guip@)0v0zrKDe_A6 z9*4mDRib>hvb6Jb%|>A{HOmh;bC%(B-Jx_nCV1P;?R1|j<@MYUYt0rcSP+3=Ey$*J zmOpYDr}-3)cS<^&!MacE?=h2&9)&*f_NiVRwvKaPpenp8#vTz-@<>48mtq|m1ew(N zsjkR*&aB=i{gh|%Ev~x3H3ySsf%dOo&gb5aQ${@`JFY$z9SHw&hl5OC)hl8qC&#|6 zF6~H50lq)yGI?a5pLtwm&0&Ec5Ol7JEP(jmHab{B{%BgOrAaq4HY)INxb~-Z@GZli z^eTz%9%g*(z5TbBm*ERKuOLzp5;Q-@#y`_E@j(a7f>gCNw6bFci9p%;Sa(oT3JDk> zpB^Tffh|&#@oUw(Qt0!A0S6u21Vvw4YFs(97Po6c3pE~OjGpT^D}z4vPyFS2b`Fwx z)n*|n`B_@N^taYUk{Ut}$9m;vvoV>k;atf3QV=Cw2wLJus_k8v-F#X|*+at*9XwIf zOO+voS?%uwTh26J7*I4cRJGtxl38}~@c2cJC!dGzgd0wF?~{4!Z?mYIWKz_6zyF9} zo+VL6p+Bs#w^+4ZAuIqO$IzTh(w$ZF(Pk%Pzy+YQ%KfETW+WQYgU@CE@-^U-6n5FX zh&ffTMY{i6lv&?3LwB3as7{AiMIyj|Y10AJDa9{jsZ& zgrnys3zwU3cK&XBviSgH%~0{MFfd|0&H{9HD_+_>&;y9A1zrRXQcEV#r371+UYDCRTR1jQO=q$Ey~eCkfO=d6Bv^N{HNO9Cw5)64z=qez&!XD#32B(FN58|@ z+uqfuXB<%$`9|yUI`X`G<~Ci<6=|x9AbAIY6lItHhmlvhUA^>Jm{>u7&V9=Y@l#eS z4dWW!qA?lis1m>w|4@+rV0g`V^!NE!Uf~%htJ%pC`pKAt zdAI#b7>5mFf|@>i0r@bfkgIY6!N=!i(!o6&ii1lN=5dTRQK#1nISQC>PZ?I9-^;Y+ zvpVakFNFp@2ngEutoAdVJQA&XzvPC^`siS!yRzdTAWxa1A>_%8d2UqyEeTphI$=?g zHx>%KK=O=y^!QVx2v2(U*6E&v^mHG!W3eNLfWz=0YL~+6_|TzCXmxIKF)&dHMsa72 zTd;661%2Fm*fbN$Lz!nLwXuQ@iE&2B5w4mbt~X>KOk=(Djj1_j^O6C(hN z<+qSI$J5HlSAv0INj6yJbS^WUjgv_A4p2C}5TX-xa(_LV#yeGlDOoOBCpRyLrP;iTW28Zw1&_0(p&OwhMp zApaMHe{;cObUb)(4eiKnNno#UGJF@*|C-gu%Wd)H-3 z{FY!k{7pbvIopp(6}a}xe*cCBCe+Zll!&u;r2MBPRp_4$gvO)$bw>TC_92?@WgWk| zHs1XX0p|#)g#iW=pMPW74kGzRUbR+<2?-2TkB-59X3X#{7w%c~GrabO$0bO*zDhb+ zBh2Pt7~vR+^nyX5I*vZuoOpMmaPJ-z_<{D5%FV6%c-qw)I;1%bU;Z4i={#-17B9ki zqXWjtdrKOkvX87-sC#dx*sDIZ)3Xu&+N&f%Qggq2EcU$7mP?&b;q8{h(Y*htIZ-(| z+U5$S_$Sm4#l<7ws9qI3c8AK3x zoshn=&Ax&tew^-X`N4T2DE?Ndmmys;>SLYV%X96&A$vB?%9#B1%d2&5cyHf5`-x(c ze$EX${KuLcEJR#EtG-`^NC7``aQ=KY!3lsE?k!J`=%Qw40MFC7eB1`dz`~pLx3}Ma z4{}c3N1_)EPS~z6UO}lCJd#i#O!ebYpQPgCj{9vIk4xmkmVY_-rZWpx&SImTIB}st zR!S*UX0`}v-5HUN(3Mz9>!gym?k8XJ9uj1A2okrq z_sJOtpYICUXo9F zN`U1;^hYx->9^hN>FuUhm)Y4}sZ$B84HpFHEBa9{37I=XpFHJvb>xYr_n*AJO^G*g z$3Z>&zm-t+duwao`>41QqxHQ3rUM`&CYL4@#6Fp*Ynk~-IKe_iTZ56=hgJSm)6oGc z(j*>YFT2NxH5dKfO*?tLs@<^T`nVh#n%bw8In&ijo(OzqQXUG5%>%REEP1ZLyZf4# zH&S(vnW8$FpZNm=t(D;a%(8=a;$qT+r-L|w8~0PLznchCMzn%VXbBafH(U|zc^DOn zVdy2Cb-G_4u8~hY1d=bCBRqHHp#pe%@HRIi0qs%D5n)4 z$%&-LtB#ig!@uKHhsS9zp1bX$#R{O$z0(#V8IC(A-uD**`RKmA-{Er13lG1%jR};o4xF9F-Q(aMzr*Oxz$1PvvB;6wv#C>R@n5#Y{-|mI?fFgn<#;M zrjE4!S!5{d`NenPdp>wbb+@SJz!OM+w5u(4tIX^2N_|%?CVO~O-oZdDo3;*$MO#cF z`Dn_paCA@!@z`@TC^^Duz6lA)Dq>#U+!RtBTy`U(kmwl}pJTlec3{9}^mejYNfR$t z$V_(qc-Ac>h-rd@g9}(wGS1Gc3fMGlQCF_{l&O0uNpGI-Y01t)O@t`Z?p_91;LPsB zpMq^Xj?Rm7`<>qjW%rDS>*g`Eu0P$P~AMFJMw-0Lx(q65Yv#?{nfie#S=?aH6LkSjnufg zbm~#_zUZ}NK~cHu5sUp6=0App2wCMj?}!RA+@oU?iRycYx>HF@Bfct!PP44JiRB~Z>W3GIi`8VJC zCr?u-eV?y4^YeyYEhZ$tSEJpPU2sTEVI~_8>1*`dJ{_tO#bi#*Y1V{+#5K+CKItyx z+BIe#%ojLYQEttqaW{@D`z`Z7FaDCY@(Pi?_wqZesU4*4NVTt2y_ntjiSYZYa)m*g zMzZN9*OxUM#f0YPhVGk;1cH_JuIT*7ha0;NP50`X#?e9`G2{j9dMwJ0QWtx{e7}JH z;)wuvPAH_rMobKW*x#eanb9M!j(z@H%e{YND@GJ1=utO2r4MF`5jpp_J4D#frUsu? z6!RxvHALX1V5YTGSBl~<{PJRrJAm1E^>K5?O16{0`Uz&Wf>5Z#Mf={1?CDD)YD5aK(SteGq?$!mrri`nHe^>z75P ztgppqtFx43_({m3!Zz8NN~;<=@I0dyY>^`kl2I9Zt__ z=3pek<=z_N7xkLbdTL_s(1fzN6ys#&0JdN)@Q0*`tyOU)!9`QmO)o6)y(^pe|9 z`X1JgA(?k$m$xYh{S!=Op6a;ST%H-NuXXPqDYeW2$p#H5zxGq76iGl1S6_wU$x35W zZW9XRqe<=8q4UlLLZ5MIO4qBNr}P~4mn!lAA(;6}%2Jxgmi!J`B3#Q$TT*;b1kwr( zvV`W0=*rk5K$jMl@5mJgE#JwMB1Oo(YPm)_S`t@vcxj2~rMNurGW)QIrLgJ|wuD}@ zIxEv5ycDmVU`DR*jC6EqEBZ`A1aqPj`&05I>29wU1r{P*^2w1fZ+Avs^A|Ge20^AVao><2>WW>S}Tsp7d(25Ua;*1`1wRLD6@F zb`Mq5G)7nI?2{F5*KF@h0Q}No4mR@Acw&<}{gDp}y|y)|(Ue^7aP&(qR*L!=TU!;+ zLnsWxF^cW$95IX=YQ++d4*6W!zxrlYH38GioJI{ZpHvgJx;n?e((%kyZ;aF940X5i=iPgWNoM$ zu^PY=jz@YGhGSDrfYeXDVaz`9(5g~Fe_n?Xi`d-^2(GMD<;P=V4 zZa~a%x6D=^(lztUKbzQqqD)DmSDxPo2?_bK+T56G4E$Cd!#;CT2t+2gtcY6D`RMj# z#LS4##8%IeK&!Gu-9}HYT-U*LbHkDM#aV*c%Dkc^VOmuk_BYmZYU&Te%1=;oHFn;F z(PeukVNx{ptZBoz7&;DQ1}=0(&=>ZH5~_?0F5P9U3a4rnP|*)=|X=y@t5e&22j!wZg-H zWXKQ@SeX@9N%keHv+ijYJ~!Y08YH2lSE$axtQ2gxhZymr zT8xjpXq2;;W7s$w`GqjYr2vdh5atmkupbvE=e(B-!^5XwAhwvkjRJE^K-#3 z$)Wvi!WjxK#Yb+(y~{lsC`_G9a`5H#RP6u50(d+pDpGCJurrV&P<#?Y3l@a7Gm#9{ z-x-MzN29*?c&|c-CFprwR`)P5;pX%X*t*zN4Q3;)UuL?zS#6BD=OQu-k~diBA6`(T zpiZiL6bN9_u zEAv%bQhWyLt!xO%7wzQv$|rIJxj zOIG%ScbmuaaR0$PE%>&fwPB#py@?_~j93LRp)6nwPq_KQun_%7H)7i!G6CkPP1s~^YkOxE0o9cq_xB^4j~j)_ zrMq4ZO)~hn+%Y)r%mLWf$1%gm2V` z=Ugt8B^ICtSVM~reDg~^2;#V2@VVp{3&Wlk-D+29dhfgD`_YvImWe#e%UZV%`k{_K zyhFxc!w8qCixW81?HRSRVFKZk-~YEd{Jh@NHni2Ha*<$D5xa3&{kbM#jT@yTf($AvNm7*BVBGC~2t*gv(Rw9bJ5Hes7cK z_)SXIub;lM+P-kTlpa~O7f?`gx2XQJFMPE48(>f`I6&`V&w89#rxW27z{{0#u=o&u zOyssTE8aoS7_AZQdwBHc=c{}1k=3lnsFz#?oEUQ8&cR{3#09)ofpUFfT%01~^E$b- zfZOR>FOhE=zvuOOXUKo(Z`(fQE0-H|JOkqA0@?Q0yArihU@xgrt{oo}BQpKa7Z)FI zv)o))U!Q%!B(9`Usn73vtQ!`N4*D@v-~|rtjrUlSUTwS%B{x`I!^j1K-Om*a)aLTk zrq#1VPfBB-*>E8|P*@a#h^i8~qb+MAYiRpZf@NoRhvaR8r^N>Le-c9m4Mr<#bD5Ny z$;tQEkww+}-Wy{?_kjE8xE`5X^RV!OHoe$iw>ffH*KxeSIk$+97X1EapxXW$(MM1G zw(G`~CbY%&N#Fa{#h(6UI7Lp>RC(`Q-xdFTQiMbY6@(rve!Fvr#C9}qm!FgEFu`^%W zWf(ZjCY(Wm2@Dk{@wL^pE$)OM{h5mAO^~>=OJna^8sc-hr4wHJw%Oui$nN+b0t8c> zfX{7gbybjzOq34GpxKF)OUH`>a`3;;k9@G+Q=*$fgQ zriTZOdrp@!=2WRB`$!LH?o*-UYPV*fXn${Dt@blz~kDezDj&(DhwOx z2p`Et|J``IS*?h#em@;-acxnwgkj;NdMA^1{L1-TrpqBPIPC*V*jJ@En*#Wntt2q~-c2$?h(t zgJc&Lyxlu$-Q^03Zyo0Y3^;pI=jQef|B7=+(VlMgW9aDUUkWwx1l}zVxTj9sJaTu)TsO*Q=Xl2q*sHC929{dC7joE{v0GTl$`T($Cn^ruw5a@$wz$jt~>fQn((zZD`yK*z*n zv!43}R05>)U!LPY)9gQhB_kutEh++QR8p_SC1?Q=_O2`2d@y`|2n=-<6WAf*V+b%Vd zRAL*Y#KyAPuL%W-tREhFf_Hg8eh=&iK1oYg0n1lgfPn-MEHx_3#d@1bTY$JFlZ;&I z@O=T#Y`Iv45`3`)Ttv`P8)U)>cQEPwSoOV@7MtDD*~+!h#@|R&v%yxF@R-_MP+JTB zWD%{;Y7abUK;{8|0<0xf=)mys(|QOI5)u*@{)mQwcO5VS2S}TlsVN8qux>i;cJLGw zM&X4G!D|wHJncN`c&X5vgoA_=^527L3^OzHD|jR!rCYggLLdVJWFOF;HWOo-#HbX~ znt|Z=cK-bvz_)ag&U>LWz~V;fC!`;kk+H(q@dzNHQ{_6GsHjmSqQPri?WXY1F&KLv(U|pfVp6Kw?Bf$eS)MFZVv*`XNh!@h%7p z3+t+_3ouN;eVVZ(|NQy0xR^R}^B?Gmm4zid0x-s_t^tPzNkc=U+<0(w6bz-NrfQ4B zV#q;M>t44{;f9Cx_loN?t@eJnNi1SRdG^N%gb^@e4DSJf=C;NhLq&P1)1%&4ZpigG zX#BG!tuV0-9$^$P)&wL$;2eI&0B{iL>3H}%hle`8Vadr_7ue!TQ!`jtlwgI6&sksh zy(hbt>^0R_sJ%h$!j)6Edxrbt(CVp)lU3ApJ4v|gS;xLw-2h*qCqj${Kmk}ltpQ|( zoTeM76_kI)hv3(^00i;|;@Z$+z7cmGm3NE&Z@L8s9+nb#-{03ek#TMV4}W~58q|mH zJp*8MxW*QWOo9A+s!+y1b^m&`fPTnuX+8hEx91ykqV065EOn^eBFQ-3tZs=D49rBr za*C`96E(a~E`W~x8(bRx3AnF50x<-Du>c@>f!?yt1Gu(;We6wibT|VAM+ZH- zI>MGof$z(+k9?%7e5NXmK&z~55tu^v6-MeZWU`veD=SYFs}xRIUI86-V`Jk4IxUeH zQvhu>&-9i+>jDM9wB;30uV&C~Je}4sppKVmh5{13mw@IX*pnvPd>tLzLk&>8LNGBOn7|qJ+5TM65xd1^q!sqE8Fa!AK zrlzJwM*-LWhtJ{hu>jB%08vL-yAq!T6r_Sak08<9{>lwm1CVv#2@Qy}SBxLAus95V zL++i`)e}-vQ-z4#Dmy`QnE>E=KzNtLE{waKl$n1+Hu7ohuMRums$Mh;yTa=Li2PxLBWcbJ97I7P<8F!CX<$O_91^Rqm zAi%qTBZNSUZU*38TL6J#EffS=L0#7(jHgXBcN2Ochx#3&)ZS2}^pXm^8y7QFW(3D}P)!A3xUt>M?N z?~#!LUbnxRn(n}%y59tt{8tyFg1x=HMBHD=fmE<)2nwx61;{`DGKG^%gTn+Q#`omb z9RHkrCkx~zgAZx|M5;5`%-mc@ON+fRMQVI?wYse#bfI?|TGK z!DVnJ8BE}Hb;{)@4B(SRP`4*JWvm{^96x>>ocZ9;1Lj6$Ma5jZIbfuG&(VUhb2qKE zvC(~ZJp*j_;3IwgL`bYd=TgVec3b-t@goH*t17tRBl3So-kG5iy-Y22_W`T*9&rM*d~iS2qf3wU^@m!6?reQcAf#;H zpR1*#b~*W4vUo!ZFYWLpEh-HfXX(W;9%k2xaqpyb2m?mno_&$7c@@H`x>k3|1k@35|fHxSrd7UJhOR=?=-t8Zi#y z8g(WnFOxF!b3E3F8?|cFv-z?52w+%4w%>Giy7fkWSaQ+qFc~-0-!wO$^gcoZ5L1vI z0MmhkLx4Sprr&iIaB95(ssyY}?#dc2G`YtdcRd@se*6Y-kb9};7uyhwQ_ z4|`G!mxiRrmE$mi+>yP_7VRhF^O7Qh({~okIqKnMEsk4Zq~vnw`+oW!3IQ$#US3|> zS3Md(00{$I0#_v@S{oXkJ3CLV?QDcH(9;9H=yRX|<55v~^WZOmL9w;0WjuO}fk8xA zxUB)$(7*tPpXNH?!S;VCvMo^_Gtm$h$RgC$-;W9P+4HTYEe;8robFC_J$8~!+QBf| z&Om>t=yIo;h88+#-bUxXzWxR6eyylSw^Q!^X)$^YI$j%b(GVxP(;hYz^B%^3#)f+E zCJ6*ob?K!**2{Ag@u|N47IQNT3=UP~eIjMNJ+|+mPee7-Gcb%`F<9kfX{a{+{r&Uu z9LCp7|DIZ0l!YPy?sTLQ&HCCF%it6or+A`BKbsnHPN2zH^xY~>d}X?)wnUkMy#q`B z9*qf5e;Qq6R^wpaEgt#zYd?Q~H1Zd3qFisP#sA!t&tpM8)%=tc#d5!-^zH3!D6{LU zt4AS90N7-IWs5MP!U(t>s}f&dUx0Rv4?e?YV_&f`Gdn}^0VNp(9R^OBLhs^ktE+JI z{si@SLAzY6oF@vH4`;Mf{VPtlaPC#yeK=UcDVUwv^37q_x=hZrqh>feRNW`Db4Txukk9^sOx{V4hUYdjGlMmZbe;or6pSkY)DFBvu9~kP zgkS^`k*;k(kXL`dFoGq+VgEC6H^9ONi$SUf7Tp1-}*$~nPkBfFpfLIha$sP1aM1E2e0>~&#CGj0>zm#|7Me1 z$QmzUe*tmMm(iw3hL_h|VIe&;DY{_X2lVC--k8A zZJ?g>zpoAYA}G_vF!Ga!a^3o!Na%hM(igA$t=w47S^fT-ZU&ChWdJX^D;RC zO>OZ=3X1ojm(_|BK|HXm<4i*+IEv=~~GuV&-UGvY2iCX~fsIPp)4`Llmr{rFT zY%NK_o15|9wJI}b2A_V;Y^^{&Z>F8A2{u#*n_3)hh}htL2Tl% z+#!7ps|XubfbPF)hby!n$1S=Y)p_@THGA+Q4- z6gqockdo4-J;lW(()_~EFrm}R$u+&xiUmq9SnR-Y&+N6~_8oYte*5-udiq64CkKZW z2*RNLh$}9HPxmt@_+)|Gp=qdrX{lInWB>gTdKt&5PXi)P-}?^j==gX6$U-R8Ai@Z9 zb90M{b$Q-?KMBq`Fn|Wup7&ke+Jct=cvOLl7ad#$RJ&Fuo54Ij{Ogy1FH{A}*47!w z(Dxo zcAbI#V8004ZM>?`QzR$x*s)_-TG?Xk!N3e!l7ylh452{sg_h;%=>jq`DA@coK!VkV zFM{&2A%_sM&aC}KnX)s%-@ji^>z>u!dXWXq6moN6->w1-t0kLIFa#uh6OR*Y1`jJ0 z6%~+8XPwGovvrGXVAl^i#}KNB_P`qJZ{ha^P%lKv!)@5>U^;hq-|?ek-JROsJ##RN zl@%2gVIG2u+R`#@{uUI3pvD1823==*e*J)S)d_o_+u_gvq!{E0g)0juxxumskzC_h!N}KoE?70Mm6LS} zELtNp&mKUguV9Jk=mvvh<=2iq(FceF$;}fiGYGpM81u{MLO=IJajOVXi zb8Cqf%{gnP1IU1!^mM3MfP(|TJvurFy!{n50;(W+^x-vejhK+bP#(djAzG2%kSMVe zuflf(fmoKK4Cc$NgE&?ZdJs!IDffgR90a@}|4rLRsY6j3{e|mp#idi@6+-UQ^MbMo zB=&==xP7g%%HNiVTCUbySobN^UHr&>p(=JGVp@KGZ3E3NjY}|kUs{qcD_KEQ=`*1A zuYcSY%Vm)FsnG?K{6L%qKA7*(K8tc1&8J&@5-RMzKoe27oo?@mDqDBO4wG0nZW1{5 zeycFhl*hzxw-z*>Imxx1ZZ_5M9W8%)GSx%JAm*wmzA$l-Sljd=;V}49SA^8Xy<REQQ7gdYUt@A z?(XxJo}iSY83nQ!684KHd2_&#MrBUcldo}0m&AstuV>LO7R)YEmd^JFc2ew% zJ_-snMEp+nFA)!qxvpNMt#N+cR-&csm`I?!r{Ss9;ZkpQ9a?VRls+pz#vj&bj3vH! zi@f?HmwaVy^m#uod*#UT_VO#bsQ9zkd#aQPb7c4k3KKbOVn_J4re`f*=s39f<2d&_ zJ)wSGj#FfVwE;IH69rHi&cf1HoGJjkJ7 zB^!IB=nCb@S&^$qs-&vI%)Z&(>{WM@wPFfWE-5)TS9Hkk^EDi@$fjuKug0n`!sy)( zto}T*8$X&#eaimXe#fIKWXmBL+dcl@cgVU{$)(Q5Z05Dr8W^0Tn|q1;7CydtO&`cH zmxe8G<89nE_KfWv%hBoV)UwqtN$tbnE{n*qEoIH(K8-m&epa+gz2mm87*1IS)H2T3 zhv;IpkYp$0+S3}CZB27huWdi}o6F=GM#VA&f~rYX@y5fhG~$RDX;4I-Hk6Y!iCH22 zY|vS+#dd9^OuPbZt=HOORN}w`-pcL7*l`7?;&OdD)A-_$tc{H@hf@ibVw)7TaEtsd z&n3-b8GEKgSRE}dAgGwjVkguJx_Wmgac{S@R};m}WR3dqaK5dk*RHtniB0oolkO0C zoUANN)P<$t>wS?V3s3dkmV#5w8l)w62BR{j1%66! ziAMFd7jJtOKbx=WcCl>F(hV59X0Tj`iSH-X*KwiGHXJ>es*XOUe_@K8H*$uY>$76f zPdnBJ@+=))bHzwv(sIjHsHp*Q4M{~ev&xDI}wcpwJ +
+The original GAN paper. +

+ # Conditional-GAN should be a class. ### Class member function: the initializer. ```python @@ -21,7 +25,7 @@ class DCGAN(object): self.z_dim = z_dim # input noise dimension # define parameters of discriminators - self.D_W0 = pd.Variable(shape=[784, 128], data=pd.gaussian_normal_randomizer()) + self.D_W0 = pd.Variable(shape=[3,3, 1, 128], data=pd.gaussian_normal_randomizer()) self.D_b0 = pd.Variable(np.zeros(128)) # variable also support initialization using a numpy data self.D_W1 = pd.Variable(shape=[784, 128], data=pd.gaussian_normal_randomizer()) self.D_b1 = pd.Variable(np.zeros(128)) # variable also support initialization using a numpy data @@ -51,7 +55,7 @@ def generator(self, z, y = None): G_h0_bn = pd.batch_norm(G_h0) G_h0_relu = pd.relu(G_h0_bn) - G_h1 = pd.fc(G_h0_relu, self.G_w1, self.G_b1) + G_h1 = pd.deconv(G_h0_relu, self.G_w1, self.G_b1) G_h1_bn = pd.batch_norm(G_h1) G_h1_relu = pd.relu(G_h1_bn) From 395051512dbaaa8baa4570f8bac10da152bb68ad Mon Sep 17 00:00:00 2001 From: qijun Date: Tue, 3 Oct 2017 16:56:50 -0700 Subject: [PATCH 189/342] remove device context manager --- paddle/framework/CMakeLists.txt | 2 +- paddle/framework/executor.cc | 12 ++-- paddle/framework/executor.h | 3 +- paddle/platform/CMakeLists.txt | 2 - paddle/platform/device_context_manager.cc | 68 ----------------------- paddle/platform/device_context_manager.h | 58 ------------------- 6 files changed, 7 insertions(+), 138 deletions(-) delete mode 100644 paddle/platform/device_context_manager.cc delete mode 100644 paddle/platform/device_context_manager.h diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt index 898b3a990d..dde96d19e4 100644 --- a/paddle/framework/CMakeLists.txt +++ b/paddle/framework/CMakeLists.txt @@ -44,7 +44,7 @@ add_custom_command(TARGET framework_py_proto POST_BUILD cc_library(backward SRCS backward.cc DEPS net_op) cc_test(backward_test SRCS backward_test.cc DEPS backward recurrent_op device_context) -cc_library(executor SRCS executor.cc DEPS op_registry device_context_manager scope framework_proto ${GLOB_OP_LIB}) +cc_library(executor SRCS executor.cc DEPS op_registry device_context scope framework_proto ${GLOB_OP_LIB}) if(WITH_GPU) nv_test(executor_test SRCS executor_test.cc DEPS executor) else() diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc index 717f9bf81a..766945db9b 100644 --- a/paddle/framework/executor.cc +++ b/paddle/framework/executor.cc @@ -25,14 +25,12 @@ Executor::Executor(const std::vector& places) { device_contexts_.resize(places.size()); for (size_t i = 0; i < places.size(); i++) { if (platform::is_cpu_place(places[i])) { - device_contexts_[i] = platform::DeviceContextManager::Get() - ->GetDeviceContext( - boost::get(places[i])); + device_contexts_[i].reset(new platform::CPUDeviceContext( + boost::get(places[i]))); } else { #ifndef PADDLE_ONLY_CPU - device_contexts_[i] = platform::DeviceContextManager::Get() - ->GetDeviceContext( - boost::get(places[i])); + device_contexts_[i].reset(new platform::CUDADeviceContext( + boost::get(places[i]))); #else PADDLE_THROW("'GPUPlace' is not supported in CPU only device."); #endif @@ -63,7 +61,7 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope, } // TODO(tonyyang-svail): need to test gpu device - for (auto device_context : device_contexts_) { + for (auto& device_context : device_contexts_) { device_context->Wait(); } } diff --git a/paddle/framework/executor.h b/paddle/framework/executor.h index 795b8ffdab..d5c21c59fe 100644 --- a/paddle/framework/executor.h +++ b/paddle/framework/executor.h @@ -18,7 +18,6 @@ limitations under the License. */ #include "paddle/framework/op_info.h" #include "paddle/framework/scope.h" #include "paddle/framework/tensor.h" -#include "paddle/platform/device_context_manager.h" namespace paddle { namespace framework { @@ -30,7 +29,7 @@ class Executor { void Run(const ProgramDesc&, Scope*, std::vector*); private: - std::vector device_contexts_; + std::vector> device_contexts_; }; } // namespace framework diff --git a/paddle/platform/CMakeLists.txt b/paddle/platform/CMakeLists.txt index b4ddf721dd..daf519b91d 100644 --- a/paddle/platform/CMakeLists.txt +++ b/paddle/platform/CMakeLists.txt @@ -23,7 +23,5 @@ cc_library(device_context SRCS device_context.cc DEPS memory buddy_allocator system_allocator memory_block meta_data meta_cache place eigen3 ${GPU_CTX_DEPS}) nv_test(device_context_test SRCS device_context_test.cc DEPS device_context gpu_info) -cc_library(device_context_manager SRCS device_context_manager.cc DEPS device_context) - nv_test(cudnn_helper_test SRCS cudnn_helper_test.cc DEPS dynload_cuda) nv_test(transform_test SRCS transform_test.cu DEPS paddle_memory place device_context) diff --git a/paddle/platform/device_context_manager.cc b/paddle/platform/device_context_manager.cc deleted file mode 100644 index 156d317c8a..0000000000 --- a/paddle/platform/device_context_manager.cc +++ /dev/null @@ -1,68 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#include "paddle/platform/device_context_manager.h" - -namespace paddle { -namespace platform { - -DeviceContextManager::DeviceContextManager() { -#ifndef PADDLE_ONLY_CPU - device_count_ = GetDeviceCount(); - cuda_contexts_.reserve(device_count_); - for (int i = 0; i < device_count_; i++) { - cuda_contexts_[i] = nullptr; - } -#endif -} - -template <> -CPUDeviceContext* DeviceContextManager::GetDeviceContext< - CPUPlace, CPUDeviceContext>(const CPUPlace& place) { - if (!cpu_context_) { - cpu_context_ = new CPUDeviceContext(place); - } - return cpu_context_; -} - -#ifndef PADDLE_ONLY_CPU -template <> -CUDADeviceContext* DeviceContextManager::GetDeviceContext< - GPUPlace, CUDADeviceContext>(const GPUPlace& place) { - int gpu_id = place.device; - PADDLE_ENFORCE(gpu_id < device_count_, - "GPU device id must less than device count"); - SetDeviceId(gpu_id); - if (!cuda_contexts_[gpu_id]) { - cuda_contexts_[gpu_id] = new CUDADeviceContext(place); - } - return cuda_contexts_[gpu_id]; -} -#endif - -DeviceContextManager::~DeviceContextManager() { - if (cpu_context_) { - delete cpu_context_; - } -#ifndef PADDLE_ONLY_CPU - for (int i = 0; i < device_count_; i++) { - if (cuda_contexts_[i]) { - delete cuda_contexts_[i]; - } - } -#endif -} - -} // namespace platform -} // namespace paddle diff --git a/paddle/platform/device_context_manager.h b/paddle/platform/device_context_manager.h deleted file mode 100644 index da15808a60..0000000000 --- a/paddle/platform/device_context_manager.h +++ /dev/null @@ -1,58 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once -#include "paddle/platform/device_context.h" - -namespace paddle { -namespace platform { - -template -struct Converter; - -template <> -struct Converter { - using DeviceContextType = CPUDeviceContext; -}; - -#ifndef PADDLE_ONLY_CPU -template <> -struct Converter { - using DeviceContextType = CUDADeviceContext; -}; -#endif - -class DeviceContextManager { - public: - DeviceContextManager(); - ~DeviceContextManager(); - - template ::DeviceContextType> - DeviceType* GetDeviceContext(const PlaceType& place); - - static DeviceContextManager* Get() { - static DeviceContextManager inst; - return &inst; - } - - private: - CPUDeviceContext* cpu_context_; -#ifndef PADDLE_ONLY_CPU - int device_count_; - std::vector cuda_contexts_; -#endif -}; -} // namespace platform -} // namespace paddle From 81fc7774ec02e23163014e66eeddeea43e5dd703 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Tue, 3 Oct 2017 16:59:32 -0700 Subject: [PATCH 190/342] optimize infershape context --- paddle/framework/operator.h | 64 ++++++++++++++++++++----------------- 1 file changed, 34 insertions(+), 30 deletions(-) diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h index 5bb5c8e2f9..99f721cc67 100644 --- a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -323,74 +323,76 @@ class CompileTimeInferShapeContext : public InferShapeContextBase { CompileTimeInferShapeContext(const OpDescBind& op, const BlockDescBind& block) : op_(op), block_(block) {} - bool HasInput(const std::string& name) const { + bool HasInput(const std::string& name) const override { const std::vector& input_names = op_.Input(name); PADDLE_ENFORCE_EQ(input_names.size(), 1UL, "Inputs(%s) length is not 1", name); return block_.HasVar(input_names[0]); } - bool HasOutput(const std::string& name) const { + bool HasOutput(const std::string& name) const override { const std::vector& output_names = op_.Output(name); PADDLE_ENFORCE_EQ(output_names.size(), 1UL, "Outputs(%s) length is not 1", name); return block_.HasVar(output_names[0]); } - bool HasInputs(const std::string& name) const { + bool HasInputs(const std::string& name) const override { const std::vector& input_names = op_.Input(name); - PADDLE_ENFORCE_GT(input_names.size(), 0UL, "Inputs(%s) length is 0", name); + PADDLE_ENFORCE(!input_names.empty(), "Inputs(%s) length is 0", name); for (auto& input : input_names) { if (!block_.HasVar(input)) return false; } return true; } - bool HasOutputs(const std::string& name) const { + bool HasOutputs(const std::string& name) const override { const std::vector& output_names = op_.Output(name); - PADDLE_ENFORCE_GT(output_names.size(), 0UL, "Inputs(%s) length is 0", name); + PADDLE_ENFORCE(!output_names.empty(), "Inputs(%s) length is 0", name); for (auto& output : output_names) { if (!block_.HasVar(output)) return false; } return true; } - DDim GetInputDim(const std::string& name) const { + DDim GetInputDim(const std::string& name) const override { std::vector ddims = GetInputsDim(name); PADDLE_ENFORCE_EQ(ddims.size(), 1UL, "Inputs(%s) length is not 1", name); return ddims[0]; } - void SetInputDim(const std::string& name, const DDim& dim) { + void SetInputDim(const std::string& name, const DDim& dim) override { SetInputsDim(name, {dim}); } - DDim GetOutputDim(const std::string& name) const { + DDim GetOutputDim(const std::string& name) const override { std::vector ddims = GetOutputsDim(name); PADDLE_ENFORCE_EQ(ddims.size(), 1UL, "Outputs(%s) length is not 1", name); return ddims[0]; } - void SetOutputDim(const std::string& name, const DDim& dim) { + void SetOutputDim(const std::string& name, const DDim& dim) override { SetOutputsDim(name, {dim}); } - AttrReader Attrs() const { return AttrReader(op_.GetAttrMap()); } + AttrReader Attrs() const override { return AttrReader(op_.GetAttrMap()); } - const std::vector& Inputs(const std::string& name) const { + const std::vector& Inputs( + const std::string& name) const override { return op_.Input(name); } - const std::vector& Outputs(const std::string& name) const { + const std::vector& Outputs( + const std::string& name) const override { return op_.Output(name); } private: - DDim GetDim(const std::string& name) const { + DDim GetDim(const std::string& name) const override { return framework::make_ddim(block_.Var(name)->Shape()); } - void SetDim(const std::string& name, const DDim& dim) { + void SetDim(const std::string& name, const DDim& dim) override { block_.Var(name)->SetShape(framework::vectorize(dim)); } @@ -403,21 +405,21 @@ class RuntimeInferShapeContext : public InferShapeContextBase { RuntimeInferShapeContext(const OperatorBase& op, const Scope& scope) : op_(op), scope_(scope) {} - bool HasInput(const std::string& name) const { + bool HasInput(const std::string& name) const override { auto ipt = op_.Input(name); auto* var = ipt == kEmptyVarName ? nullptr : scope_.FindVar(ipt); return var != nullptr; } - bool HasOutput(const std::string& name) const { + bool HasOutput(const std::string& name) const override { auto ipt = op_.Output(name); auto* var = ipt == kEmptyVarName ? nullptr : scope_.FindVar(ipt); return var != nullptr; } - bool HasInputs(const std::string& name) const { + bool HasInputs(const std::string& name) const override { auto inputs = op_.Inputs(name); - if (inputs.size() == 0UL) { + if (inputs.empty()) { return false; } for (auto& input : inputs) { @@ -428,9 +430,9 @@ class RuntimeInferShapeContext : public InferShapeContextBase { return true; } - bool HasOutputs(const std::string& name) const { + bool HasOutputs(const std::string& name) const override { auto outputs = op_.Outputs(name); - if (outputs.size() == 0UL) { + if (outputs.empty()) { return false; } for (auto& output : outputs) { @@ -441,29 +443,31 @@ class RuntimeInferShapeContext : public InferShapeContextBase { return true; } - DDim GetInputDim(const std::string& name) const { + DDim GetInputDim(const std::string& name) const override { return GetDim(op_.Input(name)); } - void SetInputDim(const std::string& name, const DDim& dim) { + void SetInputDim(const std::string& name, const DDim& dim) override { SetDim(op_.Input(name), dim); } - DDim GetOutputDim(const std::string& name) const { + DDim GetOutputDim(const std::string& name) const override { return GetDim(op_.Output(name)); } - void SetOutputDim(const std::string& name, const DDim& dim) { + void SetOutputDim(const std::string& name, const DDim& dim) override { SetDim(op_.Output(name), dim); } - AttrReader Attrs() const { return AttrReader(op_.Attrs()); } + AttrReader Attrs() const override { return AttrReader(op_.Attrs()); } - const std::vector& Inputs(const std::string& name) const { + const std::vector& Inputs( + const std::string& name) const override { return op_.Inputs(name); } - const std::vector& Outputs(const std::string& name) const { + const std::vector& Outputs( + const std::string& name) const override { return op_.Outputs(name); } @@ -484,11 +488,11 @@ class RuntimeInferShapeContext : public InferShapeContextBase { return t; } - DDim GetDim(const std::string& name) const { + DDim GetDim(const std::string& name) const override { return GetTensor(name)->dims(); } - void SetDim(const std::string& name, const DDim& dim) { + void SetDim(const std::string& name, const DDim& dim) override { GetTensor(name)->Resize(dim); } From 324876bbbfb0dd84f2172f951a2a4880bee32df4 Mon Sep 17 00:00:00 2001 From: Abhinav Arora Date: Tue, 3 Oct 2017 17:26:02 -0700 Subject: [PATCH 191/342] Changing learning rate from type Input(float) to Input(tensor) (#4578) --- paddle/operators/sgd_op.cc | 3 +++ paddle/operators/sgd_op.h | 2 +- python/paddle/v2/framework/tests/test_sgd_op.py | 2 +- 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/paddle/operators/sgd_op.cc b/paddle/operators/sgd_op.cc index 8f9eae4186..1a4d3fb8c5 100644 --- a/paddle/operators/sgd_op.cc +++ b/paddle/operators/sgd_op.cc @@ -32,6 +32,9 @@ class SGDOp : public framework::OperatorWithKernel { PADDLE_ENFORCE(ctx->HasOutput("param_out"), "Output(param_out) of SGDOp should not be null."); + auto lr_dims = ctx->GetInputDim("learning_rate"); + PADDLE_ENFORCE_EQ(framework::product(lr_dims), 1, + "Learning rate should have 1 element"); auto param_dim = ctx->GetInputDim("param"); PADDLE_ENFORCE_EQ(param_dim, ctx->GetInputDim("grad"), "Two input of SGD Op's dimension must be same."); diff --git a/paddle/operators/sgd_op.h b/paddle/operators/sgd_op.h index 977d201ced..e2ae65beb0 100644 --- a/paddle/operators/sgd_op.h +++ b/paddle/operators/sgd_op.h @@ -31,7 +31,7 @@ class SGDOpKernel : public framework::OpKernel { auto param = ctx.Input("param"); auto grad = ctx.Input("grad"); auto param_out = ctx.Output("param_out"); - float lr = *ctx.Input("learning_rate"); + float lr = ctx.Input("learning_rate")->data()[0]; param_out->mutable_data(ctx.GetPlace()); diff --git a/python/paddle/v2/framework/tests/test_sgd_op.py b/python/paddle/v2/framework/tests/test_sgd_op.py index f1125f4edb..c05364490f 100644 --- a/python/paddle/v2/framework/tests/test_sgd_op.py +++ b/python/paddle/v2/framework/tests/test_sgd_op.py @@ -8,7 +8,7 @@ class TestSGDOp(OpTest): self.op_type = "sgd" w = np.random.random((102, 105)).astype("float32") g = np.random.random((102, 105)).astype("float32") - lr = 0.1 + lr = np.array([0.1]).astype("float32") self.inputs = {'param': w, 'grad': g, 'learning_rate': lr} self.outputs = {'param_out': w - lr * g} From ab9545aa95fb482e7b51b58e0abe2191c9ef3bea Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Wed, 4 Oct 2017 00:44:07 -0700 Subject: [PATCH 192/342] add shape_inference_map --- paddle/framework/CMakeLists.txt | 4 +- paddle/framework/op_registry.h | 4 ++ paddle/framework/shape_inference.h | 1 + paddle/framework/shape_inference_map.cc | 57 +++++++++++++++++++++++++ paddle/framework/shape_inference_map.h | 56 ++++++++++++++++++++++++ 5 files changed, 121 insertions(+), 1 deletion(-) create mode 100644 paddle/framework/shape_inference_map.cc create mode 100644 paddle/framework/shape_inference_map.h diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt index a2efcdb55c..986b45451f 100644 --- a/paddle/framework/CMakeLists.txt +++ b/paddle/framework/CMakeLists.txt @@ -26,8 +26,10 @@ cc_library(op_info SRCS op_info.cc DEPS attribute framework_proto proto_desc) cc_library(operator SRCS operator.cc DEPS op_info device_context tensor scope proto_desc) cc_test(operator_test SRCS operator_test.cc DEPS operator op_registry) +cc_library(shape_inference_map SRCS shape_inference_map.cc DEPS op_info operator) + cc_library(grad_op_builder SRCS grad_op_builder.cc DEPS operator proto_desc) -cc_library(op_registry SRCS op_registry.cc DEPS grad_op_builder op_proto_maker op_info) +cc_library(op_registry SRCS op_registry.cc DEPS grad_op_builder op_proto_maker op_info shape_inference_map) cc_test(op_registry_test SRCS op_registry_test.cc DEPS op_registry) cc_test(grad_op_builder_test SRCS grad_op_builder_test.cc DEPS grad_op_builder op_registry sum_op) diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h index 4ee2c7d275..f04b6c503a 100644 --- a/paddle/framework/op_registry.h +++ b/paddle/framework/op_registry.h @@ -26,6 +26,7 @@ limitations under the License. */ #include "paddle/framework/grad_op_builder.h" #include "paddle/framework/operator.h" #include "paddle/framework/scope.h" +#include "paddle/framework/shape_inference_map.h" namespace paddle { namespace framework { @@ -54,9 +55,12 @@ class OpRegistry { const std::string& grad_op_type) { OperatorRegistrar reg(op_type.c_str()); reg.info.grad_op_type_ = grad_op_type; + ShapeInferenceMap::Instance().CreateOpWithKernel(reg.info, op_type); // register gradient op if (!grad_op_type.empty()) { OperatorRegistrar grad_reg(grad_op_type.c_str()); + ShapeInferenceMap::Instance().CreateOpWithKernel(grad_reg.info, + grad_op_type); } } diff --git a/paddle/framework/shape_inference.h b/paddle/framework/shape_inference.h index bc8af0eb3e..ac6f238638 100644 --- a/paddle/framework/shape_inference.h +++ b/paddle/framework/shape_inference.h @@ -14,6 +14,7 @@ limitations under the License. */ #pragma once +#include "paddle/framework/attribute.h" #include "paddle/framework/ddim.h" namespace paddle { diff --git a/paddle/framework/shape_inference_map.cc b/paddle/framework/shape_inference_map.cc new file mode 100644 index 0000000000..1a27037221 --- /dev/null +++ b/paddle/framework/shape_inference_map.cc @@ -0,0 +1,57 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/framework/shape_inference_map.h" + +namespace paddle { +namespace framework { + +static VariableNameMap ConvertOpProtoVarsToVarNameMap( + const google::protobuf::RepeatedPtrField& op_proto_vars) { + VariableNameMap ret_val; + for (auto& var : op_proto_vars) { + ret_val[var.name()] = {}; + } + return ret_val; +} + +static ShapeInferenceMap* g_shape_inference_map = nullptr; + +ShapeInferenceMap& ShapeInferenceMap::Instance() { + if (g_shape_inference_map == nullptr) { + g_shape_inference_map = new ShapeInferenceMap(); + } + return *g_shape_inference_map; +} + +void ShapeInferenceMap::CreateOpWithKernel(const OpInfo& op_info, + const std::string& op_type) { + const VariableNameMap inputs = + ConvertOpProtoVarsToVarNameMap(op_info.Proto().inputs()); + const VariableNameMap outputs = + ConvertOpProtoVarsToVarNameMap(op_info.Proto().outputs()); + auto* op = op_info.Creator()(op_type, inputs, outputs, {}); + auto* op_with_kernel = dynamic_cast(op); + auto it = op_shape_inference_map_.find(op_type); + if (it != op_shape_inference_map_.end()) { + PADDLE_THROW("OpWithKernel(%s) is already registered for infer_shape", + op_type); + } + if (op_with_kernel != nullptr) { + op_shape_inference_map_[op_type] = op_with_kernel; + } +} + +} // namespace framework +} // namespace paddle diff --git a/paddle/framework/shape_inference_map.h b/paddle/framework/shape_inference_map.h new file mode 100644 index 0000000000..fb12669026 --- /dev/null +++ b/paddle/framework/shape_inference_map.h @@ -0,0 +1,56 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include + +#include "paddle/framework/op_info.h" +#include "paddle/framework/operator.h" +#include "paddle/framework/shape_inference.h" + +namespace paddle { +namespace framework { + +class ShapeInferenceMap { + public: + static ShapeInferenceMap& Instance(); + + const OperatorBase* GetOperator(const std::string& op_type) { + auto it = op_shape_inference_map_.find(op_type); + if (it == op_shape_inference_map_.end()) { + PADDLE_THROW("op with kernel for Op(%s) is not registered", op_type); + } + return it->second; + } + + void CreateOpWithKernel(const OpInfo& op_info, const std::string& op_type); + + OperatorWithKernel* GetOpWithKernel(const std::string& op_type) { + auto it = op_shape_inference_map_.find(op_type); + if (it == op_shape_inference_map_.end()) { + return nullptr; + } + return it->second; + } + + private: + ShapeInferenceMap() = default; + DISABLE_COPY_AND_ASSIGN(ShapeInferenceMap); + + std::unordered_map op_shape_inference_map_; +}; + +} // namespace framework +} // namespace paddle From eed2c1e1d6237f421c9b8c0bbd2fd51d53beddcf Mon Sep 17 00:00:00 2001 From: Abhinav Arora Date: Wed, 4 Oct 2017 09:29:13 -0700 Subject: [PATCH 193/342] Changing SGD inputs and outputs to conform to Operator naming convention (#4586) --- paddle/operators/sgd_op.cc | 32 +++++++++---------- paddle/operators/sgd_op.h | 8 ++--- .../paddle/v2/framework/tests/test_sgd_op.py | 4 +-- 3 files changed, 22 insertions(+), 22 deletions(-) diff --git a/paddle/operators/sgd_op.cc b/paddle/operators/sgd_op.cc index 1a4d3fb8c5..31d491f130 100644 --- a/paddle/operators/sgd_op.cc +++ b/paddle/operators/sgd_op.cc @@ -23,22 +23,22 @@ class SGDOp : public framework::OperatorWithKernel { protected: void InferShape(framework::InferShapeContextBase *ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("param"), - "Input(param) of SGDOp should not be null."); - PADDLE_ENFORCE(ctx->HasInput("grad"), - "Input(grad) of SGDOp should not be null."); - PADDLE_ENFORCE(ctx->HasInput("learning_rate"), - "Input(learning_rate) of SGDOp should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("param_out"), - "Output(param_out) of SGDOp should not be null."); + PADDLE_ENFORCE(ctx->HasInput("Param"), + "Input(Param) of SGDOp should not be null."); + PADDLE_ENFORCE(ctx->HasInput("Grad"), + "Input(Grad) of SGDOp should not be null."); + PADDLE_ENFORCE(ctx->HasInput("LearningRate"), + "Input(LearningRate) of SGDOp should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("ParamOut"), + "Output(ParamOut) of SGDOp should not be null."); - auto lr_dims = ctx->GetInputDim("learning_rate"); + auto lr_dims = ctx->GetInputDim("LearningRate"); PADDLE_ENFORCE_EQ(framework::product(lr_dims), 1, "Learning rate should have 1 element"); - auto param_dim = ctx->GetInputDim("param"); - PADDLE_ENFORCE_EQ(param_dim, ctx->GetInputDim("grad"), + auto param_dim = ctx->GetInputDim("Param"); + PADDLE_ENFORCE_EQ(param_dim, ctx->GetInputDim("Grad"), "Two input of SGD Op's dimension must be same."); - ctx->SetOutputDim("param_out", param_dim); + ctx->SetOutputDim("ParamOut", param_dim); } }; @@ -46,10 +46,10 @@ class SGDOpMaker : public framework::OpProtoAndCheckerMaker { public: SGDOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("param", "input parameter"); - AddInput("learning_rate", "learning rate of sgd"); - AddInput("grad", "input gradient"); - AddOutput("param_out", "output parameter"); + AddInput("Param", "Input parameter"); + AddInput("LearningRate", "Learning rate of SGD"); + AddInput("Grad", "Input gradient"); + AddOutput("ParamOut", "output parameter"); AddComment(R"DOC( Simplest sgd algorithm. diff --git a/paddle/operators/sgd_op.h b/paddle/operators/sgd_op.h index e2ae65beb0..d72d333a9a 100644 --- a/paddle/operators/sgd_op.h +++ b/paddle/operators/sgd_op.h @@ -28,10 +28,10 @@ template class SGDOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { - auto param = ctx.Input("param"); - auto grad = ctx.Input("grad"); - auto param_out = ctx.Output("param_out"); - float lr = ctx.Input("learning_rate")->data()[0]; + auto param = ctx.Input("Param"); + auto grad = ctx.Input("Grad"); + auto param_out = ctx.Output("ParamOut"); + float lr = ctx.Input("LearningRate")->data()[0]; param_out->mutable_data(ctx.GetPlace()); diff --git a/python/paddle/v2/framework/tests/test_sgd_op.py b/python/paddle/v2/framework/tests/test_sgd_op.py index c05364490f..2dd881e5e1 100644 --- a/python/paddle/v2/framework/tests/test_sgd_op.py +++ b/python/paddle/v2/framework/tests/test_sgd_op.py @@ -10,8 +10,8 @@ class TestSGDOp(OpTest): g = np.random.random((102, 105)).astype("float32") lr = np.array([0.1]).astype("float32") - self.inputs = {'param': w, 'grad': g, 'learning_rate': lr} - self.outputs = {'param_out': w - lr * g} + self.inputs = {'Param': w, 'Grad': g, 'LearningRate': lr} + self.outputs = {'ParamOut': w - lr * g} def test_check_output(self): self.check_output() From 5f51d0afc49f4bd4c624ec62aa1e3ccd31840aee Mon Sep 17 00:00:00 2001 From: Yi Wang Date: Wed, 4 Oct 2017 10:00:39 -0700 Subject: [PATCH 194/342] Add -D PADDLE_WITH_CUDA in cmake/configure.cmake --- cmake/configure.cmake | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/cmake/configure.cmake b/cmake/configure.cmake index 51c3b918cc..4e044ca421 100644 --- a/cmake/configure.cmake +++ b/cmake/configure.cmake @@ -49,11 +49,16 @@ if(NOT WITH_GOLANG) endif(NOT WITH_GOLANG) if(NOT WITH_GPU) + # Will gradually remove uses of PADDLE_ONLY_CPU in source files, + # so could we remove -DPADDLE_ONLY_CPU. + # c.f. https://github.com/PaddlePaddle/Paddle/issues/4588 add_definitions(-DPADDLE_ONLY_CPU) add_definitions(-DHPPL_STUB_FUNC) list(APPEND CMAKE_CXX_SOURCE_FILE_EXTENSIONS cu) else() + add_definitions(-DPADDLE_WITH_CUDA) + FIND_PACKAGE(CUDA REQUIRED) if(${CUDA_VERSION_MAJOR} VERSION_LESS 7) From 37b0bb15973632fca96fef31c8a5b30a78a80042 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Wed, 4 Oct 2017 11:25:17 -0700 Subject: [PATCH 195/342] Fix compile errors --- paddle/framework/backward.cc | 54 ++++++++++++++++++----------------- paddle/framework/block_desc.h | 5 ++-- 2 files changed, 30 insertions(+), 29 deletions(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index 4c76326e7c..a84262e007 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -14,9 +14,11 @@ #include "paddle/framework/backward.h" +#include #include #include +#include "paddle/framework/block_desc.h" #include "paddle/framework/op_registry.h" #include "paddle/operators/net_op.h" #include "paddle/operators/recurrent_op.h" @@ -254,23 +256,22 @@ static bool AllGradInSet(const std::vector& names, std::vector> MakeGradOpDescs( const std::unique_ptr& op_desc, - unordered_set& no_grad_vars) { + std::unordered_set& no_grad_vars) { std::vector> grad_op_descs; // All input gradients of forwarding operator do not need to calculat. - if (AllGradInSet(op_desc->InputArgumentNames(), kGradVarSuffix, - no_grad_vars)) { + if (AllGradInSet(op_desc->InputArgumentNames(), no_grad_vars)) { return grad_op_descs; // empty vector } // All output gradients of forwarding operator do not need to calculate. - const std::vector& outputs = op_desc->OutputArugumentNames(); - if (AllGradInSet(outputs, kGradVarSuffix, no_grad_vars)) { + const std::vector& outputs = op_desc->OutputArgumentNames(); + if (AllGradInSet(outputs, no_grad_vars)) { for (const std::string& name : outputs) { no_grad_vars.insert(GradVarName(name)); } return grad_op_descs; // empty vector } - grad_op_descs = OpRegistry::CreateGradOpDescs(op_desc); + grad_op_descs = OpRegistry::CreateGradOpDescs(*op_desc); std::list> pending_fill_zeros_ops; for (auto& desc : grad_op_descs) { @@ -280,43 +281,43 @@ std::vector> MakeGradOpDescs( 0, in_name.size() - sizeof(kGradVarSuffix) / sizeof(char) + 1); std::string new_name = prefix + kZeroVarSuffix; desc->Rename(in_name, new_name); - OpDescBind* fill_zeros_op = new OpDescBind( - "fill_zeros_like", {{"X", {prefix}}}, {{"Y", {new_name}}}, {}); - pending_fill_zeros_ops.push_back({fill_zeros_op}); + std::unique_ptr fill_zeros_op(new OpDescBind( + "fill_zeros_like", {{"X", {prefix}}}, {{"Y", {new_name}}}, {})); + pending_fill_zeros_ops.push_back(std::move(fill_zeros_op)); } } - for (const std::string& out_name : desc->OutputArgumentName()) { + for (const std::string& out_name : desc->OutputArgumentNames()) { if (no_grad_vars.count(out_name)) { desc->Rename(out_name, kEmptyVarName); } } } - grad_op_descs.insert(std::begin(grad_op_descs), - std::begin(pending_fill_zeros_ops), - std::end(pending_fill_zeros_ops)); + for (auto& p : pending_fill_zeros_ops) { + grad_op_descs.push_back(std::move(p)); + } - // TODO (fengjiayi): RNN op + // TODO(fengjiayi): RNN op return grad_op_descs; } -void AppendBackwardOpDescs( - BlockDescBind& block_desc, - const std::unordered_set& no_grad_vars) { +void AppendBackwardOpDescs(BlockDescBind& block_desc, + std::unordered_set& no_grad_vars) { std::unordered_map> dup_out_ops; size_t grad_desc_idx = 0; - std::deque> block_op_descs = block_desc.ops_; + std::deque>& block_op_descs = block_desc.ops_; std::vector> backward_descs; for (auto it = block_op_descs.rbegin(); it != block_op_descs.rend(); ++it) { std::vector> op_grads = MakeGradOpDescs(*it, no_grad_vars); for (const auto& desc : op_grads) { - for (const std::string& out_name : desc->OutputArugumentNames()) { + for (const std::string& out_name : desc->OutputArgumentNames()) { dup_out_ops[out_name].emplace_back(grad_desc_idx); } ++grad_desc_idx; } - backward_descs.insert(backward_descs.end(), op_grads.begin(), - op_grads.end()); + std::transform( + op_grads.begin(), op_grads.end(), std::back_inserter(backward_descs), + [](std::unique_ptr& ptr) { return std::move(ptr); }); } // Check whether some variables are written more than once std::list>> pending_sum_ops; @@ -330,9 +331,9 @@ void AppendBackwardOpDescs( backward_descs[dup_op[i]]->Rename(out_name, new_name); sum_op_inputs.emplace_back(new_name); } - OpDescBind* sum_op = new OpDescBind("sum", {{"X", sum_op_inputs}}, - {{"Out", {out_name}}}, {}); - pending_sum_ops.push_back({dup_op.back(), {sum_op}}); + std::unique_ptr sum_op(new OpDescBind( + "sum", {{"X", sum_op_inputs}}, {{"Out", {out_name}}}, {})); + pending_sum_ops.push_back({dup_op.back(), std::move(sum_op)}); } } pending_sum_ops.sort( @@ -345,8 +346,9 @@ void AppendBackwardOpDescs( std::move(p.second)); } // Append backward_descs to BlockDescBind::ops_ - block_op_descs.insert(std::end(block_op_descs), std::begin(backward_descs), - std::end(backward_descs)); + for (std::unique_ptr& ptr : backward_descs) { + block_op_descs.push_back(std::move(ptr)); + } return; } diff --git a/paddle/framework/block_desc.h b/paddle/framework/block_desc.h index a171dfef30..fd95ef1901 100644 --- a/paddle/framework/block_desc.h +++ b/paddle/framework/block_desc.h @@ -32,9 +32,8 @@ class ProgramDescBind; class BlockDescBind { public: - friend void AppendBackwardOps( - BlockDescBind &block_desc, - const std::unordered_set &no_grad_vars); + friend void AppendBackwardOpDescs( + BlockDescBind &block_desc, std::unordered_set &no_grad_vars); BlockDescBind(ProgramDescBind *prog, BlockDesc *desc) : prog_(prog), desc_(desc), need_update_(false) {} From 84500f9487164f3cf17625c876c15d754b932ced Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 4 Oct 2017 11:25:46 -0700 Subject: [PATCH 196/342] Change `PADDLE_ONLY_CPU` to `PADDLE_WITH_GPU` By shell command ```bash sed -i 's#ifdef PADDLE_ONLY_CPU#ifndef PADDLE_WITH_GPU#g' `find ./paddle/ -name '*.h' -o -name '*.cc' -o -name '*.cpp' -o -name '*.c' -o -name '*.cu'` sed -i 's#ifndef PADDLE_ONLY_CPU#ifdef PADDLE_WITH_GPU#g' `find ./paddle/ -name '*.h' -o -name '*.cc' -o -name '*.cpp' -o -name '*.c' -o -name '*.cu'` ``` --- cmake/configure.cmake | 2 +- paddle/api/Util.cpp | 2 +- paddle/capi/Matrix.cpp | 2 +- paddle/framework/lod_tensor.h | 4 +-- paddle/framework/op_registry.h | 2 +- paddle/framework/operator.cc | 2 +- paddle/framework/tensor_impl.h | 4 +-- paddle/framework/tensor_test.cc | 8 +++--- paddle/function/BlockExpandOp.cpp | 2 +- paddle/function/ContextProjectionOp.cpp | 2 +- paddle/function/CosSimOp.cpp | 2 +- paddle/function/CropOp.cpp | 2 +- paddle/function/CrossMapNormalOp.cpp | 2 +- paddle/function/DepthwiseConvOp.cpp | 2 +- paddle/function/DepthwiseConvOpTest.cpp | 2 +- paddle/function/GemmConvOp.cpp | 2 +- paddle/function/GemmConvOpTest.cpp | 2 +- paddle/function/Im2ColTest.cpp | 2 +- paddle/function/MulOp.cpp | 2 +- paddle/function/PadOp.cpp | 2 +- paddle/function/RowConvOp.cpp | 2 +- paddle/function/SwitchOp.cpp | 2 +- paddle/gserver/layers/BatchNormBaseLayer.cpp | 2 +- .../layers/BatchNormalizationLayer.cpp | 6 ++--- paddle/gserver/layers/PoolLayer.cpp | 4 +-- paddle/gserver/tests/LayerGradUtil.cpp | 2 +- paddle/gserver/tests/test_BatchNorm.cpp | 2 +- paddle/gserver/tests/test_ConvUnify.cpp | 2 +- paddle/gserver/tests/test_DetectionOutput.cpp | 2 +- paddle/gserver/tests/test_Evaluator.cpp | 2 +- paddle/gserver/tests/test_KmaxSeqScore.cpp | 2 +- paddle/gserver/tests/test_LayerGrad.cpp | 26 +++++++++---------- paddle/gserver/tests/test_NetworkCompare.cpp | 2 +- paddle/gserver/tests/test_PriorBox.cpp | 2 +- .../gserver/tests/test_ProtoDataProvider.cpp | 6 ++--- paddle/gserver/tests/test_PyDataProvider.cpp | 4 +-- .../gserver/tests/test_SelectiveFCLayer.cpp | 8 +++--- .../gserver/tests/test_SeqSliceLayerGrad.cpp | 2 +- paddle/gserver/tests/test_WarpCTCLayer.cpp | 2 +- paddle/math/Matrix.cpp | 6 ++--- paddle/math/SparseMatrix.cpp | 2 +- paddle/math/Vector.cpp | 6 ++--- paddle/math/tests/test_Allocator.cpp | 4 +-- paddle/math/tests/test_BaseMatrix.cpp | 2 +- paddle/math/tests/test_CpuGpuVector.cpp | 2 +- paddle/math/tests/test_ExecViaCpu.cpp | 2 +- paddle/math/tests/test_GpuProfiler.cpp | 2 +- paddle/math/tests/test_Matrix.cpp | 2 +- paddle/math/tests/test_SparseMatrix.cpp | 6 ++--- paddle/math/tests/test_Tensor.cu | 20 +++++++------- paddle/math/tests/test_TrainingAlgorithm.cpp | 2 +- paddle/math/tests/test_batchTranspose.cpp | 2 +- paddle/math/tests/test_lazyAssign.cu | 4 +-- paddle/math/tests/test_matrixCompare.cpp | 2 +- paddle/math/tests/test_perturbation.cpp | 2 +- .../math/tests/test_sparseMatrixCompare.cpp | 2 +- paddle/memory/detail/buddy_allocator.cc | 2 +- paddle/memory/detail/system_allocator.cc | 2 +- paddle/memory/detail/system_allocator.h | 2 +- paddle/memory/detail/system_allocator_test.cc | 2 +- paddle/memory/memcpy.cc | 2 +- paddle/memory/memcpy.h | 2 +- paddle/memory/memory.cc | 2 +- paddle/memory/memory_test.cc | 2 +- paddle/operators/detail/strided_memcpy.h | 2 +- paddle/operators/math/im2col_test.cc | 4 +-- paddle/operators/math/math_function_test.cc | 2 +- paddle/operators/strided_memcpy_test.cc | 2 +- paddle/platform/device_context.cc | 2 +- paddle/platform/device_context.h | 4 +-- paddle/platform/enforce.h | 4 +-- paddle/platform/gpu_info.h | 2 +- paddle/platform/variant.h | 2 +- paddle/pserver/test/SocketTest.cpp | 2 +- paddle/pserver/test/test_ProtoServer.cpp | 2 +- paddle/pybind/pybind.cc | 12 ++++----- paddle/pybind/tensor_py.h | 2 +- paddle/trainer/MergeModel.cpp | 2 +- paddle/trainer/tests/test_Compare.cpp | 2 +- paddle/trainer/tests/test_CompareSparse.cpp | 4 +-- paddle/trainer/tests/test_Trainer.cpp | 4 +-- paddle/trainer/tests/test_TrainerOnePass.cpp | 6 ++--- .../test_recurrent_machine_generation.cpp | 2 +- paddle/utils/Flags.cpp | 2 +- paddle/utils/Util.h | 2 +- paddle/utils/Version.h | 2 +- 86 files changed, 141 insertions(+), 141 deletions(-) diff --git a/cmake/configure.cmake b/cmake/configure.cmake index 51c3b918cc..926a7b1d69 100644 --- a/cmake/configure.cmake +++ b/cmake/configure.cmake @@ -49,11 +49,11 @@ if(NOT WITH_GOLANG) endif(NOT WITH_GOLANG) if(NOT WITH_GPU) - add_definitions(-DPADDLE_ONLY_CPU) add_definitions(-DHPPL_STUB_FUNC) list(APPEND CMAKE_CXX_SOURCE_FILE_EXTENSIONS cu) else() + add_definitions(-DPADDLE_WITH_GPU) FIND_PACKAGE(CUDA REQUIRED) if(${CUDA_VERSION_MAJOR} VERSION_LESS 7) diff --git a/paddle/api/Util.cpp b/paddle/api/Util.cpp index d369df5d4e..7446d892fd 100644 --- a/paddle/api/Util.cpp +++ b/paddle/api/Util.cpp @@ -47,7 +47,7 @@ bool isUsingGpu() { return FLAGS_use_gpu; } void setUseGpu(bool useGpu) { FLAGS_use_gpu = useGpu; } bool isGpuVersion() { -#ifdef PADDLE_ONLY_CPU +#ifndef PADDLE_WITH_GPU return false; #else return true; diff --git a/paddle/capi/Matrix.cpp b/paddle/capi/Matrix.cpp index d898ebe261..5b3737a759 100644 --- a/paddle/capi/Matrix.cpp +++ b/paddle/capi/Matrix.cpp @@ -46,7 +46,7 @@ paddle_error paddle_matrix_set_row(paddle_matrix mat, if (rowID >= ptr->mat->getHeight()) return kPD_OUT_OF_RANGE; paddle::real* buf = ptr->mat->getRowBuf(rowID); size_t width = ptr->mat->getWidth(); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU hl_memcpy(buf, rowArray, sizeof(paddle::real) * width); #else std::copy(rowArray, rowArray + width, buf); diff --git a/paddle/framework/lod_tensor.h b/paddle/framework/lod_tensor.h index 49786a4a66..b12c95b6b7 100644 --- a/paddle/framework/lod_tensor.h +++ b/paddle/framework/lod_tensor.h @@ -15,7 +15,7 @@ #pragma once #include -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU #include #include #include @@ -29,7 +29,7 @@ namespace paddle { namespace framework { -#ifdef PADDLE_ONLY_CPU +#ifndef PADDLE_WITH_GPU template using Vector = std::vector; #else diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h index 4ee2c7d275..aca6579f36 100644 --- a/paddle/framework/op_registry.h +++ b/paddle/framework/op_registry.h @@ -211,7 +211,7 @@ class OpKernelRegistrar : public Registrar { // TODO(fengjiayi): The following macros // seems ugly, do we have better method? -#ifdef PADDLE_ONLY_CPU +#ifndef PADDLE_WITH_GPU #define USE_OP_KERNEL(op_type) USE_OP_DEVICE_KERNEL(op_type, CPU) #else #define USE_OP_KERNEL(op_type) \ diff --git a/paddle/framework/operator.cc b/paddle/framework/operator.cc index 1012a30b0a..21c1c6f9e6 100644 --- a/paddle/framework/operator.cc +++ b/paddle/framework/operator.cc @@ -25,7 +25,7 @@ Eigen::DefaultDevice& ExecutionContext::GetEigenDevice< return *device_context_.GetEigenDevice(); } -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU template <> Eigen::GpuDevice& ExecutionContext::GetEigenDevice() const { diff --git a/paddle/framework/tensor_impl.h b/paddle/framework/tensor_impl.h index a5405f9c31..1cde1f74b8 100644 --- a/paddle/framework/tensor_impl.h +++ b/paddle/framework/tensor_impl.h @@ -65,7 +65,7 @@ inline T* Tensor::mutable_data(platform::Place place) { holder_.reset(new PlaceholderImpl( boost::get(place), size)); } else if (platform::is_gpu_place(place)) { -#ifdef PADDLE_ONLY_CPU +#ifndef PADDLE_WITH_GPU PADDLE_THROW("'GPUPlace' is not supported in CPU only device."); } #else @@ -103,7 +103,7 @@ inline void Tensor::CopyFrom(const Tensor& src, memory::Copy(boost::get(dst_place), dst_ptr, boost::get(src_place), src_ptr, size); } -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU else if (platform::is_gpu_place(src_place) && platform::is_cpu_place(dst_place)) { memory::Copy(boost::get(dst_place), dst_ptr, diff --git a/paddle/framework/tensor_test.cc b/paddle/framework/tensor_test.cc index e2ec738de3..86c6945ab5 100644 --- a/paddle/framework/tensor_test.cc +++ b/paddle/framework/tensor_test.cc @@ -74,7 +74,7 @@ TEST(Tensor, MutableData) { EXPECT_EQ(p1, p2); } -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU { Tensor src_tensor; float* p1 = nullptr; @@ -126,7 +126,7 @@ TEST(Tensor, ShareDataWith) { ASSERT_EQ(src_tensor.data(), dst_tensor.data()); } -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU { Tensor src_tensor; Tensor dst_tensor; @@ -163,7 +163,7 @@ TEST(Tensor, Slice) { EXPECT_EQ(src_data_address + 3 * 4 * 1 * sizeof(int), slice_data_address); } -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU { Tensor src_tensor; src_tensor.mutable_data(make_ddim({6, 9}), GPUPlace()); @@ -218,7 +218,7 @@ TEST(Tensor, CopyFrom) { EXPECT_EQ(dst_ptr[i], slice_ptr[i]); } } -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU { Tensor src_tensor; Tensor gpu_tensor; diff --git a/paddle/function/BlockExpandOp.cpp b/paddle/function/BlockExpandOp.cpp index a89b6bba45..ad78f5f584 100644 --- a/paddle/function/BlockExpandOp.cpp +++ b/paddle/function/BlockExpandOp.cpp @@ -194,7 +194,7 @@ public: REGISTER_TYPED_FUNC(BlockExpand, CPU, BlockExpandForward); REGISTER_TYPED_FUNC(BlockExpandGrad, CPU, BlockExpandBackward); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU REGISTER_TYPED_FUNC(BlockExpand, GPU, BlockExpandForward); REGISTER_TYPED_FUNC(BlockExpandGrad, GPU, BlockExpandBackward); #endif diff --git a/paddle/function/ContextProjectionOp.cpp b/paddle/function/ContextProjectionOp.cpp index b87750b742..ab18c39df8 100644 --- a/paddle/function/ContextProjectionOp.cpp +++ b/paddle/function/ContextProjectionOp.cpp @@ -395,7 +395,7 @@ REGISTER_TYPED_FUNC(ContextProjectionForward, REGISTER_TYPED_FUNC(ContextProjectionBackward, CPU, ContextProjectionBackwardFunc); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU REGISTER_TYPED_FUNC(ContextProjectionForward, GPU, ContextProjectionForwardFunc); diff --git a/paddle/function/CosSimOp.cpp b/paddle/function/CosSimOp.cpp index 7ece7b2dfe..4418f144d3 100644 --- a/paddle/function/CosSimOp.cpp +++ b/paddle/function/CosSimOp.cpp @@ -233,7 +233,7 @@ private: REGISTER_TYPED_FUNC(CosSimForward, CPU, CosSimForwardFunc); REGISTER_TYPED_FUNC(CosSimBackward, CPU, CosSimBackwardFunc); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU REGISTER_TYPED_FUNC(CosSimForward, GPU, CosSimForwardFunc); REGISTER_TYPED_FUNC(CosSimBackward, GPU, CosSimBackwardFunc); #endif diff --git a/paddle/function/CropOp.cpp b/paddle/function/CropOp.cpp index f12ee43e3d..39504cc2c1 100644 --- a/paddle/function/CropOp.cpp +++ b/paddle/function/CropOp.cpp @@ -169,7 +169,7 @@ private: REGISTER_TYPED_FUNC(Crop, CPU, CropFunc); REGISTER_TYPED_FUNC(CropGrad, CPU, CropGradFunc); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU REGISTER_TYPED_FUNC(Crop, GPU, CropFunc); REGISTER_TYPED_FUNC(CropGrad, GPU, CropGradFunc); #endif diff --git a/paddle/function/CrossMapNormalOp.cpp b/paddle/function/CrossMapNormalOp.cpp index ef878bfbba..1cf0918bed 100644 --- a/paddle/function/CrossMapNormalOp.cpp +++ b/paddle/function/CrossMapNormalOp.cpp @@ -336,7 +336,7 @@ private: REGISTER_TYPED_FUNC(CrossMapNormal, CPU, CrossMapNormalFunc); REGISTER_TYPED_FUNC(CrossMapNormalGrad, CPU, CrossMapNormalGradFunc); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU REGISTER_TYPED_FUNC(CrossMapNormal, GPU, CrossMapNormalFunc); REGISTER_TYPED_FUNC(CrossMapNormalGrad, GPU, CrossMapNormalGradFunc); #endif diff --git a/paddle/function/DepthwiseConvOp.cpp b/paddle/function/DepthwiseConvOp.cpp index 2f3112fe65..7656ab3d0a 100644 --- a/paddle/function/DepthwiseConvOp.cpp +++ b/paddle/function/DepthwiseConvOp.cpp @@ -292,7 +292,7 @@ REGISTER_TYPED_FUNC(DepthwiseConvGradInput, REGISTER_TYPED_FUNC(DepthwiseConvGradFilter, CPU, DepthwiseConvGradFilterFunction); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU REGISTER_TYPED_FUNC(DepthwiseConv, GPU, DepthwiseConvFunction); REGISTER_TYPED_FUNC(DepthwiseConvGradInput, GPU, diff --git a/paddle/function/DepthwiseConvOpTest.cpp b/paddle/function/DepthwiseConvOpTest.cpp index d8e8c889d5..39033ecb2b 100644 --- a/paddle/function/DepthwiseConvOpTest.cpp +++ b/paddle/function/DepthwiseConvOpTest.cpp @@ -17,7 +17,7 @@ limitations under the License. */ namespace paddle { -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU TEST(DepthwiseConv, Forward) { DepthwiseConvolution( "GemmConv-CPU", "DepthwiseConv-GPU", forward); diff --git a/paddle/function/GemmConvOp.cpp b/paddle/function/GemmConvOp.cpp index f8cf4ebea8..68e08c1480 100644 --- a/paddle/function/GemmConvOp.cpp +++ b/paddle/function/GemmConvOp.cpp @@ -340,7 +340,7 @@ public: REGISTER_TYPED_FUNC(GemmConv, CPU, GemmConvFunction); REGISTER_TYPED_FUNC(GemmConvGradInput, CPU, GemmConvGradInputFunction); REGISTER_TYPED_FUNC(GemmConvGradFilter, CPU, GemmConvGradFilterFunction); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU REGISTER_TYPED_FUNC(GemmConv, GPU, GemmConvFunction); REGISTER_TYPED_FUNC(GemmConvGradInput, GPU, GemmConvGradInputFunction); REGISTER_TYPED_FUNC(GemmConvGradFilter, GPU, GemmConvGradFilterFunction); diff --git a/paddle/function/GemmConvOpTest.cpp b/paddle/function/GemmConvOpTest.cpp index 5283d79a5a..bd1cf3c6a4 100644 --- a/paddle/function/GemmConvOpTest.cpp +++ b/paddle/function/GemmConvOpTest.cpp @@ -24,7 +24,7 @@ TEST(GemmConv, NaiveConv) { "NaiveConv-CPU", "GemmConv-CPU", forward); } -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU TEST(GemmConv, Forward) { Convolution( "GemmConv-CPU", "GemmConv-GPU", forward); diff --git a/paddle/function/Im2ColTest.cpp b/paddle/function/Im2ColTest.cpp index acc88a553a..55325e94b5 100644 --- a/paddle/function/Im2ColTest.cpp +++ b/paddle/function/Im2ColTest.cpp @@ -116,7 +116,7 @@ void TestIm2ColFunctor() { TEST(Im2ColFunctor, CPU) { TestIm2ColFunctor(); } -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU TEST(Im2ColFunctor, GPU) { TestIm2ColFunctor(); } diff --git a/paddle/function/MulOp.cpp b/paddle/function/MulOp.cpp index 25e41edad5..655026320c 100644 --- a/paddle/function/MulOp.cpp +++ b/paddle/function/MulOp.cpp @@ -341,7 +341,7 @@ private: }; REGISTER_TYPED_FUNC(MulOp, CPU, MulFunc); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU REGISTER_TYPED_FUNC(MulOp, GPU, MulFunc); #endif } // namespace paddle diff --git a/paddle/function/PadOp.cpp b/paddle/function/PadOp.cpp index adba7c92ec..24c9bf4e72 100644 --- a/paddle/function/PadOp.cpp +++ b/paddle/function/PadOp.cpp @@ -207,7 +207,7 @@ private: REGISTER_TYPED_FUNC(Pad, CPU, PadFunc); REGISTER_TYPED_FUNC(PadGrad, CPU, PadGradFunc); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU REGISTER_TYPED_FUNC(Pad, GPU, PadFunc); REGISTER_TYPED_FUNC(PadGrad, GPU, PadGradFunc); #endif diff --git a/paddle/function/RowConvOp.cpp b/paddle/function/RowConvOp.cpp index b6501e8f4d..09e702f71a 100644 --- a/paddle/function/RowConvOp.cpp +++ b/paddle/function/RowConvOp.cpp @@ -217,7 +217,7 @@ public: REGISTER_TYPED_FUNC(RowConv, CPU, RowConvFunc); REGISTER_TYPED_FUNC(RowConvGrad, CPU, RowConvGradFunc); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU REGISTER_TYPED_FUNC(RowConv, GPU, RowConvFunc); REGISTER_TYPED_FUNC(RowConvGrad, GPU, RowConvGradFunc); #endif diff --git a/paddle/function/SwitchOp.cpp b/paddle/function/SwitchOp.cpp index 01e252a8dc..db839b5b76 100644 --- a/paddle/function/SwitchOp.cpp +++ b/paddle/function/SwitchOp.cpp @@ -132,7 +132,7 @@ public: REGISTER_TYPED_FUNC(NCHW2NHWC, CPU, NCHW2NHWCFunc); REGISTER_TYPED_FUNC(NHWC2NCHW, CPU, NHWC2NCHWFunc); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU REGISTER_TYPED_FUNC(NCHW2NHWC, GPU, NCHW2NHWCFunc); REGISTER_TYPED_FUNC(NHWC2NCHW, GPU, NHWC2NCHWFunc); #endif diff --git a/paddle/gserver/layers/BatchNormBaseLayer.cpp b/paddle/gserver/layers/BatchNormBaseLayer.cpp index f7a80e23e1..55f52816ab 100644 --- a/paddle/gserver/layers/BatchNormBaseLayer.cpp +++ b/paddle/gserver/layers/BatchNormBaseLayer.cpp @@ -16,7 +16,7 @@ limitations under the License. */ #include "BatchNormalizationLayer.h" #include "Layer.h" #include "paddle/utils/Stat.h" -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU #include "CudnnBatchNormLayer.h" #endif diff --git a/paddle/gserver/layers/BatchNormalizationLayer.cpp b/paddle/gserver/layers/BatchNormalizationLayer.cpp index 412762d384..33cf24431d 100644 --- a/paddle/gserver/layers/BatchNormalizationLayer.cpp +++ b/paddle/gserver/layers/BatchNormalizationLayer.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/utils/Stat.h" -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU #include "hl_batch_transpose.h" #endif #include "BatchNormalizationLayer.h" @@ -90,7 +90,7 @@ void BatchNormalizationLayer::expandMat(const MatrixPtr& in, MatrixPtr& out) { size_t batchSize = in->getHeight(); CHECK_EQ(out->getHeight(), batchSize * imgPixels_); if (useGpu_) { -#ifdef PADDLE_ONLY_CPU +#ifndef PADDLE_WITH_GPU LOG(FATAL) << "paddle is compiled only for cpu"; #else batchTranspose( @@ -127,7 +127,7 @@ void BatchNormalizationLayer::shrinkMat(const MatrixPtr& in, MatrixPtr& out) { } CHECK_EQ(in->getHeight(), static_cast(batchSize * imgPixels_)); if (useGpu_) { -#ifdef PADDLE_ONLY_CPU +#ifndef PADDLE_WITH_GPU LOG(FATAL) << "paddle is compiled only for cpu"; #else batchTranspose( diff --git a/paddle/gserver/layers/PoolLayer.cpp b/paddle/gserver/layers/PoolLayer.cpp index 96d5c54acc..43ab4e4d47 100644 --- a/paddle/gserver/layers/PoolLayer.cpp +++ b/paddle/gserver/layers/PoolLayer.cpp @@ -15,7 +15,7 @@ limitations under the License. */ #include "PoolLayer.h" #include "PoolProjectionLayer.h" #include "paddle/utils/Logging.h" -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU #include "CudnnPoolLayer.h" #endif namespace paddle { @@ -53,7 +53,7 @@ Layer* PoolLayer::create(const LayerConfig& config) { const std::string& pool = config.inputs(0).pool_conf().pool_type(); if (pool == "max-projection" || pool == "avg-projection") { return new PoolProjectionLayer(config); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU } else if (CudnnPoolLayer::typeCheck(pool)) { return new CudnnPoolLayer(config); #endif diff --git a/paddle/gserver/tests/LayerGradUtil.cpp b/paddle/gserver/tests/LayerGradUtil.cpp index a38880e14c..59df057a80 100644 --- a/paddle/gserver/tests/LayerGradUtil.cpp +++ b/paddle/gserver/tests/LayerGradUtil.cpp @@ -674,7 +674,7 @@ void testLayerGradKernel(TestConfig testConf, bool useGpu, bool useWeight, float epsilon) { -#ifdef PADDLE_ONLY_CPU +#ifndef PADDLE_WITH_GPU if (useGpu) return; #endif FLAGS_use_gpu = useGpu; diff --git a/paddle/gserver/tests/test_BatchNorm.cpp b/paddle/gserver/tests/test_BatchNorm.cpp index 659eefa31b..c1c85f8fac 100644 --- a/paddle/gserver/tests/test_BatchNorm.cpp +++ b/paddle/gserver/tests/test_BatchNorm.cpp @@ -119,7 +119,7 @@ TEST(Layer, batchNorm) { CHECK_EQ(static_cast(convLayer->getOutputValue()->getWidth()), 576); } -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU void batchNormInference(int n, int c, int h, int w) { MatrixPtr input = std::make_shared(n, c * h * w); MatrixPtr cudnnOut = std::make_shared(n, c * h * w); diff --git a/paddle/gserver/tests/test_ConvUnify.cpp b/paddle/gserver/tests/test_ConvUnify.cpp index e7325e0cc3..16556469cb 100644 --- a/paddle/gserver/tests/test_ConvUnify.cpp +++ b/paddle/gserver/tests/test_ConvUnify.cpp @@ -117,7 +117,7 @@ MatrixPtr doOneConvTest(size_t imgSize, } TEST(Layer, convParaUnified) { -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU MatrixPtr input, resultCpu, resultGpu; /// TEST1 for conv /// diff --git a/paddle/gserver/tests/test_DetectionOutput.cpp b/paddle/gserver/tests/test_DetectionOutput.cpp index af43dc51fa..1a83f48fae 100644 --- a/paddle/gserver/tests/test_DetectionOutput.cpp +++ b/paddle/gserver/tests/test_DetectionOutput.cpp @@ -150,7 +150,7 @@ TEST(Layer, detectionOutputLayerFwd) { useGpu, result2); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU // GPU case 1. useGpu = true; inputLoc = Matrix::create(1, 16, false, useGpu); diff --git a/paddle/gserver/tests/test_Evaluator.cpp b/paddle/gserver/tests/test_Evaluator.cpp index 93996392d2..42bb570572 100644 --- a/paddle/gserver/tests/test_Evaluator.cpp +++ b/paddle/gserver/tests/test_Evaluator.cpp @@ -51,7 +51,7 @@ void testEvaluator(TestConfig testConf, string testEvaluatorName, size_t batchSize, bool useGpu) { -#ifdef PADDLE_ONLY_CPU +#ifndef PADDLE_WITH_GPU if (useGpu) return; #endif FLAGS_use_gpu = useGpu; diff --git a/paddle/gserver/tests/test_KmaxSeqScore.cpp b/paddle/gserver/tests/test_KmaxSeqScore.cpp index 308abe6816..1594de8502 100644 --- a/paddle/gserver/tests/test_KmaxSeqScore.cpp +++ b/paddle/gserver/tests/test_KmaxSeqScore.cpp @@ -97,7 +97,7 @@ TEST(Layer, kmaxSeqScoreLayer) { Matrix::create(subSeqStartPosition.back(), 1, false, false); std::vector mode = {false}; -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU mode.push_back(true); #endif diff --git a/paddle/gserver/tests/test_LayerGrad.cpp b/paddle/gserver/tests/test_LayerGrad.cpp index 090bde7b20..e887dee5f9 100644 --- a/paddle/gserver/tests/test_LayerGrad.cpp +++ b/paddle/gserver/tests/test_LayerGrad.cpp @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU #include #endif #include @@ -258,7 +258,7 @@ void testProjectionConv(size_t groups, bool isDeconv) { true); } -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU TEST(Projection, conv) { /// test ConvProjection testProjectionConv(1, false); @@ -422,7 +422,7 @@ TEST(Layer, depthwiseConvLayer) { // 'depthwise_conv' is a sepecial case of 'exconv' whose // groups size equals to the input channels size. testDepthwiseConvLayer("exconv", /* useGpu= */ false); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU testDepthwiseConvLayer("exconv", /* useGpu= */ true); #endif } @@ -480,7 +480,7 @@ void testConvLayer(const string& type, bool trans, bool useGpu) { TEST(Layer, convLayer) { testConvLayer("exconv", /* trans= */ false, /* useGpu= */ false); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU testConvLayer("exconv", /* trans= */ false, /* useGpu= */ true); testConvLayer("cudnn_conv", /* trans= */ false, /* useGpu= */ true); #endif @@ -525,7 +525,7 @@ TEST(Layer, convTransLayer) { for (auto useGpu : {false, true}) { testConvTransLayer("exconvt", /* trans= */ false, /* useGpu= */ useGpu); } -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU testConvTransLayer("cudnn_convt", /* trans= */ false, /* useGpu= */ true); #endif } @@ -638,7 +638,7 @@ TEST(Layer, SelectiveFullyConnectedLayer) { /* trans= */ false, /* useGup= */ false, false); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU testLayerGrad(config, "selective_fc", 100, @@ -1210,7 +1210,7 @@ void testPoolLayer(const string& poolType, bool trans, bool useGpu) { testLayerGrad(config, "pool", 100, trans, useGpu); } -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU void testPoolLayer2(const string& poolType, bool trans, bool useGpu) { TestConfig config; config.inputDefs.push_back({INPUT_DATA, "layer_0", 3200, 0}); @@ -1236,7 +1236,7 @@ TEST(Layer, PoolLayer) { testPoolLayer("avg-projection", /* trans= */ false, /* useGpu= */ false); testPoolLayer("max-projection", /* trans= */ false, /* useGpu= */ false); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU testPoolLayer("avg-projection", /* trans= */ false, /* useGpu= */ true); testPoolLayer("max-projection", /* trans= */ false, /* useGpu= */ true); testPoolLayer("cudnn-max-pool", /* trans= */ false, /* useGpu= */ true); @@ -1309,7 +1309,7 @@ void testPool3DLayer(const string& poolType, bool trans, bool useGpu) { TEST(Layer, Pool3DLayer) { testPool3DLayer("avg", /* trans= */ false, /* useGpu= */ false); testPool3DLayer("max", /* trans= */ false, /* useGpu= */ false); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU testPool3DLayer("avg", /* trans= */ false, /* useGpu= */ true); testPool3DLayer("max", /* trans= */ false, /* useGpu= */ true); #endif @@ -1695,7 +1695,7 @@ void testBatchNormLayer(const string& type, bool trans, bool useGpu) { TEST(Layer, BatchNormalizationLayer) { testBatchNormLayer("batch_norm", false, false); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU testBatchNormLayer("batch_norm", false, true); if (hl_get_cudnn_lib_version() >= int(4000)) { testBatchNormLayer("cudnn_batch_norm", false, true); @@ -1744,7 +1744,7 @@ void testBatchNorm3DLayer(const string& type, bool trans, bool useGpu) { TEST(Layer, testBatchNorm3DLayer) { testBatchNorm3DLayer("batch_norm", false, false); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU testBatchNorm3DLayer("batch_norm", false, true); if (hl_get_cudnn_lib_version() >= int(4000)) { testBatchNorm3DLayer("cudnn_batch_norm", false, true); @@ -2262,7 +2262,7 @@ void test3DConvLayer(const string& type, bool trans, bool useGpu) { TEST(Layer, test3DConvLayer) { test3DConvLayer("conv3d", /* trans= */ false, /* useGpu= */ false); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU test3DConvLayer("conv3d", /* trans= */ false, /* useGpu= */ true); #endif } @@ -2339,7 +2339,7 @@ void test3DDeConvLayer(const string& type, bool trans, bool useGpu) { TEST(Layer, test3DDeConvLayer) { test3DDeConvLayer("deconv3d", /* trans= */ false, /* useGpu= */ false); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU test3DDeConvLayer("deconv3d", /* trans= */ false, /* useGpu= */ true); #endif } diff --git a/paddle/gserver/tests/test_NetworkCompare.cpp b/paddle/gserver/tests/test_NetworkCompare.cpp index d36f72360f..e322fef9a4 100644 --- a/paddle/gserver/tests/test_NetworkCompare.cpp +++ b/paddle/gserver/tests/test_NetworkCompare.cpp @@ -243,7 +243,7 @@ TEST(Compare, concat_slice) { compareNetwork(config_file_a, config_file_b); } -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU TEST(Compare, img_pool) { std::string config_file_a = "./gserver/tests/img_pool_a.conf"; std::string config_file_b = "./gserver/tests/img_pool_b.conf"; diff --git a/paddle/gserver/tests/test_PriorBox.cpp b/paddle/gserver/tests/test_PriorBox.cpp index ae0e3bc3d2..cbc0fff7b8 100644 --- a/paddle/gserver/tests/test_PriorBox.cpp +++ b/paddle/gserver/tests/test_PriorBox.cpp @@ -151,7 +151,7 @@ TEST(Layer, priorBoxLayerFwd) { useGpu, result); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU // reset the input parameters variance[1] = 0.1; variance[3] = 0.2; diff --git a/paddle/gserver/tests/test_ProtoDataProvider.cpp b/paddle/gserver/tests/test_ProtoDataProvider.cpp index e11bf402c2..988dbc2513 100644 --- a/paddle/gserver/tests/test_ProtoDataProvider.cpp +++ b/paddle/gserver/tests/test_ProtoDataProvider.cpp @@ -485,7 +485,7 @@ TEST(ProtoDataProvider, test) { // Currently in async mode, useGpu is not supported continue; } -#ifdef PADDLE_ONLY_CPU +#ifndef PADDLE_WITH_GPU if (useGpu) { continue; } @@ -525,7 +525,7 @@ TEST(ProtoDataProvider, constant_slots) { for (int numConstantSlots : {1, 2}) { for (int useGpu : numTwoArray) { for (int dataCompression : numTwoArray) { -#ifdef PADDLE_ONLY_CPU +#ifndef PADDLE_WITH_GPU if (useGpu) { continue; } @@ -708,7 +708,7 @@ TEST(ProtoSequenceDataProvider, test) { // Currently in async mode, useGpu is not supported continue; } -#ifdef PADDLE_ONLY_CPU +#ifndef PADDLE_WITH_GPU if (useGpu) { continue; } diff --git a/paddle/gserver/tests/test_PyDataProvider.cpp b/paddle/gserver/tests/test_PyDataProvider.cpp index db883543c3..f6522febf8 100644 --- a/paddle/gserver/tests/test_PyDataProvider.cpp +++ b/paddle/gserver/tests/test_PyDataProvider.cpp @@ -37,7 +37,7 @@ TEST(PyDataProvider, py_fill_slots) { config.clear_files(); std::string dataFile = "gserver/tests/pyDataProvider/pyDataProviderList"; config.set_files(dataFile); -#ifdef PADDLE_ONLY_CPU +#ifndef PADDLE_WITH_GPU bool useGpu = false; #else bool useGpu = true; @@ -71,7 +71,7 @@ TEST(PyDataProvider, py_fill_nest_slots) { std::string dataFile = "gserver/tests/pyDataProvider/pyDataProviderList"; config.set_files(dataFile); EXPECT_EQ(config.IsInitialized(), true); -#ifdef PADDLE_ONLY_CPU +#ifndef PADDLE_WITH_GPU bool useGpu = false; #else bool useGpu = true; diff --git a/paddle/gserver/tests/test_SelectiveFCLayer.cpp b/paddle/gserver/tests/test_SelectiveFCLayer.cpp index ab23d00a2c..b25d32fb2c 100644 --- a/paddle/gserver/tests/test_SelectiveFCLayer.cpp +++ b/paddle/gserver/tests/test_SelectiveFCLayer.cpp @@ -321,7 +321,7 @@ TEST(Layer, SelectiveFcLayer_train_dense_mul) { "filelist=gserver/tests/SelectiveFcTest/dense_mul_list"; for (auto useGpu : {false, true}) { -#ifdef PADDLE_ONLY_CPU +#ifndef PADDLE_WITH_GPU if (useGpu) { break; } @@ -388,7 +388,7 @@ void testSelectiveFcLayerTrainSparseMul(const LayerConfig& config, outMatSelfc->getWidth(), outMatSelfc->getElementCnt())); cpuOutMatSelfc->copyFrom(*outMatSelfc, HPPL_STREAM_DEFAULT); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU if (useGpu) { hl_stream_synchronize(HPPL_STREAM_DEFAULT); } @@ -418,7 +418,7 @@ void testSelectiveFcLayerTrainSparseMul(const LayerConfig& config, MatrixPtr cpuOutMatFc( new CpuMatrix(outMatFc->getHeight(), outMatFc->getWidth())); cpuOutMatFc->copyFrom(*outMatFc, HPPL_STREAM_DEFAULT); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU if (useGpu) { hl_stream_synchronize(HPPL_STREAM_DEFAULT); } @@ -443,7 +443,7 @@ TEST(Layer, SelectiveFcLayer_train_sparse_mul) { selLayerConfig.set_size(fcLayerWidth); testSelectiveFcLayerTrainSparseMul(selLayerConfig, false); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU testSelectiveFcLayerTrainSparseMul(selLayerConfig, true); #endif } diff --git a/paddle/gserver/tests/test_SeqSliceLayerGrad.cpp b/paddle/gserver/tests/test_SeqSliceLayerGrad.cpp index e1d4ae1617..f28149081b 100644 --- a/paddle/gserver/tests/test_SeqSliceLayerGrad.cpp +++ b/paddle/gserver/tests/test_SeqSliceLayerGrad.cpp @@ -195,7 +195,7 @@ TEST(Layer, SeqSliceLayer) { vector> ends; std::vector mode = {false}; -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU mode.push_back(true); #endif genSeqInfo(seqStartPos, subSeqStartPos); diff --git a/paddle/gserver/tests/test_WarpCTCLayer.cpp b/paddle/gserver/tests/test_WarpCTCLayer.cpp index 55427e2f12..ae5b64257f 100644 --- a/paddle/gserver/tests/test_WarpCTCLayer.cpp +++ b/paddle/gserver/tests/test_WarpCTCLayer.cpp @@ -199,7 +199,7 @@ TEST(Layer, WarpCTCLayer) { for (auto batchSize : {1, 10, 32}) { for (auto normByTimes : {false, true}) { for (auto useGpu : {false, true}) { -#ifdef PADDLE_ONLY_CPU +#ifndef PADDLE_WITH_GPU if (useGpu) continue; #endif LOG(INFO) << "layerSize=" << layerSize << " batchSize=" << batchSize diff --git a/paddle/math/Matrix.cpp b/paddle/math/Matrix.cpp index 0023b4d0f5..de02f9c0d5 100644 --- a/paddle/math/Matrix.cpp +++ b/paddle/math/Matrix.cpp @@ -670,7 +670,7 @@ void GpuMatrix::leftMul(Matrix& a, real scaleAB, real scaleT) { } void GpuMatrix::selectRows(Matrix& table, IVector& ids) { -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU CHECK(dynamic_cast(&table)); CHECK(table.useGpu()); CHECK(ids.useGpu()); @@ -694,7 +694,7 @@ void GpuMatrix::selectRows(Matrix& table, IVector& ids) { } void GpuMatrix::addToRows(Matrix& table, IVector& ids) { -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU CHECK(dynamic_cast(&table)); CHECK(table.useGpu()); CHECK(ids.useGpu()); @@ -741,7 +741,7 @@ void GpuMatrix::rowMax(Matrix& max) { } void GpuMatrix::rowMax(IVector& maxIds, Matrix& maxVal) { -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU CHECK(maxIds.useGpu() && maxVal.useGpu()) << "Matrix type are not equal"; size_t numSamples = getHeight(); size_t beam = maxVal.getWidth(); diff --git a/paddle/math/SparseMatrix.cpp b/paddle/math/SparseMatrix.cpp index 6370c77386..1f31082ae8 100644 --- a/paddle/math/SparseMatrix.cpp +++ b/paddle/math/SparseMatrix.cpp @@ -836,7 +836,7 @@ void GpuSparseMatrix::zeroMem() { } void GpuSparseMatrix::rowMax(IVector& maxIds, Matrix& maxVal) { -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU CHECK(maxIds.useGpu() && maxVal.useGpu()) << "Matrix type are not equal"; size_t numSamples = getHeight(); size_t beam = maxVal.getWidth(); diff --git a/paddle/math/Vector.cpp b/paddle/math/Vector.cpp index eb87ee9bb7..54e57b255d 100644 --- a/paddle/math/Vector.cpp +++ b/paddle/math/Vector.cpp @@ -172,7 +172,7 @@ void GpuVectorT::isEqualTo(const VectorT& b, const T& value) { template void GpuVectorT::selectFrom(const VectorT& src, const VectorT& ids) { -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU hl_vector_select_from(this->getData(), this->getSize(), src.getData(), @@ -850,7 +850,7 @@ CpuGpuVectorT::CpuGpuVectorT(CpuGpuVectorT& src, size_t size) : sync_(nullptr) { CHECK_LE(offset + size, static_cast(src.getSize())); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU SyncedFlag* flag = src.getSync(); if (*flag == DATA_AT_CPU) { src.copyToGpu(); // will set synchronous data between CPU and GPU @@ -861,7 +861,7 @@ CpuGpuVectorT::CpuGpuVectorT(CpuGpuVectorT& src, auto cMemHandle = (src.getVector(false))->getMemoryHandle(); cpuVectorT_ = std::make_shared>( size, std::dynamic_pointer_cast(cMemHandle), offset); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU auto gMemHandle = (src.getVector(true))->getMemoryHandle(); gpuVectorT_ = std::make_shared>( size, std::dynamic_pointer_cast(gMemHandle), offset); diff --git a/paddle/math/tests/test_Allocator.cpp b/paddle/math/tests/test_Allocator.cpp index 1ca70ea84c..cf2f66aea1 100644 --- a/paddle/math/tests/test_Allocator.cpp +++ b/paddle/math/tests/test_Allocator.cpp @@ -68,7 +68,7 @@ void testPoolAllocator() { TEST(Allocator, Pool) { testPoolAllocator(); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU testPoolAllocator(); #endif } @@ -92,7 +92,7 @@ TEST(MemoryHandle, Cpu) { EXPECT_EQ(ptr1, ptr2); } -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU TEST(MemoryHandle, Gpu) { int numGpu = hl_get_device_count(); diff --git a/paddle/math/tests/test_BaseMatrix.cpp b/paddle/math/tests/test_BaseMatrix.cpp index 22ce39701f..730759f3db 100644 --- a/paddle/math/tests/test_BaseMatrix.cpp +++ b/paddle/math/tests/test_BaseMatrix.cpp @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU /** * This test file use autotest::AutoCompare and cmpWithoutArg to compares the * implementation of CPU and GPU member function in diff --git a/paddle/math/tests/test_CpuGpuVector.cpp b/paddle/math/tests/test_CpuGpuVector.cpp index 58bc43a38b..ccb4a902b0 100644 --- a/paddle/math/tests/test_CpuGpuVector.cpp +++ b/paddle/math/tests/test_CpuGpuVector.cpp @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU #include #include "paddle/math/Vector.h" diff --git a/paddle/math/tests/test_ExecViaCpu.cpp b/paddle/math/tests/test_ExecViaCpu.cpp index 04c856453d..2d439cd060 100644 --- a/paddle/math/tests/test_ExecViaCpu.cpp +++ b/paddle/math/tests/test_ExecViaCpu.cpp @@ -94,7 +94,7 @@ void testWrapper(F&& f) { } } -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU TEST(ExecViaCpu, test1) { testWrapper(f); testWrapper(&f); diff --git a/paddle/math/tests/test_GpuProfiler.cpp b/paddle/math/tests/test_GpuProfiler.cpp index e6b5dba446..6dab187e3e 100644 --- a/paddle/math/tests/test_GpuProfiler.cpp +++ b/paddle/math/tests/test_GpuProfiler.cpp @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU #include #include "paddle/math/Matrix.h" diff --git a/paddle/math/tests/test_Matrix.cpp b/paddle/math/tests/test_Matrix.cpp index 1c21da5b76..7a145eae6a 100644 --- a/paddle/math/tests/test_Matrix.cpp +++ b/paddle/math/tests/test_Matrix.cpp @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU /** * This test file use autotest::AutoCompare and cmpWithArg to compares the * implementation of CPU and GPU member function in Matrix.cpp. diff --git a/paddle/math/tests/test_SparseMatrix.cpp b/paddle/math/tests/test_SparseMatrix.cpp index c0572dfdbf..8151dde106 100644 --- a/paddle/math/tests/test_SparseMatrix.cpp +++ b/paddle/math/tests/test_SparseMatrix.cpp @@ -47,7 +47,7 @@ struct MatrixPara { SparseFormat format; }; -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU void test_sparse_matrix_mul(MatrixPara paraA, MatrixPara paraB, MatrixPara paraC) { @@ -452,7 +452,7 @@ TEST(Matrix, SparseMatrixCSRFormatTrimFrom) { matB->trimFrom(*mat); checkSMatrixEqual2(matA, matB); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU GpuSparseMatrixPtr matC = std::make_shared( height, trimedWidth, height, FLOAT_VALUE, SPARSE_CSR, true); matC->trimFrom(*mat); @@ -546,7 +546,7 @@ TEST(Matrix, SparseMatrixCSCFormatTrimFrom) { matB->trimFrom(*mat); checkSMatrixEqual2(matA, matB); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU GpuSparseMatrixPtr matC = std::make_shared( height, trimedWidth, height, FLOAT_VALUE, SPARSE_CSC, true); matC->trimFrom(*mat); diff --git a/paddle/math/tests/test_Tensor.cu b/paddle/math/tests/test_Tensor.cu index 31b693afa8..d03698dee2 100644 --- a/paddle/math/tests/test_Tensor.cu +++ b/paddle/math/tests/test_Tensor.cu @@ -270,7 +270,7 @@ TEST(Unary, BaseOp) { TestUnaryVectorT testCpuIVector( testUnaryBaseOpInt); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU TestUnaryMatrix testGpuMatrix(testUnaryBaseOp); TestUnaryVectorT testGpuVector(testUnaryBaseOp); TestUnaryVectorT testGpuIVector( @@ -317,7 +317,7 @@ void testUnayrMathOp(Tensor& A1, Tensor& A2) { TEST(Unary, MathOp) { TestUnaryMatrix testCpu(testUnayrMathOp); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU TestUnaryMatrix testGpu(testUnayrMathOp); #endif } @@ -374,7 +374,7 @@ void testUnayrCompareOp(Tensor& A1, Tensor& A2) { TEST(Unary, CompareOp) { TestUnaryMatrix testCpu(testUnayrCompareOp); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU TestUnaryMatrix testGpu(testUnayrCompareOp); #endif } @@ -536,7 +536,7 @@ void testBinaryBaseOp(Tensor& A1, Tensor& A2, Tensor& B) { TEST(Binary, BaseOp) { TestBinaryMatrix testCpu(testBinaryBaseOp); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU TestBinaryMatrix testGpu(testBinaryBaseOp); #endif } @@ -710,7 +710,7 @@ void testBinaryMathOp(Tensor& A1, Tensor& A2, Tensor& B) { TEST(Binary, MathOp) { TestBinaryMatrix testCpu(testBinaryMathOp); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU TestBinaryMatrix testGpu(testBinaryMathOp); #endif } @@ -810,7 +810,7 @@ void testBinaryCompareOp(Tensor& A1, Tensor& A2, Tensor& B) { TEST(Binary, CompareOp) { TestBinaryMatrix testCpu(testBinaryCompareOp); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU TestBinaryMatrix testGpu(testBinaryCompareOp); #endif } @@ -955,7 +955,7 @@ void testTernaryBaseOp(Tensor& A1, Tensor& A2, Tensor& B, Tensor& C) { TEST(Ternary, BaseOp) { TestTernaryMatrix testCpu(testTernaryBaseOp); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU TestTernaryMatrix testGpu(testTernaryBaseOp); #endif } @@ -1058,7 +1058,7 @@ void testTernaryCompareOp(Tensor& A1, Tensor& A2, Tensor& B, Tensor& C) { TEST(Ternary, CompareOp) { TestTernaryMatrix testCpu(testTernaryCompareOp); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU TestTernaryMatrix testGpu(testTernaryCompareOp); #endif } @@ -1086,7 +1086,7 @@ void testQuaternaryAdd( TEST(Quaternary, BaseOp) { TestQuaternaryMatrix testCpu(testQuaternaryAdd); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU TestQuaternaryMatrix testGpu(testQuaternaryAdd); #endif } @@ -1156,7 +1156,7 @@ void testQuaternaryCompareOp( TEST(Quaternary, CompareOp) { TestQuaternaryMatrix testCpu(testQuaternaryCompareOp); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU TestQuaternaryMatrix testGpu(testQuaternaryCompareOp); #endif } diff --git a/paddle/math/tests/test_TrainingAlgorithm.cpp b/paddle/math/tests/test_TrainingAlgorithm.cpp index 4a88844b43..36ac024007 100644 --- a/paddle/math/tests/test_TrainingAlgorithm.cpp +++ b/paddle/math/tests/test_TrainingAlgorithm.cpp @@ -91,7 +91,7 @@ int VectorCheckErr(const VectorPtr& vector1, const VectorPtr& vector2) { typedef std::function testMatrixFunc; void testCase(testMatrixFunc matrixFunc) { -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU for (auto useGpu : {false, true}) { #else for (auto useGpu : {false}) { diff --git a/paddle/math/tests/test_batchTranspose.cpp b/paddle/math/tests/test_batchTranspose.cpp index 4eb9837909..0189e534eb 100644 --- a/paddle/math/tests/test_batchTranspose.cpp +++ b/paddle/math/tests/test_batchTranspose.cpp @@ -17,7 +17,7 @@ limitations under the License. */ using namespace paddle; // NOLINT -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU TEST(MatrixBatchTransTest, test_batch_matrix_transpose) { const int nx = 100; const int ny = 50; diff --git a/paddle/math/tests/test_lazyAssign.cu b/paddle/math/tests/test_lazyAssign.cu index 92afab4ff7..04f23cff55 100644 --- a/paddle/math/tests/test_lazyAssign.cu +++ b/paddle/math/tests/test_lazyAssign.cu @@ -72,7 +72,7 @@ void testLazyAssign(int height, int width) { TEST(lazyAssign, CPU) { testMatrixCase(testLazyAssign); } -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU TEST(lazyAssign, GPU) { testMatrixCase(testLazyAssign); } #endif @@ -142,6 +142,6 @@ void testSgdUpdate(int height, int width) { TEST(sgdUpdate, CPU) { testMatrixCase(testSgdUpdate); } -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU TEST(sgdUpdate, GPU) { testMatrixCase(testSgdUpdate); } #endif diff --git a/paddle/math/tests/test_matrixCompare.cpp b/paddle/math/tests/test_matrixCompare.cpp index 061fb22e3f..7735877ac8 100644 --- a/paddle/math/tests/test_matrixCompare.cpp +++ b/paddle/math/tests/test_matrixCompare.cpp @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU /// This unittest checks GpuMatrix/CpuMatrix get same result, so disable when /// only cpu version. diff --git a/paddle/math/tests/test_perturbation.cpp b/paddle/math/tests/test_perturbation.cpp index 60ebae0153..dff18136ae 100644 --- a/paddle/math/tests/test_perturbation.cpp +++ b/paddle/math/tests/test_perturbation.cpp @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU #include #include diff --git a/paddle/math/tests/test_sparseMatrixCompare.cpp b/paddle/math/tests/test_sparseMatrixCompare.cpp index a9185a4b24..e39cc0a2f6 100644 --- a/paddle/math/tests/test_sparseMatrixCompare.cpp +++ b/paddle/math/tests/test_sparseMatrixCompare.cpp @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU /// This unittest checks GpuSparseMatrix/CpuSparseMatrix get same result, // so disable when /// only cpu version. diff --git a/paddle/memory/detail/buddy_allocator.cc b/paddle/memory/detail/buddy_allocator.cc index bb44970109..ed0c3374ff 100644 --- a/paddle/memory/detail/buddy_allocator.cc +++ b/paddle/memory/detail/buddy_allocator.cc @@ -175,7 +175,7 @@ void* BuddyAllocator::SystemAlloc(size_t size) { } BuddyAllocator::PoolSet::iterator BuddyAllocator::RefillPool() { -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU if (system_allocator_->UseGpu()) { if ((total_used_ + total_free_) == 0) { // Compute the maximum allocation size for the first allocation. diff --git a/paddle/memory/detail/system_allocator.cc b/paddle/memory/detail/system_allocator.cc index a270bd5958..64f8182b5c 100644 --- a/paddle/memory/detail/system_allocator.cc +++ b/paddle/memory/detail/system_allocator.cc @@ -62,7 +62,7 @@ void CPUAllocator::Free(void* p, size_t size, size_t index) { bool CPUAllocator::UseGpu() const { return false; } -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU void* GPUAllocator::Alloc(size_t& index, size_t size) { // CUDA documentation doesn't explain if cudaMalloc returns nullptr diff --git a/paddle/memory/detail/system_allocator.h b/paddle/memory/detail/system_allocator.h index 82ba322e05..6b1f40347b 100644 --- a/paddle/memory/detail/system_allocator.h +++ b/paddle/memory/detail/system_allocator.h @@ -40,7 +40,7 @@ class CPUAllocator : public SystemAllocator { virtual bool UseGpu() const; }; -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU class GPUAllocator : public SystemAllocator { public: virtual void* Alloc(size_t& index, size_t size); diff --git a/paddle/memory/detail/system_allocator_test.cc b/paddle/memory/detail/system_allocator_test.cc index ba44e06ddb..57d5443d50 100644 --- a/paddle/memory/detail/system_allocator_test.cc +++ b/paddle/memory/detail/system_allocator_test.cc @@ -56,7 +56,7 @@ TEST(CPUAllocator, LockMem) { TestAllocator(a, 0); } -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU TEST(GPUAllocator, Alloc) { paddle::memory::detail::GPUAllocator a; TestAllocator(a, 2048); diff --git a/paddle/memory/memcpy.cc b/paddle/memory/memcpy.cc index c96a697a7e..184d0f8fa7 100644 --- a/paddle/memory/memcpy.cc +++ b/paddle/memory/memcpy.cc @@ -26,7 +26,7 @@ void Copy(platform::CPUPlace, void* dst, std::memcpy(dst, src, num); } -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU template <> void Copy(platform::CPUPlace dst_place, void* dst, diff --git a/paddle/memory/memcpy.h b/paddle/memory/memcpy.h index 2b9c0eada6..7142831d43 100644 --- a/paddle/memory/memcpy.h +++ b/paddle/memory/memcpy.h @@ -33,7 +33,7 @@ namespace memory { template void Copy(DstPlace, void* dst, SrcPlace, const void* src, size_t num); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU /** * \brief Copy memory from one place to another place. diff --git a/paddle/memory/memory.cc b/paddle/memory/memory.cc index 29bc26f9d3..6d5a74dafe 100644 --- a/paddle/memory/memory.cc +++ b/paddle/memory/memory.cc @@ -62,7 +62,7 @@ size_t Used(platform::CPUPlace place) { return GetCPUBuddyAllocator()->Used(); } -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU BuddyAllocator* GetGPUBuddyAllocator(int gpu_id) { using BuddyAllocVec = std::vector; diff --git a/paddle/memory/memory_test.cc b/paddle/memory/memory_test.cc index 53cc63a098..7a617f04dc 100644 --- a/paddle/memory/memory_test.cc +++ b/paddle/memory/memory_test.cc @@ -80,7 +80,7 @@ TEST(BuddyAllocator, CPUMultAlloc) { } } -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU size_t align(size_t size, paddle::platform::GPUPlace place) { size += sizeof(paddle::memory::detail::Metadata); diff --git a/paddle/operators/detail/strided_memcpy.h b/paddle/operators/detail/strided_memcpy.h index b165224b37..9f05a26322 100644 --- a/paddle/operators/detail/strided_memcpy.h +++ b/paddle/operators/detail/strided_memcpy.h @@ -34,7 +34,7 @@ struct StridedMemcpyFunctor { auto& cpu_place = boost::get(place); memory::Copy(cpu_place, dst, cpu_place, src, sizeof(T) * dst_dim.head); } else { -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU auto& gpu_place = boost::get(place); auto& cuda_ctx = reinterpret_cast(dev_ctx); diff --git a/paddle/operators/math/im2col_test.cc b/paddle/operators/math/im2col_test.cc index f0b8c88591..3d040ca2b5 100644 --- a/paddle/operators/math/im2col_test.cc +++ b/paddle/operators/math/im2col_test.cc @@ -71,7 +71,7 @@ void testIm2col() { context = new paddle::platform::CPUDeviceContext(paddle::platform::CPUPlace()); } else { -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU context = new paddle::platform::CUDADeviceContext(paddle::platform::GPUPlace()); #else @@ -116,7 +116,7 @@ void testIm2col() { TEST(math, im2col) { testIm2col(); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU testIm2col(); #endif } diff --git a/paddle/operators/math/math_function_test.cc b/paddle/operators/math/math_function_test.cc index 22468a0c4a..2252268620 100644 --- a/paddle/operators/math/math_function_test.cc +++ b/paddle/operators/math/math_function_test.cc @@ -1,7 +1,7 @@ #include "paddle/operators/math/math_function.h" #include "gtest/gtest.h" -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU TEST(math_function, notrans_mul_trans) { paddle::framework::Tensor input1; paddle::framework::Tensor input1_gpu; diff --git a/paddle/operators/strided_memcpy_test.cc b/paddle/operators/strided_memcpy_test.cc index 05882a8873..e0dd7b19f1 100644 --- a/paddle/operators/strided_memcpy_test.cc +++ b/paddle/operators/strided_memcpy_test.cc @@ -72,7 +72,7 @@ TEST(StridedMemcpy, CPUConcat) { } } -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU TEST(StridedMemcpy, GPUCrop) { // clang-format off int src[] = { diff --git a/paddle/platform/device_context.cc b/paddle/platform/device_context.cc index 36af1ac677..8dcc357a16 100644 --- a/paddle/platform/device_context.cc +++ b/paddle/platform/device_context.cc @@ -35,7 +35,7 @@ Eigen::DefaultDevice* CPUDeviceContext::eigen_device() const { Place CPUDeviceContext::GetPlace() const { return CPUPlace(); } -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU template <> Eigen::GpuDevice* diff --git a/paddle/platform/device_context.h b/paddle/platform/device_context.h index d805d2ab08..c1c4c7f760 100644 --- a/paddle/platform/device_context.h +++ b/paddle/platform/device_context.h @@ -14,7 +14,7 @@ limitations under the License. */ #include "paddle/platform/enforce.h" #include "paddle/platform/place.h" -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU #include "paddle/platform/dynload/cublas.h" #include "paddle/platform/dynload/cudnn.h" #include "paddle/platform/gpu_info.h" @@ -61,7 +61,7 @@ class CPUDeviceContext : public DeviceContext { std::unique_ptr eigen_device_; }; -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU template <> struct EigenDeviceConverter { using EigenDeviceType = Eigen::GpuDevice; diff --git a/paddle/platform/enforce.h b/paddle/platform/enforce.h index 52bd23039b..f9fe521d50 100644 --- a/paddle/platform/enforce.h +++ b/paddle/platform/enforce.h @@ -29,7 +29,7 @@ limitations under the License. */ #include // for __cxa_demangle #endif -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU #include "paddle/platform/dynload/cublas.h" #include "paddle/platform/dynload/cudnn.h" @@ -113,7 +113,7 @@ inline typename std::enable_if::type throw_on_error( } } -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU template inline typename std::enable_if::type throw_on_error( diff --git a/paddle/platform/gpu_info.h b/paddle/platform/gpu_info.h index f0c825bd9b..ac884386dd 100644 --- a/paddle/platform/gpu_info.h +++ b/paddle/platform/gpu_info.h @@ -14,7 +14,7 @@ limitations under the License. */ #pragma once -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU #include #include diff --git a/paddle/platform/variant.h b/paddle/platform/variant.h index 16ee00efe7..8145799dfd 100644 --- a/paddle/platform/variant.h +++ b/paddle/platform/variant.h @@ -16,7 +16,7 @@ #include -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU // Because boost's variadic templates has bug on nvcc, boost will disable // variadic template support when GPU enabled on nvcc. diff --git a/paddle/pserver/test/SocketTest.cpp b/paddle/pserver/test/SocketTest.cpp index 6f6c9e596c..96724530f5 100644 --- a/paddle/pserver/test/SocketTest.cpp +++ b/paddle/pserver/test/SocketTest.cpp @@ -215,7 +215,7 @@ int main(int argc, char** argv) { uint64_t dataSize = FLAGS_dim * sizeof(real); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU GpuVector gpuParam(FLAGS_dim); GpuVector gpuGrad(FLAGS_dim); #else diff --git a/paddle/pserver/test/test_ProtoServer.cpp b/paddle/pserver/test/test_ProtoServer.cpp index 04236fda2f..74ab1f2f77 100644 --- a/paddle/pserver/test/test_ProtoServer.cpp +++ b/paddle/pserver/test/test_ProtoServer.cpp @@ -99,7 +99,7 @@ TEST(ProtoServer, regular) { } TEST(ProtoServer, extended) { -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU ProtoClient* client; if (FLAGS_rdma_tcp == "rdma") client = new ProtoClient(FLAGS_server_addr, FLAGS_port, F_RDMA); diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc index d480427f59..761d82fc4d 100644 --- a/paddle/pybind/pybind.cc +++ b/paddle/pybind/pybind.cc @@ -34,7 +34,7 @@ static size_t UniqueIntegerGenerator() { } bool IsCompileGPU() { -#ifdef PADDLE_ONLY_CPU +#ifndef PADDLE_WITH_GPU return false; #else return true; @@ -78,7 +78,7 @@ PYBIND11_PLUGIN(core) { .def("set", PyCPUTensorSetFromArray) .def("set", PyCPUTensorSetFromArray) .def("set", PyCPUTensorSetFromArray) -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU .def("set", PyCUDATensorSetFromArray) .def("set", PyCUDATensorSetFromArray) .def("set", PyCUDATensorSetFromArray) @@ -96,7 +96,7 @@ PYBIND11_PLUGIN(core) { .def( "__init__", [](LoDTensor &instance, const std::vector> &lod) { -#ifdef PADDLE_ONLY_CPU +#ifndef PADDLE_WITH_GPU new (&instance) LoDTensor(lod); #else LoD new_lod; @@ -107,7 +107,7 @@ PYBIND11_PLUGIN(core) { }) .def("set_lod", [](LoDTensor &self, const std::vector> &lod) { -#ifdef PADDLE_ONLY_CPU +#ifndef PADDLE_WITH_GPU self.set_lod(lod); #else LoD new_lod; @@ -117,7 +117,7 @@ PYBIND11_PLUGIN(core) { #endif }) .def("lod", [](LoDTensor &self) -> std::vector> { -#ifdef PADDLE_ONLY_CPU +#ifndef PADDLE_WITH_GPU return self.lod(); #else auto lod = self.lod(); @@ -203,7 +203,7 @@ All parameter, weight, gradient are variables in Paddle. .def_static("create", [](paddle::platform::GPUPlace& place) -> paddle::platform::DeviceContext* { -#ifdef PADDLE_ONLY_CPU +#ifndef PADDLE_WITH_GPU PADDLE_THROW("GPUPlace is not supported in CPU device."); #else return new paddle::platform::CUDADeviceContext(place); diff --git a/paddle/pybind/tensor_py.h b/paddle/pybind/tensor_py.h index 3e3e6bc031..62e85fa54f 100644 --- a/paddle/pybind/tensor_py.h +++ b/paddle/pybind/tensor_py.h @@ -106,7 +106,7 @@ void PyCPUTensorSetFromArray( std::memcpy(dst, array.data(), sizeof(T) * array.size()); } -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU template void PyCUDATensorSetFromArray( framework::Tensor &self, diff --git a/paddle/trainer/MergeModel.cpp b/paddle/trainer/MergeModel.cpp index 91d89b61a3..a37d53bc72 100644 --- a/paddle/trainer/MergeModel.cpp +++ b/paddle/trainer/MergeModel.cpp @@ -29,7 +29,7 @@ int main(int argc, char** argv) { initMain(argc, argv); initPython(argc, argv); string confFile = TrainerConfigHelper::getConfigNameFromPath(FLAGS_model_dir); -#ifdef PADDLE_ONLY_CPU +#ifndef PADDLE_WITH_GPU FLAGS_use_gpu = false; #endif auto config = std::make_shared(confFile); diff --git a/paddle/trainer/tests/test_Compare.cpp b/paddle/trainer/tests/test_Compare.cpp index e855a8fe2e..b5d29da45a 100644 --- a/paddle/trainer/tests/test_Compare.cpp +++ b/paddle/trainer/tests/test_Compare.cpp @@ -146,7 +146,7 @@ void compareGradient(comData& comDataCpu, comData& comDataGpu) { } int main(int argc, char** argv) { -#ifdef PADDLE_ONLY_CPU +#ifndef PADDLE_WITH_GPU exit(0); #endif paddle::initMain(argc, argv); diff --git a/paddle/trainer/tests/test_CompareSparse.cpp b/paddle/trainer/tests/test_CompareSparse.cpp index 813275518e..4da9ce20fb 100644 --- a/paddle/trainer/tests/test_CompareSparse.cpp +++ b/paddle/trainer/tests/test_CompareSparse.cpp @@ -174,7 +174,7 @@ TEST(compareSparse, multiGradientMachine) { FLAGS_local = local; FLAGS_ports_num_for_sparse = 5; for (bool useGpu : {false, true}) { -#ifdef PADDLE_ONLY_CPU +#ifndef PADDLE_WITH_GPU if (useGpu) continue; #endif FLAGS_parallel_nn = useGpu; @@ -198,7 +198,7 @@ TEST(compareSparse, NeuralNetwork) { FLAGS_local = local; FLAGS_ports_num_for_sparse = 5; for (bool useGpu : {false, true}) { -#ifdef PADDLE_ONLY_CPU +#ifndef PADDLE_WITH_GPU if (useGpu) continue; #endif FLAGS_parallel_nn = useGpu; diff --git a/paddle/trainer/tests/test_Trainer.cpp b/paddle/trainer/tests/test_Trainer.cpp index 264bc46ebc..f69e1aafee 100644 --- a/paddle/trainer/tests/test_Trainer.cpp +++ b/paddle/trainer/tests/test_Trainer.cpp @@ -51,7 +51,7 @@ void checkGradientTest(const string& configFile, TEST(checkGradient, cpu) { checkGradientTest(configFile1, false, false); } -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU TEST(checkGradient, gpu) { checkGradientTest(configFile1, true, false); } TEST(checkGradient, multiGpu) { @@ -97,7 +97,7 @@ TEST(checkGradient, hsigmoid) { checkGradientTest(configFile2, false, false); } TEST(checkGradient, chunk) { checkGradientTest(configFile3, false, false); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU checkGradientTest(configFile3, true, true); #endif } diff --git a/paddle/trainer/tests/test_TrainerOnePass.cpp b/paddle/trainer/tests/test_TrainerOnePass.cpp index 00ba61377a..4c4d124fa9 100644 --- a/paddle/trainer/tests/test_TrainerOnePass.cpp +++ b/paddle/trainer/tests/test_TrainerOnePass.cpp @@ -79,7 +79,7 @@ void trainerOnePassTest(const string& configFile, // 1. test trainer (cpu, gpu). TEST(trainerOnePass, cpu) { trainerOnePassTest(configFile1, false, false); } -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU TEST(trainerOnePass, gpu) { trainerOnePassTest(configFile1, true, false); } TEST(trainerOnePass, gpu2) { trainerOnePassTest(configFile1, true, false, 2); } @@ -94,7 +94,7 @@ TEST(trainerOnePass, parallel) { #endif // 2. test average_window. -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU TEST(average_window, gpu) { trainerOnePassTest(configFile1, true, false, 4, 0.01); } @@ -266,7 +266,7 @@ TEST(checkRemoteUpdater, cpuTrainerOldUpdater) { checkRemoteParameterUpdaterTest(configFile1, false, false, 1, true); } -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU TEST(checkRemoteUpdater, gpuTrainer) { checkRemoteParameterUpdaterTest(configFile1, true, false); } diff --git a/paddle/trainer/tests/test_recurrent_machine_generation.cpp b/paddle/trainer/tests/test_recurrent_machine_generation.cpp index 1322e77178..74b4fed7ed 100644 --- a/paddle/trainer/tests/test_recurrent_machine_generation.cpp +++ b/paddle/trainer/tests/test_recurrent_machine_generation.cpp @@ -113,7 +113,7 @@ void testGeneration(const string& configFile, #ifndef PADDLE_TYPE_DOUBLE TEST(RecurrentGradientMachine, test_generation) { -#ifdef PADDLE_ONLY_CPU +#ifndef PADDLE_WITH_GPU const auto useGpuConfs = {false}; #else const auto useGpuConfs = {true, false}; diff --git a/paddle/utils/Flags.cpp b/paddle/utils/Flags.cpp index ab1c181c62..32155ded35 100644 --- a/paddle/utils/Flags.cpp +++ b/paddle/utils/Flags.cpp @@ -14,7 +14,7 @@ limitations under the License. */ #include "Flags.h" -#ifdef PADDLE_ONLY_CPU +#ifndef PADDLE_WITH_GPU DEFINE_bool(use_gpu, false, "Only support CPU training"); #else DEFINE_bool(use_gpu, true, "Whether to use GPU for training"); diff --git a/paddle/utils/Util.h b/paddle/utils/Util.h index 22ce2534d3..904d0f5061 100644 --- a/paddle/utils/Util.h +++ b/paddle/utils/Util.h @@ -218,7 +218,7 @@ protected: * *d2* is peer device to enable direct access to by the d1 device. */ inline void enablePeerAccess(int d1, int d2) { -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU if (hl_device_can_access_peer(d1, d2)) { SetDevice dev(d1); hl_device_enable_peer_access(d2); diff --git a/paddle/utils/Version.h b/paddle/utils/Version.h index f53d6420bb..611fda83d9 100644 --- a/paddle/utils/Version.h +++ b/paddle/utils/Version.h @@ -48,7 +48,7 @@ void printVersion(std::ostream& os); * @return return true if paddle compiled with GPU */ constexpr bool isWithGpu() { -#ifdef PADDLE_ONLY_CPU +#ifndef PADDLE_WITH_GPU return false; #else return true; From e119177a8c2372335573d27f273c286580db2bd8 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 4 Oct 2017 12:11:34 -0700 Subject: [PATCH 197/342] Use unique_ptr --- paddle/framework/backward.cc | 8 +++--- paddle/framework/backward_test.cc | 20 +++++++-------- paddle/operators/mean_op.cc | 14 +++++------ paddle/operators/minus_op.cc | 25 +++++++++++-------- paddle/operators/pad_op.cc | 20 +++++++-------- paddle/operators/scale_op.cc | 14 +++++------ .../softmax_with_cross_entropy_op.cc | 22 ++++++++-------- paddle/operators/sum_op.cc | 17 +++++++------ 8 files changed, 72 insertions(+), 68 deletions(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index 22c8c83f13..40390d4150 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -36,9 +36,11 @@ static inline std::unique_ptr CreateGradOp( auto grad_descs = info.grad_op_maker_(op_desc); std::vector> grad_ops; grad_ops.reserve(grad_descs.size()); - std::transform( - grad_descs.begin(), grad_descs.end(), std::back_inserter(grad_ops), - [](OpDescBind& grad_desc) { return OpRegistry::CreateOp(&grad_desc); }); + std::transform(grad_descs.begin(), grad_descs.end(), + std::back_inserter(grad_ops), + [](const std::unique_ptr& grad_desc) { + return OpRegistry::CreateOp(grad_desc.get()); + }); PADDLE_ENFORCE_GT(grad_ops.size(), 0); if (grad_ops.size() == 1) { return std::move(grad_ops[0]); diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index c88e85f8c4..830d0427fa 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -39,13 +39,13 @@ class RowWiseAddGradMaker : public SingleGradOpDescMaker { using SingleGradOpDescMaker::SingleGradOpDescMaker; protected: - OpDescBind Apply() const override { - OpDescBind grad_op; - grad_op.SetInput(GradVarName("Out"), OutputGrad("Out")); - grad_op.SetOutput(GradVarName("X"), InputGrad("X")); - grad_op.SetOutput(GradVarName("b"), InputGrad("b")); - grad_op.SetType("rowwise_add_grad"); - return grad_op; + std::unique_ptr Apply() const override { + auto grad_op = new OpDescBind(); + grad_op->SetInput(GradVarName("Out"), OutputGrad("Out")); + grad_op->SetOutput(GradVarName("X"), InputGrad("X")); + grad_op->SetOutput(GradVarName("b"), InputGrad("b")); + grad_op->SetType("rowwise_add_grad"); + return std::unique_ptr(grad_op); } }; @@ -147,10 +147,8 @@ class SumOpMaker : public framework::OpProtoAndCheckerMaker { public: SumOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "the input tensors of sum operator.") - .AsDuplicable() - .NotInGradient(); - AddOutput("Out", "the output tensor of sum operator.").NotInGradient(); + AddInput("X", "the input tensors of sum operator.").AsDuplicable(); + AddOutput("Out", "the output tensor of sum operator."); AddComment(""); } }; diff --git a/paddle/operators/mean_op.cc b/paddle/operators/mean_op.cc index 0c84cbb5a7..339c089e87 100644 --- a/paddle/operators/mean_op.cc +++ b/paddle/operators/mean_op.cc @@ -57,13 +57,13 @@ class MeanGradMaker : public framework::SingleGradOpDescMaker { using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; protected: - framework::OpDescBind Apply() const override { - framework::OpDescBind grad_op; - grad_op.SetType("mean_grad"); - grad_op.SetInput("X", Input("X")); - grad_op.SetInput(framework::GradVarName("Out"), OutputGrad("Out")); - grad_op.SetOutput(framework::GradVarName("X"), InputGrad("X")); - return grad_op; + std::unique_ptr Apply() const override { + auto* grad_op = new framework::OpDescBind(); + grad_op->SetType("mean_grad"); + grad_op->SetInput("X", Input("X")); + grad_op->SetInput(framework::GradVarName("Out"), OutputGrad("Out")); + grad_op->SetOutput(framework::GradVarName("X"), InputGrad("X")); + return std::unique_ptr(grad_op); } }; diff --git a/paddle/operators/minus_op.cc b/paddle/operators/minus_op.cc index 1b3ae9a9a6..aced8636b9 100644 --- a/paddle/operators/minus_op.cc +++ b/paddle/operators/minus_op.cc @@ -69,19 +69,22 @@ class MinusGradMaker : public framework::GradOpDescMakerBase { public: using framework::GradOpDescMakerBase::GradOpDescMakerBase; - std::vector operator()() const override { - std::vector ops; + std::vector> operator()() + const override { + std::vector> ops; ops.resize(2); - ops[0].SetType("scale"); - ops[0].SetInput("X", OutputGrad("Out")); - ops[0].SetOutput("Out", InputGrad("X")); - ops[0].SetAttr("scale", 1.0f); - - ops[1].SetType("scale"); - ops[1].SetInput("X", OutputGrad("Out")); - ops[1].SetOutput("Out", InputGrad("Y")); - ops[1].SetAttr("scale", -1.0f); + ops[0].reset(new framework::OpDescBind()); + ops[0]->SetType("scale"); + ops[0]->SetInput("X", OutputGrad("Out")); + ops[0]->SetOutput("Out", InputGrad("X")); + ops[0]->SetAttr("scale", 1.0f); + + ops[1].reset(new framework::OpDescBind()); + ops[1]->SetType("scale"); + ops[1]->SetInput("X", OutputGrad("Out")); + ops[1]->SetOutput("Out", InputGrad("Y")); + ops[1]->SetAttr("scale", -1.0f); return ops; } }; diff --git a/paddle/operators/pad_op.cc b/paddle/operators/pad_op.cc index 4bd25fa46a..9445917739 100644 --- a/paddle/operators/pad_op.cc +++ b/paddle/operators/pad_op.cc @@ -111,18 +111,18 @@ class PadOpGrad : public framework::OperatorWithKernel { }; class PadOpGradMaker : public framework::SingleGradOpDescMaker { - protected: - framework::OpDescBind Apply() const override { - framework::OpDescBind bind; - bind.SetInput("X", Input("X")); - bind.SetInput(framework::GradVarName("Out"), OutputGrad("Out")); - bind.SetOutput(framework::GradVarName("X"), InputGrad("X")); - bind.SetAttrMap(Attrs()); - return bind; - } - public: using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; + + protected: + std::unique_ptr Apply() const override { + auto* bind = new framework::OpDescBind(); + bind->SetInput("X", Input("X")); + bind->SetInput(framework::GradVarName("Out"), OutputGrad("Out")); + bind->SetOutput(framework::GradVarName("X"), InputGrad("X")); + bind->SetAttrMap(Attrs()); + return std::unique_ptr(bind); + } }; } // namespace operators diff --git a/paddle/operators/scale_op.cc b/paddle/operators/scale_op.cc index 40f0960923..e225aecc27 100644 --- a/paddle/operators/scale_op.cc +++ b/paddle/operators/scale_op.cc @@ -57,13 +57,13 @@ class ScaleGradMaker : public framework::SingleGradOpDescMaker { using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; protected: - framework::OpDescBind Apply() const override { - framework::OpDescBind grad_op; - grad_op.SetType("scale"); - grad_op.SetInput("X", OutputGrad("Out")); - grad_op.SetOutput("Out", InputGrad("X")); - grad_op.SetAttr("scale", GetAttr("scale")); - return grad_op; + std::unique_ptr Apply() const override { + auto *grad_op = new framework::OpDescBind(); + grad_op->SetType("scale"); + grad_op->SetInput("X", OutputGrad("Out")); + grad_op->SetOutput("Out", InputGrad("X")); + grad_op->SetAttr("scale", GetAttr("scale")); + return std::unique_ptr(grad_op); } }; diff --git a/paddle/operators/softmax_with_cross_entropy_op.cc b/paddle/operators/softmax_with_cross_entropy_op.cc index 87dcc3f240..bc9868874d 100644 --- a/paddle/operators/softmax_with_cross_entropy_op.cc +++ b/paddle/operators/softmax_with_cross_entropy_op.cc @@ -167,17 +167,17 @@ class SoftmaxGradMaker : public framework::SingleGradOpDescMaker { using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; protected: - framework::OpDescBind Apply() const override { - framework::OpDescBind grad_op; - grad_op.SetType("softmax_with_cross_entropy_grad"); - grad_op.SetInput("Label", Input("Label")); - grad_op.SetInput("Softmax", Output("Softmax")); - grad_op.SetInput("Loss", Output("Loss")); - grad_op.SetInput(framework::GradVarName("Softmax"), OutputGrad("Softmax")); - grad_op.SetInput(framework::GradVarName("Loss"), OutputGrad("Loss")); - grad_op.SetOutput(framework::GradVarName("Logits"), InputGrad("Logits")); - grad_op.SetAttrMap(Attrs()); - return grad_op; + std::unique_ptr Apply() const override { + auto* grad_op = new framework::OpDescBind(); + grad_op->SetType("softmax_with_cross_entropy_grad"); + grad_op->SetInput("Label", Input("Label")); + grad_op->SetInput("Softmax", Output("Softmax")); + grad_op->SetInput("Loss", Output("Loss")); + grad_op->SetInput(framework::GradVarName("Softmax"), OutputGrad("Softmax")); + grad_op->SetInput(framework::GradVarName("Loss"), OutputGrad("Loss")); + grad_op->SetOutput(framework::GradVarName("Logits"), InputGrad("Logits")); + grad_op->SetAttrMap(Attrs()); + return std::unique_ptr(grad_op); } }; diff --git a/paddle/operators/sum_op.cc b/paddle/operators/sum_op.cc index 5ae13492b3..c701ee8dde 100644 --- a/paddle/operators/sum_op.cc +++ b/paddle/operators/sum_op.cc @@ -60,19 +60,20 @@ class SumGradMaker : public framework::GradOpDescMakerBase { public: using framework::GradOpDescMakerBase::GradOpDescMakerBase; - std::vector operator()() const override { + std::vector> operator()() + const override { auto x_grads = InputGrad("X"); - std::vector grad_ops; + std::vector> grad_ops; grad_ops.reserve(x_grads.size()); auto og = OutputGrad("Out"); std::transform(x_grads.begin(), x_grads.end(), std::back_inserter(grad_ops), [&og](const std::string& x_grad) { - framework::OpDescBind grad_op; - grad_op.SetType("scale"); - grad_op.SetInput("X", og); - grad_op.SetOutput("Out", {x_grad}); - grad_op.SetAttr("scale", 1.0f); - return grad_op; + auto* grad_op = new framework::OpDescBind(); + grad_op->SetType("scale"); + grad_op->SetInput("X", og); + grad_op->SetOutput("Out", {x_grad}); + grad_op->SetAttr("scale", 1.0f); + return std::unique_ptr(grad_op); }); return grad_ops; } From 94855f4af08002253fca10aab4bffc187e5c982f Mon Sep 17 00:00:00 2001 From: Kavya Srinet Date: Wed, 4 Oct 2017 12:45:56 -0700 Subject: [PATCH 198/342] Fixed changes proposed in the review --- paddle/operators/rmsprop_op.cc | 69 +++++++++++++------ paddle/operators/rmsprop_op.h | 19 +++-- .../v2/framework/tests/test_rmsprop_op.py | 24 ++++--- 3 files changed, 77 insertions(+), 35 deletions(-) diff --git a/paddle/operators/rmsprop_op.cc b/paddle/operators/rmsprop_op.cc index 602efab3db..1e06e08ede 100644 --- a/paddle/operators/rmsprop_op.cc +++ b/paddle/operators/rmsprop_op.cc @@ -25,25 +25,32 @@ class RmspropOp : public framework::OperatorWithKernel { void InferShape(framework::InferShapeContextBase *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("Param"), "Input(Param) of RmspropOp should not be null."); + PADDLE_ENFORCE(ctx->HasInput("MeanSquare"), + "Input(MeanSquare) of RmspropOp should not be null."); + PADDLE_ENFORCE(ctx->HasInput("LearningRate"), + "Input(LearningRate) of RmspropOp should not be null."); PADDLE_ENFORCE(ctx->HasInput("Grad"), "Input(Grad) of RmspropOp should not be null."); PADDLE_ENFORCE(ctx->HasInput("Moment"), "Input(Moment) of RmspropOp should not be null."); - PADDLE_ENFORCE(ctx->HasInput("LearningRate"), - "Input(LearningRate) of RmspropOp should not be null."); PADDLE_ENFORCE(ctx->HasOutput("ParamOut"), "Output(param_out) of RmspropOp should not be null."); PADDLE_ENFORCE(ctx->HasOutput("MomentOut"), - "Output(moment_out) of RmspropOp should not be null."); + "Output(Momentum_out) of RmspropOp should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("MeanSquareOut"), + "Output(MeanSquareOut) of RmspropOp should not be null."); auto param_dim = ctx->GetInputDim("Param"); PADDLE_ENFORCE_EQ( param_dim, ctx->GetInputDim("Grad"), "Param and grad input of RmspropOp should have the same dimension."); - PADDLE_ENFORCE_EQ( - param_dim, ctx->GetInputDim("Moment"), - "Param and moment input of RmspropOp should have the same dimension."); + PADDLE_ENFORCE_EQ(param_dim, ctx->GetInputDim("Moment"), + "Param and Momentum input of RmspropOp " + "should have the same dimension."); + PADDLE_ENFORCE_EQ(param_dim, ctx->GetInputDim("MeanSquare"), + "Param and Momentum input of RmspropOp " + "should have the same dimension."); auto lr_dim = ctx->GetInputDim("LearningRate"); PADDLE_ENFORCE_EQ(framework::product(lr_dim), 1, @@ -51,6 +58,7 @@ class RmspropOp : public framework::OperatorWithKernel { ctx->SetOutputDim("ParamOut", param_dim); ctx->SetOutputDim("MomentOut", param_dim); + ctx->SetOutputDim("MeanSquareOut", param_dim); } }; @@ -59,27 +67,46 @@ class RmspropOpMaker : public framework::OpProtoAndCheckerMaker { RmspropOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("Param", "Input parameter"); - AddInput("Grad", "Input gradient"); - AddInput("Moment", "Second moment"); - AddInput("LearningRate", "Learning Rate"); - - AddOutput("ParamOut", "Output parameter"); - AddOutput("MomentOut", "Output second moment"); - - AddAttr("epsilon", "Constant for numerical stability"); - AddAttr("decayRate", "Decay rate for moving average of gradients"); + AddInput("Param", + "(Tensor, default Tensor) " + "Input parameter value that has to be updated"); + AddInput("MeanSquare", + "(Tensor, default Tensor)" + " The mean square value that gets updated"); + AddInput("LearningRate", + "(Tensor, default Tensor) " + "The learning rate should be a tensor of size 1"); + AddInput("Grad", + "(Tensor, default Tensor) " + "Input gradient of the parameter"); + AddInput("Moment", + "(Tensor, default Tensor) The moment that gets updated"); + + AddOutput("ParamOut", "(Tensor) Output updated parameter value"); + AddOutput("MomentOut", "(Tensor) Output updated moment"); + AddOutput("MeanSquareOut", "(Tensor) Output Mean squared updated value"); + + AddAttr("epsilon", + "(float, default 1e-10) Constant " + "for numerical stability.") + .SetDefault(1e-10); + AddAttr("decay", + "(float, default 0.9) " + "Discounting factor for coming gradient.") + .SetDefault(0.9); + AddAttr("momentum", "(float, default 0.0) Constant value") + .SetDefault(0.0); AddComment(R"DOC( RMSprop -MomentOut = decayRate * Moment + (1 - decayRate) * Grad * Grad -ParamOut = Param - LearningRate * Grad / (sqrt(MomentOut) + epsilon) +MeanSquareOut = decay * MeanSquare + (1 - decay) * Grad * Grad +MomentOut = momentum * Moment + + LearningRate * Grad / sqrt(MeanSquareOut + epsilon) +ParamOut = Param - MomentOut -The original slide(Slide 29 of +The original slides that proposed RMSprop: Slide 29 of http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf) -does not have the epsilon attribute. It is added here for numerical stability -to avoid division by zero. )DOC"); } diff --git a/paddle/operators/rmsprop_op.h b/paddle/operators/rmsprop_op.h index 65b9edd35b..ed4b283ce4 100644 --- a/paddle/operators/rmsprop_op.h +++ b/paddle/operators/rmsprop_op.h @@ -30,23 +30,30 @@ class RmspropOpKernel : public framework::OpKernel { void Compute(const framework::ExecutionContext& ctx) const override { auto param_out = ctx.Output("ParamOut"); auto moment_out = ctx.Output("MomentOut"); + auto mean_square_out = ctx.Output("MeanSquareOut"); param_out->mutable_data(ctx.GetPlace()); moment_out->mutable_data(ctx.GetPlace()); + mean_square_out->mutable_data(ctx.GetPlace()); float epsilon = ctx.Attr("epsilon"); - float decay = ctx.Attr("decayRate"); + float rho = ctx.Attr("decay"); + float momentum = ctx.Attr("momentum"); auto p = EigenVector::Flatten(*ctx.Input("Param")); - auto g = EigenVector::Flatten(*ctx.Input("Grad")); - auto m = EigenVector::Flatten(*ctx.Input("Moment")); + auto ms = EigenVector::Flatten(*ctx.Input("MeanSquare")); float lr = ctx.Input("LearningRate")->data()[0]; + auto g = EigenVector::Flatten(*ctx.Input("Grad")); + auto mom = EigenVector::Flatten(*ctx.Input("Moment")); + auto p_out = EigenVector::Flatten(*param_out); - auto m_out = EigenVector::Flatten(*moment_out); + auto mom_out = EigenVector::Flatten(*moment_out); + auto ms_out = EigenVector::Flatten(*mean_square_out); auto place = ctx.GetEigenDevice(); - m_out.device(place) = decay * m + (1 - decay) * g * g; - p_out.device(place) = p - lr * g / (m_out.sqrt() + epsilon); + ms_out.device(place) = rho * ms + (1 - rho) * g * g; + mom_out.device(place) = momentum * mom + lr * g / (ms_out + epsilon).sqrt(); + p_out.device(place) = p - mom_out; } }; diff --git a/python/paddle/v2/framework/tests/test_rmsprop_op.py b/python/paddle/v2/framework/tests/test_rmsprop_op.py index 64ca5da48e..84bd815c8c 100644 --- a/python/paddle/v2/framework/tests/test_rmsprop_op.py +++ b/python/paddle/v2/framework/tests/test_rmsprop_op.py @@ -8,27 +8,35 @@ class TestRmspropOp(OpTest): self.op_type = "rmsprop" param = np.random.random((123, 321)).astype("float32") + mean_square = np.random.random((123, 321)).astype("float32") + learning_rate = np.array([0.01]).astype("float32") grad = np.random.random((123, 321)).astype("float32") moment = np.zeros((123, 321)).astype("float32") - learning_rate = np.array([0.01]).astype("float32") epsilon = 1e-6 - decay_rate = 0.9 + decay = 0.9 + momentum = 0.0 self.inputs = { 'Param': param, + 'MeanSquare': mean_square, + 'LearningRate': learning_rate, 'Grad': grad, 'Moment': moment, - 'LearningRate': learning_rate } - self.attrs = {'epsilon': epsilon, 'decayRate': decay_rate} + self.attrs = {'epsilon': epsilon, 'decay': decay, 'momentum': momentum} - moment_out = decay_rate * moment + (1 - decay_rate) * grad * grad - param_out = param - learning_rate * grad / (np.sqrt(moment_out) + - epsilon) + ms_out = decay * mean_square + (1 - decay) * grad * grad + moment_out = momentum * moment + \ + learning_rate * grad / np.sqrt(ms_out + epsilon) + param_out = param - moment_out - self.outputs = {'ParamOut': param_out, 'MomentOut': moment_out} + self.outputs = { + 'ParamOut': param_out, + 'MomentOut': moment_out, + 'MeanSquareOut': ms_out + } def test_check_output(self): self.check_output() From fa12e51675dbbc77eef75e5c346f2deecd45b0dc Mon Sep 17 00:00:00 2001 From: Kavya Srinet Date: Wed, 4 Oct 2017 13:27:40 -0700 Subject: [PATCH 199/342] Adding the default attribute test case --- paddle/operators/rmsprop_op.cc | 6 +-- paddle/operators/rmsprop_op.h | 6 +-- .../v2/framework/tests/test_rmsprop_op.py | 45 ++++++++++++++++++- 3 files changed, 50 insertions(+), 7 deletions(-) diff --git a/paddle/operators/rmsprop_op.cc b/paddle/operators/rmsprop_op.cc index 1e06e08ede..8f61c7fdda 100644 --- a/paddle/operators/rmsprop_op.cc +++ b/paddle/operators/rmsprop_op.cc @@ -89,13 +89,13 @@ class RmspropOpMaker : public framework::OpProtoAndCheckerMaker { AddAttr("epsilon", "(float, default 1e-10) Constant " "for numerical stability.") - .SetDefault(1e-10); + .SetDefault(1.0e-10f); AddAttr("decay", "(float, default 0.9) " "Discounting factor for coming gradient.") - .SetDefault(0.9); + .SetDefault(0.9f); AddAttr("momentum", "(float, default 0.0) Constant value") - .SetDefault(0.0); + .SetDefault(0.0f); AddComment(R"DOC( RMSprop diff --git a/paddle/operators/rmsprop_op.h b/paddle/operators/rmsprop_op.h index ed4b283ce4..9c04276ec6 100644 --- a/paddle/operators/rmsprop_op.h +++ b/paddle/operators/rmsprop_op.h @@ -28,9 +28,9 @@ template class RmspropOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { - auto param_out = ctx.Output("ParamOut"); - auto moment_out = ctx.Output("MomentOut"); - auto mean_square_out = ctx.Output("MeanSquareOut"); + auto* param_out = ctx.Output("ParamOut"); + auto* moment_out = ctx.Output("MomentOut"); + auto* mean_square_out = ctx.Output("MeanSquareOut"); param_out->mutable_data(ctx.GetPlace()); moment_out->mutable_data(ctx.GetPlace()); diff --git a/python/paddle/v2/framework/tests/test_rmsprop_op.py b/python/paddle/v2/framework/tests/test_rmsprop_op.py index 84bd815c8c..3e5ff733e9 100644 --- a/python/paddle/v2/framework/tests/test_rmsprop_op.py +++ b/python/paddle/v2/framework/tests/test_rmsprop_op.py @@ -3,7 +3,10 @@ import numpy as np from op_test import OpTest -class TestRmspropOp(OpTest): +class TestRmspropOp1(OpTest): + ''' Test RMSProp with explicit inputs + ''' + def setUp(self): self.op_type = "rmsprop" @@ -42,5 +45,45 @@ class TestRmspropOp(OpTest): self.check_output() +class TestRmspropOp2(OpTest): + '''Test RMSProp with defaukt values for attributes + ''' + + def setUp(self): + self.op_type = "rmsprop" + + param = np.random.random((123, 321)).astype("float32") + mean_square = np.random.random((123, 321)).astype("float32") + learning_rate = np.array([0.01]).astype("float32") + grad = np.random.random((123, 321)).astype("float32") + moment = np.zeros((123, 321)).astype("float32") + + epsilon = 1.0e-10 + decay = 0.9 + momentum = 0.0 + + self.inputs = { + 'Param': param, + 'MeanSquare': mean_square, + 'LearningRate': learning_rate, + 'Grad': grad, + 'Moment': moment, + } + + ms_out = decay * mean_square + (1 - decay) * grad * grad + moment_out = momentum * moment + \ + learning_rate * grad / np.sqrt(ms_out + epsilon) + param_out = param - moment_out + + self.outputs = { + 'ParamOut': param_out, + 'MomentOut': moment_out, + 'MeanSquareOut': ms_out + } + + def test_check_output(self): + self.check_output() + + if __name__ == "__main__": unittest.main() From a9e298bebef29390b815e431e1a475ab1417015a Mon Sep 17 00:00:00 2001 From: Helin Wang Date: Wed, 4 Oct 2017 13:40:32 -0700 Subject: [PATCH 200/342] fix according to comments --- doc/design/refactor/session.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/doc/design/refactor/session.md b/doc/design/refactor/session.md index 9a7451ece5..1d9a26683c 100644 --- a/doc/design/refactor/session.md +++ b/doc/design/refactor/session.md @@ -86,10 +86,10 @@ Evaluates the target Operations or Variables in `targets`. OP's input as well: ```python - a = pd.constant(1.0, name="a") - b = pd.constant(2.0) + a = pd.constant(2.0, name="a") + b = pd.variable(name="b") c = pd.mul(a,b) - sess.eval(targets=c, feed_dict={"a":3.0}) # returns 6.0 + sess.eval(targets=c, feed_dict={"b":3.0}) # returns 6.0 ``` ```python @@ -107,14 +107,14 @@ session( ) ``` -Creates a new session. One session owns one scope, so creating +Creates a new session. One session owns one global scope, so creating multiple sessions will create different scopes. - *devices*: a single `string` or a list of `string` of device names, the corresponding devices will be the computation devices for `eval()`. If not specified, all available devices (e.g., all GPUs) will be used. The user doesn't need to specify the CPU device since - it will be always used. + it will be always used. Multiple sessions can use the same device. #### Example From 4558807c48035ee1d279d16975d6e2c607cb35f5 Mon Sep 17 00:00:00 2001 From: Yi Wang Date: Wed, 4 Oct 2017 14:01:34 -0700 Subject: [PATCH 201/342] Use PADDLE_WITH_CUDA instead of PADDLE_WITH_GPU --- paddle/api/Util.cpp | 2 +- paddle/capi/Matrix.cpp | 2 +- paddle/framework/grad_op_builder_test.cc | 2 +- paddle/framework/lod_tensor.h | 4 +-- paddle/framework/op_proto_maker_test.cc | 2 +- paddle/framework/op_registry.h | 2 +- paddle/framework/op_registry_test.cc | 2 +- paddle/framework/operator.cc | 2 +- paddle/framework/tensor_impl.h | 4 +-- paddle/framework/tensor_test.cc | 8 +++--- paddle/function/BlockExpandOp.cpp | 2 +- paddle/function/ContextProjectionOp.cpp | 2 +- paddle/function/CosSimOp.cpp | 2 +- paddle/function/CropOp.cpp | 2 +- paddle/function/CrossMapNormalOp.cpp | 2 +- paddle/function/DepthwiseConvOp.cpp | 2 +- paddle/function/DepthwiseConvOpTest.cpp | 2 +- paddle/function/GemmConvOp.cpp | 2 +- paddle/function/GemmConvOpTest.cpp | 2 +- paddle/function/Im2ColTest.cpp | 2 +- paddle/function/MulOp.cpp | 2 +- paddle/function/PadOp.cpp | 2 +- paddle/function/RowConvOp.cpp | 2 +- paddle/function/SwitchOp.cpp | 2 +- paddle/gserver/layers/BatchNormBaseLayer.cpp | 2 +- .../layers/BatchNormalizationLayer.cpp | 6 ++--- paddle/gserver/layers/PoolLayer.cpp | 4 +-- paddle/gserver/tests/LayerGradUtil.cpp | 2 +- paddle/gserver/tests/test_BatchNorm.cpp | 2 +- paddle/gserver/tests/test_ConvUnify.cpp | 2 +- paddle/gserver/tests/test_DetectionOutput.cpp | 2 +- paddle/gserver/tests/test_Evaluator.cpp | 2 +- paddle/gserver/tests/test_KmaxSeqScore.cpp | 2 +- paddle/gserver/tests/test_LayerGrad.cpp | 26 +++++++++---------- paddle/gserver/tests/test_NetworkCompare.cpp | 2 +- paddle/gserver/tests/test_PriorBox.cpp | 2 +- .../gserver/tests/test_ProtoDataProvider.cpp | 6 ++--- paddle/gserver/tests/test_PyDataProvider.cpp | 4 +-- .../gserver/tests/test_SelectiveFCLayer.cpp | 8 +++--- .../gserver/tests/test_SeqSliceLayerGrad.cpp | 2 +- paddle/gserver/tests/test_WarpCTCLayer.cpp | 2 +- paddle/math/Matrix.cpp | 6 ++--- paddle/math/SparseMatrix.cpp | 2 +- paddle/math/Vector.cpp | 6 ++--- paddle/math/tests/test_Allocator.cpp | 4 +-- paddle/math/tests/test_BaseMatrix.cpp | 2 +- paddle/math/tests/test_CpuGpuVector.cpp | 2 +- paddle/math/tests/test_ExecViaCpu.cpp | 2 +- paddle/math/tests/test_GpuProfiler.cpp | 2 +- paddle/math/tests/test_Matrix.cpp | 2 +- paddle/math/tests/test_SparseMatrix.cpp | 6 ++--- paddle/math/tests/test_TrainingAlgorithm.cpp | 2 +- paddle/math/tests/test_batchTranspose.cpp | 2 +- paddle/math/tests/test_matrixCompare.cpp | 2 +- paddle/math/tests/test_perturbation.cpp | 2 +- .../math/tests/test_sparseMatrixCompare.cpp | 2 +- paddle/memory/detail/buddy_allocator.cc | 2 +- paddle/memory/detail/system_allocator.cc | 2 +- paddle/memory/detail/system_allocator.h | 2 +- paddle/memory/detail/system_allocator_test.cc | 2 +- paddle/memory/memcpy.cc | 2 +- paddle/memory/memcpy.h | 2 +- paddle/memory/memory.cc | 2 +- paddle/memory/memory_test.cc | 2 +- paddle/operators/detail/strided_memcpy.h | 2 +- paddle/operators/math/im2col_test.cc | 4 +-- paddle/operators/math/math_function_test.cc | 2 +- paddle/operators/strided_memcpy_test.cc | 4 +-- paddle/platform/device_context.cc | 2 +- paddle/platform/device_context.h | 4 +-- paddle/platform/enforce.h | 4 +-- paddle/platform/enforce_test.cc | 2 +- paddle/platform/gpu_info.h | 2 +- paddle/platform/variant.h | 2 +- paddle/pserver/test/SocketTest.cpp | 2 +- paddle/pserver/test/test_ProtoServer.cpp | 2 +- paddle/pybind/pybind.cc | 12 ++++----- paddle/pybind/tensor_py.h | 2 +- paddle/string/to_string_test.cc | 2 +- paddle/trainer/MergeModel.cpp | 2 +- paddle/trainer/tests/test_Compare.cpp | 2 +- paddle/trainer/tests/test_CompareSparse.cpp | 4 +-- paddle/trainer/tests/test_Trainer.cpp | 4 +-- paddle/trainer/tests/test_TrainerOnePass.cpp | 6 ++--- .../test_recurrent_machine_generation.cpp | 2 +- paddle/utils/Flags.cpp | 2 +- paddle/utils/Util.h | 2 +- paddle/utils/Version.h | 2 +- 88 files changed, 134 insertions(+), 134 deletions(-) diff --git a/paddle/api/Util.cpp b/paddle/api/Util.cpp index 7446d892fd..11bd05c09d 100644 --- a/paddle/api/Util.cpp +++ b/paddle/api/Util.cpp @@ -47,7 +47,7 @@ bool isUsingGpu() { return FLAGS_use_gpu; } void setUseGpu(bool useGpu) { FLAGS_use_gpu = useGpu; } bool isGpuVersion() { -#ifndef PADDLE_WITH_GPU +#ifndef PADDLE_WITH_CUDA return false; #else return true; diff --git a/paddle/capi/Matrix.cpp b/paddle/capi/Matrix.cpp index 5b3737a759..4547afaf1d 100644 --- a/paddle/capi/Matrix.cpp +++ b/paddle/capi/Matrix.cpp @@ -46,7 +46,7 @@ paddle_error paddle_matrix_set_row(paddle_matrix mat, if (rowID >= ptr->mat->getHeight()) return kPD_OUT_OF_RANGE; paddle::real* buf = ptr->mat->getRowBuf(rowID); size_t width = ptr->mat->getWidth(); -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA hl_memcpy(buf, rowArray, sizeof(paddle::real) * width); #else std::copy(rowArray, rowArray + width, buf); diff --git a/paddle/framework/grad_op_builder_test.cc b/paddle/framework/grad_op_builder_test.cc index 2dbc2e6620..793780ea44 100644 --- a/paddle/framework/grad_op_builder_test.cc +++ b/paddle/framework/grad_op_builder_test.cc @@ -183,4 +183,4 @@ TEST(GradOpDescBuilder, IOIgnoredInGradient) { {f::GradVarName("in3_1"), f::GradVarName("in3_2")})); delete forw_op; delete grad_op; -} \ No newline at end of file +} diff --git a/paddle/framework/lod_tensor.h b/paddle/framework/lod_tensor.h index b12c95b6b7..4db36ee766 100644 --- a/paddle/framework/lod_tensor.h +++ b/paddle/framework/lod_tensor.h @@ -15,7 +15,7 @@ #pragma once #include -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA #include #include #include @@ -29,7 +29,7 @@ namespace paddle { namespace framework { -#ifndef PADDLE_WITH_GPU +#ifndef PADDLE_WITH_CUDA template using Vector = std::vector; #else diff --git a/paddle/framework/op_proto_maker_test.cc b/paddle/framework/op_proto_maker_test.cc index b01e30f753..988a14cf4d 100644 --- a/paddle/framework/op_proto_maker_test.cc +++ b/paddle/framework/op_proto_maker_test.cc @@ -48,4 +48,4 @@ TEST(ProtoMaker, DuplicatedInOut) { paddle::framework::OpAttrChecker op_checker; auto proto_maker = TestInOutProtoMaker(&op_proto, &op_checker); ASSERT_THROW(proto_maker.Validate(), paddle::platform::EnforceNotMet); -} \ No newline at end of file +} diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h index aca6579f36..958cf581f5 100644 --- a/paddle/framework/op_registry.h +++ b/paddle/framework/op_registry.h @@ -211,7 +211,7 @@ class OpKernelRegistrar : public Registrar { // TODO(fengjiayi): The following macros // seems ugly, do we have better method? -#ifndef PADDLE_WITH_GPU +#ifndef PADDLE_WITH_CUDA #define USE_OP_KERNEL(op_type) USE_OP_DEVICE_KERNEL(op_type, CPU) #else #define USE_OP_KERNEL(op_type) \ diff --git a/paddle/framework/op_registry_test.cc b/paddle/framework/op_registry_test.cc index f89f40b444..b860fe6cac 100644 --- a/paddle/framework/op_registry_test.cc +++ b/paddle/framework/op_registry_test.cc @@ -183,4 +183,4 @@ class CosineOpComplete : public paddle::framework::CosineOp { TEST(OperatorRegistrar, Test) { using namespace paddle::framework; OperatorRegistrar reg("cos"); -} \ No newline at end of file +} diff --git a/paddle/framework/operator.cc b/paddle/framework/operator.cc index 21c1c6f9e6..2ca838f838 100644 --- a/paddle/framework/operator.cc +++ b/paddle/framework/operator.cc @@ -25,7 +25,7 @@ Eigen::DefaultDevice& ExecutionContext::GetEigenDevice< return *device_context_.GetEigenDevice(); } -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA template <> Eigen::GpuDevice& ExecutionContext::GetEigenDevice() const { diff --git a/paddle/framework/tensor_impl.h b/paddle/framework/tensor_impl.h index 1cde1f74b8..379eac94f9 100644 --- a/paddle/framework/tensor_impl.h +++ b/paddle/framework/tensor_impl.h @@ -65,7 +65,7 @@ inline T* Tensor::mutable_data(platform::Place place) { holder_.reset(new PlaceholderImpl( boost::get(place), size)); } else if (platform::is_gpu_place(place)) { -#ifndef PADDLE_WITH_GPU +#ifndef PADDLE_WITH_CUDA PADDLE_THROW("'GPUPlace' is not supported in CPU only device."); } #else @@ -103,7 +103,7 @@ inline void Tensor::CopyFrom(const Tensor& src, memory::Copy(boost::get(dst_place), dst_ptr, boost::get(src_place), src_ptr, size); } -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA else if (platform::is_gpu_place(src_place) && platform::is_cpu_place(dst_place)) { memory::Copy(boost::get(dst_place), dst_ptr, diff --git a/paddle/framework/tensor_test.cc b/paddle/framework/tensor_test.cc index 86c6945ab5..58cf0fc3cb 100644 --- a/paddle/framework/tensor_test.cc +++ b/paddle/framework/tensor_test.cc @@ -74,7 +74,7 @@ TEST(Tensor, MutableData) { EXPECT_EQ(p1, p2); } -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA { Tensor src_tensor; float* p1 = nullptr; @@ -126,7 +126,7 @@ TEST(Tensor, ShareDataWith) { ASSERT_EQ(src_tensor.data(), dst_tensor.data()); } -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA { Tensor src_tensor; Tensor dst_tensor; @@ -163,7 +163,7 @@ TEST(Tensor, Slice) { EXPECT_EQ(src_data_address + 3 * 4 * 1 * sizeof(int), slice_data_address); } -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA { Tensor src_tensor; src_tensor.mutable_data(make_ddim({6, 9}), GPUPlace()); @@ -218,7 +218,7 @@ TEST(Tensor, CopyFrom) { EXPECT_EQ(dst_ptr[i], slice_ptr[i]); } } -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA { Tensor src_tensor; Tensor gpu_tensor; diff --git a/paddle/function/BlockExpandOp.cpp b/paddle/function/BlockExpandOp.cpp index ad78f5f584..bd0fe119ce 100644 --- a/paddle/function/BlockExpandOp.cpp +++ b/paddle/function/BlockExpandOp.cpp @@ -194,7 +194,7 @@ public: REGISTER_TYPED_FUNC(BlockExpand, CPU, BlockExpandForward); REGISTER_TYPED_FUNC(BlockExpandGrad, CPU, BlockExpandBackward); -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA REGISTER_TYPED_FUNC(BlockExpand, GPU, BlockExpandForward); REGISTER_TYPED_FUNC(BlockExpandGrad, GPU, BlockExpandBackward); #endif diff --git a/paddle/function/ContextProjectionOp.cpp b/paddle/function/ContextProjectionOp.cpp index ab18c39df8..23916c0f4b 100644 --- a/paddle/function/ContextProjectionOp.cpp +++ b/paddle/function/ContextProjectionOp.cpp @@ -395,7 +395,7 @@ REGISTER_TYPED_FUNC(ContextProjectionForward, REGISTER_TYPED_FUNC(ContextProjectionBackward, CPU, ContextProjectionBackwardFunc); -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA REGISTER_TYPED_FUNC(ContextProjectionForward, GPU, ContextProjectionForwardFunc); diff --git a/paddle/function/CosSimOp.cpp b/paddle/function/CosSimOp.cpp index 4418f144d3..2e5c281f37 100644 --- a/paddle/function/CosSimOp.cpp +++ b/paddle/function/CosSimOp.cpp @@ -233,7 +233,7 @@ private: REGISTER_TYPED_FUNC(CosSimForward, CPU, CosSimForwardFunc); REGISTER_TYPED_FUNC(CosSimBackward, CPU, CosSimBackwardFunc); -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA REGISTER_TYPED_FUNC(CosSimForward, GPU, CosSimForwardFunc); REGISTER_TYPED_FUNC(CosSimBackward, GPU, CosSimBackwardFunc); #endif diff --git a/paddle/function/CropOp.cpp b/paddle/function/CropOp.cpp index 39504cc2c1..46f98f12c1 100644 --- a/paddle/function/CropOp.cpp +++ b/paddle/function/CropOp.cpp @@ -169,7 +169,7 @@ private: REGISTER_TYPED_FUNC(Crop, CPU, CropFunc); REGISTER_TYPED_FUNC(CropGrad, CPU, CropGradFunc); -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA REGISTER_TYPED_FUNC(Crop, GPU, CropFunc); REGISTER_TYPED_FUNC(CropGrad, GPU, CropGradFunc); #endif diff --git a/paddle/function/CrossMapNormalOp.cpp b/paddle/function/CrossMapNormalOp.cpp index 1cf0918bed..9e88669d37 100644 --- a/paddle/function/CrossMapNormalOp.cpp +++ b/paddle/function/CrossMapNormalOp.cpp @@ -336,7 +336,7 @@ private: REGISTER_TYPED_FUNC(CrossMapNormal, CPU, CrossMapNormalFunc); REGISTER_TYPED_FUNC(CrossMapNormalGrad, CPU, CrossMapNormalGradFunc); -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA REGISTER_TYPED_FUNC(CrossMapNormal, GPU, CrossMapNormalFunc); REGISTER_TYPED_FUNC(CrossMapNormalGrad, GPU, CrossMapNormalGradFunc); #endif diff --git a/paddle/function/DepthwiseConvOp.cpp b/paddle/function/DepthwiseConvOp.cpp index 7656ab3d0a..9863e3ae1d 100644 --- a/paddle/function/DepthwiseConvOp.cpp +++ b/paddle/function/DepthwiseConvOp.cpp @@ -292,7 +292,7 @@ REGISTER_TYPED_FUNC(DepthwiseConvGradInput, REGISTER_TYPED_FUNC(DepthwiseConvGradFilter, CPU, DepthwiseConvGradFilterFunction); -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA REGISTER_TYPED_FUNC(DepthwiseConv, GPU, DepthwiseConvFunction); REGISTER_TYPED_FUNC(DepthwiseConvGradInput, GPU, diff --git a/paddle/function/DepthwiseConvOpTest.cpp b/paddle/function/DepthwiseConvOpTest.cpp index 39033ecb2b..b1a90da7db 100644 --- a/paddle/function/DepthwiseConvOpTest.cpp +++ b/paddle/function/DepthwiseConvOpTest.cpp @@ -17,7 +17,7 @@ limitations under the License. */ namespace paddle { -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA TEST(DepthwiseConv, Forward) { DepthwiseConvolution( "GemmConv-CPU", "DepthwiseConv-GPU", forward); diff --git a/paddle/function/GemmConvOp.cpp b/paddle/function/GemmConvOp.cpp index 68e08c1480..bdb56ddac3 100644 --- a/paddle/function/GemmConvOp.cpp +++ b/paddle/function/GemmConvOp.cpp @@ -340,7 +340,7 @@ public: REGISTER_TYPED_FUNC(GemmConv, CPU, GemmConvFunction); REGISTER_TYPED_FUNC(GemmConvGradInput, CPU, GemmConvGradInputFunction); REGISTER_TYPED_FUNC(GemmConvGradFilter, CPU, GemmConvGradFilterFunction); -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA REGISTER_TYPED_FUNC(GemmConv, GPU, GemmConvFunction); REGISTER_TYPED_FUNC(GemmConvGradInput, GPU, GemmConvGradInputFunction); REGISTER_TYPED_FUNC(GemmConvGradFilter, GPU, GemmConvGradFilterFunction); diff --git a/paddle/function/GemmConvOpTest.cpp b/paddle/function/GemmConvOpTest.cpp index bd1cf3c6a4..b5b5e1f35b 100644 --- a/paddle/function/GemmConvOpTest.cpp +++ b/paddle/function/GemmConvOpTest.cpp @@ -24,7 +24,7 @@ TEST(GemmConv, NaiveConv) { "NaiveConv-CPU", "GemmConv-CPU", forward); } -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA TEST(GemmConv, Forward) { Convolution( "GemmConv-CPU", "GemmConv-GPU", forward); diff --git a/paddle/function/Im2ColTest.cpp b/paddle/function/Im2ColTest.cpp index 55325e94b5..a0a01a5fc7 100644 --- a/paddle/function/Im2ColTest.cpp +++ b/paddle/function/Im2ColTest.cpp @@ -116,7 +116,7 @@ void TestIm2ColFunctor() { TEST(Im2ColFunctor, CPU) { TestIm2ColFunctor(); } -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA TEST(Im2ColFunctor, GPU) { TestIm2ColFunctor(); } diff --git a/paddle/function/MulOp.cpp b/paddle/function/MulOp.cpp index 655026320c..704a8c4132 100644 --- a/paddle/function/MulOp.cpp +++ b/paddle/function/MulOp.cpp @@ -341,7 +341,7 @@ private: }; REGISTER_TYPED_FUNC(MulOp, CPU, MulFunc); -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA REGISTER_TYPED_FUNC(MulOp, GPU, MulFunc); #endif } // namespace paddle diff --git a/paddle/function/PadOp.cpp b/paddle/function/PadOp.cpp index 24c9bf4e72..eed2f2e308 100644 --- a/paddle/function/PadOp.cpp +++ b/paddle/function/PadOp.cpp @@ -207,7 +207,7 @@ private: REGISTER_TYPED_FUNC(Pad, CPU, PadFunc); REGISTER_TYPED_FUNC(PadGrad, CPU, PadGradFunc); -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA REGISTER_TYPED_FUNC(Pad, GPU, PadFunc); REGISTER_TYPED_FUNC(PadGrad, GPU, PadGradFunc); #endif diff --git a/paddle/function/RowConvOp.cpp b/paddle/function/RowConvOp.cpp index 09e702f71a..7c802d6627 100644 --- a/paddle/function/RowConvOp.cpp +++ b/paddle/function/RowConvOp.cpp @@ -217,7 +217,7 @@ public: REGISTER_TYPED_FUNC(RowConv, CPU, RowConvFunc); REGISTER_TYPED_FUNC(RowConvGrad, CPU, RowConvGradFunc); -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA REGISTER_TYPED_FUNC(RowConv, GPU, RowConvFunc); REGISTER_TYPED_FUNC(RowConvGrad, GPU, RowConvGradFunc); #endif diff --git a/paddle/function/SwitchOp.cpp b/paddle/function/SwitchOp.cpp index db839b5b76..597723a2dd 100644 --- a/paddle/function/SwitchOp.cpp +++ b/paddle/function/SwitchOp.cpp @@ -132,7 +132,7 @@ public: REGISTER_TYPED_FUNC(NCHW2NHWC, CPU, NCHW2NHWCFunc); REGISTER_TYPED_FUNC(NHWC2NCHW, CPU, NHWC2NCHWFunc); -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA REGISTER_TYPED_FUNC(NCHW2NHWC, GPU, NCHW2NHWCFunc); REGISTER_TYPED_FUNC(NHWC2NCHW, GPU, NHWC2NCHWFunc); #endif diff --git a/paddle/gserver/layers/BatchNormBaseLayer.cpp b/paddle/gserver/layers/BatchNormBaseLayer.cpp index 55f52816ab..bc7d1c83a4 100644 --- a/paddle/gserver/layers/BatchNormBaseLayer.cpp +++ b/paddle/gserver/layers/BatchNormBaseLayer.cpp @@ -16,7 +16,7 @@ limitations under the License. */ #include "BatchNormalizationLayer.h" #include "Layer.h" #include "paddle/utils/Stat.h" -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA #include "CudnnBatchNormLayer.h" #endif diff --git a/paddle/gserver/layers/BatchNormalizationLayer.cpp b/paddle/gserver/layers/BatchNormalizationLayer.cpp index 33cf24431d..dacff25e59 100644 --- a/paddle/gserver/layers/BatchNormalizationLayer.cpp +++ b/paddle/gserver/layers/BatchNormalizationLayer.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/utils/Stat.h" -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA #include "hl_batch_transpose.h" #endif #include "BatchNormalizationLayer.h" @@ -90,7 +90,7 @@ void BatchNormalizationLayer::expandMat(const MatrixPtr& in, MatrixPtr& out) { size_t batchSize = in->getHeight(); CHECK_EQ(out->getHeight(), batchSize * imgPixels_); if (useGpu_) { -#ifndef PADDLE_WITH_GPU +#ifndef PADDLE_WITH_CUDA LOG(FATAL) << "paddle is compiled only for cpu"; #else batchTranspose( @@ -127,7 +127,7 @@ void BatchNormalizationLayer::shrinkMat(const MatrixPtr& in, MatrixPtr& out) { } CHECK_EQ(in->getHeight(), static_cast(batchSize * imgPixels_)); if (useGpu_) { -#ifndef PADDLE_WITH_GPU +#ifndef PADDLE_WITH_CUDA LOG(FATAL) << "paddle is compiled only for cpu"; #else batchTranspose( diff --git a/paddle/gserver/layers/PoolLayer.cpp b/paddle/gserver/layers/PoolLayer.cpp index 43ab4e4d47..7b932d5a76 100644 --- a/paddle/gserver/layers/PoolLayer.cpp +++ b/paddle/gserver/layers/PoolLayer.cpp @@ -15,7 +15,7 @@ limitations under the License. */ #include "PoolLayer.h" #include "PoolProjectionLayer.h" #include "paddle/utils/Logging.h" -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA #include "CudnnPoolLayer.h" #endif namespace paddle { @@ -53,7 +53,7 @@ Layer* PoolLayer::create(const LayerConfig& config) { const std::string& pool = config.inputs(0).pool_conf().pool_type(); if (pool == "max-projection" || pool == "avg-projection") { return new PoolProjectionLayer(config); -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA } else if (CudnnPoolLayer::typeCheck(pool)) { return new CudnnPoolLayer(config); #endif diff --git a/paddle/gserver/tests/LayerGradUtil.cpp b/paddle/gserver/tests/LayerGradUtil.cpp index 59df057a80..cd957c7c0b 100644 --- a/paddle/gserver/tests/LayerGradUtil.cpp +++ b/paddle/gserver/tests/LayerGradUtil.cpp @@ -674,7 +674,7 @@ void testLayerGradKernel(TestConfig testConf, bool useGpu, bool useWeight, float epsilon) { -#ifndef PADDLE_WITH_GPU +#ifndef PADDLE_WITH_CUDA if (useGpu) return; #endif FLAGS_use_gpu = useGpu; diff --git a/paddle/gserver/tests/test_BatchNorm.cpp b/paddle/gserver/tests/test_BatchNorm.cpp index c1c85f8fac..050fde9d0a 100644 --- a/paddle/gserver/tests/test_BatchNorm.cpp +++ b/paddle/gserver/tests/test_BatchNorm.cpp @@ -119,7 +119,7 @@ TEST(Layer, batchNorm) { CHECK_EQ(static_cast(convLayer->getOutputValue()->getWidth()), 576); } -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA void batchNormInference(int n, int c, int h, int w) { MatrixPtr input = std::make_shared(n, c * h * w); MatrixPtr cudnnOut = std::make_shared(n, c * h * w); diff --git a/paddle/gserver/tests/test_ConvUnify.cpp b/paddle/gserver/tests/test_ConvUnify.cpp index 16556469cb..ffcc47e2a8 100644 --- a/paddle/gserver/tests/test_ConvUnify.cpp +++ b/paddle/gserver/tests/test_ConvUnify.cpp @@ -117,7 +117,7 @@ MatrixPtr doOneConvTest(size_t imgSize, } TEST(Layer, convParaUnified) { -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA MatrixPtr input, resultCpu, resultGpu; /// TEST1 for conv /// diff --git a/paddle/gserver/tests/test_DetectionOutput.cpp b/paddle/gserver/tests/test_DetectionOutput.cpp index 1a83f48fae..dc39c97a87 100644 --- a/paddle/gserver/tests/test_DetectionOutput.cpp +++ b/paddle/gserver/tests/test_DetectionOutput.cpp @@ -150,7 +150,7 @@ TEST(Layer, detectionOutputLayerFwd) { useGpu, result2); -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA // GPU case 1. useGpu = true; inputLoc = Matrix::create(1, 16, false, useGpu); diff --git a/paddle/gserver/tests/test_Evaluator.cpp b/paddle/gserver/tests/test_Evaluator.cpp index 42bb570572..62a131171f 100644 --- a/paddle/gserver/tests/test_Evaluator.cpp +++ b/paddle/gserver/tests/test_Evaluator.cpp @@ -51,7 +51,7 @@ void testEvaluator(TestConfig testConf, string testEvaluatorName, size_t batchSize, bool useGpu) { -#ifndef PADDLE_WITH_GPU +#ifndef PADDLE_WITH_CUDA if (useGpu) return; #endif FLAGS_use_gpu = useGpu; diff --git a/paddle/gserver/tests/test_KmaxSeqScore.cpp b/paddle/gserver/tests/test_KmaxSeqScore.cpp index 1594de8502..6386259882 100644 --- a/paddle/gserver/tests/test_KmaxSeqScore.cpp +++ b/paddle/gserver/tests/test_KmaxSeqScore.cpp @@ -97,7 +97,7 @@ TEST(Layer, kmaxSeqScoreLayer) { Matrix::create(subSeqStartPosition.back(), 1, false, false); std::vector mode = {false}; -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA mode.push_back(true); #endif diff --git a/paddle/gserver/tests/test_LayerGrad.cpp b/paddle/gserver/tests/test_LayerGrad.cpp index e887dee5f9..90a3352898 100644 --- a/paddle/gserver/tests/test_LayerGrad.cpp +++ b/paddle/gserver/tests/test_LayerGrad.cpp @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA #include #endif #include @@ -258,7 +258,7 @@ void testProjectionConv(size_t groups, bool isDeconv) { true); } -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA TEST(Projection, conv) { /// test ConvProjection testProjectionConv(1, false); @@ -422,7 +422,7 @@ TEST(Layer, depthwiseConvLayer) { // 'depthwise_conv' is a sepecial case of 'exconv' whose // groups size equals to the input channels size. testDepthwiseConvLayer("exconv", /* useGpu= */ false); -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA testDepthwiseConvLayer("exconv", /* useGpu= */ true); #endif } @@ -480,7 +480,7 @@ void testConvLayer(const string& type, bool trans, bool useGpu) { TEST(Layer, convLayer) { testConvLayer("exconv", /* trans= */ false, /* useGpu= */ false); -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA testConvLayer("exconv", /* trans= */ false, /* useGpu= */ true); testConvLayer("cudnn_conv", /* trans= */ false, /* useGpu= */ true); #endif @@ -525,7 +525,7 @@ TEST(Layer, convTransLayer) { for (auto useGpu : {false, true}) { testConvTransLayer("exconvt", /* trans= */ false, /* useGpu= */ useGpu); } -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA testConvTransLayer("cudnn_convt", /* trans= */ false, /* useGpu= */ true); #endif } @@ -638,7 +638,7 @@ TEST(Layer, SelectiveFullyConnectedLayer) { /* trans= */ false, /* useGup= */ false, false); -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA testLayerGrad(config, "selective_fc", 100, @@ -1210,7 +1210,7 @@ void testPoolLayer(const string& poolType, bool trans, bool useGpu) { testLayerGrad(config, "pool", 100, trans, useGpu); } -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA void testPoolLayer2(const string& poolType, bool trans, bool useGpu) { TestConfig config; config.inputDefs.push_back({INPUT_DATA, "layer_0", 3200, 0}); @@ -1236,7 +1236,7 @@ TEST(Layer, PoolLayer) { testPoolLayer("avg-projection", /* trans= */ false, /* useGpu= */ false); testPoolLayer("max-projection", /* trans= */ false, /* useGpu= */ false); -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA testPoolLayer("avg-projection", /* trans= */ false, /* useGpu= */ true); testPoolLayer("max-projection", /* trans= */ false, /* useGpu= */ true); testPoolLayer("cudnn-max-pool", /* trans= */ false, /* useGpu= */ true); @@ -1309,7 +1309,7 @@ void testPool3DLayer(const string& poolType, bool trans, bool useGpu) { TEST(Layer, Pool3DLayer) { testPool3DLayer("avg", /* trans= */ false, /* useGpu= */ false); testPool3DLayer("max", /* trans= */ false, /* useGpu= */ false); -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA testPool3DLayer("avg", /* trans= */ false, /* useGpu= */ true); testPool3DLayer("max", /* trans= */ false, /* useGpu= */ true); #endif @@ -1695,7 +1695,7 @@ void testBatchNormLayer(const string& type, bool trans, bool useGpu) { TEST(Layer, BatchNormalizationLayer) { testBatchNormLayer("batch_norm", false, false); -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA testBatchNormLayer("batch_norm", false, true); if (hl_get_cudnn_lib_version() >= int(4000)) { testBatchNormLayer("cudnn_batch_norm", false, true); @@ -1744,7 +1744,7 @@ void testBatchNorm3DLayer(const string& type, bool trans, bool useGpu) { TEST(Layer, testBatchNorm3DLayer) { testBatchNorm3DLayer("batch_norm", false, false); -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA testBatchNorm3DLayer("batch_norm", false, true); if (hl_get_cudnn_lib_version() >= int(4000)) { testBatchNorm3DLayer("cudnn_batch_norm", false, true); @@ -2262,7 +2262,7 @@ void test3DConvLayer(const string& type, bool trans, bool useGpu) { TEST(Layer, test3DConvLayer) { test3DConvLayer("conv3d", /* trans= */ false, /* useGpu= */ false); -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA test3DConvLayer("conv3d", /* trans= */ false, /* useGpu= */ true); #endif } @@ -2339,7 +2339,7 @@ void test3DDeConvLayer(const string& type, bool trans, bool useGpu) { TEST(Layer, test3DDeConvLayer) { test3DDeConvLayer("deconv3d", /* trans= */ false, /* useGpu= */ false); -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA test3DDeConvLayer("deconv3d", /* trans= */ false, /* useGpu= */ true); #endif } diff --git a/paddle/gserver/tests/test_NetworkCompare.cpp b/paddle/gserver/tests/test_NetworkCompare.cpp index e322fef9a4..2b92211936 100644 --- a/paddle/gserver/tests/test_NetworkCompare.cpp +++ b/paddle/gserver/tests/test_NetworkCompare.cpp @@ -243,7 +243,7 @@ TEST(Compare, concat_slice) { compareNetwork(config_file_a, config_file_b); } -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA TEST(Compare, img_pool) { std::string config_file_a = "./gserver/tests/img_pool_a.conf"; std::string config_file_b = "./gserver/tests/img_pool_b.conf"; diff --git a/paddle/gserver/tests/test_PriorBox.cpp b/paddle/gserver/tests/test_PriorBox.cpp index cbc0fff7b8..8dc5568784 100644 --- a/paddle/gserver/tests/test_PriorBox.cpp +++ b/paddle/gserver/tests/test_PriorBox.cpp @@ -151,7 +151,7 @@ TEST(Layer, priorBoxLayerFwd) { useGpu, result); -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA // reset the input parameters variance[1] = 0.1; variance[3] = 0.2; diff --git a/paddle/gserver/tests/test_ProtoDataProvider.cpp b/paddle/gserver/tests/test_ProtoDataProvider.cpp index 988dbc2513..af6472619d 100644 --- a/paddle/gserver/tests/test_ProtoDataProvider.cpp +++ b/paddle/gserver/tests/test_ProtoDataProvider.cpp @@ -485,7 +485,7 @@ TEST(ProtoDataProvider, test) { // Currently in async mode, useGpu is not supported continue; } -#ifndef PADDLE_WITH_GPU +#ifndef PADDLE_WITH_CUDA if (useGpu) { continue; } @@ -525,7 +525,7 @@ TEST(ProtoDataProvider, constant_slots) { for (int numConstantSlots : {1, 2}) { for (int useGpu : numTwoArray) { for (int dataCompression : numTwoArray) { -#ifndef PADDLE_WITH_GPU +#ifndef PADDLE_WITH_CUDA if (useGpu) { continue; } @@ -708,7 +708,7 @@ TEST(ProtoSequenceDataProvider, test) { // Currently in async mode, useGpu is not supported continue; } -#ifndef PADDLE_WITH_GPU +#ifndef PADDLE_WITH_CUDA if (useGpu) { continue; } diff --git a/paddle/gserver/tests/test_PyDataProvider.cpp b/paddle/gserver/tests/test_PyDataProvider.cpp index f6522febf8..fe54799259 100644 --- a/paddle/gserver/tests/test_PyDataProvider.cpp +++ b/paddle/gserver/tests/test_PyDataProvider.cpp @@ -37,7 +37,7 @@ TEST(PyDataProvider, py_fill_slots) { config.clear_files(); std::string dataFile = "gserver/tests/pyDataProvider/pyDataProviderList"; config.set_files(dataFile); -#ifndef PADDLE_WITH_GPU +#ifndef PADDLE_WITH_CUDA bool useGpu = false; #else bool useGpu = true; @@ -71,7 +71,7 @@ TEST(PyDataProvider, py_fill_nest_slots) { std::string dataFile = "gserver/tests/pyDataProvider/pyDataProviderList"; config.set_files(dataFile); EXPECT_EQ(config.IsInitialized(), true); -#ifndef PADDLE_WITH_GPU +#ifndef PADDLE_WITH_CUDA bool useGpu = false; #else bool useGpu = true; diff --git a/paddle/gserver/tests/test_SelectiveFCLayer.cpp b/paddle/gserver/tests/test_SelectiveFCLayer.cpp index b25d32fb2c..4c87fe1bba 100644 --- a/paddle/gserver/tests/test_SelectiveFCLayer.cpp +++ b/paddle/gserver/tests/test_SelectiveFCLayer.cpp @@ -321,7 +321,7 @@ TEST(Layer, SelectiveFcLayer_train_dense_mul) { "filelist=gserver/tests/SelectiveFcTest/dense_mul_list"; for (auto useGpu : {false, true}) { -#ifndef PADDLE_WITH_GPU +#ifndef PADDLE_WITH_CUDA if (useGpu) { break; } @@ -388,7 +388,7 @@ void testSelectiveFcLayerTrainSparseMul(const LayerConfig& config, outMatSelfc->getWidth(), outMatSelfc->getElementCnt())); cpuOutMatSelfc->copyFrom(*outMatSelfc, HPPL_STREAM_DEFAULT); -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA if (useGpu) { hl_stream_synchronize(HPPL_STREAM_DEFAULT); } @@ -418,7 +418,7 @@ void testSelectiveFcLayerTrainSparseMul(const LayerConfig& config, MatrixPtr cpuOutMatFc( new CpuMatrix(outMatFc->getHeight(), outMatFc->getWidth())); cpuOutMatFc->copyFrom(*outMatFc, HPPL_STREAM_DEFAULT); -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA if (useGpu) { hl_stream_synchronize(HPPL_STREAM_DEFAULT); } @@ -443,7 +443,7 @@ TEST(Layer, SelectiveFcLayer_train_sparse_mul) { selLayerConfig.set_size(fcLayerWidth); testSelectiveFcLayerTrainSparseMul(selLayerConfig, false); -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA testSelectiveFcLayerTrainSparseMul(selLayerConfig, true); #endif } diff --git a/paddle/gserver/tests/test_SeqSliceLayerGrad.cpp b/paddle/gserver/tests/test_SeqSliceLayerGrad.cpp index f28149081b..3366002ca1 100644 --- a/paddle/gserver/tests/test_SeqSliceLayerGrad.cpp +++ b/paddle/gserver/tests/test_SeqSliceLayerGrad.cpp @@ -195,7 +195,7 @@ TEST(Layer, SeqSliceLayer) { vector> ends; std::vector mode = {false}; -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA mode.push_back(true); #endif genSeqInfo(seqStartPos, subSeqStartPos); diff --git a/paddle/gserver/tests/test_WarpCTCLayer.cpp b/paddle/gserver/tests/test_WarpCTCLayer.cpp index ae5b64257f..da82946006 100644 --- a/paddle/gserver/tests/test_WarpCTCLayer.cpp +++ b/paddle/gserver/tests/test_WarpCTCLayer.cpp @@ -199,7 +199,7 @@ TEST(Layer, WarpCTCLayer) { for (auto batchSize : {1, 10, 32}) { for (auto normByTimes : {false, true}) { for (auto useGpu : {false, true}) { -#ifndef PADDLE_WITH_GPU +#ifndef PADDLE_WITH_CUDA if (useGpu) continue; #endif LOG(INFO) << "layerSize=" << layerSize << " batchSize=" << batchSize diff --git a/paddle/math/Matrix.cpp b/paddle/math/Matrix.cpp index de02f9c0d5..c3e34d5309 100644 --- a/paddle/math/Matrix.cpp +++ b/paddle/math/Matrix.cpp @@ -670,7 +670,7 @@ void GpuMatrix::leftMul(Matrix& a, real scaleAB, real scaleT) { } void GpuMatrix::selectRows(Matrix& table, IVector& ids) { -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA CHECK(dynamic_cast(&table)); CHECK(table.useGpu()); CHECK(ids.useGpu()); @@ -694,7 +694,7 @@ void GpuMatrix::selectRows(Matrix& table, IVector& ids) { } void GpuMatrix::addToRows(Matrix& table, IVector& ids) { -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA CHECK(dynamic_cast(&table)); CHECK(table.useGpu()); CHECK(ids.useGpu()); @@ -741,7 +741,7 @@ void GpuMatrix::rowMax(Matrix& max) { } void GpuMatrix::rowMax(IVector& maxIds, Matrix& maxVal) { -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA CHECK(maxIds.useGpu() && maxVal.useGpu()) << "Matrix type are not equal"; size_t numSamples = getHeight(); size_t beam = maxVal.getWidth(); diff --git a/paddle/math/SparseMatrix.cpp b/paddle/math/SparseMatrix.cpp index 1f31082ae8..284b68d590 100644 --- a/paddle/math/SparseMatrix.cpp +++ b/paddle/math/SparseMatrix.cpp @@ -836,7 +836,7 @@ void GpuSparseMatrix::zeroMem() { } void GpuSparseMatrix::rowMax(IVector& maxIds, Matrix& maxVal) { -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA CHECK(maxIds.useGpu() && maxVal.useGpu()) << "Matrix type are not equal"; size_t numSamples = getHeight(); size_t beam = maxVal.getWidth(); diff --git a/paddle/math/Vector.cpp b/paddle/math/Vector.cpp index 54e57b255d..ff72672e3a 100644 --- a/paddle/math/Vector.cpp +++ b/paddle/math/Vector.cpp @@ -172,7 +172,7 @@ void GpuVectorT::isEqualTo(const VectorT& b, const T& value) { template void GpuVectorT::selectFrom(const VectorT& src, const VectorT& ids) { -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA hl_vector_select_from(this->getData(), this->getSize(), src.getData(), @@ -850,7 +850,7 @@ CpuGpuVectorT::CpuGpuVectorT(CpuGpuVectorT& src, size_t size) : sync_(nullptr) { CHECK_LE(offset + size, static_cast(src.getSize())); -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA SyncedFlag* flag = src.getSync(); if (*flag == DATA_AT_CPU) { src.copyToGpu(); // will set synchronous data between CPU and GPU @@ -861,7 +861,7 @@ CpuGpuVectorT::CpuGpuVectorT(CpuGpuVectorT& src, auto cMemHandle = (src.getVector(false))->getMemoryHandle(); cpuVectorT_ = std::make_shared>( size, std::dynamic_pointer_cast(cMemHandle), offset); -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA auto gMemHandle = (src.getVector(true))->getMemoryHandle(); gpuVectorT_ = std::make_shared>( size, std::dynamic_pointer_cast(gMemHandle), offset); diff --git a/paddle/math/tests/test_Allocator.cpp b/paddle/math/tests/test_Allocator.cpp index cf2f66aea1..1fecf659e5 100644 --- a/paddle/math/tests/test_Allocator.cpp +++ b/paddle/math/tests/test_Allocator.cpp @@ -68,7 +68,7 @@ void testPoolAllocator() { TEST(Allocator, Pool) { testPoolAllocator(); -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA testPoolAllocator(); #endif } @@ -92,7 +92,7 @@ TEST(MemoryHandle, Cpu) { EXPECT_EQ(ptr1, ptr2); } -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA TEST(MemoryHandle, Gpu) { int numGpu = hl_get_device_count(); diff --git a/paddle/math/tests/test_BaseMatrix.cpp b/paddle/math/tests/test_BaseMatrix.cpp index 730759f3db..1766257860 100644 --- a/paddle/math/tests/test_BaseMatrix.cpp +++ b/paddle/math/tests/test_BaseMatrix.cpp @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA /** * This test file use autotest::AutoCompare and cmpWithoutArg to compares the * implementation of CPU and GPU member function in diff --git a/paddle/math/tests/test_CpuGpuVector.cpp b/paddle/math/tests/test_CpuGpuVector.cpp index ccb4a902b0..c72f89c824 100644 --- a/paddle/math/tests/test_CpuGpuVector.cpp +++ b/paddle/math/tests/test_CpuGpuVector.cpp @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA #include #include "paddle/math/Vector.h" diff --git a/paddle/math/tests/test_ExecViaCpu.cpp b/paddle/math/tests/test_ExecViaCpu.cpp index 2d439cd060..25e0ba11de 100644 --- a/paddle/math/tests/test_ExecViaCpu.cpp +++ b/paddle/math/tests/test_ExecViaCpu.cpp @@ -94,7 +94,7 @@ void testWrapper(F&& f) { } } -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA TEST(ExecViaCpu, test1) { testWrapper(f); testWrapper(&f); diff --git a/paddle/math/tests/test_GpuProfiler.cpp b/paddle/math/tests/test_GpuProfiler.cpp index 6dab187e3e..9402bd3ec4 100644 --- a/paddle/math/tests/test_GpuProfiler.cpp +++ b/paddle/math/tests/test_GpuProfiler.cpp @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA #include #include "paddle/math/Matrix.h" diff --git a/paddle/math/tests/test_Matrix.cpp b/paddle/math/tests/test_Matrix.cpp index 7a145eae6a..2f99fa3581 100644 --- a/paddle/math/tests/test_Matrix.cpp +++ b/paddle/math/tests/test_Matrix.cpp @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA /** * This test file use autotest::AutoCompare and cmpWithArg to compares the * implementation of CPU and GPU member function in Matrix.cpp. diff --git a/paddle/math/tests/test_SparseMatrix.cpp b/paddle/math/tests/test_SparseMatrix.cpp index 8151dde106..8abbe8d82e 100644 --- a/paddle/math/tests/test_SparseMatrix.cpp +++ b/paddle/math/tests/test_SparseMatrix.cpp @@ -47,7 +47,7 @@ struct MatrixPara { SparseFormat format; }; -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA void test_sparse_matrix_mul(MatrixPara paraA, MatrixPara paraB, MatrixPara paraC) { @@ -452,7 +452,7 @@ TEST(Matrix, SparseMatrixCSRFormatTrimFrom) { matB->trimFrom(*mat); checkSMatrixEqual2(matA, matB); -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA GpuSparseMatrixPtr matC = std::make_shared( height, trimedWidth, height, FLOAT_VALUE, SPARSE_CSR, true); matC->trimFrom(*mat); @@ -546,7 +546,7 @@ TEST(Matrix, SparseMatrixCSCFormatTrimFrom) { matB->trimFrom(*mat); checkSMatrixEqual2(matA, matB); -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA GpuSparseMatrixPtr matC = std::make_shared( height, trimedWidth, height, FLOAT_VALUE, SPARSE_CSC, true); matC->trimFrom(*mat); diff --git a/paddle/math/tests/test_TrainingAlgorithm.cpp b/paddle/math/tests/test_TrainingAlgorithm.cpp index 36ac024007..5ae0aa036f 100644 --- a/paddle/math/tests/test_TrainingAlgorithm.cpp +++ b/paddle/math/tests/test_TrainingAlgorithm.cpp @@ -91,7 +91,7 @@ int VectorCheckErr(const VectorPtr& vector1, const VectorPtr& vector2) { typedef std::function testMatrixFunc; void testCase(testMatrixFunc matrixFunc) { -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA for (auto useGpu : {false, true}) { #else for (auto useGpu : {false}) { diff --git a/paddle/math/tests/test_batchTranspose.cpp b/paddle/math/tests/test_batchTranspose.cpp index 0189e534eb..b70a619764 100644 --- a/paddle/math/tests/test_batchTranspose.cpp +++ b/paddle/math/tests/test_batchTranspose.cpp @@ -17,7 +17,7 @@ limitations under the License. */ using namespace paddle; // NOLINT -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA TEST(MatrixBatchTransTest, test_batch_matrix_transpose) { const int nx = 100; const int ny = 50; diff --git a/paddle/math/tests/test_matrixCompare.cpp b/paddle/math/tests/test_matrixCompare.cpp index 7735877ac8..7e5a1db44a 100644 --- a/paddle/math/tests/test_matrixCompare.cpp +++ b/paddle/math/tests/test_matrixCompare.cpp @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA /// This unittest checks GpuMatrix/CpuMatrix get same result, so disable when /// only cpu version. diff --git a/paddle/math/tests/test_perturbation.cpp b/paddle/math/tests/test_perturbation.cpp index dff18136ae..c7c07c817a 100644 --- a/paddle/math/tests/test_perturbation.cpp +++ b/paddle/math/tests/test_perturbation.cpp @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA #include #include diff --git a/paddle/math/tests/test_sparseMatrixCompare.cpp b/paddle/math/tests/test_sparseMatrixCompare.cpp index e39cc0a2f6..2b2a391b9d 100644 --- a/paddle/math/tests/test_sparseMatrixCompare.cpp +++ b/paddle/math/tests/test_sparseMatrixCompare.cpp @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA /// This unittest checks GpuSparseMatrix/CpuSparseMatrix get same result, // so disable when /// only cpu version. diff --git a/paddle/memory/detail/buddy_allocator.cc b/paddle/memory/detail/buddy_allocator.cc index ed0c3374ff..fdc5ed19dc 100644 --- a/paddle/memory/detail/buddy_allocator.cc +++ b/paddle/memory/detail/buddy_allocator.cc @@ -175,7 +175,7 @@ void* BuddyAllocator::SystemAlloc(size_t size) { } BuddyAllocator::PoolSet::iterator BuddyAllocator::RefillPool() { -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA if (system_allocator_->UseGpu()) { if ((total_used_ + total_free_) == 0) { // Compute the maximum allocation size for the first allocation. diff --git a/paddle/memory/detail/system_allocator.cc b/paddle/memory/detail/system_allocator.cc index 64f8182b5c..6c9a46dd09 100644 --- a/paddle/memory/detail/system_allocator.cc +++ b/paddle/memory/detail/system_allocator.cc @@ -62,7 +62,7 @@ void CPUAllocator::Free(void* p, size_t size, size_t index) { bool CPUAllocator::UseGpu() const { return false; } -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA void* GPUAllocator::Alloc(size_t& index, size_t size) { // CUDA documentation doesn't explain if cudaMalloc returns nullptr diff --git a/paddle/memory/detail/system_allocator.h b/paddle/memory/detail/system_allocator.h index 6b1f40347b..ee9b012f91 100644 --- a/paddle/memory/detail/system_allocator.h +++ b/paddle/memory/detail/system_allocator.h @@ -40,7 +40,7 @@ class CPUAllocator : public SystemAllocator { virtual bool UseGpu() const; }; -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA class GPUAllocator : public SystemAllocator { public: virtual void* Alloc(size_t& index, size_t size); diff --git a/paddle/memory/detail/system_allocator_test.cc b/paddle/memory/detail/system_allocator_test.cc index 57d5443d50..cd563844e7 100644 --- a/paddle/memory/detail/system_allocator_test.cc +++ b/paddle/memory/detail/system_allocator_test.cc @@ -56,7 +56,7 @@ TEST(CPUAllocator, LockMem) { TestAllocator(a, 0); } -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA TEST(GPUAllocator, Alloc) { paddle::memory::detail::GPUAllocator a; TestAllocator(a, 2048); diff --git a/paddle/memory/memcpy.cc b/paddle/memory/memcpy.cc index 184d0f8fa7..790420a8ab 100644 --- a/paddle/memory/memcpy.cc +++ b/paddle/memory/memcpy.cc @@ -26,7 +26,7 @@ void Copy(platform::CPUPlace, void* dst, std::memcpy(dst, src, num); } -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA template <> void Copy(platform::CPUPlace dst_place, void* dst, diff --git a/paddle/memory/memcpy.h b/paddle/memory/memcpy.h index 7142831d43..0bccee58c3 100644 --- a/paddle/memory/memcpy.h +++ b/paddle/memory/memcpy.h @@ -33,7 +33,7 @@ namespace memory { template void Copy(DstPlace, void* dst, SrcPlace, const void* src, size_t num); -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA /** * \brief Copy memory from one place to another place. diff --git a/paddle/memory/memory.cc b/paddle/memory/memory.cc index 6d5a74dafe..355b6218d0 100644 --- a/paddle/memory/memory.cc +++ b/paddle/memory/memory.cc @@ -62,7 +62,7 @@ size_t Used(platform::CPUPlace place) { return GetCPUBuddyAllocator()->Used(); } -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA BuddyAllocator* GetGPUBuddyAllocator(int gpu_id) { using BuddyAllocVec = std::vector; diff --git a/paddle/memory/memory_test.cc b/paddle/memory/memory_test.cc index 7a617f04dc..0d402038a0 100644 --- a/paddle/memory/memory_test.cc +++ b/paddle/memory/memory_test.cc @@ -80,7 +80,7 @@ TEST(BuddyAllocator, CPUMultAlloc) { } } -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA size_t align(size_t size, paddle::platform::GPUPlace place) { size += sizeof(paddle::memory::detail::Metadata); diff --git a/paddle/operators/detail/strided_memcpy.h b/paddle/operators/detail/strided_memcpy.h index 9f05a26322..068c82f399 100644 --- a/paddle/operators/detail/strided_memcpy.h +++ b/paddle/operators/detail/strided_memcpy.h @@ -34,7 +34,7 @@ struct StridedMemcpyFunctor { auto& cpu_place = boost::get(place); memory::Copy(cpu_place, dst, cpu_place, src, sizeof(T) * dst_dim.head); } else { -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA auto& gpu_place = boost::get(place); auto& cuda_ctx = reinterpret_cast(dev_ctx); diff --git a/paddle/operators/math/im2col_test.cc b/paddle/operators/math/im2col_test.cc index 3d040ca2b5..40bdbfe733 100644 --- a/paddle/operators/math/im2col_test.cc +++ b/paddle/operators/math/im2col_test.cc @@ -71,7 +71,7 @@ void testIm2col() { context = new paddle::platform::CPUDeviceContext(paddle::platform::CPUPlace()); } else { -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA context = new paddle::platform::CUDADeviceContext(paddle::platform::GPUPlace()); #else @@ -116,7 +116,7 @@ void testIm2col() { TEST(math, im2col) { testIm2col(); -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA testIm2col(); #endif } diff --git a/paddle/operators/math/math_function_test.cc b/paddle/operators/math/math_function_test.cc index 2252268620..9945ba101d 100644 --- a/paddle/operators/math/math_function_test.cc +++ b/paddle/operators/math/math_function_test.cc @@ -1,7 +1,7 @@ #include "paddle/operators/math/math_function.h" #include "gtest/gtest.h" -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA TEST(math_function, notrans_mul_trans) { paddle::framework::Tensor input1; paddle::framework::Tensor input1_gpu; diff --git a/paddle/operators/strided_memcpy_test.cc b/paddle/operators/strided_memcpy_test.cc index e0dd7b19f1..68f064eaee 100644 --- a/paddle/operators/strided_memcpy_test.cc +++ b/paddle/operators/strided_memcpy_test.cc @@ -72,7 +72,7 @@ TEST(StridedMemcpy, CPUConcat) { } } -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA TEST(StridedMemcpy, GPUCrop) { // clang-format off int src[] = { @@ -157,4 +157,4 @@ TEST(StridedMemcpy, GPUConcat) { #endif } // namespace operators -} // namespace paddle \ No newline at end of file +} // namespace paddle diff --git a/paddle/platform/device_context.cc b/paddle/platform/device_context.cc index 8dcc357a16..a9b6b79903 100644 --- a/paddle/platform/device_context.cc +++ b/paddle/platform/device_context.cc @@ -35,7 +35,7 @@ Eigen::DefaultDevice* CPUDeviceContext::eigen_device() const { Place CPUDeviceContext::GetPlace() const { return CPUPlace(); } -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA template <> Eigen::GpuDevice* diff --git a/paddle/platform/device_context.h b/paddle/platform/device_context.h index c1c4c7f760..ef5f19214d 100644 --- a/paddle/platform/device_context.h +++ b/paddle/platform/device_context.h @@ -14,7 +14,7 @@ limitations under the License. */ #include "paddle/platform/enforce.h" #include "paddle/platform/place.h" -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA #include "paddle/platform/dynload/cublas.h" #include "paddle/platform/dynload/cudnn.h" #include "paddle/platform/gpu_info.h" @@ -61,7 +61,7 @@ class CPUDeviceContext : public DeviceContext { std::unique_ptr eigen_device_; }; -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA template <> struct EigenDeviceConverter { using EigenDeviceType = Eigen::GpuDevice; diff --git a/paddle/platform/enforce.h b/paddle/platform/enforce.h index f9fe521d50..15d8446cd8 100644 --- a/paddle/platform/enforce.h +++ b/paddle/platform/enforce.h @@ -29,7 +29,7 @@ limitations under the License. */ #include // for __cxa_demangle #endif -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA #include "paddle/platform/dynload/cublas.h" #include "paddle/platform/dynload/cudnn.h" @@ -113,7 +113,7 @@ inline typename std::enable_if::type throw_on_error( } } -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA template inline typename std::enable_if::type throw_on_error( diff --git a/paddle/platform/enforce_test.cc b/paddle/platform/enforce_test.cc index 80bdee3d9d..8206a055ea 100644 --- a/paddle/platform/enforce_test.cc +++ b/paddle/platform/enforce_test.cc @@ -213,4 +213,4 @@ TEST(ENFORCE_USER_DEFINED_CLASS, EQ) { TEST(ENFORCE_USER_DEFINED_CLASS, NE) { Dims a{{1, 2, 3, 4}}, b{{5, 6, 7, 8}}; ASSERT_THROW(PADDLE_ENFORCE_EQ(a, b), paddle::platform::EnforceNotMet); -} \ No newline at end of file +} diff --git a/paddle/platform/gpu_info.h b/paddle/platform/gpu_info.h index ac884386dd..e47c9b4a2a 100644 --- a/paddle/platform/gpu_info.h +++ b/paddle/platform/gpu_info.h @@ -14,7 +14,7 @@ limitations under the License. */ #pragma once -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA #include #include diff --git a/paddle/platform/variant.h b/paddle/platform/variant.h index 8145799dfd..619897ca19 100644 --- a/paddle/platform/variant.h +++ b/paddle/platform/variant.h @@ -16,7 +16,7 @@ #include -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA // Because boost's variadic templates has bug on nvcc, boost will disable // variadic template support when GPU enabled on nvcc. diff --git a/paddle/pserver/test/SocketTest.cpp b/paddle/pserver/test/SocketTest.cpp index 96724530f5..b43461d61b 100644 --- a/paddle/pserver/test/SocketTest.cpp +++ b/paddle/pserver/test/SocketTest.cpp @@ -215,7 +215,7 @@ int main(int argc, char** argv) { uint64_t dataSize = FLAGS_dim * sizeof(real); -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA GpuVector gpuParam(FLAGS_dim); GpuVector gpuGrad(FLAGS_dim); #else diff --git a/paddle/pserver/test/test_ProtoServer.cpp b/paddle/pserver/test/test_ProtoServer.cpp index 74ab1f2f77..ad8ffed9c1 100644 --- a/paddle/pserver/test/test_ProtoServer.cpp +++ b/paddle/pserver/test/test_ProtoServer.cpp @@ -99,7 +99,7 @@ TEST(ProtoServer, regular) { } TEST(ProtoServer, extended) { -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA ProtoClient* client; if (FLAGS_rdma_tcp == "rdma") client = new ProtoClient(FLAGS_server_addr, FLAGS_port, F_RDMA); diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc index 761d82fc4d..cff54b1741 100644 --- a/paddle/pybind/pybind.cc +++ b/paddle/pybind/pybind.cc @@ -34,7 +34,7 @@ static size_t UniqueIntegerGenerator() { } bool IsCompileGPU() { -#ifndef PADDLE_WITH_GPU +#ifndef PADDLE_WITH_CUDA return false; #else return true; @@ -78,7 +78,7 @@ PYBIND11_PLUGIN(core) { .def("set", PyCPUTensorSetFromArray) .def("set", PyCPUTensorSetFromArray) .def("set", PyCPUTensorSetFromArray) -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA .def("set", PyCUDATensorSetFromArray) .def("set", PyCUDATensorSetFromArray) .def("set", PyCUDATensorSetFromArray) @@ -96,7 +96,7 @@ PYBIND11_PLUGIN(core) { .def( "__init__", [](LoDTensor &instance, const std::vector> &lod) { -#ifndef PADDLE_WITH_GPU +#ifndef PADDLE_WITH_CUDA new (&instance) LoDTensor(lod); #else LoD new_lod; @@ -107,7 +107,7 @@ PYBIND11_PLUGIN(core) { }) .def("set_lod", [](LoDTensor &self, const std::vector> &lod) { -#ifndef PADDLE_WITH_GPU +#ifndef PADDLE_WITH_CUDA self.set_lod(lod); #else LoD new_lod; @@ -117,7 +117,7 @@ PYBIND11_PLUGIN(core) { #endif }) .def("lod", [](LoDTensor &self) -> std::vector> { -#ifndef PADDLE_WITH_GPU +#ifndef PADDLE_WITH_CUDA return self.lod(); #else auto lod = self.lod(); @@ -203,7 +203,7 @@ All parameter, weight, gradient are variables in Paddle. .def_static("create", [](paddle::platform::GPUPlace& place) -> paddle::platform::DeviceContext* { -#ifndef PADDLE_WITH_GPU +#ifndef PADDLE_WITH_CUDA PADDLE_THROW("GPUPlace is not supported in CPU device."); #else return new paddle::platform::CUDADeviceContext(place); diff --git a/paddle/pybind/tensor_py.h b/paddle/pybind/tensor_py.h index 62e85fa54f..9e73f79cbd 100644 --- a/paddle/pybind/tensor_py.h +++ b/paddle/pybind/tensor_py.h @@ -106,7 +106,7 @@ void PyCPUTensorSetFromArray( std::memcpy(dst, array.data(), sizeof(T) * array.size()); } -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA template void PyCUDATensorSetFromArray( framework::Tensor &self, diff --git a/paddle/string/to_string_test.cc b/paddle/string/to_string_test.cc index 542c771a98..971484dd0c 100644 --- a/paddle/string/to_string_test.cc +++ b/paddle/string/to_string_test.cc @@ -36,4 +36,4 @@ TEST(to_string, user_defined) { using namespace paddle::string; UserDefinedClass instance; ASSERT_EQ(kOutputString, to_string(instance)); -} \ No newline at end of file +} diff --git a/paddle/trainer/MergeModel.cpp b/paddle/trainer/MergeModel.cpp index a37d53bc72..6c52eaf449 100644 --- a/paddle/trainer/MergeModel.cpp +++ b/paddle/trainer/MergeModel.cpp @@ -29,7 +29,7 @@ int main(int argc, char** argv) { initMain(argc, argv); initPython(argc, argv); string confFile = TrainerConfigHelper::getConfigNameFromPath(FLAGS_model_dir); -#ifndef PADDLE_WITH_GPU +#ifndef PADDLE_WITH_CUDA FLAGS_use_gpu = false; #endif auto config = std::make_shared(confFile); diff --git a/paddle/trainer/tests/test_Compare.cpp b/paddle/trainer/tests/test_Compare.cpp index b5d29da45a..f3a964acb6 100644 --- a/paddle/trainer/tests/test_Compare.cpp +++ b/paddle/trainer/tests/test_Compare.cpp @@ -146,7 +146,7 @@ void compareGradient(comData& comDataCpu, comData& comDataGpu) { } int main(int argc, char** argv) { -#ifndef PADDLE_WITH_GPU +#ifndef PADDLE_WITH_CUDA exit(0); #endif paddle::initMain(argc, argv); diff --git a/paddle/trainer/tests/test_CompareSparse.cpp b/paddle/trainer/tests/test_CompareSparse.cpp index 4da9ce20fb..5f1834bd73 100644 --- a/paddle/trainer/tests/test_CompareSparse.cpp +++ b/paddle/trainer/tests/test_CompareSparse.cpp @@ -174,7 +174,7 @@ TEST(compareSparse, multiGradientMachine) { FLAGS_local = local; FLAGS_ports_num_for_sparse = 5; for (bool useGpu : {false, true}) { -#ifndef PADDLE_WITH_GPU +#ifndef PADDLE_WITH_CUDA if (useGpu) continue; #endif FLAGS_parallel_nn = useGpu; @@ -198,7 +198,7 @@ TEST(compareSparse, NeuralNetwork) { FLAGS_local = local; FLAGS_ports_num_for_sparse = 5; for (bool useGpu : {false, true}) { -#ifndef PADDLE_WITH_GPU +#ifndef PADDLE_WITH_CUDA if (useGpu) continue; #endif FLAGS_parallel_nn = useGpu; diff --git a/paddle/trainer/tests/test_Trainer.cpp b/paddle/trainer/tests/test_Trainer.cpp index f69e1aafee..425b3d10a3 100644 --- a/paddle/trainer/tests/test_Trainer.cpp +++ b/paddle/trainer/tests/test_Trainer.cpp @@ -51,7 +51,7 @@ void checkGradientTest(const string& configFile, TEST(checkGradient, cpu) { checkGradientTest(configFile1, false, false); } -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA TEST(checkGradient, gpu) { checkGradientTest(configFile1, true, false); } TEST(checkGradient, multiGpu) { @@ -97,7 +97,7 @@ TEST(checkGradient, hsigmoid) { checkGradientTest(configFile2, false, false); } TEST(checkGradient, chunk) { checkGradientTest(configFile3, false, false); -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA checkGradientTest(configFile3, true, true); #endif } diff --git a/paddle/trainer/tests/test_TrainerOnePass.cpp b/paddle/trainer/tests/test_TrainerOnePass.cpp index 4c4d124fa9..b2a93d4d5e 100644 --- a/paddle/trainer/tests/test_TrainerOnePass.cpp +++ b/paddle/trainer/tests/test_TrainerOnePass.cpp @@ -79,7 +79,7 @@ void trainerOnePassTest(const string& configFile, // 1. test trainer (cpu, gpu). TEST(trainerOnePass, cpu) { trainerOnePassTest(configFile1, false, false); } -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA TEST(trainerOnePass, gpu) { trainerOnePassTest(configFile1, true, false); } TEST(trainerOnePass, gpu2) { trainerOnePassTest(configFile1, true, false, 2); } @@ -94,7 +94,7 @@ TEST(trainerOnePass, parallel) { #endif // 2. test average_window. -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA TEST(average_window, gpu) { trainerOnePassTest(configFile1, true, false, 4, 0.01); } @@ -266,7 +266,7 @@ TEST(checkRemoteUpdater, cpuTrainerOldUpdater) { checkRemoteParameterUpdaterTest(configFile1, false, false, 1, true); } -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA TEST(checkRemoteUpdater, gpuTrainer) { checkRemoteParameterUpdaterTest(configFile1, true, false); } diff --git a/paddle/trainer/tests/test_recurrent_machine_generation.cpp b/paddle/trainer/tests/test_recurrent_machine_generation.cpp index 74b4fed7ed..a8fbe31c2b 100644 --- a/paddle/trainer/tests/test_recurrent_machine_generation.cpp +++ b/paddle/trainer/tests/test_recurrent_machine_generation.cpp @@ -113,7 +113,7 @@ void testGeneration(const string& configFile, #ifndef PADDLE_TYPE_DOUBLE TEST(RecurrentGradientMachine, test_generation) { -#ifndef PADDLE_WITH_GPU +#ifndef PADDLE_WITH_CUDA const auto useGpuConfs = {false}; #else const auto useGpuConfs = {true, false}; diff --git a/paddle/utils/Flags.cpp b/paddle/utils/Flags.cpp index 32155ded35..8f100f02e9 100644 --- a/paddle/utils/Flags.cpp +++ b/paddle/utils/Flags.cpp @@ -14,7 +14,7 @@ limitations under the License. */ #include "Flags.h" -#ifndef PADDLE_WITH_GPU +#ifndef PADDLE_WITH_CUDA DEFINE_bool(use_gpu, false, "Only support CPU training"); #else DEFINE_bool(use_gpu, true, "Whether to use GPU for training"); diff --git a/paddle/utils/Util.h b/paddle/utils/Util.h index 904d0f5061..9579881ea3 100644 --- a/paddle/utils/Util.h +++ b/paddle/utils/Util.h @@ -218,7 +218,7 @@ protected: * *d2* is peer device to enable direct access to by the d1 device. */ inline void enablePeerAccess(int d1, int d2) { -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA if (hl_device_can_access_peer(d1, d2)) { SetDevice dev(d1); hl_device_enable_peer_access(d2); diff --git a/paddle/utils/Version.h b/paddle/utils/Version.h index 611fda83d9..004d62451c 100644 --- a/paddle/utils/Version.h +++ b/paddle/utils/Version.h @@ -48,7 +48,7 @@ void printVersion(std::ostream& os); * @return return true if paddle compiled with GPU */ constexpr bool isWithGpu() { -#ifndef PADDLE_WITH_GPU +#ifndef PADDLE_WITH_CUDA return false; #else return true; From fe10e86dd536cc22f65a07a1900bb8b199a8bd5b Mon Sep 17 00:00:00 2001 From: qijun Date: Wed, 4 Oct 2017 14:05:37 -0700 Subject: [PATCH 202/342] fix gpu build error --- paddle/framework/executor.cc | 19 ++++++++++++++----- paddle/framework/executor.h | 4 ++-- paddle/platform/gpu_info.cc | 2 +- 3 files changed, 17 insertions(+), 8 deletions(-) diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc index 03504952ed..aa36b7438f 100644 --- a/paddle/framework/executor.cc +++ b/paddle/framework/executor.cc @@ -27,12 +27,12 @@ Executor::Executor(const std::vector& places) { device_contexts_.resize(places.size()); for (size_t i = 0; i < places.size(); i++) { if (platform::is_cpu_place(places[i])) { - device_contexts_[i].reset(new platform::CPUDeviceContext( - boost::get(places[i]))); - } else { + device_contexts_[i] = new platform::CPUDeviceContext( + boost::get(places[i])); + } else if (platform::is_gpu_place(places[i])) { #ifndef PADDLE_ONLY_CPU - device_contexts_[i].reset(new platform::CUDADeviceContext( - boost::get(places[i]))); + device_contexts_[i] = new platform::CUDADeviceContext( + boost::get(places[i])); #else PADDLE_THROW("'GPUPlace' is not supported in CPU only device."); #endif @@ -40,6 +40,14 @@ Executor::Executor(const std::vector& places) { } } +Executor::~Executor() { + for (auto& device_context : device_contexts_) { + if (device_context) { + delete device_context; + } + } +} + void Executor::Run(const ProgramDesc& pdesc, Scope* scope, std::vector* outputs) { // TODO(tonyyang-svail): @@ -59,6 +67,7 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope, for (auto& op_desc : block.ops()) { auto op = paddle::framework::OpRegistry::CreateOp(op_desc); + std::cout << op->DebugString() << std::endl; op->Run(*scope, *device); } diff --git a/paddle/framework/executor.h b/paddle/framework/executor.h index d5c21c59fe..fc53be37c3 100644 --- a/paddle/framework/executor.h +++ b/paddle/framework/executor.h @@ -25,11 +25,11 @@ namespace framework { class Executor { public: explicit Executor(const std::vector& places); - ~Executor() {} + ~Executor(); void Run(const ProgramDesc&, Scope*, std::vector*); private: - std::vector> device_contexts_; + std::vector device_contexts_; }; } // namespace framework diff --git a/paddle/platform/gpu_info.cc b/paddle/platform/gpu_info.cc index f487014871..0464797f31 100644 --- a/paddle/platform/gpu_info.cc +++ b/paddle/platform/gpu_info.cc @@ -43,7 +43,7 @@ int GetCurrentDeviceId() { } void SetDeviceId(int id) { - PADDLE_ENFORCE(id < GetDeviceCount(), "id must less than GPU count") + PADDLE_ENFORCE(id < GetDeviceCount(), "id must less than GPU count"); PADDLE_ENFORCE(cudaSetDevice(id), "cudaSetDevice failed in paddle::platform::SetDeviceId"); } From 2b204f048bf6599bdb9ba799769404dc5fd206a8 Mon Sep 17 00:00:00 2001 From: Yi Wang Date: Wed, 4 Oct 2017 14:09:19 -0700 Subject: [PATCH 203/342] Rename platform::GetDeviceCount into platform::GetCUDADeviceCount --- paddle/memory/memory.cc | 2 +- paddle/platform/device_context_test.cc | 4 ++-- paddle/platform/gpu_info.cc | 4 ++-- paddle/platform/gpu_info.h | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/paddle/memory/memory.cc b/paddle/memory/memory.cc index 6d5a74dafe..f816962890 100644 --- a/paddle/memory/memory.cc +++ b/paddle/memory/memory.cc @@ -77,7 +77,7 @@ BuddyAllocator* GetGPUBuddyAllocator(int gpu_id) { // GPU buddy allocator initialization std::call_once(gpu_allocator_flag, [&]() { - int gpu_num = platform::GetDeviceCount(); + int gpu_num = platform::GetCUDADeviceCount(); allocators.reserve(gpu_num); for (int gpu = 0; gpu < gpu_num; gpu++) { platform::SetDeviceId(gpu); diff --git a/paddle/platform/device_context_test.cc b/paddle/platform/device_context_test.cc index f4b00c57de..8bf5174c4a 100644 --- a/paddle/platform/device_context_test.cc +++ b/paddle/platform/device_context_test.cc @@ -20,7 +20,7 @@ TEST(Device, Init) { using paddle::platform::CUDADeviceContext; using paddle::platform::GPUPlace; - int count = paddle::platform::GetDeviceCount(); + int count = paddle::platform::GetCUDADeviceCount(); for (int i = 0; i < count; i++) { DeviceContext* device_context = new CUDADeviceContext(GPUPlace(i)); Eigen::GpuDevice* gpu_device = @@ -34,7 +34,7 @@ TEST(Device, CUDADeviceContext) { using paddle::platform::CUDADeviceContext; using paddle::platform::GPUPlace; - int count = paddle::platform::GetDeviceCount(); + int count = paddle::platform::GetCUDADeviceCount(); for (int i = 0; i < count; i++) { CUDADeviceContext* device_context = new CUDADeviceContext(GPUPlace(i)); Eigen::GpuDevice* gpu_device = device_context->eigen_device(); diff --git a/paddle/platform/gpu_info.cc b/paddle/platform/gpu_info.cc index be381a4e26..70ad611d5d 100644 --- a/paddle/platform/gpu_info.cc +++ b/paddle/platform/gpu_info.cc @@ -26,11 +26,11 @@ DEFINE_double(fraction_of_gpu_memory_to_use, 0.95, namespace paddle { namespace platform { -int GetDeviceCount() { +int GetCUDADeviceCount() { int count; PADDLE_ENFORCE( cudaGetDeviceCount(&count), - "cudaGetDeviceCount failed in paddle::platform::GetDeviceCount"); + "cudaGetDeviceCount failed in paddle::platform::GetCUDADeviceCount"); return count; } diff --git a/paddle/platform/gpu_info.h b/paddle/platform/gpu_info.h index ac884386dd..276783bbe4 100644 --- a/paddle/platform/gpu_info.h +++ b/paddle/platform/gpu_info.h @@ -28,7 +28,7 @@ const std::string kEnvFractionGpuMemoryToUse = "PADDLE_FRACTION_GPU_MEMORY_TO_USE"; //! Get the total number of GPU devices in system. -int GetDeviceCount(); +int GetCUDADeviceCount(); //! Get the current GPU device id in system. int GetCurrentDeviceId(); From 5917e09cde86401005261914964eca4ef54de193 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Wed, 4 Oct 2017 15:26:19 -0700 Subject: [PATCH 204/342] tmp work --- paddle/framework/op_registry.h | 4 +++ paddle/framework/operator.h | 2 +- paddle/framework/shape_inference_map.cc | 9 ++++-- paddle/framework/shape_inference_map.h | 8 ----- paddle/pybind/pybind.cc | 9 ++++++ .../v2/framework/tests/test_infer_shape.py | 29 +++++++++++++++++++ 6 files changed, 49 insertions(+), 12 deletions(-) create mode 100644 python/paddle/v2/framework/tests/test_infer_shape.py diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h index f04b6c503a..8138ba117a 100644 --- a/paddle/framework/op_registry.h +++ b/paddle/framework/op_registry.h @@ -55,6 +55,10 @@ class OpRegistry { const std::string& grad_op_type) { OperatorRegistrar reg(op_type.c_str()); reg.info.grad_op_type_ = grad_op_type; + auto proto = reg.info.Proto(); + std::cout << "====== " << op_type << " =======" << std::endl; + std::cout << proto.SerializeAsString() << std::endl; + std::cout << "=============" << std::endl; ShapeInferenceMap::Instance().CreateOpWithKernel(reg.info, op_type); // register gradient op if (!grad_op_type.empty()) { diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h index 99f721cc67..458404af6d 100644 --- a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -598,9 +598,9 @@ class OperatorWithKernel : public OperatorBase { }); } - protected: virtual void InferShape(InferShapeContextBase* ctx) const = 0; + protected: // indicate kernel DataType by input data. Defaultly all input data must be // same. virtual DataType IndicateDataType(const ExecutionContext& ctx) const { diff --git a/paddle/framework/shape_inference_map.cc b/paddle/framework/shape_inference_map.cc index 1a27037221..bd2b867984 100644 --- a/paddle/framework/shape_inference_map.cc +++ b/paddle/framework/shape_inference_map.cc @@ -37,10 +37,13 @@ ShapeInferenceMap& ShapeInferenceMap::Instance() { void ShapeInferenceMap::CreateOpWithKernel(const OpInfo& op_info, const std::string& op_type) { - const VariableNameMap inputs = - ConvertOpProtoVarsToVarNameMap(op_info.Proto().inputs()); + auto proto = op_info.Proto(); + std::cout << "========= " << op_type << " in======" << std::endl; + std::cout << proto.SerializeAsString() << std::endl; + std::cout << "========= " << op_type << " out======" << std::endl; + const VariableNameMap inputs = ConvertOpProtoVarsToVarNameMap(proto.inputs()); const VariableNameMap outputs = - ConvertOpProtoVarsToVarNameMap(op_info.Proto().outputs()); + ConvertOpProtoVarsToVarNameMap(proto.outputs()); auto* op = op_info.Creator()(op_type, inputs, outputs, {}); auto* op_with_kernel = dynamic_cast(op); auto it = op_shape_inference_map_.find(op_type); diff --git a/paddle/framework/shape_inference_map.h b/paddle/framework/shape_inference_map.h index fb12669026..6c7304f6c0 100644 --- a/paddle/framework/shape_inference_map.h +++ b/paddle/framework/shape_inference_map.h @@ -27,14 +27,6 @@ class ShapeInferenceMap { public: static ShapeInferenceMap& Instance(); - const OperatorBase* GetOperator(const std::string& op_type) { - auto it = op_shape_inference_map_.find(op_type); - if (it == op_shape_inference_map_.end()) { - PADDLE_THROW("op with kernel for Op(%s) is not registered", op_type); - } - return it->second; - } - void CreateOpWithKernel(const OpInfo& op_info, const std::string& op_type); OperatorWithKernel* GetOpWithKernel(const std::string& op_type) { diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc index f4121e9d71..e11bcc0e0f 100644 --- a/paddle/pybind/pybind.cc +++ b/paddle/pybind/pybind.cc @@ -223,6 +223,15 @@ All parameter, weight, gradient are variables in Paddle. desc.InitializationErrorString()); return OpRegistry::CreateOp(desc); }) + .def("infer_shape", + [](const OpDescBind &op_desc, BlockDescBind &block) { + auto &shape_inference_map = ShapeInferenceMap::Instance(); + auto *op = shape_inference_map.GetOpWithKernel(op_desc.Type()); + if (op != nullptr) { + auto ctx = CompileTimeInferShapeContext(op_desc, block); + op->InferShape(&ctx); + } + }) .def("backward", [](const OperatorBase &forwardOp, const std::unordered_set &no_grad_vars) { diff --git a/python/paddle/v2/framework/tests/test_infer_shape.py b/python/paddle/v2/framework/tests/test_infer_shape.py new file mode 100644 index 0000000000..56d3a90123 --- /dev/null +++ b/python/paddle/v2/framework/tests/test_infer_shape.py @@ -0,0 +1,29 @@ +import unittest +import paddle.v2.framework.core as core +from paddle.v2.framework.op import Operator + + +class TestInferShape(unittest.TestCase): + def test_sum_op(self): + prog = core.ProgramDesc.__create_program_desc__() + self.assertIsNotNone(prog) + block = prog.block(0) + self.assertIsNotNone(block) + + # prepare input/output + x1 = block.new_var("x1") + x1.set_shape([10, 20]) + x2 = block.new_var("x2") + x2.set_shape([10, 20]) + + out = block.new_var("out") + + # prepare the operator + sum_op_desc = block.append_op() + sum_op_desc.set_type("sum") + sum_op_desc.set_input("X", ["x1", "x2"]) + sum_op_desc.set_output("Out", ["out"]) + + sum_op = Operator("sum", X=["x1", "x2"], Out="out") + sum_op.infer_shape(sum_op_desc, block) + print(out.shape()) From ee7b3ed09e699da191fe238ab409f32318637380 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Wed, 4 Oct 2017 15:33:44 -0700 Subject: [PATCH 205/342] use EigenScalar to get learning_rate from GPU device --- paddle/operators/sgd_op.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/paddle/operators/sgd_op.h b/paddle/operators/sgd_op.h index d72d333a9a..954fd48272 100644 --- a/paddle/operators/sgd_op.h +++ b/paddle/operators/sgd_op.h @@ -23,6 +23,9 @@ using Tensor = framework::Tensor; template using EigenVector = framework::EigenVector; +template +using EigenScalar = framework::EigenScalar; template class SGDOpKernel : public framework::OpKernel { @@ -31,13 +34,14 @@ class SGDOpKernel : public framework::OpKernel { auto param = ctx.Input("Param"); auto grad = ctx.Input("Grad"); auto param_out = ctx.Output("ParamOut"); - float lr = ctx.Input("LearningRate")->data()[0]; + auto learning_rate = ctx.Input("LearningRate"); param_out->mutable_data(ctx.GetPlace()); auto p = EigenVector::Flatten(*param); auto g = EigenVector::Flatten(*grad); auto o = EigenVector::Flatten(*param_out); + auto lr = EigenScalar::From(*learning_rate); auto place = ctx.GetEigenDevice(); o.device(place) = p - lr * g; From c4effc7d2d1225f15b1ec51ab54753d3ef693de7 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 4 Oct 2017 15:34:28 -0700 Subject: [PATCH 206/342] Fix CI Test --- paddle/framework/backward.cc | 3 +- paddle/framework/op_info.h | 7 +++- paddle/framework/op_registry.h | 34 +++++++++---------- paddle/operators/minus_op.cc | 33 +++++++++++------- paddle/operators/pad_op.cc | 1 + .../softmax_with_cross_entropy_op.cc | 9 +++-- 6 files changed, 52 insertions(+), 35 deletions(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index 40390d4150..9193a1593e 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -33,7 +33,7 @@ static inline std::unique_ptr CreateGradOp( op_desc.SetType(op.Type()); op_desc.SetAttrMap(op.Attrs()); auto& info = OpInfoMap::Instance().Get(op.Type()); - auto grad_descs = info.grad_op_maker_(op_desc); + auto grad_descs = info.GradOpMaker()(op_desc); std::vector> grad_ops; grad_ops.reserve(grad_descs.size()); std::transform(grad_descs.begin(), grad_descs.end(), @@ -49,6 +49,7 @@ static inline std::unique_ptr CreateGradOp( for (auto& grad_op : grad_ops) { net_op->AppendOp(std::move(grad_op)); } + net_op->CompleteAddOp(); return std::unique_ptr(net_op); } } diff --git a/paddle/framework/op_info.h b/paddle/framework/op_info.h index 6f87e055b4..968f587b46 100644 --- a/paddle/framework/op_info.h +++ b/paddle/framework/op_info.h @@ -30,7 +30,6 @@ namespace framework { struct OpInfo { OpCreator creator_; - std::string grad_op_type_; GradOpMakerFN grad_op_maker_; OpProto* proto_{nullptr}; OpAttrChecker* checker_{nullptr}; @@ -51,6 +50,12 @@ struct OpInfo { "Operator Creator has not been registered"); return creator_; } + + const GradOpMakerFN& GradOpMaker() const { + PADDLE_ENFORCE_NOT_NULL(grad_op_maker_, + "Operator GradOpMaker has not been registered."); + return grad_op_maker_; + } }; class OpInfoMap { diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h index da112fa488..a4f0144ce8 100644 --- a/paddle/framework/op_registry.h +++ b/paddle/framework/op_registry.h @@ -137,23 +137,21 @@ class OpKernelRegistrar : public Registrar { __test_global_namespace_##uniq_name##__>::value, \ msg) -#define VA_ARGS(...) , ##__VA_ARGS__ - -#define REGISTER_OPERATOR(op_type, op_class, ...) \ - STATIC_ASSERT_GLOBAL_NAMESPACE( \ - __reg_op__##op_type, \ - "REGISTER_OPERATOR must be called in global namespace"); \ - class _OpClass_##op_type##_ : public op_class { \ - public: \ - DEFINE_OP_CLONE_METHOD(_OpClass_##op_type##_); \ - DEFINE_OP_CONSTRUCTOR(_OpClass_##op_type##_, op_class); \ - }; \ - static ::paddle::framework::OperatorRegistrar<_OpClass_##op_type##_ VA_ARGS( \ - __VA_ARGS__)> \ - __op_registrar_##op_type##__(#op_type); \ - int TouchOpRegistrar_##op_type() { \ - __op_registrar_##op_type##__.Touch(); \ - return 0; \ +#define REGISTER_OPERATOR(op_type, op_class, ...) \ + STATIC_ASSERT_GLOBAL_NAMESPACE( \ + __reg_op__##op_type, \ + "REGISTER_OPERATOR must be called in global namespace"); \ + class _OpClass_##op_type##_ : public op_class { \ + public: \ + DEFINE_OP_CLONE_METHOD(_OpClass_##op_type##_); \ + DEFINE_OP_CONSTRUCTOR(_OpClass_##op_type##_, op_class); \ + }; \ + static ::paddle::framework::OperatorRegistrar<_OpClass_##op_type##_, \ + ##__VA_ARGS__> \ + __op_registrar_##op_type##__(#op_type); \ + int TouchOpRegistrar_##op_type() { \ + __op_registrar_##op_type##__.Touch(); \ + return 0; \ } /** @@ -170,7 +168,7 @@ class OpKernelRegistrar : public Registrar { virtual std::string GradOpType() const { return #grad_op_type; } \ }; \ REGISTER_OPERATOR(op_type, op_class, _GradOpDescMaker_##grad_op_type##_, \ - op_maker_class) + op_maker_class); #define REGISTER_OP_WITHOUT_GRADIENT(op_type, op_class, op_maker_class) \ REGISTER_OPERATOR(op_type, op_class, op_maker_class) diff --git a/paddle/operators/minus_op.cc b/paddle/operators/minus_op.cc index aced8636b9..7057dcbd6e 100644 --- a/paddle/operators/minus_op.cc +++ b/paddle/operators/minus_op.cc @@ -72,19 +72,26 @@ class MinusGradMaker : public framework::GradOpDescMakerBase { std::vector> operator()() const override { std::vector> ops; - ops.resize(2); - - ops[0].reset(new framework::OpDescBind()); - ops[0]->SetType("scale"); - ops[0]->SetInput("X", OutputGrad("Out")); - ops[0]->SetOutput("Out", InputGrad("X")); - ops[0]->SetAttr("scale", 1.0f); - - ops[1].reset(new framework::OpDescBind()); - ops[1]->SetType("scale"); - ops[1]->SetInput("X", OutputGrad("Out")); - ops[1]->SetOutput("Out", InputGrad("Y")); - ops[1]->SetAttr("scale", -1.0f); + auto x_g = InputGrad("X"); + if (!x_g.empty()) { + auto *x_g_op = new framework::OpDescBind(); + x_g_op->SetType("scale"); + x_g_op->SetInput("X", OutputGrad("Out")); + x_g_op->SetOutput("Out", x_g); + x_g_op->SetAttr("scale", 1.0f); + ops.emplace_back(x_g_op); + } + + auto y_g = InputGrad("Y"); + if (!y_g.empty()) { + auto *y_g_op = new framework::OpDescBind(); + y_g_op->SetType("scale"); + y_g_op->SetInput("X", OutputGrad("Out")); + y_g_op->SetOutput("Out", y_g); + y_g_op->SetAttr("scale", -1.0f); + ops.emplace_back(y_g_op); + } + return ops; } }; diff --git a/paddle/operators/pad_op.cc b/paddle/operators/pad_op.cc index 9445917739..15aa05f266 100644 --- a/paddle/operators/pad_op.cc +++ b/paddle/operators/pad_op.cc @@ -121,6 +121,7 @@ class PadOpGradMaker : public framework::SingleGradOpDescMaker { bind->SetInput(framework::GradVarName("Out"), OutputGrad("Out")); bind->SetOutput(framework::GradVarName("X"), InputGrad("X")); bind->SetAttrMap(Attrs()); + bind->SetType("pad_grad"); return std::unique_ptr(bind); } }; diff --git a/paddle/operators/softmax_with_cross_entropy_op.cc b/paddle/operators/softmax_with_cross_entropy_op.cc index bc9868874d..70fe429f59 100644 --- a/paddle/operators/softmax_with_cross_entropy_op.cc +++ b/paddle/operators/softmax_with_cross_entropy_op.cc @@ -14,6 +14,12 @@ #include "paddle/operators/softmax_with_cross_entropy_op.h" #include +#include + +#define DBG_LINE() \ + do { \ + std::cerr << "Run at " << __LINE__ << std::endl; \ + } while (false) namespace paddle { namespace operators { @@ -187,8 +193,7 @@ class SoftmaxGradMaker : public framework::SingleGradOpDescMaker { namespace ops = paddle::operators; REGISTER_OPERATOR(softmax_with_cross_entropy, ops::SoftmaxWithCrossEntropyOp, - ops::SoftmaxWithCrossEntropyOpMaker, - ops::SoftmaxWithCrossEntropyOpMaker); + ops::SoftmaxWithCrossEntropyOpMaker, ops::SoftmaxGradMaker); REGISTER_OPERATOR(softmax_with_cross_entropy_grad, ops::SoftmaxWithCrossEntropyOpGrad); REGISTER_OP_CPU_KERNEL(softmax_with_cross_entropy, From a270dbb778091d983010298b7972e05bcd6fe22e Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Wed, 4 Oct 2017 15:48:41 -0700 Subject: [PATCH 207/342] Add support for rnn_op --- paddle/framework/backward.cc | 53 +++++++++++++++++++++++++++-------- paddle/framework/block_desc.h | 9 ++++-- 2 files changed, 48 insertions(+), 14 deletions(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index c4ede7d2fb..d9a42be5a2 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -20,6 +20,7 @@ #include "paddle/framework/block_desc.h" #include "paddle/framework/op_registry.h" +#include "paddle/framework/program_desc.h" #include "paddle/operators/net_op.h" #include "paddle/operators/recurrent_op.h" @@ -254,7 +255,7 @@ static bool AllGradInSet(const std::vector& names, return true; } -std::vector> MakeGradOpDescs( +std::vector> MakeOpGrad( const std::unique_ptr& op_desc, std::unordered_set& no_grad_vars) { std::vector> grad_op_descs; @@ -295,20 +296,35 @@ std::vector> MakeGradOpDescs( for (auto& p : pending_fill_zeros_ops) { grad_op_descs.push_back(std::move(p)); } - - // TODO(fengjiayi): RNN op return grad_op_descs; } -void AppendBackwardOpDescs(BlockDescBind& block_desc, - std::unordered_set& no_grad_vars) { +std::vector> MakeBlockBackward( + ProgramDescBind& program_desc, int block_idx, + std::unordered_set& no_grad_vars) { + BlockDescBind* cur_block = program_desc.Block(block_idx); + std::deque>& op_descs = cur_block->ops_; std::unordered_map> dup_out_ops; size_t grad_desc_idx = 0; - std::deque>& block_op_descs = block_desc.ops_; std::vector> backward_descs; - for (auto it = block_op_descs.rbegin(); it != block_op_descs.rend(); ++it) { + for (auto it = op_descs.rbegin(); it != op_descs.rend(); ++it) { std::vector> op_grads = - MakeGradOpDescs(*it, no_grad_vars); + MakeOpGrad(*it, no_grad_vars); + + if ((*it)->Type() == "recurrent") { + PADDLE_ENFORCE_EQ( + op_grads.size(), size_t(1), + "rnn_op's gradient process should contain only one op."); + int step_block_idx = (*it)->GetBlockAttr("stop_block"); + auto backward_block_op_descs = + MakeBlockBackward(program_desc, step_block_idx, no_grad_vars); + BlockDescBind* backward_block = program_desc.AppendBlock(*cur_block); + for (auto& ptr : backward_block_op_descs) { + backward_block->ops_.push_back(std::move(ptr)); + } + op_grads[0]->SetBlockAttr("step_block", *backward_block); + } + for (const auto& desc : op_grads) { for (const std::string& out_name : desc->OutputArgumentNames()) { dup_out_ops[out_name].emplace_back(grad_desc_idx); @@ -345,11 +361,24 @@ void AppendBackwardOpDescs(BlockDescBind& block_desc, backward_descs.insert(backward_descs.begin() + p.first + 1, std::move(p.second)); } - // Append backward_descs to BlockDescBind::ops_ - for (std::unique_ptr& ptr : backward_descs) { - block_op_descs.push_back(std::move(ptr)); + return backward_descs; +} + +void AppendBackward(ProgramDescBind& program_desc, + const std::unordered_set& no_grad_vars) { + std::unordered_set no_grad_var_names; + no_grad_var_names.reserve(no_grad_vars.size() + 1); + no_grad_var_names.insert(std::string(kEmptyVarName) + kGradVarSuffix); + for (auto& name : no_grad_vars) { + no_grad_var_names.insert(GradVarName(name)); + } + const int root_block_idx = 0; + auto backward_op_descs = + MakeBlockBackward(program_desc, root_block_idx, no_grad_var_names); + auto& forw_op_descs = program_desc.Block(root_block_idx)->ops_; + for (auto& ptr : backward_op_descs) { + forw_op_descs.push_back(std::move(ptr)); } - return; } } // namespace framework diff --git a/paddle/framework/block_desc.h b/paddle/framework/block_desc.h index fd95ef1901..aad1c3fef8 100644 --- a/paddle/framework/block_desc.h +++ b/paddle/framework/block_desc.h @@ -32,8 +32,13 @@ class ProgramDescBind; class BlockDescBind { public: - friend void AppendBackwardOpDescs( - BlockDescBind &block_desc, std::unordered_set &no_grad_vars); + friend std::vector> MakeBlockBackward( + ProgramDescBind &program_desc, int block_idx, + std::unordered_set &no_grad_vars); + + friend void AppendBackward( + ProgramDescBind &program_desc, + const std::unordered_set &no_grad_vars); BlockDescBind(ProgramDescBind *prog, BlockDesc *desc) : prog_(prog), desc_(desc), need_update_(false) {} From 2594a50245ae7bfe91f05d3c162af39b2457fa21 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 4 Oct 2017 15:49:43 -0700 Subject: [PATCH 208/342] Polish code --- paddle/framework/backward.cc | 1 - paddle/framework/backward_test.cc | 12 ------------ paddle/framework/op_info.h | 2 -- paddle/framework/op_registry.cc | 1 - paddle/operators/mean_op.cc | 1 - paddle/operators/softmax_with_cross_entropy_op.cc | 5 ----- 6 files changed, 22 deletions(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index 9193a1593e..3d81dadfc4 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -203,7 +203,6 @@ static std::unique_ptr BackwardRecursive( } } else { std::unique_ptr grad_op(CreateGradOp(forwardOp)); - PADDLE_ENFORCE(grad_op != nullptr); ForEachVarName(grad_op->Inputs(), [&no_grad_names, &net, &grad_op]( const std::string& grad_input) { diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index 830d0427fa..a9b71cd809 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -171,17 +171,6 @@ REGISTER_OP_WITHOUT_GRADIENT(fc, f::FcOp, f::FcOpMaker); REGISTER_OP(many_output_op, f::NOP, f::ManyOutputOpMaker, many_output_op_grad, f::NOP); -// TEST(Backward, simple_op_grad) { -// auto fwd = f::OpRegistry::CreateOp( -// "rowwise_add", {{"X", {"x"}}, {"b", {"b"}}}, {{"Out", {"out"}}}, {}); -// ASSERT_NE(fwd, nullptr); -// auto gop = f::OpRegistry::CreateGradOp(*fwd); -// ASSERT_EQ(1UL, gop->Inputs().size()); -// ASSERT_EQ("rowwise_add_grad", gop->Type()); -// ASSERT_EQ(f::GradVarName("x"), gop->Output(f::GradVarName("X"))); -// ASSERT_EQ(f::GradVarName("b"), gop->Output(f::GradVarName("b"))); -//} - TEST(Backward, simple_op_not_need_grad) { auto fwd = f::OpRegistry::CreateOp( "rowwise_add", {{"X", {"x"}}, {"b", {"b"}}}, {{"Out", {"out"}}}, {}); @@ -390,7 +379,6 @@ TEST(Backward, linear_net_intermediate_variable_has_no_grad) { + 1UL /* external output number*/ + 1UL /* number of gradient of external output*/ + 2U /* internal variable number*/); - EXPECT_EQ(grad_fc.Outputs(all).size(), 2UL /* input number of mul*/ + 2UL /* input number of rowwise_add diff --git a/paddle/framework/op_info.h b/paddle/framework/op_info.h index 968f587b46..231f212fa3 100644 --- a/paddle/framework/op_info.h +++ b/paddle/framework/op_info.h @@ -23,8 +23,6 @@ #include "paddle/framework/type_defs.h" #include "paddle/platform/macros.h" -#include "glog/logging.h" - namespace paddle { namespace framework { diff --git a/paddle/framework/op_registry.cc b/paddle/framework/op_registry.cc index ac6aa8d28e..4dc83ec8fe 100644 --- a/paddle/framework/op_registry.cc +++ b/paddle/framework/op_registry.cc @@ -55,7 +55,6 @@ std::unique_ptr OpRegistry::CreateOp(const OpDesc& op_desc) { } std::unique_ptr OpRegistry::CreateOp(OpDescBind* op_desc) { - op_desc->Sync(); return CreateOp(op_desc->Type(), op_desc->Inputs(), op_desc->Outputs(), op_desc->GetAttrMap()); } diff --git a/paddle/operators/mean_op.cc b/paddle/operators/mean_op.cc index 339c089e87..2332c9546b 100644 --- a/paddle/operators/mean_op.cc +++ b/paddle/operators/mean_op.cc @@ -71,7 +71,6 @@ class MeanGradMaker : public framework::SingleGradOpDescMaker { } // namespace paddle namespace ops = paddle::operators; - REGISTER_OPERATOR(mean, ops::MeanOp, ops::MeanOpMaker, ops::MeanGradMaker); REGISTER_OPERATOR(mean_grad, ops::MeanGradOp); REGISTER_OP_CPU_KERNEL(mean, diff --git a/paddle/operators/softmax_with_cross_entropy_op.cc b/paddle/operators/softmax_with_cross_entropy_op.cc index 70fe429f59..42c1ba6fdf 100644 --- a/paddle/operators/softmax_with_cross_entropy_op.cc +++ b/paddle/operators/softmax_with_cross_entropy_op.cc @@ -16,11 +16,6 @@ #include #include -#define DBG_LINE() \ - do { \ - std::cerr << "Run at " << __LINE__ << std::endl; \ - } while (false) - namespace paddle { namespace operators { From 4147c7f22836fe7ae7b0c6e616adaba0bbfe3b3a Mon Sep 17 00:00:00 2001 From: zchen0211 Date: Wed, 4 Oct 2017 15:52:23 -0700 Subject: [PATCH 209/342] gan design modified --- doc/design/gan_api.md | 82 ++++++++++++++++++++++++++++++++----------- 1 file changed, 62 insertions(+), 20 deletions(-) diff --git a/doc/design/gan_api.md b/doc/design/gan_api.md index eb0bc1c003..b107f2fc00 100644 --- a/doc/design/gan_api.md +++ b/doc/design/gan_api.md @@ -1,20 +1,45 @@ -''' -GAN implementation, just a demo. -''' -```python -# pd for short, should be more concise. -from paddle.v2 as pd -import numpy as np -import logging -``` +# Design for GAN + +GAN (General Adversarial Net) is an important model for unsupervised learning and widely used in many areas. + +It contains several important machine learning concepts, including building and running subgraphs, dependency tracing, different optimizers in one executor and so forth. + +In our GAN design, we wrap it as a user-friendly easily customized python API to design different models. We take the conditional DC-GAN as an example due to its good performance on image generation. + +## The Conditional-GAN might be a class. +This design we adopt the popular open source design in https://github.com/carpedm20/DCGAN-tensorflow and https://github.com/rajathkmp/DCGAN. It contains following data structure: + +### DCGAN(object): +which contains everything required to build a GAN model. It provides following member functions methods as API: + +### __init__(...): +Initialize hyper-parameters (like conv dimension and so forth), and declare model parameters of discriminator and generator as well. + +### generator(z, y=None): +Generate a fake image from input noise z. If the label y is provided, the conditional GAN model will be chosen. +Returns a generated image. + +### discriminator(image): +Given an image, decide if it is from a real source or a fake one. +Returns a 0/1 binary label. + +### build_model(self): +build the whole GAN model, define training loss for both generator and discrimator.


-The original GAN paper. +Borrow this photo from the original DC-GAN paper.

-# Conditional-GAN should be a class. -### Class member function: the initializer. +## Discussion on Engine Functions required to build GAN +- Trace the ternsor and variable dependency in the engine executor. (Very critical, otherwise GAN can'be be trained correctly) +- Different optimizers responsible for optimizing different loss. + +To be more detailed, we introduce our design of DCGAN as following: + +### Class member Function: Initializer +- Set up hyper-parameters, including condtional dimension, noise dimension, batch size and so forth. +- Declare and define all the model variables. All the discriminator parameters are included in the list self.theta_D and all the generator parameters are included in the list self.theta_G. ```python class DCGAN(object): def __init__(self, y_dim=None): @@ -43,11 +68,16 @@ class DCGAN(object): self.theta_G = [self.G_W0, self.G_b0, self.G_W1, self.G_b1, self.G_W2, self.G_b2] ``` -### Class member function: Generator Net +### Class member Function: Generator +- Given a noisy input z, returns a fake image. +- Concatenation, batch-norm, FC operations required; +- Deconv layer required, which is missing now... ```python def generator(self, z, y = None): - - # Generator Net + # input z: the random noise + # input y: input data label (optional) + # output G_im: generated fake images + if not self.y_dim: z = pd.concat(1, [z, y]) @@ -64,11 +94,14 @@ def generator(self, z, y = None): return G_im ``` -### Class member function: Discriminator Net +### Class member function: Discriminator +- Given a noisy input z, returns a fake image. +- Concatenation, Convolution, batch-norm, FC, Leaky-ReLU operations required; ```python def discriminator(self, image): + # input image: either generated images or real ones + # output D_h2: binary logit of the label - # Discriminator Net D_h0 = pd.conv2d(image, self.D_w0, self.D_b0) D_h0_bn = pd.batchnorm(h0) D_h0_relu = pd.lrelu(h0_bn) @@ -82,6 +115,9 @@ def discriminator(self, image): ``` ### Class member function: Build the model +- Define data readers as placeholders to hold the data; +- Build generator and discriminators; +- Define two training losses for discriminator and generator, respectively. ```python def build_model(self): @@ -92,8 +128,8 @@ def build_model(self): self.faked_images = pd.data(pd.float32, [self.batch_size, self.im_size, self.im_size]) self.z = pd.data(tf.float32, [None, self.z_size]) - # if conditional GAN - if self.y_dim: + # step 1: generate images by generator, classify real/fake images with discriminator + if self.y_dim: # if conditional GAN, includes label self.G = self.generator(self.z, self.y) self.D_t = self.discriminator(self.images) # generated fake images @@ -106,6 +142,7 @@ def build_model(self): self.sampled = self.sampler(self.z) self.D_f = self.discriminator(self.images) + # step 2: define the two losses self.d_loss_real = pd.reduce_mean(pd.cross_entropy(self.D_t, np.ones(self.batch_size)) self.d_loss_fake = pd.reduce_mean(pd.cross_entropy(self.D_f, np.zeros(self.batch_size)) self.d_loss = self.d_loss_real + self.d_loss_fake @@ -113,8 +150,13 @@ def build_model(self): self.g_loss = pd.reduce_mean(pd.cross_entropy(self.D_f, np.ones(self.batch_szie)) ``` -# Main function for the demo: +## Main function for the demo: ```python +# pd for short, should be more concise. +from paddle.v2 as pd +import numpy as np +import logging + if __name__ == "__main__": # dcgan From 79c8bb9e7acbe2bc91625e4a2e396994c4fef168 Mon Sep 17 00:00:00 2001 From: zchen0211 Date: Wed, 4 Oct 2017 16:02:07 -0700 Subject: [PATCH 210/342] gan design new version --- doc/design/gan_api.md | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/doc/design/gan_api.md b/doc/design/gan_api.md index b107f2fc00..8521bc8bf2 100644 --- a/doc/design/gan_api.md +++ b/doc/design/gan_api.md @@ -6,6 +6,11 @@ It contains several important machine learning concepts, including building and In our GAN design, we wrap it as a user-friendly easily customized python API to design different models. We take the conditional DC-GAN as an example due to its good performance on image generation. +

+
+Borrow this photo from the original DC-GAN paper. +

+ ## The Conditional-GAN might be a class. This design we adopt the popular open source design in https://github.com/carpedm20/DCGAN-tensorflow and https://github.com/rajathkmp/DCGAN. It contains following data structure: @@ -26,11 +31,6 @@ Returns a 0/1 binary label. ### build_model(self): build the whole GAN model, define training loss for both generator and discrimator. -

-
-Borrow this photo from the original DC-GAN paper. -

- ## Discussion on Engine Functions required to build GAN - Trace the ternsor and variable dependency in the engine executor. (Very critical, otherwise GAN can'be be trained correctly) - Different optimizers responsible for optimizing different loss. @@ -151,6 +151,10 @@ def build_model(self): ``` ## Main function for the demo: +Generally, the user of GAN just need to the following things: +- Define an object as DCGAN class; +- Build the DCGAN model; +- Specify two optimizers for two different losses with respect to different parameters. ```python # pd for short, should be more concise. from paddle.v2 as pd @@ -158,7 +162,6 @@ import numpy as np import logging if __name__ == "__main__": - # dcgan dcgan = DCGAN() dcgan.build_model() @@ -167,8 +170,8 @@ if __name__ == "__main__": data_X, data_y = self.load_mnist() # Two subgraphs required!!! - d_optim = pd.train.Adam(lr = .001, beta= .1).minimize(self.d_loss, ) - g_optim = pd.train.Adam(lr = .001, beta= .1).minimize(self.g_loss) + d_optim = pd.train.Adam(lr = .001, beta= .1).minimize(dcgan.d_loss, dcgan.theta_D) + g_optim = pd.train.Adam(lr = .001, beta= .1).minimize(dcgan.g_loss, dcgan.theta_G) # executor sess = pd.executor() @@ -183,11 +186,11 @@ if __name__ == "__main__": batch_z = np.random.uniform(-1., 1., [batch_size, z_dim]) if batch_id % 2 == 0: - sess.eval(d_optim, + sess.run(d_optim, feed_dict = {dcgan.images: batch_im, dcgan.y: batch_label, dcgan.z: batch_z}) else: - sess.eval(g_optim, + sess.run(g_optim, feed_dict = {dcgan.z: batch_z}) ``` From 3db38fce8ba88bd68a51a5e0232d60eeac7c50d1 Mon Sep 17 00:00:00 2001 From: zchen0211 Date: Wed, 4 Oct 2017 16:14:08 -0700 Subject: [PATCH 211/342] gan design new version --- doc/design/gan_api.md | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/doc/design/gan_api.md b/doc/design/gan_api.md index 8521bc8bf2..e0e3440d49 100644 --- a/doc/design/gan_api.md +++ b/doc/design/gan_api.md @@ -14,21 +14,18 @@ Borrow this photo from the original DC-GAN paper. ## The Conditional-GAN might be a class. This design we adopt the popular open source design in https://github.com/carpedm20/DCGAN-tensorflow and https://github.com/rajathkmp/DCGAN. It contains following data structure: -### DCGAN(object): -which contains everything required to build a GAN model. It provides following member functions methods as API: +- DCGAN(object): which contains everything required to build a GAN model. It provides following member functions methods as API: -### __init__(...): -Initialize hyper-parameters (like conv dimension and so forth), and declare model parameters of discriminator and generator as well. +- __init__(...): Initialize hyper-parameters (like conv dimension and so forth), and declare model parameters of discriminator and generator as well. -### generator(z, y=None): -Generate a fake image from input noise z. If the label y is provided, the conditional GAN model will be chosen. +- generator(z, y=None): Generate a fake image from input noise z. If the label y is provided, the conditional GAN model will be chosen. Returns a generated image. -### discriminator(image): +- discriminator(image): Given an image, decide if it is from a real source or a fake one. Returns a 0/1 binary label. -### build_model(self): +- build_model(self): build the whole GAN model, define training loss for both generator and discrimator. ## Discussion on Engine Functions required to build GAN From ebbbaee0c3d69b26405875216dbc5e798adab4ba Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 4 Oct 2017 16:18:46 -0700 Subject: [PATCH 212/342] Follow comments --- paddle/framework/backward.cc | 4 ++-- paddle/framework/op_registry.cc | 6 +++--- paddle/framework/op_registry.h | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index 3d81dadfc4..01ef385fd8 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -39,9 +39,9 @@ static inline std::unique_ptr CreateGradOp( std::transform(grad_descs.begin(), grad_descs.end(), std::back_inserter(grad_ops), [](const std::unique_ptr& grad_desc) { - return OpRegistry::CreateOp(grad_desc.get()); + return OpRegistry::CreateOp(*grad_desc); }); - PADDLE_ENFORCE_GT(grad_ops.size(), 0); + PADDLE_ENFORCE(!grad_ops.empty()); if (grad_ops.size() == 1) { return std::move(grad_ops[0]); } else { diff --git a/paddle/framework/op_registry.cc b/paddle/framework/op_registry.cc index 4dc83ec8fe..e9d2e55872 100644 --- a/paddle/framework/op_registry.cc +++ b/paddle/framework/op_registry.cc @@ -54,9 +54,9 @@ std::unique_ptr OpRegistry::CreateOp(const OpDesc& op_desc) { return CreateOp(op_desc.type(), inputs, outputs, attrs); } -std::unique_ptr OpRegistry::CreateOp(OpDescBind* op_desc) { - return CreateOp(op_desc->Type(), op_desc->Inputs(), op_desc->Outputs(), - op_desc->GetAttrMap()); +std::unique_ptr OpRegistry::CreateOp(const OpDescBind& op_desc) { + return CreateOp(op_desc.Type(), op_desc.Inputs(), op_desc.Outputs(), + op_desc.GetAttrMap()); } } // namespace framework diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h index a4f0144ce8..4bf521c48d 100644 --- a/paddle/framework/op_registry.h +++ b/paddle/framework/op_registry.h @@ -79,7 +79,7 @@ class OpRegistry { static std::unique_ptr CreateOp(const OpDesc& op_desc); - static std::unique_ptr CreateOp(OpDescBind* op_desc); + static std::unique_ptr CreateOp(const OpDescBind& op_desc); }; template From cc1860c10ec15851a9bb9380f66ea6c8d4d144e2 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 4 Oct 2017 16:40:30 -0700 Subject: [PATCH 213/342] Add persistable in framework.proto --- paddle/framework/framework.proto | 1 + 1 file changed, 1 insertion(+) diff --git a/paddle/framework/framework.proto b/paddle/framework/framework.proto index 951c7afbc1..d696cdf5ad 100644 --- a/paddle/framework/framework.proto +++ b/paddle/framework/framework.proto @@ -106,6 +106,7 @@ message LoDTensorDesc { message VarDesc { required string name = 1; optional LoDTensorDesc lod_tensor = 2; + optional bool persistable = 3 [ default = false ]; } message BlockDesc { From 775c60246b66469e06f01a50c89b7b39594a3b63 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Wed, 4 Oct 2017 16:53:21 -0700 Subject: [PATCH 214/342] remove using in sgd header file --- paddle/operators/sgd_op.h | 27 ++++++++++----------------- 1 file changed, 10 insertions(+), 17 deletions(-) diff --git a/paddle/operators/sgd_op.h b/paddle/operators/sgd_op.h index 954fd48272..b501d244d7 100644 --- a/paddle/operators/sgd_op.h +++ b/paddle/operators/sgd_op.h @@ -19,32 +19,25 @@ limitations under the License. */ namespace paddle { namespace operators { -using Tensor = framework::Tensor; -template -using EigenVector = framework::EigenVector; -template -using EigenScalar = framework::EigenScalar; - template class SGDOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { - auto param = ctx.Input("Param"); - auto grad = ctx.Input("Grad"); - auto param_out = ctx.Output("ParamOut"); - auto learning_rate = ctx.Input("LearningRate"); + auto param = ctx.Input("Param"); + auto grad = ctx.Input("Grad"); + auto param_out = ctx.Output("ParamOut"); + auto learning_rate = ctx.Input("LearningRate"); param_out->mutable_data(ctx.GetPlace()); - auto p = EigenVector::Flatten(*param); - auto g = EigenVector::Flatten(*grad); - auto o = EigenVector::Flatten(*param_out); - auto lr = EigenScalar::From(*learning_rate); + auto p = framework::EigenVector::Flatten(*param); + auto g = framework::EigenVector::Flatten(*grad); + auto o = framework::EigenVector::Flatten(*param_out); + auto lr = framework::EigenVector::From(*learning_rate); auto place = ctx.GetEigenDevice(); - o.device(place) = p - lr * g; + Eigen::DSizes grad_dsize(grad->dims()[0], grad->dims()[1]); + o.device(place) = p - lr.broadcast(grad_dsize) * g; } }; From 8ebc31d9358c919fdd6f50d502f4ee071a91d38e Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Wed, 4 Oct 2017 17:13:02 -0700 Subject: [PATCH 215/342] optimize the dsize --- paddle/operators/sgd_op.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/paddle/operators/sgd_op.h b/paddle/operators/sgd_op.h index b501d244d7..26f4012f25 100644 --- a/paddle/operators/sgd_op.h +++ b/paddle/operators/sgd_op.h @@ -33,10 +33,10 @@ class SGDOpKernel : public framework::OpKernel { auto p = framework::EigenVector::Flatten(*param); auto g = framework::EigenVector::Flatten(*grad); auto o = framework::EigenVector::Flatten(*param_out); - auto lr = framework::EigenVector::From(*learning_rate); + auto lr = framework::EigenVector::Flatten(*learning_rate); auto place = ctx.GetEigenDevice(); - Eigen::DSizes grad_dsize(grad->dims()[0], grad->dims()[1]); + Eigen::DSizes grad_dsize(grad->numel()); o.device(place) = p - lr.broadcast(grad_dsize) * g; } }; From 3014f6a1135e113cb55a6a2cb771d477502a8b00 Mon Sep 17 00:00:00 2001 From: qijun Date: Wed, 4 Oct 2017 17:36:19 -0700 Subject: [PATCH 216/342] correct macro --- paddle/framework/executor.cc | 2 +- paddle/framework/executor_test.cc | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc index aa36b7438f..7c3cac359e 100644 --- a/paddle/framework/executor.cc +++ b/paddle/framework/executor.cc @@ -30,7 +30,7 @@ Executor::Executor(const std::vector& places) { device_contexts_[i] = new platform::CPUDeviceContext( boost::get(places[i])); } else if (platform::is_gpu_place(places[i])) { -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU device_contexts_[i] = new platform::CUDADeviceContext( boost::get(places[i])); #else diff --git a/paddle/framework/executor_test.cc b/paddle/framework/executor_test.cc index f746242a6b..ca7e8ca7d2 100644 --- a/paddle/framework/executor_test.cc +++ b/paddle/framework/executor_test.cc @@ -103,7 +103,7 @@ TEST_F(ExecutorTester, InitCPU) { delete executor; } -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_GPU TEST_F(ExecutorTester, InitGPU) { std::vector places; GPUPlace gpu_place0(0); From 46530f9e666012dd83c8763563f7acae8f5aad30 Mon Sep 17 00:00:00 2001 From: Kavya Srinet Date: Wed, 4 Oct 2017 19:02:22 -0700 Subject: [PATCH 217/342] Added Leaky Relu activation --- paddle/operators/activation_op.cc | 19 ++++++++++++ paddle/operators/activation_op.h | 30 ++++++++++++++++++- .../v2/framework/tests/test_activation_op.py | 17 +++++++++++ 3 files changed, 65 insertions(+), 1 deletion(-) diff --git a/paddle/operators/activation_op.cc b/paddle/operators/activation_op.cc index 7ae4d2f6b6..5f2ecc2673 100644 --- a/paddle/operators/activation_op.cc +++ b/paddle/operators/activation_op.cc @@ -69,6 +69,22 @@ class ReluOpMaker : public framework::OpProtoAndCheckerMaker { } }; +template +class LeakyReluOpMaker : public framework::OpProtoAndCheckerMaker { + public: + LeakyReluOpMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "Input of LeakyRelu operator"); + AddOutput("Y", "Output of LeakyRelu operator"); + AddComment( + "LeakyRelu activation operator, " + "leaky_relu = max(x, alpha * x)"); + AddAttr("alpha", "The small negative slope") + .SetDefault(static_cast(0.02f)); + } +}; + class TanhOpMaker : public framework::OpProtoAndCheckerMaker { public: TanhOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) @@ -240,6 +256,9 @@ REGISTER_OP(softsign, ops::ActivationOp, ops::SoftsignOpMaker, softsign_grad, REGISTER_OP(brelu, ops::ActivationOp, ops::BReluOpMaker, brelu_grad, ops::ActivationOpGrad); +REGISTER_OP(leaky_relu, ops::ActivationOp, ops::LeakyReluOpMaker, + leaky_relu_grad, ops::ActivationOpGrad); + REGISTER_OP(soft_relu, ops::ActivationOp, ops::SoftReluOpMaker, soft_relu_grad, ops::ActivationOpGrad); diff --git a/paddle/operators/activation_op.h b/paddle/operators/activation_op.h index ff35c2d97e..dae66cc77d 100644 --- a/paddle/operators/activation_op.h +++ b/paddle/operators/activation_op.h @@ -309,6 +309,33 @@ struct SoftReluGradFunctor : public BaseActivationFunctor { } }; +template +struct LeakyReluFunctor : public BaseActivationFunctor { + float alpha; + typename BaseActivationFunctor::AttrPair GetAttrs() { + return {{"alpha", &alpha}}; + } + + template + void operator()(Device d, X x, Y y) const { + y.device(d) = x.cwiseMax(alpha * x); + } +}; + +template +struct LeakyReluGradFunctor : public BaseActivationFunctor { + float alpha; + typename BaseActivationFunctor::AttrPair GetAttrs() { + return {{"alpha", &alpha}}; + } + template + void operator()(Device d, X x, Y y, dY dy, dX dx) const { + auto temp1 = alpha * (x < static_cast(0)).template cast().eval(); + auto temp2 = (x >= static_cast(0)).template cast().eval(); + dx.device(d) = dy * (temp1 + temp2).template cast(); + } +}; + template struct PowFunctor : public BaseActivationFunctor { float factor; @@ -379,4 +406,5 @@ struct STanhGradFunctor : public BaseActivationFunctor { __macro(soft_relu, SoftReluFunctor, SoftReluGradFunctor); \ __macro(pow, PowFunctor, PowGradFunctor); \ __macro(stanh, STanhFunctor, STanhGradFunctor); \ - __macro(softsign, SoftsignFunctor, SoftsignGradFunctor) + __macro(softsign, SoftsignFunctor, SoftsignGradFunctor); \ + __macro(leaky_relu, LeakyReluFunctor, LeakyReluGradFunctor) diff --git a/python/paddle/v2/framework/tests/test_activation_op.py b/python/paddle/v2/framework/tests/test_activation_op.py index c44eb84906..ce6dec7748 100644 --- a/python/paddle/v2/framework/tests/test_activation_op.py +++ b/python/paddle/v2/framework/tests/test_activation_op.py @@ -122,6 +122,23 @@ class TestBRelu(OpTest): self.check_grad(['X'], 'Y', max_relative_error=0.02) +class TestLeakyRelu(OpTest): + def setUp(self): + self.op_type = "leaky_relu" + alpha = 0.02 + self.attrs = {'alpha': alpha} + self.inputs = {'X': np.random.uniform(-3, 3, [4, 4]).astype("float32")} + self.outputs = { + 'Y': np.maximum(self.inputs['X'], alpha * self.inputs['X']) + } + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['X'], 'Y', max_relative_error=0.008) + + class TestSoftRelu(OpTest): def setUp(self): self.op_type = "soft_relu" From 47c994be07d608471734d5d14f980d83f2e0a7a6 Mon Sep 17 00:00:00 2001 From: Kavya Srinet Date: Thu, 5 Oct 2017 08:50:51 -0700 Subject: [PATCH 218/342] Updated the reltive error --- python/paddle/v2/framework/tests/test_activation_op.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/paddle/v2/framework/tests/test_activation_op.py b/python/paddle/v2/framework/tests/test_activation_op.py index ce6dec7748..f232996a55 100644 --- a/python/paddle/v2/framework/tests/test_activation_op.py +++ b/python/paddle/v2/framework/tests/test_activation_op.py @@ -136,7 +136,7 @@ class TestLeakyRelu(OpTest): self.check_output() def test_check_grad(self): - self.check_grad(['X'], 'Y', max_relative_error=0.008) + self.check_grad(['X'], 'Y', max_relative_error=0.007) class TestSoftRelu(OpTest): From 623848afa1f0bb3a69c7e49c4fa0f763a252669d Mon Sep 17 00:00:00 2001 From: qijun Date: Thu, 5 Oct 2017 12:11:56 -0700 Subject: [PATCH 219/342] add feed operator --- paddle/framework/scope.cc | 16 ++++++++++ paddle/framework/scope.h | 2 ++ paddle/operators/activation_op.cu | 18 +++++------ paddle/operators/feed_op.cc | 52 +++++++++++++++++++++++++++++++ paddle/operators/feed_op.cu | 18 +++++++++++ paddle/operators/feed_op.h | 40 ++++++++++++++++++++++++ 6 files changed, 137 insertions(+), 9 deletions(-) create mode 100644 paddle/operators/feed_op.cc create mode 100644 paddle/operators/feed_op.cu create mode 100644 paddle/operators/feed_op.h diff --git a/paddle/framework/scope.cc b/paddle/framework/scope.cc index 080b4ac621..b04120abf2 100644 --- a/paddle/framework/scope.cc +++ b/paddle/framework/scope.cc @@ -13,6 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/framework/scope.h" +#include // for unique_ptr +#include // for call_once #include "paddle/string/printf.h" namespace paddle { @@ -62,5 +64,19 @@ void Scope::DropKids() { kids_.clear(); } +std::once_flag feed_variable_flag; + +template +std::unique_ptr make_unique(Args&&... args) { + return std::unique_ptr(new T(std::forward(args)...)); +} + +framework::Scope* GetScope() { + static std::unique_ptr g_scope = + make_unique(); + std::call_once(feed_variable_flag, [&]() { g_scope->NewVar("feed_value"); }); + return g_scope.get(); +} + } // namespace framework } // namespace paddle diff --git a/paddle/framework/scope.h b/paddle/framework/scope.h index 7047f0d55e..96f3ae875b 100644 --- a/paddle/framework/scope.h +++ b/paddle/framework/scope.h @@ -73,5 +73,7 @@ class Scope { DISABLE_COPY_AND_ASSIGN(Scope); }; +framework::Scope* GetScope(); + } // namespace framework } // namespace paddle diff --git a/paddle/operators/activation_op.cu b/paddle/operators/activation_op.cu index 93e9f1c694..44a6aaf9cb 100644 --- a/paddle/operators/activation_op.cu +++ b/paddle/operators/activation_op.cu @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #define EIGEN_USE_GPU #include "paddle/operators/activation_op.h" diff --git a/paddle/operators/feed_op.cc b/paddle/operators/feed_op.cc new file mode 100644 index 0000000000..805c3600be --- /dev/null +++ b/paddle/operators/feed_op.cc @@ -0,0 +1,52 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/feed_op.h" + +namespace paddle { +namespace operators { + +class FeedOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(framework::InferShapeContextBase* ctx) const override { + typedef std::vector FeedInputs; + PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output should be not null."); + int col = ctx->Attrs().Get("col"); + framework::Variable* g_feed_variable = + framework::GetScope()->FindVar("feed_value"); + FeedInputs tensors = g_feed_variable->Get(); + auto in_dim = tensors[col].dims(); + ctx->SetOutputDim("Y", in_dim); + // need to handle LodTensor later + } +}; + +class FeedOpMaker : public framework::OpProtoAndCheckerMaker { + public: + FeedOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddAttr("col", "The col in Global Feed Variable"); + AddOutput("Out", "The output of dropout op."); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP_WITHOUT_GRADIENT(feed, ops::FeedOp, ops::FeedOpMaker); +REGISTER_OP_CPU_KERNEL(feed, ops::FeedKernel); diff --git a/paddle/operators/feed_op.cu b/paddle/operators/feed_op.cu new file mode 100644 index 0000000000..7b6a2ac91e --- /dev/null +++ b/paddle/operators/feed_op.cu @@ -0,0 +1,18 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/feed_op.h" + +namespace ops = paddle::operators; +REGISTER_OP_GPU_KERNEL(feed, ops::FeedKernel); diff --git a/paddle/operators/feed_op.h b/paddle/operators/feed_op.h new file mode 100644 index 0000000000..57781e205f --- /dev/null +++ b/paddle/operators/feed_op.h @@ -0,0 +1,40 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once +#include "paddle/framework/eigen.h" +#include "paddle/framework/op_registry.h" + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; + +template +class FeedKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + typedef std::vector FeedInputs; + Tensor* out = ctx.Output("Out"); + out->mutable_data(ctx.GetPlace()); + framework::Variable* g_feed_variable = + framework::GetScope()->FindVar("feed_value"); + int col = ctx.template Attr("col"); + FeedInputs tensors = g_feed_variable->Get(); + out->CopyFrom(tensors[col], ctx.GetPlace()); + } +}; + +} // namespace operators +} // namespace paddle From 60af56c1b8e60240238d877c093bf9c99706fefe Mon Sep 17 00:00:00 2001 From: Kavya Srinet Date: Wed, 4 Oct 2017 19:02:22 -0700 Subject: [PATCH 220/342] Added Leaky Relu activation --- paddle/operators/activation_op.cc | 19 ++++++++++++ paddle/operators/activation_op.h | 30 ++++++++++++++++++- .../v2/framework/tests/test_activation_op.py | 17 +++++++++++ 3 files changed, 65 insertions(+), 1 deletion(-) diff --git a/paddle/operators/activation_op.cc b/paddle/operators/activation_op.cc index 7ae4d2f6b6..5f2ecc2673 100644 --- a/paddle/operators/activation_op.cc +++ b/paddle/operators/activation_op.cc @@ -69,6 +69,22 @@ class ReluOpMaker : public framework::OpProtoAndCheckerMaker { } }; +template +class LeakyReluOpMaker : public framework::OpProtoAndCheckerMaker { + public: + LeakyReluOpMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "Input of LeakyRelu operator"); + AddOutput("Y", "Output of LeakyRelu operator"); + AddComment( + "LeakyRelu activation operator, " + "leaky_relu = max(x, alpha * x)"); + AddAttr("alpha", "The small negative slope") + .SetDefault(static_cast(0.02f)); + } +}; + class TanhOpMaker : public framework::OpProtoAndCheckerMaker { public: TanhOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) @@ -240,6 +256,9 @@ REGISTER_OP(softsign, ops::ActivationOp, ops::SoftsignOpMaker, softsign_grad, REGISTER_OP(brelu, ops::ActivationOp, ops::BReluOpMaker, brelu_grad, ops::ActivationOpGrad); +REGISTER_OP(leaky_relu, ops::ActivationOp, ops::LeakyReluOpMaker, + leaky_relu_grad, ops::ActivationOpGrad); + REGISTER_OP(soft_relu, ops::ActivationOp, ops::SoftReluOpMaker, soft_relu_grad, ops::ActivationOpGrad); diff --git a/paddle/operators/activation_op.h b/paddle/operators/activation_op.h index ff35c2d97e..dae66cc77d 100644 --- a/paddle/operators/activation_op.h +++ b/paddle/operators/activation_op.h @@ -309,6 +309,33 @@ struct SoftReluGradFunctor : public BaseActivationFunctor { } }; +template +struct LeakyReluFunctor : public BaseActivationFunctor { + float alpha; + typename BaseActivationFunctor::AttrPair GetAttrs() { + return {{"alpha", &alpha}}; + } + + template + void operator()(Device d, X x, Y y) const { + y.device(d) = x.cwiseMax(alpha * x); + } +}; + +template +struct LeakyReluGradFunctor : public BaseActivationFunctor { + float alpha; + typename BaseActivationFunctor::AttrPair GetAttrs() { + return {{"alpha", &alpha}}; + } + template + void operator()(Device d, X x, Y y, dY dy, dX dx) const { + auto temp1 = alpha * (x < static_cast(0)).template cast().eval(); + auto temp2 = (x >= static_cast(0)).template cast().eval(); + dx.device(d) = dy * (temp1 + temp2).template cast(); + } +}; + template struct PowFunctor : public BaseActivationFunctor { float factor; @@ -379,4 +406,5 @@ struct STanhGradFunctor : public BaseActivationFunctor { __macro(soft_relu, SoftReluFunctor, SoftReluGradFunctor); \ __macro(pow, PowFunctor, PowGradFunctor); \ __macro(stanh, STanhFunctor, STanhGradFunctor); \ - __macro(softsign, SoftsignFunctor, SoftsignGradFunctor) + __macro(softsign, SoftsignFunctor, SoftsignGradFunctor); \ + __macro(leaky_relu, LeakyReluFunctor, LeakyReluGradFunctor) diff --git a/python/paddle/v2/framework/tests/test_activation_op.py b/python/paddle/v2/framework/tests/test_activation_op.py index c44eb84906..ce6dec7748 100644 --- a/python/paddle/v2/framework/tests/test_activation_op.py +++ b/python/paddle/v2/framework/tests/test_activation_op.py @@ -122,6 +122,23 @@ class TestBRelu(OpTest): self.check_grad(['X'], 'Y', max_relative_error=0.02) +class TestLeakyRelu(OpTest): + def setUp(self): + self.op_type = "leaky_relu" + alpha = 0.02 + self.attrs = {'alpha': alpha} + self.inputs = {'X': np.random.uniform(-3, 3, [4, 4]).astype("float32")} + self.outputs = { + 'Y': np.maximum(self.inputs['X'], alpha * self.inputs['X']) + } + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['X'], 'Y', max_relative_error=0.008) + + class TestSoftRelu(OpTest): def setUp(self): self.op_type = "soft_relu" From 11070e5f36be24be8da6fa2b70a2dae13212c513 Mon Sep 17 00:00:00 2001 From: Kavya Srinet Date: Thu, 5 Oct 2017 08:50:51 -0700 Subject: [PATCH 221/342] Updated the reltive error --- python/paddle/v2/framework/tests/test_activation_op.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/paddle/v2/framework/tests/test_activation_op.py b/python/paddle/v2/framework/tests/test_activation_op.py index ce6dec7748..f232996a55 100644 --- a/python/paddle/v2/framework/tests/test_activation_op.py +++ b/python/paddle/v2/framework/tests/test_activation_op.py @@ -136,7 +136,7 @@ class TestLeakyRelu(OpTest): self.check_output() def test_check_grad(self): - self.check_grad(['X'], 'Y', max_relative_error=0.008) + self.check_grad(['X'], 'Y', max_relative_error=0.007) class TestSoftRelu(OpTest): From 828c5b3e1dd3c80b955a1c65179ca6d5a27d852d Mon Sep 17 00:00:00 2001 From: Abhinav Arora Date: Thu, 5 Oct 2017 13:07:55 -0700 Subject: [PATCH 222/342] Adding Adadelta optimization operator (#4576) * Adding Adadelta optimization operator * Making inputs and outputs conform to naming convention * Removing type alias from header files * Fixing Adadelta documentation in comments * Addressing code review feedback --- paddle/operators/adadelta_op.cc | 115 ++++++++++++++++++ paddle/operators/adadelta_op.cu | 20 +++ paddle/operators/adadelta_op.h | 69 +++++++++++ .../v2/framework/tests/test_adadelta_op.py | 96 +++++++++++++++ 4 files changed, 300 insertions(+) create mode 100644 paddle/operators/adadelta_op.cc create mode 100644 paddle/operators/adadelta_op.cu create mode 100644 paddle/operators/adadelta_op.h create mode 100644 python/paddle/v2/framework/tests/test_adadelta_op.py diff --git a/paddle/operators/adadelta_op.cc b/paddle/operators/adadelta_op.cc new file mode 100644 index 0000000000..bd8c93b4a1 --- /dev/null +++ b/paddle/operators/adadelta_op.cc @@ -0,0 +1,115 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/adadelta_op.h" + +namespace paddle { +namespace operators { + +class AdadeltaOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(framework::InferShapeContextBase *ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("Param"), + "Input(Param) of AdadeltaOp should not be null."); + PADDLE_ENFORCE(ctx->HasInput("Grad"), + "Input(Grad) of AdadeltaOp should not be null."); + PADDLE_ENFORCE(ctx->HasInput("AvgSquaredGrad"), + "Input(AvgSquaredGrad) of AdadeltaOp should not be null."); + PADDLE_ENFORCE(ctx->HasInput("AvgSquaredUpdate"), + "Input(AvgSquaredUpdate) of AdadeltaOp should not be null."); + + PADDLE_ENFORCE(ctx->HasOutput("ParamOut"), + "Output(ParamOut) of AdadeltaOp should not be null."); + PADDLE_ENFORCE( + ctx->HasOutput("AvgSquaredGradOut"), + "Output(AvgSquaredGradOut) of AdadeltaOp should not be null."); + PADDLE_ENFORCE( + ctx->HasOutput("AvgSquaredUpdateOut"), + "Output(AvgSquaredUpdateOut) of AdadeltaOp should not be null."); + + auto param_dim = ctx->GetInputDim("Param"); + PADDLE_ENFORCE_EQ( + param_dim, ctx->GetInputDim("Grad"), + "param and grad input of AdadeltaOp should have same dimension"); + PADDLE_ENFORCE_EQ(param_dim, ctx->GetInputDim("AvgSquaredGrad"), + "Param and AvgSquaredGrad input of AdadeltaOp " + "should have same dimension"); + PADDLE_ENFORCE_EQ(param_dim, ctx->GetInputDim("AvgSquaredUpdate"), + "Param and AvgSquaredUpdate input of AdadeltaOp " + "should have same dimension"); + + ctx->SetOutputDim("ParamOut", param_dim); + ctx->SetOutputDim("AvgSquaredGradOut", param_dim); + ctx->SetOutputDim("AvgSquaredUpdateOut", param_dim); + } +}; + +class AdadeltaOpMaker : public framework::OpProtoAndCheckerMaker { + public: + AdadeltaOpMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("Param", "(Tensor) Input parameter"); + AddInput("Grad", "(Tensor) Input gradient"); + AddInput("AvgSquaredGrad", + "(Tensor) Input expectation of squared gradient"); + AddInput("AvgSquaredUpdate", + "(Tensor) Input expectation of squared parameter updates"); + + AddOutput("ParamOut", "(Tensor) Output parameter"); + AddOutput("AvgSquaredGradOut", + "(Tensor) Output expectation of squared gradient"); + AddOutput("AvgSquaredUpdateOut", + "(Tensor) Output expectation of squared parameter updates"); + + AddAttr("rho", + "(float, default 0.95) Exponential decay rate " + "for squared gradients.") + .SetDefault(0.95f); + AddAttr("epsilon", + "(float, default 1.0e-6) Constant for " + "numerical stability") + .SetDefault(1.0e-6f); + AddComment(R"DOC( +Adadelta Updates Operator. + +This implements the Adadelta optimizer[1]. Adadelta is a per-dimension +adaptive learning rate method for gradient descent. + +Adadelta updates: + +avg_squared_grad_out = rho * avg_squared_grad + (1 - rho) * grad * grad +param_update = - sqrt((avg_squared_update + epsilon) / + (avg_squared_grad_out + epsilon)) * grad +avg_squared_update_out = rho * avg_squared_update + (1 - rho) * param_update**2 +param_out = param + param_update + +References: + [1] ADADELTA: An Adaptive Learning Rate Method + https://arxiv.org/abs/1212.5701 + +)DOC"); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP_WITHOUT_GRADIENT(adadelta, ops::AdadeltaOp, ops::AdadeltaOpMaker); +REGISTER_OP_CPU_KERNEL( + adadelta, ops::AdadeltaOpKernel); diff --git a/paddle/operators/adadelta_op.cu b/paddle/operators/adadelta_op.cu new file mode 100644 index 0000000000..3af1c8c8e9 --- /dev/null +++ b/paddle/operators/adadelta_op.cu @@ -0,0 +1,20 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#define EIGEN_USE_GPU +#include "paddle/operators/adadelta_op.h" + +namespace ops = paddle::operators; +REGISTER_OP_GPU_KERNEL( + adadelta, ops::AdadeltaOpKernel); diff --git a/paddle/operators/adadelta_op.h b/paddle/operators/adadelta_op.h new file mode 100644 index 0000000000..d29e15c435 --- /dev/null +++ b/paddle/operators/adadelta_op.h @@ -0,0 +1,69 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once +#include "paddle/framework/eigen.h" +#include "paddle/framework/op_registry.h" + +namespace paddle { +namespace operators { + +template +class AdadeltaOpKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto param_out_tensor = ctx.Output("ParamOut"); + auto avg_squared_grad_out_tensor = + ctx.Output("AvgSquaredGradOut"); + auto avg_squared_update_out_tensor = + ctx.Output("AvgSquaredUpdateOut"); + + param_out_tensor->mutable_data(ctx.GetPlace()); + avg_squared_grad_out_tensor->mutable_data(ctx.GetPlace()); + avg_squared_update_out_tensor->mutable_data(ctx.GetPlace()); + + float rho = ctx.Attr("rho"); + float epsilon = ctx.Attr("epsilon"); + + auto param = framework::EigenVector::Flatten( + *ctx.Input("Param")); + auto grad = framework::EigenVector::Flatten( + *ctx.Input("Grad")); + // Squared gradient accumulator + auto avg_squared_grad = framework::EigenVector::Flatten( + *ctx.Input("AvgSquaredGrad")); + // Squared updates accumulator + auto avg_squared_update = framework::EigenVector::Flatten( + *ctx.Input("AvgSquaredUpdate")); + auto param_out = framework::EigenVector::Flatten(*param_out_tensor); + auto avg_squared_grad_out = + framework::EigenVector::Flatten(*avg_squared_grad_out_tensor); + auto avg_squared_update_out = + framework::EigenVector::Flatten(*avg_squared_update_out_tensor); + auto place = ctx.GetEigenDevice(); + + avg_squared_grad_out.device(place) = + rho * avg_squared_grad + (1 - rho) * grad.square(); + auto update = + -((avg_squared_update + epsilon) / (avg_squared_grad_out + epsilon)) + .sqrt() * + grad; + avg_squared_update_out.device(place) = + rho * avg_squared_update + (1 - rho) * update.square(); + param_out.device(place) = param + update; + } +}; + +} // namespace operators +} // namespace paddle diff --git a/python/paddle/v2/framework/tests/test_adadelta_op.py b/python/paddle/v2/framework/tests/test_adadelta_op.py new file mode 100644 index 0000000000..7105593a98 --- /dev/null +++ b/python/paddle/v2/framework/tests/test_adadelta_op.py @@ -0,0 +1,96 @@ +import unittest +import numpy as np +from op_test import OpTest + + +class TestAdadeltaOp1(OpTest): + def setUp(self): + self.op_type = "adadelta" + param = np.random.uniform(-1, 1, (102, 105)).astype("float32") + grad = np.random.uniform(-1, 1, (102, 105)).astype("float32") + # The squared gradient is positive + avg_squared_grad = np.random.random((102, 105)).astype("float32") + # The squared update is positive + avg_squared_update = np.random.random((102, 105)).astype("float32") + + rho = 0.95 + epsilon = 1e-6 + + self.inputs = { + 'Param': param, + 'Grad': grad, + 'AvgSquaredGrad': avg_squared_grad, + 'AvgSquaredUpdate': avg_squared_update + } + + self.attrs = {'rho': rho, 'epsilon': epsilon} + + avg_squared_grad_out = rho * avg_squared_grad + \ + (1 - rho) * np.square(grad) + update = -np.multiply( + np.sqrt( + np.divide(avg_squared_update + epsilon, avg_squared_grad_out + + epsilon)), grad) + + avg_squared_update_out = rho * avg_squared_update + \ + (1 - rho) * np.square(update) + + param_out = param + update + + self.outputs = { + 'ParamOut': param_out, + 'AvgSquaredGradOut': avg_squared_grad_out, + 'AvgSquaredUpdateOut': avg_squared_update_out + } + + def test_check_output(self): + self.check_output() + + +class TestAdadeltaOp2(OpTest): + '''Test Adadelta op with default attribute values + ''' + + def setUp(self): + self.op_type = "adadelta" + param = np.random.uniform(-1, 1, (102, 105)).astype("float32") + grad = np.random.uniform(-1, 1, (102, 105)).astype("float32") + # The squared gradient is positive + avg_squared_grad = np.random.random((102, 105)).astype("float32") + # The squared update is positive + avg_squared_update = np.random.random((102, 105)).astype("float32") + + rho = 0.95 + epsilon = 1e-6 + + self.inputs = { + 'Param': param, + 'Grad': grad, + 'AvgSquaredGrad': avg_squared_grad, + 'AvgSquaredUpdate': avg_squared_update + } + + avg_squared_grad_out = rho * avg_squared_grad + \ + (1 - rho) * np.square(grad) + update = -np.multiply( + np.sqrt( + np.divide(avg_squared_update + epsilon, avg_squared_grad_out + + epsilon)), grad) + + avg_squared_update_out = rho * avg_squared_update + \ + (1 - rho) * np.square(update) + + param_out = param + update + + self.outputs = { + 'ParamOut': param_out, + 'AvgSquaredGradOut': avg_squared_grad_out, + 'AvgSquaredUpdateOut': avg_squared_update_out + } + + def test_check_output(self): + self.check_output() + + +if __name__ == "__main__": + unittest.main() From 4b07686aa89575442176ce056ef3551a2b31580e Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Thu, 5 Oct 2017 14:42:17 -0700 Subject: [PATCH 223/342] Add unit tests --- paddle/framework/backward.cc | 5 +- paddle/framework/backward_test.cc | 206 ++++++++++++++++++++++++++++++ 2 files changed, 209 insertions(+), 2 deletions(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index efcdd1bc78..c970e01dd1 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -289,7 +289,7 @@ std::vector> MakeOpGrad( std::unordered_set& no_grad_vars) { std::vector> grad_op_descs; // All input gradients of forwarding operator do not need to calculat. - const std::vector& inputs = op_desc->InArgumentNames(); + const std::vector& inputs = op_desc->InputArgumentNames(); if (AllGradInSet(inputs, no_grad_vars)) { return grad_op_descs; // empty vector } @@ -323,8 +323,9 @@ std::vector> MakeOpGrad( } } } + for (auto& p : pending_fill_zeros_ops) { - grad_op_descs.push_back(std::move(p)); + grad_op_descs.insert(grad_op_descs.begin(), std::move(p)); } return grad_op_descs; } diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index 9ea91358f4..30225a4a99 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -155,6 +155,18 @@ class SumOpMaker : public framework::OpProtoAndCheckerMaker { } }; +class MultInOutOpMaker : public OpProtoAndCheckerMaker { + public: + MultInOutOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "x"); + AddInput("H", "h"); + AddOutput("Y", "y"); + AddOutput("Z", "z"); + AddComment(""); + } +}; + } // namespace framework } // namespace paddle @@ -172,6 +184,7 @@ REGISTER_OP(sum, f::NOP, f::SumOpMaker, sum_grad, f::NOP); REGISTER_OP_WITHOUT_GRADIENT(fc, f::FcOp, f::FcOpMaker); REGISTER_OP(many_output_op, f::NOP, f::ManyOutputOpMaker, many_output_op_grad, f::NOP); +REGISTER_OP(mult_in_out, f::NOP, f::MultInOutOpMaker, mult_in_out_grad, f::NOP); TEST(Backward, simple_op_not_need_grad) { auto fwd = f::OpRegistry::CreateOp( @@ -487,4 +500,197 @@ TEST(Backward, simple_mult_op) { std::vector({f::GradVarName("out2")})); EXPECT_EQ(grad_op3->Output(f::GradVarName("b")), std::vector({f::GradVarName("b3")})); +} + +TEST(Backward, intermedia_var_no_grad) { + f::ProgramDesc *program_desc = GetNewProgramDesc(); + f::ProgramDescBind &program = f::ProgramDescBind::Instance(program_desc); + f::BlockDescBind *block = program.Block(0); + f::OpDescBind *op1 = block->AppendOp(); + op1->SetType("rowwise_add"); + op1->SetInput("X", {"x1"}); + op1->SetInput("b", {"b1"}); + op1->SetOutput("Out", {"out1"}); + + f::OpDescBind *op2 = block->AppendOp(); + op2->SetType("mul"); + op2->SetInput("X", {"x2"}); + op2->SetInput("Y", {"y2"}); + op2->SetOutput("Out", {"out2"}); + + f::OpDescBind *op3 = block->AppendOp(); + op3->SetType("rowwise_add"); + op3->SetInput("X", {"out2"}); + op3->SetInput("b", {"b3"}); + op3->SetOutput("Out", {"out3"}); + + f::OpDescBind *op4 = block->AppendOp(); + op4->SetType("mul"); + op4->SetInput("X", {"out1"}); + op4->SetInput("Y", {"out3"}); + op4->SetOutput("Out", {"out4"}); + + AppendBackward(program, {"out3"}); + + ASSERT_EQ(block->AllOps().size(), 6UL); + f::OpDescBind *grad_op1 = block->AllOps()[5]; + EXPECT_EQ(grad_op1->Type(), "rowwise_add_grad"); + ASSERT_EQ(grad_op1->InputNames().size(), 1UL); + ASSERT_EQ(grad_op1->OutputNames().size(), 2UL); + EXPECT_EQ(grad_op1->Input(f::GradVarName("Out")), + std::vector({f::GradVarName("out1")})); + EXPECT_EQ(grad_op1->Output(f::GradVarName("X")), + std::vector({f::GradVarName("x1")})); + EXPECT_EQ(grad_op1->Output(f::GradVarName("b")), + std::vector({f::GradVarName("b1")})); + + f::OpDescBind *grad_op4 = block->AllOps()[4]; + EXPECT_EQ(grad_op4->Type(), "mul_grad"); + ASSERT_EQ(grad_op4->InputNames().size(), 4UL); + ASSERT_EQ(grad_op4->OutputNames().size(), 2UL); + EXPECT_EQ(grad_op4->Input("X"), std::vector({"out1"})); + EXPECT_EQ(grad_op4->Input("Y"), std::vector({"out3"})); + EXPECT_EQ(grad_op4->Input("Out"), std::vector({"out4"})); + EXPECT_EQ(grad_op4->Input(f::GradVarName("Out")), + std::vector({f::GradVarName("out4")})); + EXPECT_EQ(grad_op4->Output(f::GradVarName("X")), + std::vector({f::GradVarName("out1")})); + EXPECT_EQ(grad_op4->Output(f::GradVarName("Y")), + std::vector({f::kEmptyVarName})); +} + +TEST(Backward, var_no_grad) { + f::ProgramDesc *program_desc = GetNewProgramDesc(); + f::ProgramDescBind &program = f::ProgramDescBind::Instance(program_desc); + f::BlockDescBind *block = program.Block(0); + f::OpDescBind *op1 = block->AppendOp(); + op1->SetType("mult_in_out"); + op1->SetInput("X", {"x1"}); + op1->SetInput("H", {"h1"}); + op1->SetOutput("Y", {"y1"}); + op1->SetOutput("Z", {"z1"}); + + f::OpDescBind *op2 = block->AppendOp(); + op2->SetType("mult_in_out"); + op2->SetInput("X", {"y1"}); + op2->SetInput("H", {"z1"}); + op2->SetOutput("Y", {"y2"}); + op2->SetOutput("Z", {"z2"}); + + AppendBackward(program, {"z1"}); + + ASSERT_EQ(block->AllOps().size(), 5UL); + f::OpDescBind *grad_op2 = block->AllOps()[2]; + ASSERT_EQ(grad_op2->Type(), "mult_in_out_grad"); + ASSERT_EQ(grad_op2->InputNames().size(), 6UL); + ASSERT_EQ(grad_op2->OutputNames().size(), 2UL); + EXPECT_EQ(grad_op2->Input("X"), std::vector({"y1"})); + EXPECT_EQ(grad_op2->Input("H"), std::vector({"z1"})); + EXPECT_EQ(grad_op2->Input("Y"), std::vector({"y2"})); + EXPECT_EQ(grad_op2->Input("Z"), std::vector({"z2"})); + EXPECT_EQ(grad_op2->Input(f::GradVarName("Y")), + std::vector({f::GradVarName("y2")})); + EXPECT_EQ(grad_op2->Input(f::GradVarName("Z")), + std::vector({f::GradVarName("z2")})); + EXPECT_EQ(grad_op2->Output(f::GradVarName("X")), + std::vector({f::GradVarName("y1")})); + EXPECT_EQ(grad_op2->Output(f::GradVarName("H")), + std::vector({f::kEmptyVarName})); + + f::OpDescBind *fill_zero_op = block->AllOps()[3]; + ASSERT_EQ(fill_zero_op->Type(), "fill_zeros_like"); + ASSERT_EQ(fill_zero_op->InputNames().size(), 1UL); + ASSERT_EQ(fill_zero_op->OutputNames().size(), 1UL); + EXPECT_EQ(fill_zero_op->Input("X"), std::vector({"z1"})); + EXPECT_EQ(fill_zero_op->Output("Y"), + std::vector({std::string("z1") + f::kZeroVarSuffix})); + + f::OpDescBind *grad_op1 = block->AllOps()[4]; + ASSERT_EQ(grad_op1->Type(), "mult_in_out_grad"); + ASSERT_EQ(grad_op1->InputNames().size(), 6UL); + ASSERT_EQ(grad_op1->OutputNames().size(), 2UL); + EXPECT_EQ(grad_op1->Input("X"), std::vector({"x1"})); + EXPECT_EQ(grad_op1->Input("H"), std::vector({"h1"})); + EXPECT_EQ(grad_op1->Input("Y"), std::vector({"y1"})); + EXPECT_EQ(grad_op1->Input("Z"), std::vector({"z1"})); + EXPECT_EQ(grad_op1->Input(f::GradVarName("Y")), + std::vector({f::GradVarName("y1")})); + EXPECT_EQ(grad_op1->Input(f::GradVarName("Z")), + std::vector({std::string("z1") + f::kZeroVarSuffix})); + EXPECT_EQ(grad_op1->Output(f::GradVarName("X")), + std::vector({f::GradVarName("x1")})); + EXPECT_EQ(grad_op1->Output(f::GradVarName("H")), + std::vector({f::GradVarName("h1")})); +} + +TEST(Backward, shared_var) { + f::ProgramDesc *program_desc = GetNewProgramDesc(); + f::ProgramDescBind &program = f::ProgramDescBind::Instance(program_desc); + f::BlockDescBind *block = program.Block(0); + f::OpDescBind *op1 = block->AppendOp(); + op1->SetType("rowwise_add"); + op1->SetInput("X", {"x1"}); + op1->SetInput("b", {"b1"}); + op1->SetOutput("Out", {"out1"}); + + f::OpDescBind *op2 = block->AppendOp(); + op2->SetType("mul"); + op2->SetInput("X", {"out1"}); + op2->SetInput("Y", {"y2"}); + op2->SetOutput("Out", {"out2"}); + + f::OpDescBind *op3 = block->AppendOp(); + op3->SetType("rowwise_add"); + op3->SetInput("X", {"out1"}); + op3->SetInput("b", {"b3"}); + op3->SetOutput("Out", {"out3"}); + + AppendBackward(program, {}); + + ASSERT_EQ(block->AllOps().size(), 7UL); + f::OpDescBind *grad_op3 = block->AllOps()[3]; + ASSERT_EQ(grad_op3->Type(), "rowwise_add_grad"); + ASSERT_EQ(grad_op3->InputNames().size(), 1UL); + ASSERT_EQ(grad_op3->OutputNames().size(), 2UL); + EXPECT_EQ(grad_op3->Input(f::GradVarName("Out")), + std::vector({f::GradVarName("out3")})); + EXPECT_EQ(grad_op3->Output(f::GradVarName("X")), + std::vector({f::GradVarName("out1") + "@RENAME@0"})); + EXPECT_EQ(grad_op3->Output(f::GradVarName("b")), + std::vector({f::GradVarName("b3")})); + + f::OpDescBind *grad_op4 = block->AllOps()[4]; + ASSERT_EQ(grad_op4->Type(), "mul_grad"); + ASSERT_EQ(grad_op4->InputNames().size(), 4UL); + ASSERT_EQ(grad_op4->OutputNames().size(), 2UL); + EXPECT_EQ(grad_op4->Input("X"), std::vector({"out1"})); + EXPECT_EQ(grad_op4->Input("Y"), std::vector({"y2"})); + EXPECT_EQ(grad_op4->Input("Out"), std::vector({"out2"})); + EXPECT_EQ(grad_op4->Input(f::GradVarName("Out")), + std::vector({f::GradVarName("out2")})); + EXPECT_EQ(grad_op4->Output(f::GradVarName("X")), + std::vector({f::GradVarName("out1") + "@RENAME@1"})); + EXPECT_EQ(grad_op4->Output(f::GradVarName("Y")), + std::vector({f::GradVarName("y2")})); + + f::OpDescBind *sum_op = block->AllOps()[5]; + ASSERT_EQ(sum_op->Type(), "sum"); + ASSERT_EQ(sum_op->InputNames().size(), 1UL); + ASSERT_EQ(sum_op->OutputNames().size(), 1UL); + EXPECT_EQ(sum_op->Input("X"), + std::vector({f::GradVarName("out1") + "@RENAME@0", + f::GradVarName("out1") + "@RENAME@1"})); + EXPECT_EQ(sum_op->Output("Out"), + std::vector({f::GradVarName("out1")})); + + f::OpDescBind *grad_op1 = block->AllOps()[6]; + ASSERT_EQ(grad_op1->Type(), "rowwise_add_grad"); + ASSERT_EQ(grad_op1->InputNames().size(), 1UL); + ASSERT_EQ(grad_op1->OutputNames().size(), 2UL); + EXPECT_EQ(grad_op1->Input(f::GradVarName("Out")), + std::vector({f::GradVarName("out1")})); + EXPECT_EQ(grad_op1->Output(f::GradVarName("X")), + std::vector({f::GradVarName("x1")})); + EXPECT_EQ(grad_op1->Output(f::GradVarName("b")), + std::vector({f::GradVarName("b1")})); } \ No newline at end of file From 20725f2d52bd3f6d54df45c710872b9b8ee52e14 Mon Sep 17 00:00:00 2001 From: qijun Date: Thu, 5 Oct 2017 14:55:29 -0700 Subject: [PATCH 224/342] add executor feed operator test --- paddle/framework/executor.cc | 20 ++-- paddle/framework/executor.h | 2 +- paddle/framework/executor_test.cc | 155 +++++++++++++++++++++++++++--- paddle/operators/feed_op.cc | 15 ++- 4 files changed, 167 insertions(+), 25 deletions(-) diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc index 7c3cac359e..aafef12554 100644 --- a/paddle/framework/executor.cc +++ b/paddle/framework/executor.cc @@ -48,8 +48,7 @@ Executor::~Executor() { } } -void Executor::Run(const ProgramDesc& pdesc, Scope* scope, - std::vector* outputs) { +void Executor::Run(const ProgramDesc& pdesc, Scope* scope) { // TODO(tonyyang-svail): // - only runs the first block // - only runs on the first device @@ -76,14 +75,15 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope, device_context->Wait(); } // // print tensor value - // for (auto& var : block.vars()) { - // std::cout << var.name() << std::endl; - // auto v = scope->FindVar(var.name()); - // const LoDTensor& t = v->Get(); - // for (int i = 0; i < t.numel(); ++i) - // std::cout << t.data()[i] << " "; - // std::cout << std::endl; - // } + for (auto& var : block.vars()) { + std::cout << var.name() << std::endl; + auto v = scope->FindVar(var.name()); + const LoDTensor& t = v->Get(); + for (int i = 0; i < t.numel(); ++i) { + std::cout << t.data()[i] << " "; + } + std::cout << std::endl; + } } } // namespace framework diff --git a/paddle/framework/executor.h b/paddle/framework/executor.h index fc53be37c3..9e443c8fca 100644 --- a/paddle/framework/executor.h +++ b/paddle/framework/executor.h @@ -26,7 +26,7 @@ class Executor { public: explicit Executor(const std::vector& places); ~Executor(); - void Run(const ProgramDesc&, Scope*, std::vector*); + void Run(const ProgramDesc&, Scope*); private: std::vector device_contexts_; diff --git a/paddle/framework/executor_test.cc b/paddle/framework/executor_test.cc index ca7e8ca7d2..0856d1f32e 100644 --- a/paddle/framework/executor_test.cc +++ b/paddle/framework/executor_test.cc @@ -13,17 +13,18 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/framework/executor.h" +#include // for unique_ptr +#include // for call_once +#include #include "gtest/gtest.h" #include "paddle/framework/attribute.h" - #include "paddle/framework/grad_op_builder.h" #include "paddle/framework/op_registry.h" #include "paddle/framework/operator.h" -#include - USE_OP(elementwise_add); USE_OP(gaussian_random); +USE_OP(feed); using std::string; using namespace paddle::platform; @@ -58,7 +59,67 @@ void add_gaussian_random_op(string var_name, proto_block* block) { Out->add_arguments(var_name); } -class ExecutorTester : public ::testing::Test { +void add_feed_op(string var_name, int index, proto_block* block) { + std::vector dim{3}; + + // insert variable + auto a = block->add_vars(); + a->set_name(var_name); + auto a_lt = a->mutable_lod_tensor(); + a_lt->set_data_type(paddle::framework::DataType::FP32); + for (int i : dim) { + a_lt->add_dims(i); + } + + // insert operation + auto op = block->add_ops(); + op->set_type("feed"); + + // set dims attr + auto dims = op->add_attrs(); + dims->set_name("dims"); + dims->set_type(paddle::framework::AttrType::INTS); + for (int i : dim) { + dims->add_ints(i); + } + + // set col attr + auto col = op->add_attrs(); + col->set_name("col"); + col->set_type(paddle::framework::AttrType::INT); + col->set_i(index); + + auto Out = op->add_outputs(); + Out->set_parameter("Out"); + Out->add_arguments(var_name); +} + +std::once_flag set_variable_flag; + +template +void set_feed_variable(const std::vector>& inputs) { + typedef std::vector FeedInputs; + Variable* g_feed_value = GetScope()->FindVar("feed_value"); + FeedInputs& feed_inputs = *(g_feed_value->GetMutable()); + auto size = inputs.size(); + + std::call_once(set_variable_flag, [&]() { + feed_inputs.reserve(size); + for (size_t i = 0; i < size; i++) { + paddle::framework::Tensor tmp; + tmp.mutable_data(make_ddim({static_cast(inputs[i].size())}), + CPUPlace()); + feed_inputs.push_back(tmp); + } + }); + + for (size_t i = 0; i < size; i++) { + memcpy(feed_inputs[i].data(), inputs[i].data(), + inputs[i].size() * sizeof(T)); + } +} + +class ExecutorTesterRandom : public ::testing::Test { public: virtual void SetUp() override { auto root_block = pdesc_.add_blocks(); @@ -84,33 +145,103 @@ class ExecutorTester : public ::testing::Test { auto Out = op->add_outputs(); Out->set_parameter("Out"); Out->add_arguments("c"); + + scope_ = GetScope(); } protected: - std::vector* outputs_{nullptr}; ProgramDesc pdesc_; - Scope scope_; + Scope* scope_; }; -TEST_F(ExecutorTester, InitCPU) { +class ExecutorTesterFeed : public ::testing::Test { + public: + virtual void SetUp() override { + auto root_block = pdesc_.add_blocks(); + root_block->set_idx(0); + root_block->set_parent_idx(-1); + + add_feed_op("a", 0, root_block); + add_feed_op("b", 1, root_block); + + auto c = root_block->add_vars(); + c->set_name("c"); + auto c_lt = c->mutable_lod_tensor(); + c_lt->set_data_type(paddle::framework::DataType::FP32); + + auto op = root_block->add_ops(); + op->set_type("elementwise_add"); + auto X = op->add_inputs(); + X->set_parameter("X"); + X->add_arguments("a"); + auto Y = op->add_inputs(); + Y->set_parameter("Y"); + Y->add_arguments("b"); + auto Out = op->add_outputs(); + Out->set_parameter("Out"); + Out->add_arguments("c"); + + std::vector vec1 = {1.0, 2.0, 3.0}; + std::vector vec2 = {4.0, 5.0, 6.0}; + inputs_.push_back(vec1); + inputs_.push_back(vec2); + } + + protected: + ProgramDesc pdesc_; + std::vector> inputs_; +}; + +TEST_F(ExecutorTesterRandom, CPU) { std::vector places; CPUPlace cpu_place1, cpu_place2; places.push_back(cpu_place1); places.push_back(cpu_place2); Executor* executor = new Executor(places); - executor->Run(pdesc_, &scope_, outputs_); + executor->Run(pdesc_, scope_); + delete executor; +} + +TEST_F(ExecutorTesterFeed, CPU) { + std::vector places; + CPUPlace cpu_place; + places.push_back(cpu_place); + + Executor* executor = new Executor(places); + + // 3 mini-batch + for (int i = 0; i < 3; i++) { + // need to set feed variable before Executor::Run + set_feed_variable(inputs_); + executor->Run(pdesc_, GetScope()); + } + delete executor; } #ifdef PADDLE_WITH_GPU -TEST_F(ExecutorTester, InitGPU) { +TEST_F(ExecutorTesterRandom, GPU) { + std::vector places; + GPUPlace gpu_place(0); + places.push_back(gpu_place); + + Executor* executor = new Executor(places); + executor->Run(pdesc_, scope_); + delete executor; +} + +TEST_F(ExecutorTesterFeed, GPU) { std::vector places; - GPUPlace gpu_place0(0); - places.push_back(gpu_place0); + GPUPlace gpu_place(0); + places.push_back(gpu_place); Executor* executor = new Executor(places); - executor->Run(pdesc_, &scope_, outputs_); + + // need to set feed variable before Executor::Run + set_feed_variable(inputs_); + executor->Run(pdesc_, scope_); + delete executor; } #endif diff --git a/paddle/operators/feed_op.cc b/paddle/operators/feed_op.cc index 805c3600be..5ae882bc8a 100644 --- a/paddle/operators/feed_op.cc +++ b/paddle/operators/feed_op.cc @@ -28,19 +28,30 @@ class FeedOp : public framework::OperatorWithKernel { int col = ctx->Attrs().Get("col"); framework::Variable* g_feed_variable = framework::GetScope()->FindVar("feed_value"); + FeedInputs tensors = g_feed_variable->Get(); + auto in_dim = tensors[col].dims(); - ctx->SetOutputDim("Y", in_dim); + ctx->SetOutputDim("Out", in_dim); // need to handle LodTensor later } + + framework::DataType IndicateDataType( + const framework::ExecutionContext& ctx) const override { + return static_cast(Attr("data_type")); + } }; class FeedOpMaker : public framework::OpProtoAndCheckerMaker { public: FeedOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddAttr("col", "The col in Global Feed Variable"); + AddAttr("data_type", "output data type") + .SetDefault(framework::DataType::FP32); + AddAttr("col", "The col in global feed variable").SetDefault(0); + AddAttr>("dims", "The dimension of random tensor."); AddOutput("Out", "The output of dropout op."); + AddComment(R"DOC(Feed data to global feed variable)DOC"); } }; From 352af966d7a62a1ed4cedaf9562d05db8026fe23 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Thu, 5 Oct 2017 15:30:03 -0700 Subject: [PATCH 225/342] add python unit test --- paddle/framework/CMakeLists.txt | 4 +- paddle/framework/op_registry.h | 9 +-- paddle/framework/shape_inference.h | 4 ++ paddle/framework/shape_inference_map.cc | 60 ------------------- paddle/framework/shape_inference_map.h | 48 --------------- paddle/pybind/pybind.cc | 24 +++++--- .../v2/framework/tests/test_infer_shape.py | 46 ++++++++++++-- 7 files changed, 62 insertions(+), 133 deletions(-) delete mode 100644 paddle/framework/shape_inference_map.cc delete mode 100644 paddle/framework/shape_inference_map.h diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt index 986b45451f..a2efcdb55c 100644 --- a/paddle/framework/CMakeLists.txt +++ b/paddle/framework/CMakeLists.txt @@ -26,10 +26,8 @@ cc_library(op_info SRCS op_info.cc DEPS attribute framework_proto proto_desc) cc_library(operator SRCS operator.cc DEPS op_info device_context tensor scope proto_desc) cc_test(operator_test SRCS operator_test.cc DEPS operator op_registry) -cc_library(shape_inference_map SRCS shape_inference_map.cc DEPS op_info operator) - cc_library(grad_op_builder SRCS grad_op_builder.cc DEPS operator proto_desc) -cc_library(op_registry SRCS op_registry.cc DEPS grad_op_builder op_proto_maker op_info shape_inference_map) +cc_library(op_registry SRCS op_registry.cc DEPS grad_op_builder op_proto_maker op_info) cc_test(op_registry_test SRCS op_registry_test.cc DEPS op_registry) cc_test(grad_op_builder_test SRCS grad_op_builder_test.cc DEPS grad_op_builder op_registry sum_op) diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h index 8138ba117a..ee02da7b4d 100644 --- a/paddle/framework/op_registry.h +++ b/paddle/framework/op_registry.h @@ -26,7 +26,6 @@ limitations under the License. */ #include "paddle/framework/grad_op_builder.h" #include "paddle/framework/operator.h" #include "paddle/framework/scope.h" -#include "paddle/framework/shape_inference_map.h" namespace paddle { namespace framework { @@ -55,16 +54,10 @@ class OpRegistry { const std::string& grad_op_type) { OperatorRegistrar reg(op_type.c_str()); reg.info.grad_op_type_ = grad_op_type; - auto proto = reg.info.Proto(); - std::cout << "====== " << op_type << " =======" << std::endl; - std::cout << proto.SerializeAsString() << std::endl; - std::cout << "=============" << std::endl; - ShapeInferenceMap::Instance().CreateOpWithKernel(reg.info, op_type); + // register gradient op if (!grad_op_type.empty()) { OperatorRegistrar grad_reg(grad_op_type.c_str()); - ShapeInferenceMap::Instance().CreateOpWithKernel(grad_reg.info, - grad_op_type); } } diff --git a/paddle/framework/shape_inference.h b/paddle/framework/shape_inference.h index ac6f238638..8189823c19 100644 --- a/paddle/framework/shape_inference.h +++ b/paddle/framework/shape_inference.h @@ -20,6 +20,10 @@ limitations under the License. */ namespace paddle { namespace framework { +class InferShapeContextBase; + +typedef std::function InferShapeFn; + class InferShapeContextBase { public: virtual ~InferShapeContextBase() {} diff --git a/paddle/framework/shape_inference_map.cc b/paddle/framework/shape_inference_map.cc deleted file mode 100644 index bd2b867984..0000000000 --- a/paddle/framework/shape_inference_map.cc +++ /dev/null @@ -1,60 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#include "paddle/framework/shape_inference_map.h" - -namespace paddle { -namespace framework { - -static VariableNameMap ConvertOpProtoVarsToVarNameMap( - const google::protobuf::RepeatedPtrField& op_proto_vars) { - VariableNameMap ret_val; - for (auto& var : op_proto_vars) { - ret_val[var.name()] = {}; - } - return ret_val; -} - -static ShapeInferenceMap* g_shape_inference_map = nullptr; - -ShapeInferenceMap& ShapeInferenceMap::Instance() { - if (g_shape_inference_map == nullptr) { - g_shape_inference_map = new ShapeInferenceMap(); - } - return *g_shape_inference_map; -} - -void ShapeInferenceMap::CreateOpWithKernel(const OpInfo& op_info, - const std::string& op_type) { - auto proto = op_info.Proto(); - std::cout << "========= " << op_type << " in======" << std::endl; - std::cout << proto.SerializeAsString() << std::endl; - std::cout << "========= " << op_type << " out======" << std::endl; - const VariableNameMap inputs = ConvertOpProtoVarsToVarNameMap(proto.inputs()); - const VariableNameMap outputs = - ConvertOpProtoVarsToVarNameMap(proto.outputs()); - auto* op = op_info.Creator()(op_type, inputs, outputs, {}); - auto* op_with_kernel = dynamic_cast(op); - auto it = op_shape_inference_map_.find(op_type); - if (it != op_shape_inference_map_.end()) { - PADDLE_THROW("OpWithKernel(%s) is already registered for infer_shape", - op_type); - } - if (op_with_kernel != nullptr) { - op_shape_inference_map_[op_type] = op_with_kernel; - } -} - -} // namespace framework -} // namespace paddle diff --git a/paddle/framework/shape_inference_map.h b/paddle/framework/shape_inference_map.h deleted file mode 100644 index 6c7304f6c0..0000000000 --- a/paddle/framework/shape_inference_map.h +++ /dev/null @@ -1,48 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once - -#include - -#include "paddle/framework/op_info.h" -#include "paddle/framework/operator.h" -#include "paddle/framework/shape_inference.h" - -namespace paddle { -namespace framework { - -class ShapeInferenceMap { - public: - static ShapeInferenceMap& Instance(); - - void CreateOpWithKernel(const OpInfo& op_info, const std::string& op_type); - - OperatorWithKernel* GetOpWithKernel(const std::string& op_type) { - auto it = op_shape_inference_map_.find(op_type); - if (it == op_shape_inference_map_.end()) { - return nullptr; - } - return it->second; - } - - private: - ShapeInferenceMap() = default; - DISABLE_COPY_AND_ASSIGN(ShapeInferenceMap); - - std::unordered_map op_shape_inference_map_; -}; - -} // namespace framework -} // namespace paddle diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc index e11bcc0e0f..2ad0344c09 100644 --- a/paddle/pybind/pybind.cc +++ b/paddle/pybind/pybind.cc @@ -223,15 +223,21 @@ All parameter, weight, gradient are variables in Paddle. desc.InitializationErrorString()); return OpRegistry::CreateOp(desc); }) - .def("infer_shape", - [](const OpDescBind &op_desc, BlockDescBind &block) { - auto &shape_inference_map = ShapeInferenceMap::Instance(); - auto *op = shape_inference_map.GetOpWithKernel(op_desc.Type()); - if (op != nullptr) { - auto ctx = CompileTimeInferShapeContext(op_desc, block); - op->InferShape(&ctx); - } - }) + .def_static("infer_shape", + [](OpDescBind &op_desc, BlockDescBind &block) { + auto op = OpRegistry::CreateOp(*op_desc.Proto()); + auto *op_with_kernel = + dynamic_cast(op.get()); + if (op_with_kernel != nullptr) { + auto ctx = CompileTimeInferShapeContext(op_desc, block); + op_with_kernel->InferShape(&ctx); + } else { + PADDLE_THROW( + "OP(%s) is not type of OperatorWithKernel, " + "should not call this function", + op_desc.Type()); + } + }) .def("backward", [](const OperatorBase &forwardOp, const std::unordered_set &no_grad_vars) { diff --git a/python/paddle/v2/framework/tests/test_infer_shape.py b/python/paddle/v2/framework/tests/test_infer_shape.py index 56d3a90123..ec93aaf843 100644 --- a/python/paddle/v2/framework/tests/test_infer_shape.py +++ b/python/paddle/v2/framework/tests/test_infer_shape.py @@ -10,11 +10,13 @@ class TestInferShape(unittest.TestCase): block = prog.block(0) self.assertIsNotNone(block) + shape = [10, 20] + # prepare input/output x1 = block.new_var("x1") - x1.set_shape([10, 20]) + x1.set_shape(shape) x2 = block.new_var("x2") - x2.set_shape([10, 20]) + x2.set_shape(shape) out = block.new_var("out") @@ -24,6 +26,40 @@ class TestInferShape(unittest.TestCase): sum_op_desc.set_input("X", ["x1", "x2"]) sum_op_desc.set_output("Out", ["out"]) - sum_op = Operator("sum", X=["x1", "x2"], Out="out") - sum_op.infer_shape(sum_op_desc, block) - print(out.shape()) + print(type(sum_op_desc)) + print(type(block)) + core.Operator.infer_shape(sum_op_desc, block) + self.assertEqual(out.shape(), shape) + + def test_mul_op(self): + prog = core.ProgramDesc.__create_program_desc__() + self.assertIsNotNone(prog) + block = prog.block(0) + self.assertIsNotNone(block) + + x_shape = [10, 20] + y_shape = [20, 30] + + # prepare input/output + x1 = block.new_var("x") + x1.set_shape(x_shape) + x2 = block.new_var("y") + x2.set_shape(y_shape) + + out = block.new_var("out") + + # prepare the operator + mul_op_desc = block.append_op() + mul_op_desc.set_type("mul") + mul_op_desc.set_input("X", ["x"]) + mul_op_desc.set_input("Y", ["y"]) + mul_op_desc.set_output("Out", ["out"]) + mul_op_desc.set_attr("x_num_col_dims", 1) + mul_op_desc.set_attr("y_num_col_dims", 1) + + core.Operator.infer_shape(mul_op_desc, block) + self.assertEqual(out.shape(), [x_shape[0], y_shape[1]]) + + +if __name__ == '__main__': + unittest.main() From 628715d602491732f78f314d355760ca8ba326e3 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Thu, 5 Oct 2017 15:43:51 -0700 Subject: [PATCH 226/342] clean code --- paddle/framework/shape_inference.h | 5 ----- python/paddle/v2/framework/tests/test_infer_shape.py | 2 -- 2 files changed, 7 deletions(-) diff --git a/paddle/framework/shape_inference.h b/paddle/framework/shape_inference.h index 8189823c19..bc8af0eb3e 100644 --- a/paddle/framework/shape_inference.h +++ b/paddle/framework/shape_inference.h @@ -14,16 +14,11 @@ limitations under the License. */ #pragma once -#include "paddle/framework/attribute.h" #include "paddle/framework/ddim.h" namespace paddle { namespace framework { -class InferShapeContextBase; - -typedef std::function InferShapeFn; - class InferShapeContextBase { public: virtual ~InferShapeContextBase() {} diff --git a/python/paddle/v2/framework/tests/test_infer_shape.py b/python/paddle/v2/framework/tests/test_infer_shape.py index ec93aaf843..b38ec9c037 100644 --- a/python/paddle/v2/framework/tests/test_infer_shape.py +++ b/python/paddle/v2/framework/tests/test_infer_shape.py @@ -26,8 +26,6 @@ class TestInferShape(unittest.TestCase): sum_op_desc.set_input("X", ["x1", "x2"]) sum_op_desc.set_output("Out", ["out"]) - print(type(sum_op_desc)) - print(type(block)) core.Operator.infer_shape(sum_op_desc, block) self.assertEqual(out.shape(), shape) From e043b386606afc95ffd9135c14866d5e3b77b642 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Thu, 5 Oct 2017 15:47:42 -0700 Subject: [PATCH 227/342] clean code --- paddle/framework/op_registry.h | 1 - 1 file changed, 1 deletion(-) diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h index ee02da7b4d..4ee2c7d275 100644 --- a/paddle/framework/op_registry.h +++ b/paddle/framework/op_registry.h @@ -54,7 +54,6 @@ class OpRegistry { const std::string& grad_op_type) { OperatorRegistrar reg(op_type.c_str()); reg.info.grad_op_type_ = grad_op_type; - // register gradient op if (!grad_op_type.empty()) { OperatorRegistrar grad_reg(grad_op_type.c_str()); From 45c4dcaabb4cbf140384dcffe3392d2e10b2a6d7 Mon Sep 17 00:00:00 2001 From: qijun Date: Thu, 5 Oct 2017 15:54:44 -0700 Subject: [PATCH 228/342] add fetch operator --- paddle/framework/executor.cc | 18 ++++---- paddle/framework/executor_test.cc | 67 ++++++++++++++++++++++++++++++ paddle/framework/scope.cc | 5 ++- paddle/operators/activation_op.cu | 18 ++++---- paddle/operators/feed_op.cc | 6 +-- paddle/operators/fetch_op.cc | 68 +++++++++++++++++++++++++++++++ paddle/operators/fetch_op.cu | 18 ++++++++ paddle/operators/fetch_op.h | 40 ++++++++++++++++++ 8 files changed, 218 insertions(+), 22 deletions(-) create mode 100644 paddle/operators/fetch_op.cc create mode 100644 paddle/operators/fetch_op.cu create mode 100644 paddle/operators/fetch_op.h diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc index aafef12554..51ddb7e58e 100644 --- a/paddle/framework/executor.cc +++ b/paddle/framework/executor.cc @@ -75,15 +75,15 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope) { device_context->Wait(); } // // print tensor value - for (auto& var : block.vars()) { - std::cout << var.name() << std::endl; - auto v = scope->FindVar(var.name()); - const LoDTensor& t = v->Get(); - for (int i = 0; i < t.numel(); ++i) { - std::cout << t.data()[i] << " "; - } - std::cout << std::endl; - } + // for (auto& var : block.vars()) { + // std::cout << var.name() << std::endl; + // auto v = scope->FindVar(var.name()); + // const LoDTensor& t = v->Get(); + // for (int i = 0; i < t.numel(); ++i) { + // std::cout << t.data()[i] << " "; + // } + // std::cout << std::endl; + // } } } // namespace framework diff --git a/paddle/framework/executor_test.cc b/paddle/framework/executor_test.cc index 0856d1f32e..980f5f579c 100644 --- a/paddle/framework/executor_test.cc +++ b/paddle/framework/executor_test.cc @@ -25,6 +25,7 @@ limitations under the License. */ USE_OP(elementwise_add); USE_OP(gaussian_random); USE_OP(feed); +USE_OP(fetch); using std::string; using namespace paddle::platform; @@ -94,6 +95,41 @@ void add_feed_op(string var_name, int index, proto_block* block) { Out->add_arguments(var_name); } +void add_fetch_op(string var_name, int index, proto_block* block) { + std::vector dim{3}; + + // insert variable + auto a = block->add_vars(); + a->set_name(var_name); + auto a_lt = a->mutable_lod_tensor(); + a_lt->set_data_type(paddle::framework::DataType::FP32); + for (int i : dim) { + a_lt->add_dims(i); + } + + // insert operation + auto op = block->add_ops(); + op->set_type("fetch"); + + // set dims attr + auto dims = op->add_attrs(); + dims->set_name("dims"); + dims->set_type(paddle::framework::AttrType::INTS); + for (int i : dim) { + dims->add_ints(i); + } + + // set col attr + auto col = op->add_attrs(); + col->set_name("col"); + col->set_type(paddle::framework::AttrType::INT); + col->set_i(index); + + auto Out = op->add_inputs(); + Out->set_parameter("Input"); + Out->add_arguments(var_name); +} + std::once_flag set_variable_flag; template @@ -119,6 +155,27 @@ void set_feed_variable(const std::vector>& inputs) { } } +template +std::vector> get_fetch_variable() { + typedef std::vector FetchOutputs; + Variable* g_fetch_value = GetScope()->FindVar("fetch_value"); + FetchOutputs& fetch_outputs = *(g_fetch_value->GetMutable()); + auto size = fetch_outputs.size(); + + std::vector> result; + result.reserve(size); + + for (size_t i = 0; i < size; i++) { + std::vector tmp; + tmp.reserve(fetch_outputs[i].numel()); + memcpy(tmp.data(), fetch_outputs[i].data(), + fetch_outputs[i].numel() * sizeof(T)); + result.push_back(tmp); + } + + return result; +} + class ExecutorTesterRandom : public ::testing::Test { public: virtual void SetUp() override { @@ -181,6 +238,8 @@ class ExecutorTesterFeed : public ::testing::Test { Out->set_parameter("Out"); Out->add_arguments("c"); + add_fetch_op("c", 0, root_block); + std::vector vec1 = {1.0, 2.0, 3.0}; std::vector vec2 = {4.0, 5.0, 6.0}; inputs_.push_back(vec1); @@ -213,8 +272,16 @@ TEST_F(ExecutorTesterFeed, CPU) { // 3 mini-batch for (int i = 0; i < 3; i++) { // need to set feed variable before Executor::Run + std::cout << "start mini-batch " << i << std::endl; set_feed_variable(inputs_); executor->Run(pdesc_, GetScope()); + std::vector> result = get_fetch_variable(); + for (auto& vec : result) { + for (auto& num : vec) { + std::cout << num << " "; + } + std::cout << std::endl; + } } delete executor; diff --git a/paddle/framework/scope.cc b/paddle/framework/scope.cc index b04120abf2..2c416570cf 100644 --- a/paddle/framework/scope.cc +++ b/paddle/framework/scope.cc @@ -74,7 +74,10 @@ std::unique_ptr make_unique(Args&&... args) { framework::Scope* GetScope() { static std::unique_ptr g_scope = make_unique(); - std::call_once(feed_variable_flag, [&]() { g_scope->NewVar("feed_value"); }); + std::call_once(feed_variable_flag, [&]() { + g_scope->NewVar("feed_value"); + g_scope->NewVar("fetch_value"); + }); return g_scope.get(); } diff --git a/paddle/operators/activation_op.cu b/paddle/operators/activation_op.cu index 44a6aaf9cb..93e9f1c694 100644 --- a/paddle/operators/activation_op.cu +++ b/paddle/operators/activation_op.cu @@ -1,16 +1,16 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at -http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ #define EIGEN_USE_GPU #include "paddle/operators/activation_op.h" diff --git a/paddle/operators/feed_op.cc b/paddle/operators/feed_op.cc index 5ae882bc8a..a61855cb99 100644 --- a/paddle/operators/feed_op.cc +++ b/paddle/operators/feed_op.cc @@ -49,9 +49,9 @@ class FeedOpMaker : public framework::OpProtoAndCheckerMaker { AddAttr("data_type", "output data type") .SetDefault(framework::DataType::FP32); AddAttr("col", "The col in global feed variable").SetDefault(0); - AddAttr>("dims", "The dimension of random tensor."); - AddOutput("Out", "The output of dropout op."); - AddComment(R"DOC(Feed data to global feed variable)DOC"); + AddAttr>("dims", "The dimension of feed tensor."); + AddOutput("Out", "The output of feed op."); + AddComment(R"DOC(Feed data from global feed variable)DOC"); } }; diff --git a/paddle/operators/fetch_op.cc b/paddle/operators/fetch_op.cc new file mode 100644 index 0000000000..68e8d26dbe --- /dev/null +++ b/paddle/operators/fetch_op.cc @@ -0,0 +1,68 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/fetch_op.h" + +namespace paddle { +namespace operators { + +class FetchOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(framework::InferShapeContextBase* ctx) const override { + typedef std::vector FetchOutputs; + PADDLE_ENFORCE(ctx->HasInput("Input"), "Input should be not null."); + int col = ctx->Attrs().Get("col"); + framework::Variable* g_fetch_variable = + framework::GetScope()->FindVar("fetch_value"); + + FetchOutputs* tensors = g_fetch_variable->GetMutable(); + if (tensors->size() < col) { + tensors->resize(col); + } + + auto input_dim = ctx->GetInputDim("Input"); + framework::Tensor tmp; + tmp.Resize(input_dim); + (*tensors)[col].Resize(input_dim); + // need to handle LodTensor later + } + + framework::DataType IndicateDataType( + const framework::ExecutionContext& ctx) const override { + return static_cast(Attr("data_type")); + } +}; + +class FetchOpMaker : public framework::OpProtoAndCheckerMaker { + public: + FetchOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddAttr("data_type", "output data type") + .SetDefault(framework::DataType::FP32); + AddAttr("col", "The col in global fetch variable").SetDefault(0); + AddAttr>("dims", "The dimension of fetch tensor."); + AddInput("Input", "The output of fetch op."); + AddComment(R"DOC(Fetch data to global fetch variable)DOC"); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP_WITHOUT_GRADIENT(fetch, ops::FetchOp, ops::FetchOpMaker); +REGISTER_OP_CPU_KERNEL(fetch, ops::FetchKernel); diff --git a/paddle/operators/fetch_op.cu b/paddle/operators/fetch_op.cu new file mode 100644 index 0000000000..2e24d3a8ad --- /dev/null +++ b/paddle/operators/fetch_op.cu @@ -0,0 +1,18 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/feed_op.h" + +namespace ops = paddle::operators; +REGISTER_OP_GPU_KERNEL(fetch, ops::FetchKernel); diff --git a/paddle/operators/fetch_op.h b/paddle/operators/fetch_op.h new file mode 100644 index 0000000000..95e7986a22 --- /dev/null +++ b/paddle/operators/fetch_op.h @@ -0,0 +1,40 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once +#include "paddle/framework/eigen.h" +#include "paddle/framework/op_registry.h" + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; + +template +class FetchKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + typedef std::vector FetchOutputs; + Tensor* input = ctx.Output("Input"); + int col = ctx.template Attr("col"); + framework::Variable* g_fetch_variable = + framework::GetScope()->FindVar("fetch_value"); + FetchOutputs tensors = g_fetch_variable->Get(); + tensors[col].mutable_data(platform::CPUPlace()); + tensors[col].CopyFrom(*input, platform::CPUPlace()); + } +}; + +} // namespace operators +} // namespace paddle From 154a6ed29c13eeab9c4f785cf5ca1520ba8ca999 Mon Sep 17 00:00:00 2001 From: Kavya Srinet Date: Thu, 5 Oct 2017 17:52:10 -0700 Subject: [PATCH 229/342] Implementing tanhshrink operator --- paddle/operators/activation_op.cc | 14 +++++++++++++ paddle/operators/activation_op.h | 21 ++++++++++++++++++- .../v2/framework/tests/test_activation_op.py | 15 +++++++++++++ 3 files changed, 49 insertions(+), 1 deletion(-) diff --git a/paddle/operators/activation_op.cc b/paddle/operators/activation_op.cc index 5f2ecc2673..66e9d2c401 100644 --- a/paddle/operators/activation_op.cc +++ b/paddle/operators/activation_op.cc @@ -97,6 +97,17 @@ class TanhOpMaker : public framework::OpProtoAndCheckerMaker { } }; +class TanhShrinkOpMaker : public framework::OpProtoAndCheckerMaker { + public: + TanhShrinkOpMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "Input of TanhShrink operator"); + AddOutput("Y", "Output of TanhShrink operator"); + AddComment("TanhShrink activation operator, tanhshrink(x) = x - tanh(x)"); + } +}; + class SqrtOpMaker : public framework::OpProtoAndCheckerMaker { public: SqrtOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) @@ -235,6 +246,9 @@ REGISTER_OP(relu, ops::ActivationOp, ops::ReluOpMaker, relu_grad, REGISTER_OP(tanh, ops::ActivationOp, ops::TanhOpMaker, tanh_grad, ops::ActivationOpGrad); +REGISTER_OP(tanh_shrink, ops::ActivationOp, ops::TanhShrinkOpMaker, + tanh_shrink_grad, ops::ActivationOpGrad); + REGISTER_OP(sqrt, ops::ActivationOp, ops::SqrtOpMaker, sqrt_grad, ops::ActivationOpGrad); diff --git a/paddle/operators/activation_op.h b/paddle/operators/activation_op.h index dae66cc77d..2450601742 100644 --- a/paddle/operators/activation_op.h +++ b/paddle/operators/activation_op.h @@ -146,6 +146,24 @@ struct TanhGradFunctor : public BaseActivationFunctor { } }; +// tanhshrink(x) = x - tanh(x) +// where tanh(x) = (exp(x) - exp(-x)) / (exp(x) + exp(-x)) +template +struct TanhShrinkFunctor : public BaseActivationFunctor { + template + void operator()(Device d, X x, Y y) const { + y.device(d) = x - x.tanh(); + } +}; + +template +struct TanhShrinkGradFunctor : public BaseActivationFunctor { + template + void operator()(Device d, X x, Y y, dY dy, dX dx) const { + dx.device(d) = dy * (x.tanh() * x.tanh()); + } +}; + // sqrt(x) = x^(1/2) template struct SqrtFunctor : public BaseActivationFunctor { @@ -407,4 +425,5 @@ struct STanhGradFunctor : public BaseActivationFunctor { __macro(pow, PowFunctor, PowGradFunctor); \ __macro(stanh, STanhFunctor, STanhGradFunctor); \ __macro(softsign, SoftsignFunctor, SoftsignGradFunctor); \ - __macro(leaky_relu, LeakyReluFunctor, LeakyReluGradFunctor) + __macro(leaky_relu, LeakyReluFunctor, LeakyReluGradFunctor); \ + __macro(tanh_shrink, TanhShrinkFunctor, TanhShrinkGradFunctor) diff --git a/python/paddle/v2/framework/tests/test_activation_op.py b/python/paddle/v2/framework/tests/test_activation_op.py index f232996a55..701e1a1aee 100644 --- a/python/paddle/v2/framework/tests/test_activation_op.py +++ b/python/paddle/v2/framework/tests/test_activation_op.py @@ -48,6 +48,21 @@ class TestTanh(OpTest): self.check_grad(['X'], 'Y', max_relative_error=0.007) +class TestTanhShrink(OpTest): + def setUp(self): + self.op_type = "tanh_shrink" + self.inputs = { + 'X': np.random.uniform(0.1, 1, [10, 17]).astype("float32") + } + self.outputs = {'Y': self.inputs['X'] - np.tanh(self.inputs['X'])} + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['X'], 'Y', max_relative_error=0.008) + + class TestSqrt(OpTest): def setUp(self): self.op_type = "sqrt" From f52cdaa0cee682ddc3588286af42d960141596f0 Mon Sep 17 00:00:00 2001 From: Kavya Srinet Date: Thu, 5 Oct 2017 19:12:27 -0700 Subject: [PATCH 230/342] Updated RMSProp to have learning rate as an input and work with GPU --- paddle/operators/rmsprop_op.h | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/paddle/operators/rmsprop_op.h b/paddle/operators/rmsprop_op.h index 9c04276ec6..7bf2129010 100644 --- a/paddle/operators/rmsprop_op.h +++ b/paddle/operators/rmsprop_op.h @@ -32,6 +32,8 @@ class RmspropOpKernel : public framework::OpKernel { auto* moment_out = ctx.Output("MomentOut"); auto* mean_square_out = ctx.Output("MeanSquareOut"); + auto grad = ctx.Input("Grad"); + param_out->mutable_data(ctx.GetPlace()); moment_out->mutable_data(ctx.GetPlace()); mean_square_out->mutable_data(ctx.GetPlace()); @@ -42,8 +44,8 @@ class RmspropOpKernel : public framework::OpKernel { auto p = EigenVector::Flatten(*ctx.Input("Param")); auto ms = EigenVector::Flatten(*ctx.Input("MeanSquare")); - float lr = ctx.Input("LearningRate")->data()[0]; - auto g = EigenVector::Flatten(*ctx.Input("Grad")); + auto lr = EigenVector::Flatten(*ctx.Input("LearningRate")); + auto g = EigenVector::Flatten(*grad); auto mom = EigenVector::Flatten(*ctx.Input("Moment")); auto p_out = EigenVector::Flatten(*param_out); @@ -51,8 +53,12 @@ class RmspropOpKernel : public framework::OpKernel { auto ms_out = EigenVector::Flatten(*mean_square_out); auto place = ctx.GetEigenDevice(); + Eigen::DSizes grad_dsize(grad->numel()); + ms_out.device(place) = rho * ms + (1 - rho) * g * g; - mom_out.device(place) = momentum * mom + lr * g / (ms_out + epsilon).sqrt(); + mom_out.device(place) = + momentum * mom + + lr.broadcast(grad_dsize) * g / (ms_out + epsilon).sqrt(); p_out.device(place) = p - mom_out; } }; From 48b080db9fcc4f34535c98878112e6633d6d8d7d Mon Sep 17 00:00:00 2001 From: qijun Date: Thu, 5 Oct 2017 20:48:04 -0700 Subject: [PATCH 231/342] ensure global BuddyAllocator is initialized before global Scope --- paddle/framework/executor_test.cc | 94 +++++++++++++++++-------------- paddle/operators/feed_op.cc | 4 +- paddle/operators/feed_op.h | 2 +- paddle/operators/fetch_op.cc | 7 ++- paddle/operators/fetch_op.h | 8 +-- 5 files changed, 62 insertions(+), 53 deletions(-) diff --git a/paddle/framework/executor_test.cc b/paddle/framework/executor_test.cc index 980f5f579c..d3ea18d154 100644 --- a/paddle/framework/executor_test.cc +++ b/paddle/framework/executor_test.cc @@ -13,8 +13,6 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/framework/executor.h" -#include // for unique_ptr -#include // for call_once #include #include "gtest/gtest.h" #include "paddle/framework/attribute.h" @@ -34,9 +32,8 @@ using namespace paddle::framework; typedef paddle::framework::BlockDesc proto_block; typedef paddle::framework::OpDesc proto_op; -void add_gaussian_random_op(string var_name, proto_block* block) { - std::vector dim{2, 3}; - +void add_gaussian_random_op(string var_name, std::vector& dim, + proto_block* block) { // insert variable auto a = block->add_vars(); a->set_name(var_name); @@ -60,9 +57,8 @@ void add_gaussian_random_op(string var_name, proto_block* block) { Out->add_arguments(var_name); } -void add_feed_op(string var_name, int index, proto_block* block) { - std::vector dim{3}; - +void add_feed_op(string var_name, std::vector& dim, int index, + proto_block* block) { // insert variable auto a = block->add_vars(); a->set_name(var_name); @@ -95,9 +91,8 @@ void add_feed_op(string var_name, int index, proto_block* block) { Out->add_arguments(var_name); } -void add_fetch_op(string var_name, int index, proto_block* block) { - std::vector dim{3}; - +void add_fetch_op(string var_name, std::vector& dim, int index, + proto_block* block) { // insert variable auto a = block->add_vars(); a->set_name(var_name); @@ -138,20 +133,11 @@ void set_feed_variable(const std::vector>& inputs) { Variable* g_feed_value = GetScope()->FindVar("feed_value"); FeedInputs& feed_inputs = *(g_feed_value->GetMutable()); auto size = inputs.size(); - - std::call_once(set_variable_flag, [&]() { - feed_inputs.reserve(size); - for (size_t i = 0; i < size; i++) { - paddle::framework::Tensor tmp; - tmp.mutable_data(make_ddim({static_cast(inputs[i].size())}), - CPUPlace()); - feed_inputs.push_back(tmp); - } - }); - + feed_inputs.resize(size); for (size_t i = 0; i < size; i++) { - memcpy(feed_inputs[i].data(), inputs[i].data(), - inputs[i].size() * sizeof(T)); + T* dst = feed_inputs[i].mutable_data( + make_ddim({static_cast(inputs[i].size())}), CPUPlace()); + memcpy(dst, inputs[i].data(), inputs[i].size() * sizeof(T)); } } @@ -160,19 +146,17 @@ std::vector> get_fetch_variable() { typedef std::vector FetchOutputs; Variable* g_fetch_value = GetScope()->FindVar("fetch_value"); FetchOutputs& fetch_outputs = *(g_fetch_value->GetMutable()); - auto size = fetch_outputs.size(); + auto size = fetch_outputs.size(); std::vector> result; result.reserve(size); - for (size_t i = 0; i < size; i++) { std::vector tmp; - tmp.reserve(fetch_outputs[i].numel()); + tmp.resize(fetch_outputs[i].numel()); memcpy(tmp.data(), fetch_outputs[i].data(), fetch_outputs[i].numel() * sizeof(T)); result.push_back(tmp); } - return result; } @@ -183,8 +167,9 @@ class ExecutorTesterRandom : public ::testing::Test { root_block->set_idx(0); root_block->set_parent_idx(-1); - add_gaussian_random_op("a", root_block); - add_gaussian_random_op("b", root_block); + std::vector dim{2, 3}; + add_gaussian_random_op("a", dim, root_block); + add_gaussian_random_op("b", dim, root_block); auto c = root_block->add_vars(); c->set_name("c"); @@ -203,12 +188,11 @@ class ExecutorTesterRandom : public ::testing::Test { Out->set_parameter("Out"); Out->add_arguments("c"); - scope_ = GetScope(); + add_fetch_op("c", dim, 0, root_block); } protected: ProgramDesc pdesc_; - Scope* scope_; }; class ExecutorTesterFeed : public ::testing::Test { @@ -218,8 +202,10 @@ class ExecutorTesterFeed : public ::testing::Test { root_block->set_idx(0); root_block->set_parent_idx(-1); - add_feed_op("a", 0, root_block); - add_feed_op("b", 1, root_block); + std::vector dim{6}; + + add_feed_op("a", dim, 0, root_block); + add_feed_op("b", dim, 1, root_block); auto c = root_block->add_vars(); c->set_name("c"); @@ -238,10 +224,10 @@ class ExecutorTesterFeed : public ::testing::Test { Out->set_parameter("Out"); Out->add_arguments("c"); - add_fetch_op("c", 0, root_block); + add_fetch_op("c", dim, 0, root_block); - std::vector vec1 = {1.0, 2.0, 3.0}; - std::vector vec2 = {4.0, 5.0, 6.0}; + std::vector vec1 = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0}; + std::vector vec2 = {4.0, 5.0, 6.0, 7.0, 8.0, 9.0}; inputs_.push_back(vec1); inputs_.push_back(vec2); } @@ -253,12 +239,24 @@ class ExecutorTesterFeed : public ::testing::Test { TEST_F(ExecutorTesterRandom, CPU) { std::vector places; - CPUPlace cpu_place1, cpu_place2; - places.push_back(cpu_place1); - places.push_back(cpu_place2); + CPUPlace cpu_place; + places.push_back(cpu_place); + + // We have a global Scope and BuddyAllocator, and we must ensure + // global BuddyAllocator is initialized before global Scope. Thus, + // global Scope will deconstruct before BuddyAllocator. Otherwise, + // "pointer being freed was not allocated" error will appear. + paddle::memory::Used(cpu_place); Executor* executor = new Executor(places); - executor->Run(pdesc_, scope_); + executor->Run(pdesc_, GetScope()); + std::vector> result = get_fetch_variable(); + for (auto& vec : result) { + for (auto& num : vec) { + std::cout << num << " "; + } + std::cout << std::endl; + } delete executor; } @@ -267,6 +265,12 @@ TEST_F(ExecutorTesterFeed, CPU) { CPUPlace cpu_place; places.push_back(cpu_place); + // We have a global Scope and BuddyAllocator, and we must ensure + // global BuddyAllocator is initialized before global Scope. Thus, + // global Scope will deconstruct before BuddyAllocator. Otherwise, + // "pointer being freed was not allocated" error will appear. + paddle::memory::Used(cpu_place); + Executor* executor = new Executor(places); // 3 mini-batch @@ -293,8 +297,10 @@ TEST_F(ExecutorTesterRandom, GPU) { GPUPlace gpu_place(0); places.push_back(gpu_place); + paddle::memory::Used(gpu_place); + Executor* executor = new Executor(places); - executor->Run(pdesc_, scope_); + executor->Run(pdesc_, GetScope()); delete executor; } @@ -303,11 +309,13 @@ TEST_F(ExecutorTesterFeed, GPU) { GPUPlace gpu_place(0); places.push_back(gpu_place); + paddle::memory::Used(gpu_place); + Executor* executor = new Executor(places); // need to set feed variable before Executor::Run set_feed_variable(inputs_); - executor->Run(pdesc_, scope_); + executor->Run(pdesc_, GetScope()); delete executor; } diff --git a/paddle/operators/feed_op.cc b/paddle/operators/feed_op.cc index a61855cb99..d40db3ff2e 100644 --- a/paddle/operators/feed_op.cc +++ b/paddle/operators/feed_op.cc @@ -29,11 +29,11 @@ class FeedOp : public framework::OperatorWithKernel { framework::Variable* g_feed_variable = framework::GetScope()->FindVar("feed_value"); - FeedInputs tensors = g_feed_variable->Get(); + const FeedInputs& tensors = g_feed_variable->Get(); auto in_dim = tensors[col].dims(); ctx->SetOutputDim("Out", in_dim); - // need to handle LodTensor later + // TODO(qijun) need to handle LodTensor later } framework::DataType IndicateDataType( diff --git a/paddle/operators/feed_op.h b/paddle/operators/feed_op.h index 57781e205f..cf93b6f434 100644 --- a/paddle/operators/feed_op.h +++ b/paddle/operators/feed_op.h @@ -31,7 +31,7 @@ class FeedKernel : public framework::OpKernel { framework::Variable* g_feed_variable = framework::GetScope()->FindVar("feed_value"); int col = ctx.template Attr("col"); - FeedInputs tensors = g_feed_variable->Get(); + const FeedInputs& tensors = g_feed_variable->Get(); out->CopyFrom(tensors[col], ctx.GetPlace()); } }; diff --git a/paddle/operators/fetch_op.cc b/paddle/operators/fetch_op.cc index 68e8d26dbe..a885deacc8 100644 --- a/paddle/operators/fetch_op.cc +++ b/paddle/operators/fetch_op.cc @@ -30,15 +30,16 @@ class FetchOp : public framework::OperatorWithKernel { framework::GetScope()->FindVar("fetch_value"); FetchOutputs* tensors = g_fetch_variable->GetMutable(); - if (tensors->size() < col) { - tensors->resize(col); + if (tensors->size() < static_cast(col + 1)) { + tensors->resize(col + 1); } auto input_dim = ctx->GetInputDim("Input"); framework::Tensor tmp; tmp.Resize(input_dim); (*tensors)[col].Resize(input_dim); - // need to handle LodTensor later + + // TODO(qijun) need to handle LodTensor later } framework::DataType IndicateDataType( diff --git a/paddle/operators/fetch_op.h b/paddle/operators/fetch_op.h index 95e7986a22..e8d5e3a9c0 100644 --- a/paddle/operators/fetch_op.h +++ b/paddle/operators/fetch_op.h @@ -26,13 +26,13 @@ class FetchKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { typedef std::vector FetchOutputs; - Tensor* input = ctx.Output("Input"); + const Tensor* input = ctx.Input("Input"); int col = ctx.template Attr("col"); framework::Variable* g_fetch_variable = framework::GetScope()->FindVar("fetch_value"); - FetchOutputs tensors = g_fetch_variable->Get(); - tensors[col].mutable_data(platform::CPUPlace()); - tensors[col].CopyFrom(*input, platform::CPUPlace()); + FetchOutputs* tensors = g_fetch_variable->GetMutable(); + (*tensors)[col].mutable_data(platform::CPUPlace()); + (*tensors)[col].CopyFrom(*input, platform::CPUPlace()); } }; From bbceb72398f23902fae2f011c2b6c7f2a8b7b8e3 Mon Sep 17 00:00:00 2001 From: qijun Date: Thu, 5 Oct 2017 20:54:16 -0700 Subject: [PATCH 232/342] refine some codes --- paddle/framework/executor.cc | 10 ---------- paddle/framework/executor_test.cc | 2 ++ paddle/framework/scope.cc | 9 ++------- paddle/operators/feed_op.cc | 2 +- paddle/operators/fetch_op.cc | 2 +- 5 files changed, 6 insertions(+), 19 deletions(-) diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc index 51ddb7e58e..ee0df039ac 100644 --- a/paddle/framework/executor.cc +++ b/paddle/framework/executor.cc @@ -74,16 +74,6 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope) { for (auto& device_context : device_contexts_) { device_context->Wait(); } - // // print tensor value - // for (auto& var : block.vars()) { - // std::cout << var.name() << std::endl; - // auto v = scope->FindVar(var.name()); - // const LoDTensor& t = v->Get(); - // for (int i = 0; i < t.numel(); ++i) { - // std::cout << t.data()[i] << " "; - // } - // std::cout << std::endl; - // } } } // namespace framework diff --git a/paddle/framework/executor_test.cc b/paddle/framework/executor_test.cc index d3ea18d154..5e327cc893 100644 --- a/paddle/framework/executor_test.cc +++ b/paddle/framework/executor_test.cc @@ -130,6 +130,7 @@ std::once_flag set_variable_flag; template void set_feed_variable(const std::vector>& inputs) { typedef std::vector FeedInputs; + // Tensors in feed value variable will only be in CPUPlace Variable* g_feed_value = GetScope()->FindVar("feed_value"); FeedInputs& feed_inputs = *(g_feed_value->GetMutable()); auto size = inputs.size(); @@ -144,6 +145,7 @@ void set_feed_variable(const std::vector>& inputs) { template std::vector> get_fetch_variable() { typedef std::vector FetchOutputs; + // Tensors in fetch value variable will only be in CPUPlace Variable* g_fetch_value = GetScope()->FindVar("fetch_value"); FetchOutputs& fetch_outputs = *(g_fetch_value->GetMutable()); diff --git a/paddle/framework/scope.cc b/paddle/framework/scope.cc index 2c416570cf..b6a9d7fbc2 100644 --- a/paddle/framework/scope.cc +++ b/paddle/framework/scope.cc @@ -66,15 +66,10 @@ void Scope::DropKids() { std::once_flag feed_variable_flag; -template -std::unique_ptr make_unique(Args&&... args) { - return std::unique_ptr(new T(std::forward(args)...)); -} - framework::Scope* GetScope() { - static std::unique_ptr g_scope = - make_unique(); + static std::unique_ptr g_scope{nullptr}; std::call_once(feed_variable_flag, [&]() { + g_scope.reset(new framework::Scope()); g_scope->NewVar("feed_value"); g_scope->NewVar("fetch_value"); }); diff --git a/paddle/operators/feed_op.cc b/paddle/operators/feed_op.cc index d40db3ff2e..f2c498e2e2 100644 --- a/paddle/operators/feed_op.cc +++ b/paddle/operators/feed_op.cc @@ -33,7 +33,7 @@ class FeedOp : public framework::OperatorWithKernel { auto in_dim = tensors[col].dims(); ctx->SetOutputDim("Out", in_dim); - // TODO(qijun) need to handle LodTensor later + // TODO(qijun): need to handle LodTensor later } framework::DataType IndicateDataType( diff --git a/paddle/operators/fetch_op.cc b/paddle/operators/fetch_op.cc index a885deacc8..f6882cbd03 100644 --- a/paddle/operators/fetch_op.cc +++ b/paddle/operators/fetch_op.cc @@ -39,7 +39,7 @@ class FetchOp : public framework::OperatorWithKernel { tmp.Resize(input_dim); (*tensors)[col].Resize(input_dim); - // TODO(qijun) need to handle LodTensor later + // TODO(qijun): need to handle LodTensor later } framework::DataType IndicateDataType( From bddaef6218b7e32bb510d348396d81c96ba29a3a Mon Sep 17 00:00:00 2001 From: Abhinav Arora Date: Thu, 5 Oct 2017 21:03:49 -0700 Subject: [PATCH 233/342] Correcting grammatical mistakes in the python_api doc --- doc/design/python_api.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/doc/design/python_api.md b/doc/design/python_api.md index 5c68354274..6213da65c8 100644 --- a/doc/design/python_api.md +++ b/doc/design/python_api.md @@ -15,9 +15,9 @@ Please be aware that these Python classes need to maintain some construction-tim ### Program -A `ProgramDesc` describes a [DL program](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/program.md), which is composed of an array of `BlockDesc`s. A `BlockDesc` refers to its parent block by its index in the array. For example, operators in the step block of an RNN operator needs to be able to access variables in its ancessor blocks. +A `ProgramDesc` describes a [DL program](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/program.md), which is composed of an array of `BlockDesc`s. The `BlockDesc`s in a `ProgramDesc` can have a tree-like hierarchical structure. However, the `ProgramDesc` onlys stores a flattened array of `BlockDesc`s. A `BlockDesc` refers to its parent block by its index in the array. For example, operators in the step block of an RNN operator need to be able to access variables in its ancestor blocks. -Whenever we create a block, we need set its parent block to the current block, so the Python class `Program` needs to maintain a data member `current_block`. +Whenever we create a block, we need to set its parent block to the current block, hence the Python class `Program` needs to maintain a data member `current_block`. ```python class Program(objects): @@ -81,13 +81,13 @@ class Block(objects): self.ops.prepend(Operator(self, ...)) ``` -`create_parameter` is necessary because parameters are global variables, those defined in the global block, but can be created in some sub-blocks, e.g., an FC layer in the step block of an RNN operator. +`create_parameter` is necessary because parameters are global variables, defined in the global block, but can be created in some sub-blocks. For example, an FC layer in the step block of an RNN operator. -`prepand_operator` is necessary because the constructor of `Parameter` needs to create the initialize (or load) operator of the parameter, and would like to put it in the *preamble* of the global block. +`prepend_operator` is necessary because the constructor of `Parameter` needs to create the initialize (or load) operator of the parameter, and would like to put it in the *preamble* of the global block. ### Operator -The `Operator` class fills in the `OpDesc` message and calls the C++ function `InferShape` to infer output shape from input shape. +The `Operator` class fills in the `OpDesc` message and calls the C++ function `InferShape` to infer the output shapes from the input shapes. ```python class Operator(object): @@ -105,7 +105,7 @@ class Operator(object): return self.proto.type() ``` -`Operator` creates the `OpDesc` message in C++ space, so could it call the `InferShape` function, which is in C++. +`Operator` creates the `OpDesc` message in C++ space, so that it can call the `InferShape` function, which is in C++. ### Variable @@ -128,7 +128,7 @@ class Variable(object): self.writer = None ``` -Please be aware of `self.writer`, that tracks operator who creates the variable. It possible that there are more than one operators who write a variable, but in Python space, each writes to a variable is represented by a Variable class. This is guaranteed by the fact that **`core.NewVarDesc` must NOT create a new `VarDesc` message if its name already exists in the specified block**. +Please be aware of `self.writer`, that tracks operator who creates the variable. It possible that there are more than one operators who write a variable, but in Python space, each write to a variable is represented by a Variable class. This is guaranteed by the fact that **`core.NewVarDesc` must NOT create a new `VarDesc` message if its name already exists in the specified block**. ### Parameter @@ -155,7 +155,7 @@ class Parameter(Variable): initialize_op_attrs) ``` -When users create a parameter, s/he can call +When users create a parameter, they can call ```python program.create_parameter( From 78f4c803f370f461f9e62e905b7870a65d05b55c Mon Sep 17 00:00:00 2001 From: Kexin Zhao Date: Thu, 5 Oct 2017 21:47:25 -0700 Subject: [PATCH 234/342] change learning rate and fix format --- paddle/operators/adagrad_op.cc | 67 ++++++++++--------- paddle/operators/adagrad_op.h | 43 ++++++------ .../v2/framework/tests/test_adagrad_op.py | 48 +++++++++++-- 3 files changed, 97 insertions(+), 61 deletions(-) diff --git a/paddle/operators/adagrad_op.cc b/paddle/operators/adagrad_op.cc index 56a5fbcb86..ea2ff3c503 100644 --- a/paddle/operators/adagrad_op.cc +++ b/paddle/operators/adagrad_op.cc @@ -23,33 +23,33 @@ class AdagradOp : public framework::OperatorWithKernel { protected: void InferShape(framework::InferShapeContextBase *ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("param"), - "Input(param) of AdagradOp should not be null."); - PADDLE_ENFORCE(ctx->HasInput("grad"), - "Input(grad) of AdagradOp should not be null."); - PADDLE_ENFORCE(ctx->HasInput("moment"), - "Input(moment) of AdagradOp should not be null."); - PADDLE_ENFORCE(ctx->HasInput("learning_rate"), - "Input(learning_rate) of AdagradOp should not be null."); - - PADDLE_ENFORCE(ctx->HasOutput("param_out"), - "Output(param_out) of AdagradOp should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("moment_out"), - "Output(moment_out) of AdagradOp should not be null."); - - auto lr_dims = ctx->GetInputDim("learning_rate"); + PADDLE_ENFORCE(ctx->HasInput("Param"), + "Input(Param) of AdagradOp should not be null."); + PADDLE_ENFORCE(ctx->HasInput("Grad"), + "Input(Grad) of AdagradOp should not be null."); + PADDLE_ENFORCE(ctx->HasInput("Moment"), + "Input(Moment) of AdagradOp should not be null."); + PADDLE_ENFORCE(ctx->HasInput("LearningRate"), + "Input(LearningRate) of AdagradOp should not be null."); + + PADDLE_ENFORCE(ctx->HasOutput("ParamOut"), + "Output(ParamOut) of AdagradOp should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("MomentOut"), + "Output(MomentOut) of AdagradOp should not be null."); + + auto lr_dims = ctx->GetInputDim("LearningRate"); PADDLE_ENFORCE_EQ(framework::product(lr_dims), 1, - "learning_rate should have one element"); - auto param_dim = ctx->GetInputDim("param"); + "LearningRate should have one element"); + auto param_dims = ctx->GetInputDim("Param"); PADDLE_ENFORCE_EQ( - param_dim, ctx->GetInputDim("grad"), - "Param and grad input of AdagradOp should have the same dimension."); + param_dims, ctx->GetInputDim("Grad"), + "Param and Grad input of AdagradOp should have the same dimension."); PADDLE_ENFORCE_EQ( - param_dim, ctx->GetInputDim("moment"), - "Param and moment input of AdagradOp should have the same dimension."); + param_dims, ctx->GetInputDim("Moment"), + "Param and Moment input of AdagradOp should have the same dimension."); - ctx->SetOutputDim("param_out", param_dim); - ctx->SetOutputDim("moment_out", param_dim); + ctx->SetOutputDim("ParamOut", param_dims); + ctx->SetOutputDim("MomentOut", param_dims); } }; @@ -58,15 +58,18 @@ class AdagradOpMaker : public framework::OpProtoAndCheckerMaker { AdagradOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("param", "Input parameter"); - AddInput("grad", "Input gradient"); - AddInput("moment", "Second moment"); - AddInput("learning_rate", "learning rate of adagrad"); - - AddOutput("param_out", "Output parameter"); - AddOutput("moment_out", "Output second moment"); - - AddAttr("epsilon", "Constant for numerical stability"); + AddInput("Param", "(Tensor) Input parameter"); + AddInput("Grad", "(Tensor) Input gradient"); + AddInput("Moment", "(Tensor) Second moment"); + AddInput("LearningRate", "(Tensor) Learning rate"); + + AddOutput("ParamOut", "(Tensor) Output parameter"); + AddOutput("MomentOut", "(Tensor) Output second moment"); + + AddAttr("epsilon", + "(float, default 1.0e-6) " + "Constant for numerical stability") + .SetDefault(1.0e-6f); AddComment(R"DOC( Adaptive Gradient Algorithm (Adagrad). diff --git a/paddle/operators/adagrad_op.h b/paddle/operators/adagrad_op.h index 73833d4a3f..c5d8f751d3 100644 --- a/paddle/operators/adagrad_op.h +++ b/paddle/operators/adagrad_op.h @@ -19,40 +19,35 @@ limitations under the License. */ namespace paddle { namespace operators { -using Tensor = framework::Tensor; - -template -using EigenScalar = framework::EigenScalar; - -template -using EigenVector = framework::EigenVector; - template class AdagradOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { - auto param_out = ctx.Output("param_out"); - auto moment_out = ctx.Output("moment_out"); + auto param_out_tensor = ctx.Output("ParamOut"); + auto moment_out_tensor = ctx.Output("MomentOut"); - param_out->mutable_data(ctx.GetPlace()); - moment_out->mutable_data(ctx.GetPlace()); + param_out_tensor->mutable_data(ctx.GetPlace()); + moment_out_tensor->mutable_data(ctx.GetPlace()); - float lr = ctx.Input("learning_rate")->data()[0]; float epsilon = ctx.Attr("epsilon"); - auto p = EigenVector::Flatten(*ctx.Input("param")); - auto g = EigenVector::Flatten(*ctx.Input("grad")); - auto m = EigenVector::Flatten(*ctx.Input("moment")); - auto lr = EigenScalar::From(*ctx.Input("learning_rate")); - - auto p_out = EigenVector::Flatten(*param_out); - auto m_out = EigenVector::Flatten(*moment_out); + auto param = framework::EigenVector::Flatten( + *ctx.Input("Param")); + auto grad = framework::EigenVector::Flatten( + *ctx.Input("Grad")); + auto moment = framework::EigenVector::Flatten( + *ctx.Input("Moment")); + auto lr = framework::EigenVector::Flatten( + *ctx.Input("LearningRate")); + + auto param_out = framework::EigenVector::Flatten(*param_out_tensor); + auto moment_out = framework::EigenVector::Flatten(*moment_out_tensor); auto place = ctx.GetEigenDevice(); - m_out.device(place) = m + g * g; - p_out.device(place) = p - lr * g / (m_out.sqrt() + epsilon); + moment_out.device(place) = moment + grad * grad; + Eigen::DSizes m_dsize(moment_out_tensor->numel()); + param_out.device(place) = + param - lr.broadcast(m_dsize) * grad / (moment_out.sqrt() + epsilon); } }; diff --git a/python/paddle/v2/framework/tests/test_adagrad_op.py b/python/paddle/v2/framework/tests/test_adagrad_op.py index 2ee38ea37c..66bad349e5 100644 --- a/python/paddle/v2/framework/tests/test_adagrad_op.py +++ b/python/paddle/v2/framework/tests/test_adagrad_op.py @@ -3,25 +3,63 @@ import numpy as np from op_test import OpTest -class TestAdagradOp(OpTest): +class TestAdagradOp1(OpTest): + ''' Test Adagrad operator with explicit attributes + ''' + def setUp(self): self.op_type = "adagrad" param = np.random.random((123, 321)).astype("float32") grad = np.random.random((123, 321)).astype("float32") moment = np.zeros((123, 321)).astype("float32") + lr = 0.01 + epsilon = 1e-8 + + self.inputs = { + 'Param': param, + 'Grad': grad, + 'Moment': moment, + 'LearningRate': np.array([lr]).astype("float32") + } + + self.attrs = {'epsilon': epsilon} + + moment_out = moment + grad * grad + param_out = param - lr * grad / (np.sqrt(moment_out) + epsilon) + + self.outputs = {'ParamOut': param_out, 'MomentOut': moment_out} + + def test_check_output(self): + self.check_output() + + +class TestAdagradOp2(OpTest): + ''' Test Adagrad operator with default attributes + ''' - lr = np.array([0.01]).astype("float32") + def setUp(self): + self.op_type = "adagrad" + + param = np.random.random((123, 321)).astype("float32") + grad = np.random.random((123, 321)).astype("float32") + moment = np.zeros((123, 321)).astype("float32") + lr = 0.01 epsilon = 1e-6 - self.inputs = {'param': param, 'grad': grad, 'moment': moment} + self.inputs = { + 'Param': param, + 'Grad': grad, + 'Moment': moment, + 'LearningRate': np.array([lr]).astype("float32") + } - self.attrs = {'learning_rate': learning_rate, 'epsilon': epsilon} + self.attrs = {'epsilon': epsilon} moment_out = moment + grad * grad param_out = param - lr * grad / (np.sqrt(moment_out) + epsilon) - self.outputs = {'param_out': param_out, 'moment_out': moment_out} + self.outputs = {'ParamOut': param_out, 'MomentOut': moment_out} def test_check_output(self): self.check_output() From 583c94e4e641297820910d15fbc604cd9281834b Mon Sep 17 00:00:00 2001 From: zchen0211 Date: Thu, 5 Oct 2017 23:35:48 -0700 Subject: [PATCH 235/342] new gan --- doc/design/gan_api.md | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/doc/design/gan_api.md b/doc/design/gan_api.md index e0e3440d49..b7c0fab201 100644 --- a/doc/design/gan_api.md +++ b/doc/design/gan_api.md @@ -6,6 +6,17 @@ It contains several important machine learning concepts, including building and In our GAN design, we wrap it as a user-friendly easily customized python API to design different models. We take the conditional DC-GAN as an example due to its good performance on image generation. +| important building blocks | People in Charge | +|---------------------------|-------------------| +| convolution 2d (done) | Chengduo | +| deconv 2d (missing) | Zhuoyuan | +| batch norm (missing) | Zhuoyuan, Jiayi | +| Dependency Engine (done) | Jiayi | +| Executor (done) | Tony | +| Multi optimizer | ? | +| Optimizer with any para | ? | + +


Borrow this photo from the original DC-GAN paper. From 672f70ccba17a28af6842faede7c6349a399527b Mon Sep 17 00:00:00 2001 From: zchen0211 Date: Thu, 5 Oct 2017 23:43:12 -0700 Subject: [PATCH 236/342] gan api --- doc/design/gan_api.md | 28 +++++++++++++++++++--------- 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/doc/design/gan_api.md b/doc/design/gan_api.md index b7c0fab201..14e34a9839 100644 --- a/doc/design/gan_api.md +++ b/doc/design/gan_api.md @@ -6,15 +6,25 @@ It contains several important machine learning concepts, including building and In our GAN design, we wrap it as a user-friendly easily customized python API to design different models. We take the conditional DC-GAN as an example due to its good performance on image generation. -| important building blocks | People in Charge | -|---------------------------|-------------------| -| convolution 2d (done) | Chengduo | -| deconv 2d (missing) | Zhuoyuan | -| batch norm (missing) | Zhuoyuan, Jiayi | -| Dependency Engine (done) | Jiayi | -| Executor (done) | Tony | -| Multi optimizer | ? | -| Optimizer with any para | ? | +| important building blocks | People in Charge | Required | +|---------------------------|-------------------|----------| +| convolution 2d (done) | Chengduo | Y | +| cudnn conv 2d (missing) | Chengduo | N | +| deconv 2d (missing) | Zhuoyuan | Y | +| cudnn deconv 2d (missing) | Zhuoyuan | N | +| batch norm (missing) | Zhuoyuan, Jiayi | Y | +| cudnn batch norm (missing)| Zhuoyuan, Jiayi | N | +| max-pooling (done) | ? | Y | +| fc (done) | ? | Y | +| softmax loss (done) | ? | Y | +| reshape op (done) | ? | Y | +| Dependency Engine (done) | Jiayi | Y * | +| Python API (done) | Jiayi | Y * | +| Executor (done) | Tony | Y * | +| Multi optimizer | ? | Y * | +| Optimizer with any para | ? | Y * | +| Concat op | ? | N (Cond) | +| Repmat op | ? | N (Cond) |

From b8a5b7f9bbe9c182ac70685005ef1662fd6d118b Mon Sep 17 00:00:00 2001 From: zchen0211 Date: Thu, 5 Oct 2017 23:43:58 -0700 Subject: [PATCH 237/342] gan api --- doc/design/gan_api.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/design/gan_api.md b/doc/design/gan_api.md index 14e34a9839..d35309079f 100644 --- a/doc/design/gan_api.md +++ b/doc/design/gan_api.md @@ -10,8 +10,8 @@ In our GAN design, we wrap it as a user-friendly easily customized python API to |---------------------------|-------------------|----------| | convolution 2d (done) | Chengduo | Y | | cudnn conv 2d (missing) | Chengduo | N | -| deconv 2d (missing) | Zhuoyuan | Y | -| cudnn deconv 2d (missing) | Zhuoyuan | N | +| deconv 2d (missing) | Zhuoyuan, Zhihong | Y | +| cudnn deconv 2d (missing) | Zhuoyuan, Zhihong | N | | batch norm (missing) | Zhuoyuan, Jiayi | Y | | cudnn batch norm (missing)| Zhuoyuan, Jiayi | N | | max-pooling (done) | ? | Y | From 1f5192a27b968a7980c2eead7b6885e66f09575a Mon Sep 17 00:00:00 2001 From: qijun Date: Fri, 6 Oct 2017 11:06:59 -0700 Subject: [PATCH 238/342] fix executor gpu unittest --- paddle/framework/executor.cc | 2 +- paddle/framework/executor_test.cc | 20 +++++++++++++++----- paddle/operators/fetch_op.cu | 2 +- paddle/platform/gpu_info.cc | 3 ++- 4 files changed, 19 insertions(+), 8 deletions(-) diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc index ee0df039ac..c18ba049c8 100644 --- a/paddle/framework/executor.cc +++ b/paddle/framework/executor.cc @@ -30,7 +30,7 @@ Executor::Executor(const std::vector& places) { device_contexts_[i] = new platform::CPUDeviceContext( boost::get(places[i])); } else if (platform::is_gpu_place(places[i])) { -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA device_contexts_[i] = new platform::CUDADeviceContext( boost::get(places[i])); #else diff --git a/paddle/framework/executor_test.cc b/paddle/framework/executor_test.cc index 5e327cc893..55e209628b 100644 --- a/paddle/framework/executor_test.cc +++ b/paddle/framework/executor_test.cc @@ -293,7 +293,7 @@ TEST_F(ExecutorTesterFeed, CPU) { delete executor; } -#ifdef PADDLE_WITH_GPU +#ifdef PADDLE_WITH_CUDA TEST_F(ExecutorTesterRandom, GPU) { std::vector places; GPUPlace gpu_place(0); @@ -315,10 +315,20 @@ TEST_F(ExecutorTesterFeed, GPU) { Executor* executor = new Executor(places); - // need to set feed variable before Executor::Run - set_feed_variable(inputs_); - executor->Run(pdesc_, GetScope()); - + // 3 mini-batch + for (int i = 0; i < 3; i++) { + // need to set feed variable before Executor::Run + std::cout << "start mini-batch " << i << std::endl; + set_feed_variable(inputs_); + executor->Run(pdesc_, GetScope()); + std::vector> result = get_fetch_variable(); + for (auto& vec : result) { + for (auto& num : vec) { + std::cout << num << " "; + } + std::cout << std::endl; + } + } delete executor; } #endif diff --git a/paddle/operators/fetch_op.cu b/paddle/operators/fetch_op.cu index 2e24d3a8ad..ca39d24c79 100644 --- a/paddle/operators/fetch_op.cu +++ b/paddle/operators/fetch_op.cu @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/feed_op.h" +#include "paddle/operators/fetch_op.h" namespace ops = paddle::operators; REGISTER_OP_GPU_KERNEL(fetch, ops::FetchKernel); diff --git a/paddle/platform/gpu_info.cc b/paddle/platform/gpu_info.cc index 486dcd623a..aa76bb209d 100644 --- a/paddle/platform/gpu_info.cc +++ b/paddle/platform/gpu_info.cc @@ -43,7 +43,8 @@ int GetCurrentDeviceId() { } void SetDeviceId(int id) { - PADDLE_ENFORCE(id < GetDeviceCount(), "id must less than GPU count"); + // TODO(qijun): find a better way to cache the cuda device count + PADDLE_ENFORCE(id < GetCUDADeviceCount(), "id must less than GPU count"); PADDLE_ENFORCE(cudaSetDevice(id), "cudaSetDevice failed in paddle::platform::SetDeviceId"); } From 8c7ee7a5cdc6e959ad55a6a24d3ffe4bc3f37144 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Fri, 6 Oct 2017 11:41:59 -0700 Subject: [PATCH 239/342] optimize code --- paddle/framework/block_desc.cc | 3 +-- paddle/framework/shape_inference.h | 3 +++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/paddle/framework/block_desc.cc b/paddle/framework/block_desc.cc index 670533a3fe..01f50e1393 100644 --- a/paddle/framework/block_desc.cc +++ b/paddle/framework/block_desc.cc @@ -35,8 +35,7 @@ VarDescBind *BlockDescBind::Var(const std::string &name) const { } bool BlockDescBind::HasVar(const std::string &name) const { - auto it = vars_.find(name); - return it != vars_.end(); + return vars_.find(name) != vars_.end(); } std::vector BlockDescBind::AllVars() const { diff --git a/paddle/framework/shape_inference.h b/paddle/framework/shape_inference.h index bc8af0eb3e..74e0371e32 100644 --- a/paddle/framework/shape_inference.h +++ b/paddle/framework/shape_inference.h @@ -19,6 +19,9 @@ limitations under the License. */ namespace paddle { namespace framework { +// TODO(longfei): Once after both CompileTimeInferShapeContext and +// RuntimeInferShapeContext get merged, we can rename InferShapeContextBase into +// InferShapeContext so to replace the current InferShapeContext. class InferShapeContextBase { public: virtual ~InferShapeContextBase() {} From 3e82922851da5f36f8e8187b818a1f43f324c64f Mon Sep 17 00:00:00 2001 From: Yan Chunwei Date: Fri, 6 Oct 2017 15:53:54 -0400 Subject: [PATCH 240/342] fix tensorarray unpack bug (#4614) --- paddle/framework/tensor_array.cc | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/paddle/framework/tensor_array.cc b/paddle/framework/tensor_array.cc index d54714c66c..2728bce1c1 100644 --- a/paddle/framework/tensor_array.cc +++ b/paddle/framework/tensor_array.cc @@ -217,12 +217,11 @@ LoDTensor DynamicBatchUnpacker::GetBatch(size_t index) { // collect indice need to copy to the batch std::vector indice; - for (size_t seq_id = 0; seq_id < meta.size(); seq_id++) { - const auto& seq_meta = meta[seq_id]; - if (index >= seq_meta.end) break; - indice.push_back(seq_meta.begin + index); + for (const auto& seq : meta) { + size_t id = seq.begin + index; + if (id >= seq.end) break; + indice.push_back(id); } - PADDLE_ENFORCE(!indice.empty(), "invalid batch at %d", index); // copy the indice of records in LoDTensor @@ -232,16 +231,18 @@ LoDTensor DynamicBatchUnpacker::GetBatch(size_t index) { result.Resize(make_ddim(record_dims_vec)); result.mutable_data(platform::CPUPlace()); - for (size_t i = 0; i < indice.size() - 1; i++) { + for (size_t i = 0; i < indice.size(); i++) { auto index = indice[i]; auto target = result.Slice(i, i + 1); auto source_ = source->Slice(index, index + 1); + target.CopyFrom(source_, platform::CPUPlace()); } return result; } +// TODO(supejom) to cache lod if reasonable LoDTensor PackDynamicBatch(const std::vector& source, const std::vector& meta, const LoD& lod, size_t level) { @@ -273,7 +274,6 @@ LoDTensor PackDynamicBatch(const std::vector& source, } result.set_lod(lod); - return result; } From 4acd5abaaa6c18109e505bf2b2ef07a03df25ccc Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Fri, 6 Oct 2017 13:25:56 -0700 Subject: [PATCH 241/342] update comment for input/output length check --- paddle/framework/operator.h | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h index 458404af6d..d7bc9c9ffb 100644 --- a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -325,15 +325,21 @@ class CompileTimeInferShapeContext : public InferShapeContextBase { bool HasInput(const std::string& name) const override { const std::vector& input_names = op_.Input(name); - PADDLE_ENFORCE_EQ(input_names.size(), 1UL, "Inputs(%s) length is not 1", - name); + auto length = input_names.size(); + PADDLE_ENFORCE_EQ(length, 1UL, + "Input(%s) should have only one value, " + "but it have %d now", + name, length); return block_.HasVar(input_names[0]); } bool HasOutput(const std::string& name) const override { const std::vector& output_names = op_.Output(name); - PADDLE_ENFORCE_EQ(output_names.size(), 1UL, "Outputs(%s) length is not 1", - name); + auto length = output_names.size(); + PADDLE_ENFORCE_EQ(length, 1UL, + "Output(%s) should have only one value, " + "but it have %d now", + name, length); return block_.HasVar(output_names[0]); } @@ -357,7 +363,11 @@ class CompileTimeInferShapeContext : public InferShapeContextBase { DDim GetInputDim(const std::string& name) const override { std::vector ddims = GetInputsDim(name); - PADDLE_ENFORCE_EQ(ddims.size(), 1UL, "Inputs(%s) length is not 1", name); + auto length = ddims.size(); + PADDLE_ENFORCE_EQ(length, 1UL, + "Input(%s) should have 1 value, " + "but it has %d now", + name, length); return ddims[0]; } @@ -367,7 +377,11 @@ class CompileTimeInferShapeContext : public InferShapeContextBase { DDim GetOutputDim(const std::string& name) const override { std::vector ddims = GetOutputsDim(name); - PADDLE_ENFORCE_EQ(ddims.size(), 1UL, "Outputs(%s) length is not 1", name); + auto length = ddims.size(); + PADDLE_ENFORCE_EQ(length, 1UL, + "Output(%s) should have 1 value, " + "but it has %d now", + name, length); return ddims[0]; } From ac0e3828b49cab7fd943c293516917cfdc4404ee Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Fri, 6 Oct 2017 21:15:29 +0000 Subject: [PATCH 242/342] test text --- paddle/framework/executor.cc | 102 +++++++++++++++++++++++++++++------ paddle/framework/executor.h | 16 ++++++ 2 files changed, 103 insertions(+), 15 deletions(-) diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc index aafef12554..89b83f82fb 100644 --- a/paddle/framework/executor.cc +++ b/paddle/framework/executor.cc @@ -13,13 +13,17 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/framework/executor.h" +#include #include #include +#include #include #include "paddle/framework/lod_tensor.h" #include "paddle/framework/op_registry.h" #include "paddle/framework/scope.h" +#include + namespace paddle { namespace framework { @@ -64,26 +68,94 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope) { scope->NewVar(var.name()); } - for (auto& op_desc : block.ops()) { - auto op = paddle::framework::OpRegistry::CreateOp(op_desc); - std::cout << op->DebugString() << std::endl; - op->Run(*scope, *device); + std::vector should_run = Preprocess(pdesc); + PADDLE_ENFORCE(should_run.size() == block.ops_size(), + "should_run.size() != block.ops_size()"); + for (int i = 0; i < should_run.size(); ++i) { + if (should_run[i]) { + auto op = paddle::framework::OpRegistry::CreateOp(block.ops(i)); + std::cout << op->DebugString() << std::endl; + op->Run(*scope, *device); + } } - // TODO(tonyyang-svail): need to test gpu device - for (auto& device_context : device_contexts_) { - device_context->Wait(); - } // // print tensor value - for (auto& var : block.vars()) { - std::cout << var.name() << std::endl; - auto v = scope->FindVar(var.name()); - const LoDTensor& t = v->Get(); - for (int i = 0; i < t.numel(); ++i) { - std::cout << t.data()[i] << " "; + // for (auto& var : block.vars()) { + // std::cout << var.name() << std::endl; + // auto v = scope->FindVar(var.name()); + // const LoDTensor& t = v->Get(); + // for (int i = 0; i < t.numel(); ++i) { + // std::cout << t.data()[i] << " "; + // } + // std::cout << std::endl; + // } +} + +std::vector Executor::Preprocess(const ProgramDesc& pdesc) { + // TODO(tonyyang-svail): + // - only runs the first block + + auto& block = pdesc.blocks(0); + auto& ops = block.ops(); + + bool expect_feed = true; + for (auto& op_desc : ops) { + PADDLE_ENFORCE(op_desc.type() != "feed" || expect_feed, + "All FeedOps are at the beginning of the ProgramDesc"); + expect_feed = (op_desc.type() == "feed"); + } + + bool expect_fetch = true; + for (auto op_iter = ops.rbegin(); op_iter != ops.rend(); ++op_iter) { + auto& op_desc = *op_iter; + PADDLE_ENFORCE(op_desc.type() != "fetch" || expect_fetch, + "All FetchOps must at the end of the ProgramDesc"); + expect_fetch = (op_desc.type() == "fetch"); + } + + std::set dependent_vars; + std::vector should_run; + for (auto op_iter = ops.rbegin(); op_iter != ops.rend(); ++op_iter) { + auto& op_desc = *op_iter; + + bool found_dependent_vars = false; + for (auto& var : op_desc.outputs()) { + for (auto& argu : var.arguments()) { + if (dependent_vars.count(argu) != 0) { + found_dependent_vars = true; + } + } + } + + // TODO(tonyyang-svail): add VLOG here for debugging + if (op_desc.type() == "fetch" || found_dependent_vars) { + // erase its output to the dependency graph + for (auto& var : op_desc.outputs()) { + for (auto& argu : var.arguments()) { + dependent_vars.erase(argu); + } + } + + // insert its input to the dependency graph + for (auto& var : op_desc.inputs()) { + for (auto& argu : var.arguments()) { + dependent_vars.insert(argu); + } + } + + // this op should be executed + should_run.push_back(true); + } else { + // this op should NOT be executed + should_run.push_back(false); } - std::cout << std::endl; } + + // since we are traversing the ProgramDesc in reverse order + // we reverse the should_run vector + std::reverse(should_run.begin(), should_run.end()); + + return should_run; } } // namespace framework diff --git a/paddle/framework/executor.h b/paddle/framework/executor.h index 9e443c8fca..1d2e6c96de 100644 --- a/paddle/framework/executor.h +++ b/paddle/framework/executor.h @@ -26,8 +26,24 @@ class Executor { public: explicit Executor(const std::vector& places); ~Executor(); + + /* @Brief + * Runtime evaluation of the given ProgramDesc under certain Scope + * + * @param + * ProgramDesc + * Scope + */ void Run(const ProgramDesc&, Scope*); + protected: + /* @Brief + * + * @param + * ProgramDesc + */ + std::vector Preprocess(const ProgramDesc& pdesc); + private: std::vector device_contexts_; }; From e8a678e1eecd11fee219a93c6c586ee24663a506 Mon Sep 17 00:00:00 2001 From: qijun Date: Fri, 6 Oct 2017 22:46:04 +0000 Subject: [PATCH 243/342] fix executor gpu unittest runtime error --- paddle/framework/executor_test.cc | 19 ++++++++++++++++--- paddle/operators/fetch_op.cc | 2 -- 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/paddle/framework/executor_test.cc b/paddle/framework/executor_test.cc index 55e209628b..82f9bd6f2d 100644 --- a/paddle/framework/executor_test.cc +++ b/paddle/framework/executor_test.cc @@ -239,6 +239,7 @@ class ExecutorTesterFeed : public ::testing::Test { std::vector> inputs_; }; +#ifndef PADDLE_WITH_CUDA TEST_F(ExecutorTesterRandom, CPU) { std::vector places; CPUPlace cpu_place; @@ -292,13 +293,19 @@ TEST_F(ExecutorTesterFeed, CPU) { delete executor; } - -#ifdef PADDLE_WITH_CUDA +#else TEST_F(ExecutorTesterRandom, GPU) { std::vector places; GPUPlace gpu_place(0); places.push_back(gpu_place); + // We have a global Scope and BuddyAllocator, and we must ensure + // global BuddyAllocator is initialized before global Scope. Thus, + // global Scope will deconstruct before BuddyAllocator. Otherwise, + // "pointer being freed was not allocated" error will appear. + // If paddle is compiled with GPU, both CPU and GPU BuddyAllocator + // need to be used at first. + paddle::memory::Used(CPUPlace()); paddle::memory::Used(gpu_place); Executor* executor = new Executor(places); @@ -310,7 +317,13 @@ TEST_F(ExecutorTesterFeed, GPU) { std::vector places; GPUPlace gpu_place(0); places.push_back(gpu_place); - + // We have a global Scope and BuddyAllocator, and we must ensure + // global BuddyAllocator is initialized before global Scope. Thus, + // global Scope will deconstruct before BuddyAllocator. Otherwise, + // "pointer being freed was not allocated" error will appear. + // If paddle is compiled with GPU, both CPU and GPU BuddyAllocator + // need to be used at first. + paddle::memory::Used(CPUPlace()); paddle::memory::Used(gpu_place); Executor* executor = new Executor(places); diff --git a/paddle/operators/fetch_op.cc b/paddle/operators/fetch_op.cc index f6882cbd03..4b6b3ca85a 100644 --- a/paddle/operators/fetch_op.cc +++ b/paddle/operators/fetch_op.cc @@ -35,8 +35,6 @@ class FetchOp : public framework::OperatorWithKernel { } auto input_dim = ctx->GetInputDim("Input"); - framework::Tensor tmp; - tmp.Resize(input_dim); (*tensors)[col].Resize(input_dim); // TODO(qijun): need to handle LodTensor later From 803b7b6298bea773316b3030b2edbc2fa183b738 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Fri, 6 Oct 2017 16:01:14 -0700 Subject: [PATCH 244/342] Follow comments --- paddle/framework/op_info.h | 2 ++ paddle/framework/op_registry.cc | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/paddle/framework/op_info.h b/paddle/framework/op_info.h index 231f212fa3..c504f69e30 100644 --- a/paddle/framework/op_info.h +++ b/paddle/framework/op_info.h @@ -54,6 +54,8 @@ struct OpInfo { "Operator GradOpMaker has not been registered."); return grad_op_maker_; } + + const OpAttrChecker* Checker() const { return checker_; } }; class OpInfoMap { diff --git a/paddle/framework/op_registry.cc b/paddle/framework/op_registry.cc index e9d2e55872..504afbd5db 100644 --- a/paddle/framework/op_registry.cc +++ b/paddle/framework/op_registry.cc @@ -23,8 +23,8 @@ std::unique_ptr OpRegistry::CreateOp( const std::string& type, const VariableNameMap& inputs, const VariableNameMap& outputs, AttributeMap attrs) { auto& info = OpInfoMap::Instance().Get(type); - if (info.checker_ != nullptr) { - info.checker_->Check(attrs); + if (info.Checker() != nullptr) { + info.Checker()->Check(attrs); } auto op = info.Creator()(type, inputs, outputs, attrs); return std::unique_ptr(op); From 564b8c6cede75f844ba238a4573a6514d899a90d Mon Sep 17 00:00:00 2001 From: zchen0211 Date: Fri, 6 Oct 2017 16:07:57 -0700 Subject: [PATCH 245/342] gan api --- doc/design/gan_api.md | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/doc/design/gan_api.md b/doc/design/gan_api.md index d35309079f..9864e8b7de 100644 --- a/doc/design/gan_api.md +++ b/doc/design/gan_api.md @@ -19,12 +19,12 @@ In our GAN design, we wrap it as a user-friendly easily customized python API to | softmax loss (done) | ? | Y | | reshape op (done) | ? | Y | | Dependency Engine (done) | Jiayi | Y * | -| Python API (done) | Jiayi | Y * | +| Python API (done) | Longfei, Jiayi | Y * | | Executor (done) | Tony | Y * | -| Multi optimizer | ? | Y * | +| Multi optimizer (woking) | Longfei | Y * | | Optimizer with any para | ? | Y * | -| Concat op | ? | N (Cond) | -| Repmat op | ? | N (Cond) | +| Concat op (done) | ? | N (Cond) | +| Repmat op (done) | ? | N (Cond) |

@@ -91,7 +91,8 @@ class DCGAN(object): - Concatenation, batch-norm, FC operations required; - Deconv layer required, which is missing now... ```python -def generator(self, z, y = None): +class DCGAN(object): + def generator(self, z, y = None): # input z: the random noise # input y: input data label (optional) # output G_im: generated fake images @@ -116,7 +117,8 @@ def generator(self, z, y = None): - Given a noisy input z, returns a fake image. - Concatenation, Convolution, batch-norm, FC, Leaky-ReLU operations required; ```python -def discriminator(self, image): +class DCGAN(object): + def discriminator(self, image): # input image: either generated images or real ones # output D_h2: binary logit of the label @@ -137,8 +139,8 @@ def discriminator(self, image): - Build generator and discriminators; - Define two training losses for discriminator and generator, respectively. ```python -def build_model(self): - +class DCGAN(object): + def build_model(self): # input data if self.y_dim: self.y = pd.data(pd.float32, [self.batch_size, self.y_dim]) From 91f5d2b9cb23cbb6048180ed791e53659532cf04 Mon Sep 17 00:00:00 2001 From: qijun Date: Fri, 6 Oct 2017 16:09:19 -0700 Subject: [PATCH 246/342] follow comments and create local_scope inside executor run method --- paddle/framework/executor.cc | 6 ++---- paddle/framework/executor_test.cc | 12 ++++++------ paddle/framework/scope.cc | 2 +- paddle/framework/scope.h | 2 +- paddle/operators/feed_op.cc | 2 +- paddle/operators/feed_op.h | 6 ++---- paddle/operators/fetch_op.cc | 2 +- paddle/operators/fetch_op.h | 6 ++---- 8 files changed, 16 insertions(+), 22 deletions(-) diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc index c18ba049c8..7fc407ebc9 100644 --- a/paddle/framework/executor.cc +++ b/paddle/framework/executor.cc @@ -56,9 +56,7 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope) { auto& block = pdesc.blocks(0); auto& device = device_contexts_[0]; - // TODO(tonyyang-svail): - // - runs on a new local scope - // Scope& local_scope = scope->NewScope(); + Scope& local_scope = scope->NewScope(); for (auto& var : block.vars()) { scope->NewVar(var.name()); @@ -67,7 +65,7 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope) { for (auto& op_desc : block.ops()) { auto op = paddle::framework::OpRegistry::CreateOp(op_desc); std::cout << op->DebugString() << std::endl; - op->Run(*scope, *device); + op->Run(local_scope, *device); } // TODO(tonyyang-svail): need to test gpu device diff --git a/paddle/framework/executor_test.cc b/paddle/framework/executor_test.cc index 82f9bd6f2d..bf6c1dffc1 100644 --- a/paddle/framework/executor_test.cc +++ b/paddle/framework/executor_test.cc @@ -131,7 +131,7 @@ template void set_feed_variable(const std::vector>& inputs) { typedef std::vector FeedInputs; // Tensors in feed value variable will only be in CPUPlace - Variable* g_feed_value = GetScope()->FindVar("feed_value"); + Variable* g_feed_value = GetGlobalScope()->FindVar("feed_value"); FeedInputs& feed_inputs = *(g_feed_value->GetMutable()); auto size = inputs.size(); feed_inputs.resize(size); @@ -146,7 +146,7 @@ template std::vector> get_fetch_variable() { typedef std::vector FetchOutputs; // Tensors in fetch value variable will only be in CPUPlace - Variable* g_fetch_value = GetScope()->FindVar("fetch_value"); + Variable* g_fetch_value = GetGlobalScope()->FindVar("fetch_value"); FetchOutputs& fetch_outputs = *(g_fetch_value->GetMutable()); auto size = fetch_outputs.size(); @@ -252,7 +252,7 @@ TEST_F(ExecutorTesterRandom, CPU) { paddle::memory::Used(cpu_place); Executor* executor = new Executor(places); - executor->Run(pdesc_, GetScope()); + executor->Run(pdesc_, GetGlobalScope()); std::vector> result = get_fetch_variable(); for (auto& vec : result) { for (auto& num : vec) { @@ -281,7 +281,7 @@ TEST_F(ExecutorTesterFeed, CPU) { // need to set feed variable before Executor::Run std::cout << "start mini-batch " << i << std::endl; set_feed_variable(inputs_); - executor->Run(pdesc_, GetScope()); + executor->Run(pdesc_, GetGlobalScope()); std::vector> result = get_fetch_variable(); for (auto& vec : result) { for (auto& num : vec) { @@ -309,7 +309,7 @@ TEST_F(ExecutorTesterRandom, GPU) { paddle::memory::Used(gpu_place); Executor* executor = new Executor(places); - executor->Run(pdesc_, GetScope()); + executor->Run(pdesc_, GetGlobalScope()); delete executor; } @@ -333,7 +333,7 @@ TEST_F(ExecutorTesterFeed, GPU) { // need to set feed variable before Executor::Run std::cout << "start mini-batch " << i << std::endl; set_feed_variable(inputs_); - executor->Run(pdesc_, GetScope()); + executor->Run(pdesc_, GetGlobalScope()); std::vector> result = get_fetch_variable(); for (auto& vec : result) { for (auto& num : vec) { diff --git a/paddle/framework/scope.cc b/paddle/framework/scope.cc index b6a9d7fbc2..2a0d9bbf33 100644 --- a/paddle/framework/scope.cc +++ b/paddle/framework/scope.cc @@ -66,7 +66,7 @@ void Scope::DropKids() { std::once_flag feed_variable_flag; -framework::Scope* GetScope() { +framework::Scope* GetGlobalScope() { static std::unique_ptr g_scope{nullptr}; std::call_once(feed_variable_flag, [&]() { g_scope.reset(new framework::Scope()); diff --git a/paddle/framework/scope.h b/paddle/framework/scope.h index 96f3ae875b..319d291efe 100644 --- a/paddle/framework/scope.h +++ b/paddle/framework/scope.h @@ -73,7 +73,7 @@ class Scope { DISABLE_COPY_AND_ASSIGN(Scope); }; -framework::Scope* GetScope(); +framework::Scope* GetGlobalScope(); } // namespace framework } // namespace paddle diff --git a/paddle/operators/feed_op.cc b/paddle/operators/feed_op.cc index f2c498e2e2..b9e43be966 100644 --- a/paddle/operators/feed_op.cc +++ b/paddle/operators/feed_op.cc @@ -27,7 +27,7 @@ class FeedOp : public framework::OperatorWithKernel { PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output should be not null."); int col = ctx->Attrs().Get("col"); framework::Variable* g_feed_variable = - framework::GetScope()->FindVar("feed_value"); + framework::GetGlobalScope()->FindVar("feed_value"); const FeedInputs& tensors = g_feed_variable->Get(); diff --git a/paddle/operators/feed_op.h b/paddle/operators/feed_op.h index cf93b6f434..de8ec6ff61 100644 --- a/paddle/operators/feed_op.h +++ b/paddle/operators/feed_op.h @@ -19,17 +19,15 @@ limitations under the License. */ namespace paddle { namespace operators { -using Tensor = framework::Tensor; - template class FeedKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { typedef std::vector FeedInputs; - Tensor* out = ctx.Output("Out"); + framework::Tensor* out = ctx.Output("Out"); out->mutable_data(ctx.GetPlace()); framework::Variable* g_feed_variable = - framework::GetScope()->FindVar("feed_value"); + framework::GetGlobalScope()->FindVar("feed_value"); int col = ctx.template Attr("col"); const FeedInputs& tensors = g_feed_variable->Get(); out->CopyFrom(tensors[col], ctx.GetPlace()); diff --git a/paddle/operators/fetch_op.cc b/paddle/operators/fetch_op.cc index 4b6b3ca85a..7bde4953cd 100644 --- a/paddle/operators/fetch_op.cc +++ b/paddle/operators/fetch_op.cc @@ -27,7 +27,7 @@ class FetchOp : public framework::OperatorWithKernel { PADDLE_ENFORCE(ctx->HasInput("Input"), "Input should be not null."); int col = ctx->Attrs().Get("col"); framework::Variable* g_fetch_variable = - framework::GetScope()->FindVar("fetch_value"); + framework::GetGlobalScope()->FindVar("fetch_value"); FetchOutputs* tensors = g_fetch_variable->GetMutable(); if (tensors->size() < static_cast(col + 1)) { diff --git a/paddle/operators/fetch_op.h b/paddle/operators/fetch_op.h index e8d5e3a9c0..3bec9c9974 100644 --- a/paddle/operators/fetch_op.h +++ b/paddle/operators/fetch_op.h @@ -19,17 +19,15 @@ limitations under the License. */ namespace paddle { namespace operators { -using Tensor = framework::Tensor; - template class FetchKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { typedef std::vector FetchOutputs; - const Tensor* input = ctx.Input("Input"); + const framework::Tensor* input = ctx.Input("Input"); int col = ctx.template Attr("col"); framework::Variable* g_fetch_variable = - framework::GetScope()->FindVar("fetch_value"); + framework::GetGlobalScope()->FindVar("fetch_value"); FetchOutputs* tensors = g_fetch_variable->GetMutable(); (*tensors)[col].mutable_data(platform::CPUPlace()); (*tensors)[col].CopyFrom(*input, platform::CPUPlace()); From 806796cea3e8bad3706f82bb47073a2313b09f3e Mon Sep 17 00:00:00 2001 From: zchen0211 Date: Fri, 6 Oct 2017 16:10:30 -0700 Subject: [PATCH 247/342] gan api --- doc/design/gan_api.md | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/doc/design/gan_api.md b/doc/design/gan_api.md index 9864e8b7de..d0f8b47ca3 100644 --- a/doc/design/gan_api.md +++ b/doc/design/gan_api.md @@ -98,18 +98,18 @@ class DCGAN(object): # output G_im: generated fake images if not self.y_dim: - z = pd.concat(1, [z, y]) + z = pd.layer.concat(1, [z, y]) - G_h0 = pd.fc(z, self.G_w0, self.G_b0) - G_h0_bn = pd.batch_norm(G_h0) - G_h0_relu = pd.relu(G_h0_bn) + G_h0 = pd.layer.fc(z, self.G_w0, self.G_b0) + G_h0_bn = pd.layer.batch_norm(G_h0) + G_h0_relu = pd.layer.relu(G_h0_bn) - G_h1 = pd.deconv(G_h0_relu, self.G_w1, self.G_b1) - G_h1_bn = pd.batch_norm(G_h1) - G_h1_relu = pd.relu(G_h1_bn) + G_h1 = pd.layer.deconv(G_h0_relu, self.G_w1, self.G_b1) + G_h1_bn = pd.layer.batch_norm(G_h1) + G_h1_relu = pd.layer.relu(G_h1_bn) - G_h2 = pd.deconv(G_h1_relu, self.G_W2, self.G_b2)) - G_im = pd.tanh(G_im) + G_h2 = pd.layer.deconv(G_h1_relu, self.G_W2, self.G_b2)) + G_im = pd.layer.tanh(G_im) return G_im ``` @@ -122,15 +122,15 @@ class DCGAN(object): # input image: either generated images or real ones # output D_h2: binary logit of the label - D_h0 = pd.conv2d(image, self.D_w0, self.D_b0) - D_h0_bn = pd.batchnorm(h0) - D_h0_relu = pd.lrelu(h0_bn) + D_h0 = pd.layer.conv2d(image, w=self.D_w0, b=self.D_b0) + D_h0_bn = pd.layer.batchnorm(h0) + D_h0_relu = pd.layer.lrelu(h0_bn) - D_h1 = pd.conv2d(D_h0_relu, self.D_w1, self.D_b1) - D_h1_bn = pd.batchnorm(D_h1) - D_h1_relu = pd.lrelu(D_h1_bn) + D_h1 = pd.layer.conv2d(D_h0_relu, w=self.D_w1, b=self.D_b1) + D_h1_bn = pd.layer.batchnorm(D_h1) + D_h1_relu = pd.layer.lrelu(D_h1_bn) - D_h2 = pd.fc(D_h1_relu, self.D_w2, self.D_b2) + D_h2 = pd.layer.fc(D_h1_relu, w=self.D_w2, b=self.D_b2) return D_h2 ``` From a7d700e0ba35e78cfbe85acf2d0b4cb72d22b10f Mon Sep 17 00:00:00 2001 From: qijun Date: Fri, 6 Oct 2017 16:30:44 -0700 Subject: [PATCH 248/342] revert local scope to TODO --- paddle/framework/executor.cc | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc index 7fc407ebc9..c18ba049c8 100644 --- a/paddle/framework/executor.cc +++ b/paddle/framework/executor.cc @@ -56,7 +56,9 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope) { auto& block = pdesc.blocks(0); auto& device = device_contexts_[0]; - Scope& local_scope = scope->NewScope(); + // TODO(tonyyang-svail): + // - runs on a new local scope + // Scope& local_scope = scope->NewScope(); for (auto& var : block.vars()) { scope->NewVar(var.name()); @@ -65,7 +67,7 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope) { for (auto& op_desc : block.ops()) { auto op = paddle::framework::OpRegistry::CreateOp(op_desc); std::cout << op->DebugString() << std::endl; - op->Run(local_scope, *device); + op->Run(*scope, *device); } // TODO(tonyyang-svail): need to test gpu device From 6dcbeb6125fbb0e34ab08a7ebbcc171da01514d8 Mon Sep 17 00:00:00 2001 From: Yi Wang Date: Fri, 6 Oct 2017 17:29:43 -0700 Subject: [PATCH 249/342] Update if_else_op.md according to https://github.com/PaddlePaddle/Paddle/issues/4313 --- doc/design/if_else_op.md | 76 +++++++++++++++++++++++----------------- 1 file changed, 43 insertions(+), 33 deletions(-) diff --git a/doc/design/if_else_op.md b/doc/design/if_else_op.md index 954a19c073..26d140f06d 100644 --- a/doc/design/if_else_op.md +++ b/doc/design/if_else_op.md @@ -1,41 +1,51 @@ -IfOp should have only one branch. An IfOp operator takes a `cond` variable whose value must be a vector of N boolean elements. Its return value has N instances. If cond[i] == True, input instance input[i] will go through true_block() and generate output[i]; otherwise it will produce output from false_bloack(). +# The `IfElse` Operator -```python -import paddle as pd +PaddlePaddle's `IfElse` operator differs from TensorFlow's: -x = var() -y = var() -cond = var() -default_value = var() -b = pd.create_ifelseop(inputs=[x], output_num=1) -with b.true_block(): - x = b.inputs(0) - z = operator.add(x, y) - b.set_output(0, operator.softmax(z)) - -with b.false_block(): - x = b.inputs(0) - z = layer.fc(x) - b.set_output(0, operator.softmax(z)) - -out = b(cond) -``` +- the TensorFlow version takes a scalar boolean value as the condition so that the whole mini-batch goes to either the true or the false branch, whereas +- the PaddlePaddle version takes a vector of boolean value as the condition, and instances corresponding to true values go to the true branch, those corresponding to false values go to the false branch. + +## Example + +The following PaddlePaddle program shows the usage of the IfElse operator: -If only true_block is set in an IfElseOp, a special case is that we can have a default value for false as: ```python import paddle as pd -x = var() -y = var() -cond = var() -default_value = var() -b = pd.create_ifelseop(inputs=[x], output_num=1, default_value) - -with b.true_block(): - x = b.inputs(0) - z = operator.add(x, y) - b.set_output(0, operator.softmax(z)) +x = minibatch([10, 20, 30]) # shape=[None, 1] +y = var(1) # shape=[1], value=1 +z = minibatch([10, 20, 30]) # shape=[None, 1] +cond = larger_than(x, 15) # [false, true, true] + +ie = pd.ifelse() +with ie.true_block(): + d = pd.layer.add(x, y) + ie.output(d, pd.layer.softmax(d)) +with ie.false_block(): + d = pd.layer.fc(z) + ie.output(d, d+1) +o1, o2 = ie(cond) +``` -out = b(cond) +A challenge to implement the `IfElse` operator is to infer those variables to be split, or, say, to identify the variable of the mini-batch or those derived from the mini-batch. + +An equivalent C++ program is as follows: + +```c++ +namespace pd = paddle; + +int x = 10; +int y = 1; +int z = 10; +bool cond = false; +int o1, o2; +if (cond) { + int d = x + y; + o1 = z; + o2 = pd::layer::softmax(z); +} else { + int d = pd::layer::fc(z); + o1 = d; + o2 = d+1; +} ``` -where default_value is a list of vars for `cond` == False. From 20a6ae7f1f7eb67eb9f5e7b6290aa81aa4536e0a Mon Sep 17 00:00:00 2001 From: Yan Chunwei Date: Fri, 6 Oct 2017 20:41:37 -0400 Subject: [PATCH 250/342] Feature/tensor array add python binding (#4616) --- paddle/framework/tensor_array.h | 3 + paddle/pybind/CMakeLists.txt | 2 +- paddle/pybind/pybind.cc | 51 +++++++++ .../v2/framework/tests/test_tensor_array.py | 106 ++++++++++++++++++ 4 files changed, 161 insertions(+), 1 deletion(-) create mode 100644 python/paddle/v2/framework/tests/test_tensor_array.py diff --git a/paddle/framework/tensor_array.h b/paddle/framework/tensor_array.h index 22ae6a966f..94a14c2df4 100644 --- a/paddle/framework/tensor_array.h +++ b/paddle/framework/tensor_array.h @@ -26,6 +26,9 @@ namespace framework { * in original lod-tensor. */ struct DySeqMeta { + DySeqMeta(size_t begin, size_t end, size_t ori_idx) + : begin(begin), end(end), ori_idx(ori_idx) {} + size_t begin; size_t end; // not included size_t ori_idx; diff --git a/paddle/pybind/CMakeLists.txt b/paddle/pybind/CMakeLists.txt index 18ecbd1aa3..97364f2db9 100644 --- a/paddle/pybind/CMakeLists.txt +++ b/paddle/pybind/CMakeLists.txt @@ -1,6 +1,6 @@ if(WITH_PYTHON) cc_library(paddle_pybind SHARED SRCS pybind.cc exception.cc protobuf.cc - DEPS pybind python backward proto_desc + DEPS pybind python backward proto_desc tensor_array ${GLOB_OP_LIB}) endif(WITH_PYTHON) diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc index 38ba450447..356c4986e2 100644 --- a/paddle/pybind/pybind.cc +++ b/paddle/pybind/pybind.cc @@ -16,6 +16,7 @@ limitations under the License. */ #include "paddle/framework/backward.h" #include "paddle/framework/lod_tensor.h" +#include "paddle/framework/tensor_array.h" #include "paddle/operators/cond_op.h" #include "paddle/operators/net_op.h" #include "paddle/operators/recurrent_op.h" @@ -286,6 +287,56 @@ All parameter, weight, gradient are variables in Paddle. self->CompleteAddOp(); }); + py::class_(m, "TensorArray") + .def("__init__", + [](TensorArray &instance) { new (&instance) TensorArray(); }) + .def("read", + [](TensorArray &self, size_t index) { return self.Read(index); }) + .def("write", [](TensorArray &self, size_t index, + LoDTensor &value) { self.Write(index, value); }) + .def("write_shared", + [](TensorArray &self, size_t index, const LoDTensor &value) { + self.WriteShared(index, value); + }) + .def("size", [](TensorArray &self) { return self.size(); }) + .def("pack", + [](TensorArray &self, size_t level, + const std::vector> &meta_info, + const std::vector> &lod) { + std::vector meta; + for (auto &info : meta_info) { + PADDLE_ENFORCE_EQ(info.size(), 3UL); + meta.emplace_back(info[0], info[1], info[2]); + } +#ifndef PADDLE_WITH_CUDA + return self.Pack(level, meta, lod); +#else + LoD new_lod; + new_lod.reserve(lod.size()); + std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod)); + return self.Pack(level, meta, new_lod); +#endif + }) + .def("unpack", + [](TensorArray &self, const LoDTensor &source, int level, + bool length_descend) { + auto metas = self.Unpack(source, level, length_descend); + std::vector> meta_info; + for (auto meta : metas) { + meta_info.emplace_back( + std::vector({meta.begin, meta.end, meta.ori_idx})); + } + return meta_info; + }) + .def("stack", [](TensorArray &self) { return self.Stack(); }) + .def("unstack", + [](TensorArray &self, const LoDTensor &source) { + return self.Unstack(source); + }) + .def("unstack_shared", [](TensorArray &self, const LoDTensor &source) { + return self.UnstackShared(source); + }); + // recurrent_op py::class_(m, "RecurrentOp") .def_static( diff --git a/python/paddle/v2/framework/tests/test_tensor_array.py b/python/paddle/v2/framework/tests/test_tensor_array.py new file mode 100644 index 0000000000..11f8a01f92 --- /dev/null +++ b/python/paddle/v2/framework/tests/test_tensor_array.py @@ -0,0 +1,106 @@ +import logging +import paddle.v2.framework.core as core +import unittest +import numpy as np + + +class TestTensorArray(unittest.TestCase): + def setUp(self): + self.ta = core.TensorArray() + + self.batch_size = 10 + self.dim = 2 + + # create a LoDTensor + self.scope = core.Scope() + var = self.scope.new_var("test_tensor") + self.place = core.CPUPlace() + tensor = var.get_tensor() + tensor.set_dims([self.batch_size, self.dim]) + tensor.alloc_float(self.place) + tensor_array = np.array(tensor) + tensor_array[0, 0] = 0 + tensor_array[1, 0] = 1 + tensor_array[2, 0] = 2 + tensor_array[3, 0] = 3 + tensor_array[4, 0] = 4 + tensor_array[5, 0] = 5 + tensor_array[6, 0] = 6 + tensor_array[7, 0] = 7 + tensor_array[8, 0] = 8 + tensor_array[9, 0] = 9 + + lod_py = [[0, 2, 5, 10]] + lod_tensor = core.LoDTensor(lod_py) + lod_tensor.set(tensor_array, self.place) + + self.py_seq_meta = [[5, 10, 2], [2, 5, 1], [0, 2, 0]] + + self.tensor = lod_tensor + + def test_unstack(self): + self.ta.unstack(self.tensor) + self.assertEqual(self.tensor.get_dims()[0], self.ta.size()) + + def test_read(self): + self.ta.unstack(self.tensor) + for i in range(self.batch_size): + tensor = self.ta.read(i) + + def test_write(self): + self.ta.unstack(self.tensor) + + # create a tensor with shape of [1, self.dim] + var = self.scope.new_var("hell") + tensor = var.get_tensor() + tensor.set_dims([1, self.dim]) + tensor.alloc_float(self.place) + tensor_array = np.array(tensor) + for i in range(self.dim): + tensor_array[0, i] = i + tensor.set(tensor_array, self.place) + + self.ta.write(2, tensor) + + ta_tensor = self.ta.read(2) + ta_tensor_array = np.array(ta_tensor) + self.assertEqual(ta_tensor.get_dims(), [1, self.dim]) + self.assertTrue((tensor_array == ta_tensor_array).all()) + + def test_write_shared(self): + self.ta.unstack(self.tensor) + + # create a tensor with shape of [1, self.dim] + var = self.scope.new_var("hell") + tensor = var.get_tensor() + tensor.set_dims([1, self.dim]) + tensor.alloc_float(self.place) + tensor_array = np.array(tensor) + for i in range(self.dim): + tensor_array[0, i] = i + tensor.set(tensor_array, self.place) + + self.ta.write_shared(2, tensor) + + ta_tensor = self.ta.read(2) + ta_tensor_array = np.array(ta_tensor) + self.assertEqual(ta_tensor.get_dims(), [1, self.dim]) + self.assertTrue((tensor_array == ta_tensor_array).all()) + + def test_unpack(self): + meta = self.ta.unpack(self.tensor, 0, True) + self.assertEqual(self.ta.size(), 5) + self.assertEqual(meta, self.py_seq_meta) + + def test_pack(self): + meta = self.ta.unpack(self.tensor, 0, True) + print "meta", meta + tensor = self.ta.pack(0, meta, self.tensor.lod()) + print np.array(self.tensor) + print np.array(tensor) + self.assertTrue((np.array(self.tensor) == np.array(tensor)).all()) + self.assertTrue(tensor.lod(), self.tensor.lod()) + + +if __name__ == '__main__': + unittest.main() From b68a95f7f488f8ff94f4793ec294121aa004d02d Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Sat, 7 Oct 2017 00:47:19 +0000 Subject: [PATCH 251/342] prune pass simple test --- paddle/framework/executor.cc | 2 ++ paddle/framework/executor.h | 4 ++++ paddle/framework/executor_test.cc | 1 + 3 files changed, 7 insertions(+) diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc index 10d22ba01c..4f217277d0 100644 --- a/paddle/framework/executor.cc +++ b/paddle/framework/executor.cc @@ -145,9 +145,11 @@ std::vector Executor::Preprocess(const ProgramDesc& pdesc) { // this op should be executed should_run.push_back(true); + LOG(INFO) << "Yes " << op_desc.type(); } else { // this op should NOT be executed should_run.push_back(false); + LOG(INFO) << "No " << op_desc.type(); } } diff --git a/paddle/framework/executor.h b/paddle/framework/executor.h index 1d2e6c96de..75cb5939ff 100644 --- a/paddle/framework/executor.h +++ b/paddle/framework/executor.h @@ -38,9 +38,13 @@ class Executor { protected: /* @Brief + * Pruning the graph * * @param * ProgramDesc + * + * @return + * vector Same size as ops. Indicates whether an op should be run. */ std::vector Preprocess(const ProgramDesc& pdesc); diff --git a/paddle/framework/executor_test.cc b/paddle/framework/executor_test.cc index bf6c1dffc1..6a4b2e3d1a 100644 --- a/paddle/framework/executor_test.cc +++ b/paddle/framework/executor_test.cc @@ -226,6 +226,7 @@ class ExecutorTesterFeed : public ::testing::Test { Out->set_parameter("Out"); Out->add_arguments("c"); + add_fetch_op("a", dim, 0, root_block); add_fetch_op("c", dim, 0, root_block); std::vector vec1 = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0}; From 34193844adc9d67e4a9c8bb00ff9e67ffbc4ba3c Mon Sep 17 00:00:00 2001 From: Yan Chunwei Date: Fri, 6 Oct 2017 21:06:44 -0400 Subject: [PATCH 252/342] add background for TensorArray (#4564) --- doc/design/tensor_array.md | 268 ++++++++++++++++++++++++++++++++----- 1 file changed, 233 insertions(+), 35 deletions(-) diff --git a/doc/design/tensor_array.md b/doc/design/tensor_array.md index a0419ec002..8378e97bf7 100644 --- a/doc/design/tensor_array.md +++ b/doc/design/tensor_array.md @@ -1,39 +1,250 @@ # Design for TensorArray +This design doc presents the necessity of a new C++ class `TensorArray`. +In addition to the very simple C++ implementation + +```c++ +class TensorArray { + public: + explicit TensorArray(const LoDTensor&); + explicit TensorArray(size_t size); + + private: + vector values_; +}; +``` + +We also need to expose it to PaddlePaddle's Python API, +because users would want to use it with our very flexible operators `WhileLoop`. +An example for a RNN based on dynamic operators is + +```python +input = pd.data(...) +num_steps = Var(12) + +TensorArray states(size=num_steps) +TensorArray step_inputs(unstack_from=input) +TensorArray step_outputs(size=num_steps) + +W = Tensor(...) +U = Tensor(...) +default_state = some_op() + +step = Var(1) + +wloop = paddle.create_whileloop(loop_vars=[step]) +with wloop.frame(): + wloop.break_if(pd.equal(step, num_steps) + pre_state = states.read(step-1, default_state) + step_input = step_inputs.read(step) + state = pd.sigmoid(pd.matmul(U, pre_state) + pd.matmul(W, step_input)) + states.write(step, state) + step_outputs.write(step, state) # output state + step.update(state+1) + +output = step_outputs.stack() +``` + +## Background +Steps are one of the core concepts of RNN. In each time step of RNN, there should be several input segments, states, and output segments; all these components act like arrays, for example, call `states[step_id]` will get the state in `step_id`th time step. + +An RNN can be implemented with the following pseudocode + +```c++ +Array states; +Array input_segments; +Array output_segments; +Parameter W, U; + +step = 1 +seq_len = 12 +while_loop { + if (step == seq_len) break; + states[step] = sigmoid(W * states[step-1] + U * input_segments[step]); + output_segments[step] = states[step] // take state as output + step++; +} +``` +According to the [RNN roadmap](https://github.com/PaddlePaddle/Paddle/issues/4561), there are several different RNNs that PaddlePaddle will eventually support. + +Currently, the basic RNN implementation supported by PaddlePaddle is the `recurrent_op` which takes tensors as input and splits them into `input_segments`. + + +Since a tensor cannot store variable-length sequences directly, PaddlePaddle implements the tensor with level of details (`LoDTensor` for short). +Segmenting the `LoDTensor` is much more complicated than splitting a tensor, that makes it necessary to refactor the `recurrent_op` with `LoDTensor` segmenting support. + +As the next step in RNN support, `dynamic_recurrent_op` should be introduced to handle inputs with variable-length sequences. + +The implementation is similar to `recurrent_op`. +The key difference is the way **the original input `LoDTensors` and outupts are split to get the `input_segments` and the `output_segments`.** + + +Though it can't be built over `recurrent_op` or `dynamic_recurrent_op` directly, +the logic behind splitting a tensor or a LoD tensor into `input_segments` remains the same. + +## Why `TensorArray` +The logic behind splitting the inputs to segments, states and outputs is similar and can be shared in a seperate module. + +The array of `states`, `input_segments` and `output_segments` would be exposed to users when writing a dynamic RNN model similar to the above pseudo codes. + +So there should be an array-like container, which can store the segments of a tensor or LoD tensor. + +**This container can store an array of tensors and provides several methods to split a tensor or a LoD tensor** . +This is where the notion of `TensorArray` comes from. + +## Introduce TensorArray to uniform all the three RNNs TensorArray as a new concept is borrowed from TensorFlow, it is meant to be used with dynamic iteration primitives such as `while_loop` and `map_fn`. This concept can be used to support our new design of dynamic operations, and help to refactor some existing variant-sentence-related layers, -such as `RecurrentGradientMachine`. +such as `recurrent_op`, `RecurrentGradientMachine`. In [our design for dynamic RNN](https://github.com/PaddlePaddle/Paddle/pull/4401), `TensorArray` is used to segment inputs and store states in all time steps. By providing some methods similar to a C++ array, -the definition of some state-based dynamic models such as RNN could be more natural and highly flexible. - -## Dynamic-Related Methods -Some basic methods should be proposed as follows: - -### stack() -Pack the values in a `TensorArray` into a tensor with rank one higher than each tensor in `values`. -### unstack(axis=0) -Unpacks the given dimension of a rank-`R` tensor into rank-`(R-1)` tensors. -### concat() -Return the values in the `TensorArray` as a concatenated Tensor. -### write(index, value, data_shared=true) -Write value into index of the TensorArray. -### read(index) -Read the value at location `index` in the `TensorArray`. -### size() -Return the number of values. +the definition of some state-based dynamic models such as RNN can be more natural and highly flexible. + +## Dynamic-operations on TensorArray + +`TensorArray` will be used directly when defining dynamic models, so some operators listed below should be implemented + +```python +# several helper operators for TensorArray +def tensor_array_stack(ta, tensor): + ''' + get a tensor array `ta`, return a packed `tensor`. + ''' + pass + +def tensor_array_unstack(tensor, ta): + ''' + get a `tensor`, unstack it and get a tensor array `ta`. + ''' + pass + +def tensor_array_write(ta, index, tensor, data_shared): + ''' + get a `tensor` and a scalar tensor `index`, write `tensor` into index-th + value of the tensor array `ta`. + `data_shared` is an attribute that specifies whether to copy or reference the tensors. + ''' + pass + +def tensor_array_read(ta, index, tensor): + ''' + get a tensor array `ta`, a scalar tensor `index`, read the index-th value of + `ta` and return as the `tensor`. + ''' + pass + +def tensor_array_size(ta, tensor): + ''' + get a tensor array `ta`, return the size of `ta` and return as the scalar `tensor`. + ''' + pass +``` + +It is trivial for users to use so many low-level operators, so some helper methods should be proposed in python wrapper to make `TensorArray` easier to use, +for example + +```python +class TensorArray: + def __init__(self, name): + self.name = name + self.desc = TensorArrayDesc() + + def stack(self, name=None): + ''' + Pack the values in a `TensorArray` into a tensor with rank one higher + than each tensor in `values`. + `stack` can be used to split tensor into time steps for RNN or whileloop. + + @name: str + the name of the variable to output. + ''' + tensor = NewVar(name) + tensor_array_stack(self.name, tensor) + return tensor + + def unstack(self, input): + ''' + Unpacks the given dimension of a rank-`R` tensor into rank-`(R-1)` tensors. + `unstack` can be used to concatenate all the time steps for RNN or whileloop. + + @input: str + the name of input tensor + ''' + tensor_array_unstack(tensor, self.name) + + def write(self, index, value, data_shared=True): + ''' + Write value into index of the TensorArray. + If `data_shared` is set to True, than the index-th value in TensorArray will + be shared with the tensor passed in. + + @index: str + name of a scalar tensor + @value: str + name of a tensor + @data_shared: bool + ''' + tensor_array_write(self.name, index, value, data_shared) + + def read(self, index, output): + ''' + Read the value at location `index` in the `TensorArray`. + + @index: str + name of a scalar tensor + @output: + name of a output variable + ''' + tensor_array_read(self.name, index, output) + + + def size(self, output): + ''' + Return the number of values. + + @output: str + name of a scalar tensor + ''' + tensor_array_size(self.name, output) +``` ## LoDTensor-related Supports -The `RecurrentGradientMachine` in Paddle serves as a flexible RNN layer; it takes variant length sequences as input, -because each step of RNN could only take a tensor-represented batch of data as input, +The `RecurrentGradientMachine` in Paddle serves as a flexible RNN layer; it takes varience-length sequences as input, and output sequences too. + +Since each step of RNN can only take a tensor-represented batch of data as input, some preprocess should be taken on the inputs such as sorting the sentences by their length in descending order and cut each word and pack to new batches. -Such cut-like operations can be embedded into `TensorArray` as general methods called `unpack` and `pack`. +Such cut-like operations can be embedded into `TensorArray` as general methods called `unpack` and `pack`, +these two operations are similar to `stack` and `unstack` except that they operate on variable-length sequences formated as a LoD tensor rather than a tensor. + +Some definitions are like + +```python +def unpack(level): + ''' + Split LodTensor in some `level` and generate batches, if set `sort_by_length`, + will sort by length. -With these two methods, a variant-sentence-RNN can be implemented like + Returns: + - a new `TensorArray`, whose values are LodTensors and represents batches + of data. + - an int32 Tensor, which stores the map from the new batch's indices to + original LoDTensor + ''' + pass + +def pack(level, indices_map): + ''' + Recover the original LoD-arranged LoDTensor with the values in a `TensorArray` + and `level` and `indices_map`. + ''' + pass +``` + +With these two methods, a varience-length sentence supported RNN can be implemented like ```c++ // input is the varient-length data @@ -58,16 +269,3 @@ LoDTensor rnn_output = ta.pack(ta, indice_map); ``` the code above shows that by embedding the LoDTensor-related preprocess operations into `TensorArray`, the implementation of a RNN that supports varient-length sentences is far more concise than `RecurrentGradientMachine` because the latter mixes all the codes together, hard to read and extend. - - -some details are as follows. - -### unpack(level, sort_by_length) -Split LodTensor in some `level` and generate batches, if set `sort_by_length`, will sort by length. - -Returns: - -- a new `TensorArray`, whose values are LodTensors and represents batches of data. -- an int32 Tensor, which stores the map from the new batch's indices to original LoDTensor -### pack(level, indices_map) -Recover the original LoD-arranged LoDTensor with the values in a `TensorArray` and `level` and `indices_map`. From bfe6dcb5d345b3a734c2d496a97bffa184dfbbd4 Mon Sep 17 00:00:00 2001 From: Yan Chunwei Date: Fri, 6 Oct 2017 23:48:44 -0400 Subject: [PATCH 253/342] fix RNN and IfElse syntax in Block design (#4210) * fix block syntax * add builder * fix argument names * uniform all Var to var * fix c demo * factor set_outputs * update block * delete as_ifelse_input interface from block design * simplify set_outputs and output_num * make the c++ codes compatible with demo * fix compatible * fix syntax * update ie * update syntax --- doc/design/block.md | 96 ++++++++++++++++++++++----------------------- 1 file changed, 48 insertions(+), 48 deletions(-) diff --git a/doc/design/block.md b/doc/design/block.md index be88001220..4d5dd4ba95 100644 --- a/doc/design/block.md +++ b/doc/design/block.md @@ -55,17 +55,23 @@ Let us consolidate the discussion by presenting some examples. The following C++ programs shows how blocks are used with the `if-else` structure: ```c++ +namespace pd = paddle; + int x = 10; -int y = 20; -int out; +int y = 1; +int z = 10; bool cond = false; +int o1, o2; if (cond) { int z = x + y; - out = softmax(z); + o1 = z; + o2 = pd::layer::softmax(z); } else { - int z = fc(x); - out = z; + int d = pd::layer::fc(z); + o1 = d; + o2 = d+1; } + ``` An equivalent PaddlePaddle program from the design doc of the [IfElseOp operator](./if_else_op.md) is as follows: @@ -73,57 +79,55 @@ An equivalent PaddlePaddle program from the design doc of the [IfElseOp operator ```python import paddle as pd -x = var(10) -y = var(20) -cond = var(false) -ie = pd.create_ifelseop(inputs=[x], output_num=1) +x = minibatch([10, 20, 30]) # shape=[None, 1] +y = var(1) # shape=[1], value=1 +z = minibatch([10, 20, 30]) # shape=[None, 1] +cond = larger_than(x, 15) # [false, true, true] + +ie = pd.ifelse() with ie.true_block(): - x = ie.inputs(true, 0) - z = operator.add(x, y) - ie.set_output(true, 0, operator.softmax(z)) + d = pd.layer.add_scalar(x, y) + ie.output(d, pd.layer.softmax(d)) with ie.false_block(): - x = ie.inputs(false, 0) - z = layer.fc(x) - ie.set_output(true, 0, operator.softmax(z)) -out = b(cond) + d = pd.layer.fc(z) + ie.output(d, d+1) +o1, o2 = ie(cond) ``` -In both examples, the left branch computes `softmax(x+y)` and the right branch computes `fc(x)`. +In both examples, the left branch computes `x+y` and `softmax(x+y)`, the right branch computes `x+1` and `fc(x)`. A difference is that variables in the C++ program contain scalar values, whereas those in the PaddlePaddle programs are mini-batches of instances. The `ie.input(true, 0)` invocation returns instances in the 0-th input, `x`, that corresponds to true values in `cond` as the local variable `x`, where `ie.input(false, 0)` returns instances corresponding to false values. + ### Blocks with `for` and `RNNOp` The following RNN model from the [RNN design doc](./rnn.md) ```python -x = sequence([10, 20, 30]) -m = var(0) -W = tensor() -U = tensor() - -rnn = create_rnn(inputs=[input]) -with rnn.stepnet() as net: - x = net.set_inputs(0) - h = net.add_memory(init=m) - fc_out = pd.matmul(W, x) - hidden_out = pd.matmul(U, h.pre(n=1)) - sum = pd.add_two(fc_out, hidden_out) - act = pd.sigmoid(sum) - h.update(act) # update memory with act - net.set_outputs(0, act, hidden_out) # two outputs - +x = sequence([10, 20, 30]) # shape=[None, 1] +m = var(0) # shape=[1] +W = var(0.314, param=true) # shape=[1] +U = var(0.375, param=true) # shape=[1] + +rnn = pd.rnn() +with rnn.step(): + h = rnn.memory(init = m) + hh = rnn.previous_memory(h) + a = layer.fc(W, x) + b = layer.fc(U, hh) + s = pd.add(a, b) + act = pd.sigmoid(s) + rnn.update_memory(h, act) + rnn.output(a, b) o1, o2 = rnn() -print o1, o2 ``` - has its equivalent C++ program as follows ```c++ int* x = {10, 20, 30}; -int m = 0; -int W = some_value(); -int U = some_other_value(); +int* m = {0}; +int* W = {0.314}; +int* U = {0.375}; int mem[sizeof(x) / sizeof(x[0]) + 1]; int o1[sizeof(x) / sizeof(x[0]) + 1]; @@ -131,20 +135,16 @@ int o2[sizeof(x) / sizeof(x[0]) + 1]; for (int i = 1; i <= sizeof(x)/sizeof(x[0]); ++i) { int x = x[i-1]; if (i == 1) mem[0] = m; - int fc_out = W * x; - int hidden_out = Y * mem[i-1]; - int sum = fc_out + hidden_out; + int a = W * x; + int b = Y * mem[i-1]; + int s = fc_out + hidden_out; int act = sigmoid(sum); mem[i] = act; o1[i] = act; o2[i] = hidden_out; } - -print_array(o1); -print_array(o2); ``` - ## Compilation and Execution Like TensorFlow programs, a PaddlePaddle program is written in Python. The first part describes a neural network as a protobuf message, and the rest part executes the message for training or inference. @@ -210,11 +210,11 @@ a = pd.Varaible(shape=[20, 20]) b = pd.fc(a, params=["fc.w", "fc.b"]) rnn = pd.create_rnn() -with rnn.stepnet() as net: - x = net.set_inputs(a) +with rnn.stepnet() + x = a.as_step_input() # reuse fc's parameter fc_without_b = pd.get_variable("fc.w") - net.set_outputs(fc_without_b) + rnn.output(fc_without_b) out = rnn() ``` From cb1baa3be739023a0a72dc36a45c71f4fc4545f4 Mon Sep 17 00:00:00 2001 From: Yi Wang Date: Fri, 6 Oct 2017 22:54:35 -0700 Subject: [PATCH 254/342] Update program.md (#4618) * Update program.md * Update * Update --- doc/design/program.md | 104 +++++++++++++++++++++++++++---- paddle/framework/framework.proto | 3 + 2 files changed, 94 insertions(+), 13 deletions(-) diff --git a/doc/design/program.md b/doc/design/program.md index fb8f86ac07..bd2456787c 100644 --- a/doc/design/program.md +++ b/doc/design/program.md @@ -1,8 +1,10 @@ -# Design Doc: ProgramDesc +# Design Doc: PaddlePaddle Programs -The basic structure of a PaddlePaddle program is some nested blocks, as a C++ or Java program. +## Compile and Execution + +A PaddlePaddle program consists of two parts -- the first generates a `ProgramDesc` protobuf message that describes the program, and the second runs this message using a C++ class `Executor`. -As described in [graph.md](./graph.md), the first five lines of the following PaddlePaddle program +A simple example PaddlePaddle program can be found in [graph.md](./graph.md): ```python x = layer.data("images") @@ -13,36 +15,112 @@ optimize(cost) train(cost, reader=mnist.train()) ``` -generates, or compiles, a PaddelPaddle program, which is represented by the following protobuf message: +The first five lines of the following PaddlePaddle program generates, or, compiles, the `ProgramDesc` message. The last line runs it. -```protobuf -message ProgramDesc { - repeated BlockDesc blocks = 1; +## Programs and Blocks + +The basic structure of a PaddlePaddle program is some nested blocks, as a C++ or Java program. + +- program: some nested blocks +- [block](./block.md): + - some local variable definitions, and + - a sequence of operators + +The concept of block comes from usual programs. For example, the following C++ program has three blocks: + +```c++ +int main() { // block 0 + int i = 0; + if (i < 10) { // block 1 + for (int j = 0; j < 10; j++) { // block 2 + } + } + return 0; } +``` + +The following PaddlePaddle program has three blocks: + +```python +import paddle as pd // block 0 + +x = minibatch([10, 20, 30]) # shape=[None, 1] +y = var(1) # shape=[1], value=1 +z = minibatch([10, 20, 30]) # shape=[None, 1] +cond = larger_than(x, 15) # [false, true, true] +ie = pd.ifelse() +with ie.true_block(): // block 1 + d = pd.layer.add_scalar(x, y) + ie.output(d, pd.layer.softmax(d)) +with ie.false_block(): // block 2 + d = pd.layer.fc(z) + ie.output(d, d+1) +o1, o2 = ie(cond) +``` + +## `BlockDesc` and `ProgramDesc` + +All protobuf messages are defined in `framework.proto`. + +`BlockDesc` is straight-forward -- it includes local variable definitions, `vars`, and a sequence of operators, `ops`. + +```protobuf message BlockDesc { required int32 parent = 1; repeated VarDesc vars = 2; repeated OpDesc ops = 3; } +``` + +The parent ID indicates the parent block so that operators in a block can refer to variables defined locally and also those defined in their ancestor blocks. + +All hierarchical blocks in a program are flattened and stored in an array. The block ID is the index of the block in this array. + +```protobuf +message ProgramDesc { + repeated BlockDesc blocks = 1; +} +``` + + +### Global Block +The global block is the first one in the above array. + +## Operators that Use Blocks + +In the above example, the operator `IfElseOp` has two blocks -- the true branch and the false branch. + +The definition of `OpDesc` shows that an operator could have some attributes: + +```protobuf message OpDesc { AttrDesc attrs = 1; ... } +``` + +and an attribute could be of type block, which is, in fact, a block ID as described above: +``` message AttrDesc { - required AttrType type = 1; + required string name = 1; - // index into ProgramDesc::blocks when type==BLOCK - optional int32 block = 2; + enum AttrType { + INT = 1, + STRING = 2, + ... + BLOCK = ... + } + required AttrType type = 2; + + optional int32 block = 10; // when type == BLOCK ... } ``` -When each of the first five lines runs, related Python function, e.g., `layer.fc`, calls C++ InferShape functions. This InferShape function needs to access the properties of VarDesc's accessed by the current OpDesc. These VarDesc's might not be defined in the current block, but in some ancestor blocks. This requires that we can trace the parent of a block. - -A nested block is often an attribute of an operator, most likely, an IfElseOp or a WhileOp. In above solution, all blocks are in `ProgramDesc::blocks`, this implicitly assigns a zero-based ID to each block -- the index of the block in `ProgramDesc::blocks`. So that `AttrDesc::block` could be an integer block ID. +## InferShape With this design, the InferShape function should take the following parameters: diff --git a/paddle/framework/framework.proto b/paddle/framework/framework.proto index 9a3077fe6e..ac2827e547 100644 --- a/paddle/framework/framework.proto +++ b/paddle/framework/framework.proto @@ -115,4 +115,7 @@ message BlockDesc { repeated OpDesc ops = 4; } +// Please refer to +// https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/program.md +// for more details. message ProgramDesc { repeated BlockDesc blocks = 1; } From a0767228bd70ff8809a71dc4f9273d9dfac2aa46 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Sat, 7 Oct 2017 00:39:08 -0700 Subject: [PATCH 255/342] merge InferShapeContext and ExecutionContext --- paddle/framework/operator.cc | 8 +++--- paddle/framework/operator.h | 49 +++++++++++++++--------------------- 2 files changed, 24 insertions(+), 33 deletions(-) diff --git a/paddle/framework/operator.cc b/paddle/framework/operator.cc index 2ca838f838..2fca816f35 100644 --- a/paddle/framework/operator.cc +++ b/paddle/framework/operator.cc @@ -205,13 +205,13 @@ void OperatorBase::GenerateTemporaryNames() { } template <> -const Tensor* InferShapeContext::Input(const std::string& name) const { +const Tensor* ExecutionContext::Input(const std::string& name) const { auto* var = InputVar(name); return var == nullptr ? nullptr : GetTensorFromVar(var); } template <> -const std::vector InferShapeContext::MultiInput( +const std::vector ExecutionContext::MultiInput( const std::string& name) const { auto names = op().Inputs(name); std::vector res; @@ -225,13 +225,13 @@ const std::vector InferShapeContext::MultiInput( } template <> -Tensor* InferShapeContext::Output(const std::string& name) const { +Tensor* ExecutionContext::Output(const std::string& name) const { auto var = OutputVar(name); return var == nullptr ? nullptr : var->GetMutable(); } template <> -std::vector InferShapeContext::MultiOutput( +std::vector ExecutionContext::MultiOutput( const std::string& name) const { auto names = op().Outputs(name); std::vector res; diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h index d7bc9c9ffb..af8989dc4f 100644 --- a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -57,7 +57,6 @@ inline std::string GradVarName(const std::string& var_name) { } class OperatorBase; -class InferShapeContext; class ExecutionContext; extern const Tensor* GetTensorFromVar(const Variable* var); @@ -169,10 +168,11 @@ class NOP : public OperatorBase { } }; -class InferShapeContext { +class ExecutionContext { public: - InferShapeContext(const OperatorBase& op, const Scope& scope) - : op_(op), scope_(scope) {} + ExecutionContext(const OperatorBase& op, const Scope& scope, + const platform::DeviceContext& device_context) + : op_(op), scope_(scope), device_context_(device_context) {} const OperatorBase& op() const { return op_; } @@ -278,31 +278,6 @@ class InferShapeContext { out_tensor->set_lod(in_tensor.lod()); } - private: - const OperatorBase& op_; - const Scope& scope_; -}; - -template <> -const Tensor* InferShapeContext::Input(const std::string& name) const; - -template <> -const std::vector InferShapeContext::MultiInput( - const std::string& name) const; - -template <> -Tensor* InferShapeContext::Output(const std::string& name) const; - -template <> -std::vector InferShapeContext::MultiOutput( - const std::string& name) const; - -class ExecutionContext : public InferShapeContext { - public: - ExecutionContext(const OperatorBase& op, const Scope& scope, - const platform::DeviceContext& device_context) - : InferShapeContext(op, scope), device_context_(device_context) {} - template ::EigenDeviceType> @@ -315,9 +290,25 @@ class ExecutionContext : public InferShapeContext { } private: + const OperatorBase& op_; + const Scope& scope_; const platform::DeviceContext& device_context_; }; +template <> +const Tensor* ExecutionContext::Input(const std::string& name) const; + +template <> +const std::vector ExecutionContext::MultiInput( + const std::string& name) const; + +template <> +Tensor* ExecutionContext::Output(const std::string& name) const; + +template <> +std::vector ExecutionContext::MultiOutput( + const std::string& name) const; + class CompileTimeInferShapeContext : public InferShapeContextBase { public: CompileTimeInferShapeContext(const OpDescBind& op, const BlockDescBind& block) From c0a34e1c645381bbf4c80d13b257f62c828662f7 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Sat, 7 Oct 2017 00:47:55 -0700 Subject: [PATCH 256/342] rename InferShapeContextBase to InferShapeContext --- paddle/framework/operator.h | 6 +++--- paddle/framework/operator_test.cc | 2 +- paddle/framework/shape_inference.h | 6 +++--- paddle/operators/accuracy_op.cc | 2 +- paddle/operators/activation_op.cc | 4 ++-- paddle/operators/adadelta_op.cc | 2 +- paddle/operators/adagrad_op.cc | 2 +- paddle/operators/clip_op.cc | 4 ++-- paddle/operators/concat_op.cc | 4 ++-- paddle/operators/conv2d_op.cc | 4 ++-- paddle/operators/cos_sim_op.cc | 4 ++-- paddle/operators/crop_op.cc | 4 ++-- paddle/operators/cross_entropy_op.cc | 4 ++-- paddle/operators/dropout_op.cc | 4 ++-- paddle/operators/elementwise_op.h | 4 ++-- paddle/operators/fill_zeros_like_op.cc | 2 +- paddle/operators/gather_op.cc | 4 ++-- paddle/operators/gaussian_random_op.cc | 2 +- paddle/operators/lookup_table_op.cc | 4 ++-- paddle/operators/lstm_unit_op.cc | 4 ++-- paddle/operators/mean_op.cc | 4 ++-- paddle/operators/minus_op.cc | 2 +- paddle/operators/modified_huber_loss_op.cc | 4 ++-- paddle/operators/mul_op.cc | 4 ++-- paddle/operators/multiplex_op.cc | 4 ++-- paddle/operators/pad_op.cc | 4 ++-- paddle/operators/pool_op.cc | 4 ++-- paddle/operators/prelu_op.cc | 4 ++-- paddle/operators/rank_loss_op.cc | 4 ++-- paddle/operators/reduce_op.cc | 4 ++-- paddle/operators/reshape_op.cc | 4 ++-- paddle/operators/rmsprop_op.cc | 2 +- paddle/operators/scale_op.cc | 2 +- paddle/operators/scatter_op.cc | 4 ++-- paddle/operators/sequence_pool_op.cc | 4 ++-- paddle/operators/sequence_softmax_op.cc | 4 ++-- paddle/operators/sgd_op.cc | 2 +- paddle/operators/sigmoid_cross_entropy_with_logits_op.cc | 4 ++-- paddle/operators/smooth_l1_loss_op.cc | 4 ++-- paddle/operators/softmax_op.cc | 4 ++-- paddle/operators/softmax_with_cross_entropy_op.cc | 4 ++-- paddle/operators/split_op.cc | 2 +- paddle/operators/squared_l2_distance_op.cc | 4 ++-- paddle/operators/sum_op.cc | 2 +- paddle/operators/top_k_op.cc | 2 +- paddle/operators/transpose_op.cc | 4 ++-- paddle/operators/uniform_random_op.cc | 2 +- 47 files changed, 82 insertions(+), 82 deletions(-) diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h index af8989dc4f..1e9ace9987 100644 --- a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -309,7 +309,7 @@ template <> std::vector ExecutionContext::MultiOutput( const std::string& name) const; -class CompileTimeInferShapeContext : public InferShapeContextBase { +class CompileTimeInferShapeContext : public InferShapeContext { public: CompileTimeInferShapeContext(const OpDescBind& op, const BlockDescBind& block) : op_(op), block_(block) {} @@ -405,7 +405,7 @@ class CompileTimeInferShapeContext : public InferShapeContextBase { const BlockDescBind& block_; }; -class RuntimeInferShapeContext : public InferShapeContextBase { +class RuntimeInferShapeContext : public InferShapeContext { public: RuntimeInferShapeContext(const OperatorBase& op, const Scope& scope) : op_(op), scope_(scope) {} @@ -603,7 +603,7 @@ class OperatorWithKernel : public OperatorBase { }); } - virtual void InferShape(InferShapeContextBase* ctx) const = 0; + virtual void InferShape(InferShapeContext* ctx) const = 0; protected: // indicate kernel DataType by input data. Defaultly all input data must be diff --git a/paddle/framework/operator_test.cc b/paddle/framework/operator_test.cc index a0c17b41f2..a02f4668bc 100644 --- a/paddle/framework/operator_test.cc +++ b/paddle/framework/operator_test.cc @@ -113,7 +113,7 @@ class OpWithKernelTest : public OperatorWithKernel { using OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override {} + void InferShape(framework::InferShapeContext* ctx) const override {} DataType IndicateDataType(const ExecutionContext& ctx) const override { return DataType::FP32; } diff --git a/paddle/framework/shape_inference.h b/paddle/framework/shape_inference.h index 74e0371e32..64aab16ae5 100644 --- a/paddle/framework/shape_inference.h +++ b/paddle/framework/shape_inference.h @@ -20,11 +20,11 @@ namespace paddle { namespace framework { // TODO(longfei): Once after both CompileTimeInferShapeContext and -// RuntimeInferShapeContext get merged, we can rename InferShapeContextBase into +// RuntimeInferShapeContext get merged, we can rename InferShapeContext into // InferShapeContext so to replace the current InferShapeContext. -class InferShapeContextBase { +class InferShapeContext { public: - virtual ~InferShapeContextBase() {} + virtual ~InferShapeContext() {} virtual bool HasInput(const std::string &name) const = 0; virtual bool HasOutput(const std::string &name) const = 0; diff --git a/paddle/operators/accuracy_op.cc b/paddle/operators/accuracy_op.cc index 82010bfb53..c5fb113e0f 100644 --- a/paddle/operators/accuracy_op.cc +++ b/paddle/operators/accuracy_op.cc @@ -22,7 +22,7 @@ class AccuracyOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase *ctx) const override { + void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("Inference"), "Input(Inference) of AccuracyOp should not be null."); PADDLE_ENFORCE(ctx->HasInput("Label"), diff --git a/paddle/operators/activation_op.cc b/paddle/operators/activation_op.cc index 66e9d2c401..5df875cd61 100644 --- a/paddle/operators/activation_op.cc +++ b/paddle/operators/activation_op.cc @@ -22,7 +22,7 @@ class ActivationOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase *ctx) const override { + void InferShape(framework::InferShapeContext *ctx) const override { ctx->SetOutputDim("Y", ctx->GetInputDim("X")); ctx->ShareLoD("X", /*->*/ "Y"); } @@ -33,7 +33,7 @@ class ActivationOpGrad : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase *ctx) const override { + void InferShape(framework::InferShapeContext *ctx) const override { ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("Y")); } }; diff --git a/paddle/operators/adadelta_op.cc b/paddle/operators/adadelta_op.cc index bd8c93b4a1..cf1bca1658 100644 --- a/paddle/operators/adadelta_op.cc +++ b/paddle/operators/adadelta_op.cc @@ -22,7 +22,7 @@ class AdadeltaOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase *ctx) const override { + void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("Param"), "Input(Param) of AdadeltaOp should not be null."); PADDLE_ENFORCE(ctx->HasInput("Grad"), diff --git a/paddle/operators/adagrad_op.cc b/paddle/operators/adagrad_op.cc index ea2ff3c503..a17747efb7 100644 --- a/paddle/operators/adagrad_op.cc +++ b/paddle/operators/adagrad_op.cc @@ -22,7 +22,7 @@ class AdagradOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase *ctx) const override { + void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("Param"), "Input(Param) of AdagradOp should not be null."); PADDLE_ENFORCE(ctx->HasInput("Grad"), diff --git a/paddle/operators/clip_op.cc b/paddle/operators/clip_op.cc index b3dd060fd7..3e9b0d82ba 100644 --- a/paddle/operators/clip_op.cc +++ b/paddle/operators/clip_op.cc @@ -22,7 +22,7 @@ class ClipOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of ClipOp should not be null."); PADDLE_ENFORCE(ctx->HasOutput("Out"), @@ -61,7 +61,7 @@ class ClipOpGrad : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null"); PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), "Input(Out@GRAD) should not be null"); diff --git a/paddle/operators/concat_op.cc b/paddle/operators/concat_op.cc index 1ffa02c8f9..235c4449ac 100644 --- a/paddle/operators/concat_op.cc +++ b/paddle/operators/concat_op.cc @@ -24,7 +24,7 @@ class ConcatOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase *ctx) const override { + void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE_GE(ctx->Inputs("X").size(), 1UL, "Inputs(X) of ConcatOp should be empty.") PADDLE_ENFORCE(ctx->HasOutput("Out"), @@ -83,7 +83,7 @@ class ConcatOpGrad : public framework::OperatorWithKernel { : OperatorWithKernel(type, inputs, outputs, attrs) {} protected: - void InferShape(framework::InferShapeContextBase *ctx) const override { + void InferShape(framework::InferShapeContext *ctx) const override { ctx->SetOutputsDim(framework::GradVarName("X"), ctx->GetInputsDim("X")); } }; diff --git a/paddle/operators/conv2d_op.cc b/paddle/operators/conv2d_op.cc index 5cc82944bb..6325d4248f 100644 --- a/paddle/operators/conv2d_op.cc +++ b/paddle/operators/conv2d_op.cc @@ -27,7 +27,7 @@ class Conv2DOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("Input"), "Input(Input) of Conv2DOp should not be null."); PADDLE_ENFORCE(ctx->HasInput("Filter"), @@ -106,7 +106,7 @@ class Conv2DOpGrad : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { auto in_dims = ctx->GetInputDim("Input"); auto filter_dims = ctx->GetInputDim("Filter"); if (ctx->HasOutput(framework::GradVarName("Input"))) { diff --git a/paddle/operators/cos_sim_op.cc b/paddle/operators/cos_sim_op.cc index 040546f1a6..2b4c4b9c45 100644 --- a/paddle/operators/cos_sim_op.cc +++ b/paddle/operators/cos_sim_op.cc @@ -24,7 +24,7 @@ class CosSimOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { // notnull check PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of CosSimOp should not be null."); @@ -98,7 +98,7 @@ class CosSimOpGrad : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { // notnull check PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) must not be null."); PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) must not be null."); diff --git a/paddle/operators/crop_op.cc b/paddle/operators/crop_op.cc index 9b2305e90e..a1424993cc 100644 --- a/paddle/operators/crop_op.cc +++ b/paddle/operators/crop_op.cc @@ -25,7 +25,7 @@ class CropOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of CropOp should not be null."); PADDLE_ENFORCE(ctx->HasOutput("Out"), @@ -115,7 +115,7 @@ class CropOpGrad : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null"); PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), "Input(Out@GRAD) should not be null"); diff --git a/paddle/operators/cross_entropy_op.cc b/paddle/operators/cross_entropy_op.cc index 4b67887f36..708e80e96a 100644 --- a/paddle/operators/cross_entropy_op.cc +++ b/paddle/operators/cross_entropy_op.cc @@ -22,7 +22,7 @@ class CrossEntropyOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should be not null."); PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) should be not null."); PADDLE_ENFORCE(ctx->HasOutput("Y"), "Output(Y) should be not null."); @@ -60,7 +60,7 @@ class CrossEntropyGradientOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should be not null."); PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) should be not null."); PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Y")), diff --git a/paddle/operators/dropout_op.cc b/paddle/operators/dropout_op.cc index a669b5cf00..708ccfa0bf 100644 --- a/paddle/operators/dropout_op.cc +++ b/paddle/operators/dropout_op.cc @@ -24,7 +24,7 @@ class DropoutOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) must not be null."); PADDLE_ENFORCE_GE(ctx->Attrs().Get("dropout_prob"), 0); PADDLE_ENFORCE_LE(ctx->Attrs().Get("dropout_prob"), 1); @@ -70,7 +70,7 @@ class DropoutOpGrad : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE_EQ(ctx->Attrs().Get("is_training"), 1, "GradOp is only callable when is_training is true"); diff --git a/paddle/operators/elementwise_op.h b/paddle/operators/elementwise_op.h index 3082f37422..66f1910a47 100644 --- a/paddle/operators/elementwise_op.h +++ b/paddle/operators/elementwise_op.h @@ -25,7 +25,7 @@ class ElementwiseOp : public framework::OperatorWithKernel { protected: using Tensor = framework::Tensor; - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of elementwise op should not be null"); PADDLE_ENFORCE(ctx->HasInput("Y"), @@ -106,7 +106,7 @@ class ElementwiseOpGrad : public framework::OperatorWithKernel { using Tensor = framework::Tensor; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null"); PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) should not be null"); PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), diff --git a/paddle/operators/fill_zeros_like_op.cc b/paddle/operators/fill_zeros_like_op.cc index e164de6584..4c70b9a36b 100644 --- a/paddle/operators/fill_zeros_like_op.cc +++ b/paddle/operators/fill_zeros_like_op.cc @@ -22,7 +22,7 @@ class FillZerosLikeOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase *ctx) const override { + void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of FillZerosLikeOp should not be null."); PADDLE_ENFORCE(ctx->HasOutput("Y"), diff --git a/paddle/operators/gather_op.cc b/paddle/operators/gather_op.cc index fe305337cb..fb99c6c016 100644 --- a/paddle/operators/gather_op.cc +++ b/paddle/operators/gather_op.cc @@ -23,7 +23,7 @@ class GatherOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of GatherOp should not be null."); PADDLE_ENFORCE(ctx->HasInput("Index"), @@ -51,7 +51,7 @@ class GatherGradOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X")); } diff --git a/paddle/operators/gaussian_random_op.cc b/paddle/operators/gaussian_random_op.cc index 5cd2c7d2c0..ca7fb38505 100644 --- a/paddle/operators/gaussian_random_op.cc +++ b/paddle/operators/gaussian_random_op.cc @@ -43,7 +43,7 @@ class GaussianRandomOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) of GaussianRandomOp should not be null."); auto dims = ctx->Attrs().Get>("dims"); diff --git a/paddle/operators/lookup_table_op.cc b/paddle/operators/lookup_table_op.cc index 929008fbcb..3f8d4ab857 100644 --- a/paddle/operators/lookup_table_op.cc +++ b/paddle/operators/lookup_table_op.cc @@ -22,7 +22,7 @@ class LookupTableOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("W"), "Input(W) of LookupTableOp should not be null."); PADDLE_ENFORCE(ctx->HasInput("Ids"), @@ -70,7 +70,7 @@ class LookupTableOpGrad : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { auto table_dims = ctx->GetInputDim("W"); ctx->SetOutputDim(framework::GradVarName("W"), table_dims); } diff --git a/paddle/operators/lstm_unit_op.cc b/paddle/operators/lstm_unit_op.cc index dad56731de..13a45ec246 100644 --- a/paddle/operators/lstm_unit_op.cc +++ b/paddle/operators/lstm_unit_op.cc @@ -22,7 +22,7 @@ class LstmUnitOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of LSTM should not be null."); PADDLE_ENFORCE(ctx->HasInput("C_prev"), "Input(C_prev) of LSTM should not be null."); @@ -77,7 +77,7 @@ class LstmUnitGradOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("C")), "Input(C@GRAD) should not be null"); PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("H")), diff --git a/paddle/operators/mean_op.cc b/paddle/operators/mean_op.cc index 2332c9546b..441543049f 100644 --- a/paddle/operators/mean_op.cc +++ b/paddle/operators/mean_op.cc @@ -22,7 +22,7 @@ class MeanOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of MeanOp should not be null."); PADDLE_ENFORCE(ctx->HasOutput("Out"), @@ -47,7 +47,7 @@ class MeanGradOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X")); } }; diff --git a/paddle/operators/minus_op.cc b/paddle/operators/minus_op.cc index 7057dcbd6e..d7fd2f901b 100644 --- a/paddle/operators/minus_op.cc +++ b/paddle/operators/minus_op.cc @@ -26,7 +26,7 @@ class MinusOp : public framework::OperatorWithKernel { : OperatorWithKernel(type, inputs, outputs, attrs) {} protected: - void InferShape(framework::InferShapeContextBase *ctx) const override { + void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of MinusOp should not be null."); PADDLE_ENFORCE(ctx->HasInput("Y"), diff --git a/paddle/operators/modified_huber_loss_op.cc b/paddle/operators/modified_huber_loss_op.cc index 84212a2b3b..6522327fdc 100644 --- a/paddle/operators/modified_huber_loss_op.cc +++ b/paddle/operators/modified_huber_loss_op.cc @@ -22,7 +22,7 @@ class ModifiedHuberLossOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "X must be initialized."); PADDLE_ENFORCE(ctx->HasInput("Y"), "Y must be initialized."); @@ -74,7 +74,7 @@ class ModifiedHuberLossGradOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "X must be initialized."); PADDLE_ENFORCE(ctx->HasInput("Y"), "Y must be initialized."); PADDLE_ENFORCE(ctx->HasInput("IntermediateVal"), diff --git a/paddle/operators/mul_op.cc b/paddle/operators/mul_op.cc index 3c8fe04d2e..ec0683d887 100644 --- a/paddle/operators/mul_op.cc +++ b/paddle/operators/mul_op.cc @@ -24,7 +24,7 @@ class MulOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of MulOp should not be null."); PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) of MulOp should not be null."); PADDLE_ENFORCE(ctx->HasOutput("Out"), @@ -97,7 +97,7 @@ class MulOpGrad : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null"); PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) should not be null"); PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), diff --git a/paddle/operators/multiplex_op.cc b/paddle/operators/multiplex_op.cc index a069127a19..a86685b6dd 100644 --- a/paddle/operators/multiplex_op.cc +++ b/paddle/operators/multiplex_op.cc @@ -24,7 +24,7 @@ class MultiplexOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("Ids"), "Input(Ids) shouldn't be null."); PADDLE_ENFORCE(!ctx->Inputs("X").empty(), "MultiInput(X) shouldn't be empty."); @@ -90,7 +90,7 @@ class MultiplexGradOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(!ctx->Inputs("X").empty(), "Input(X) should not be null."); PADDLE_ENFORCE(!ctx->Outputs(framework::GradVarName("X")).empty(), "Output(X@Grad) should not be null."); diff --git a/paddle/operators/pad_op.cc b/paddle/operators/pad_op.cc index 15aa05f266..2f26ada85e 100644 --- a/paddle/operators/pad_op.cc +++ b/paddle/operators/pad_op.cc @@ -24,7 +24,7 @@ class PadOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of PadOp should not be null."); PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) of PadOp should not be null."); @@ -98,7 +98,7 @@ class PadOpGrad : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null"); PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), "Input(Out@GRAD) should not be null"); diff --git a/paddle/operators/pool_op.cc b/paddle/operators/pool_op.cc index c29f51f056..ba3b5ed207 100644 --- a/paddle/operators/pool_op.cc +++ b/paddle/operators/pool_op.cc @@ -27,7 +27,7 @@ class PoolOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase *ctx) const override { + void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "X(Input) of Pooling should not be null."); PADDLE_ENFORCE(ctx->HasOutput("Out"), @@ -74,7 +74,7 @@ class PoolOpGrad : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase *ctx) const override { + void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "X(Input) of Pooling should not be null."); PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")), diff --git a/paddle/operators/prelu_op.cc b/paddle/operators/prelu_op.cc index 1692464f28..166fe26824 100644 --- a/paddle/operators/prelu_op.cc +++ b/paddle/operators/prelu_op.cc @@ -26,7 +26,7 @@ class PReluOp : public framework::OperatorWithKernel { : OperatorWithKernel(type, inputs, outputs, attrs) {} protected: - void InferShape(framework::InferShapeContextBase *ctx) const override { + void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null"); PADDLE_ENFORCE(ctx->HasInput("Alpha"), "Input(Alpha) should not be null"); PADDLE_ENFORCE(product(ctx->GetInputDim("Alpha")) == 1, @@ -63,7 +63,7 @@ class PReluGradOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase *ctx) const override { + void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) must not be null."); PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), "Input(Out@GRAD) should not be null"); diff --git a/paddle/operators/rank_loss_op.cc b/paddle/operators/rank_loss_op.cc index 1ba22006f2..e0abbc4db1 100644 --- a/paddle/operators/rank_loss_op.cc +++ b/paddle/operators/rank_loss_op.cc @@ -25,7 +25,7 @@ class RankLossOp : public framework::OperatorWithKernel { : OperatorWithKernel(type, inputs, outputs, attrs) {} protected: - void InferShape(framework::InferShapeContextBase *ctx) const override { + void InferShape(framework::InferShapeContext *ctx) const override { // input check PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) shouldn't be null"); PADDLE_ENFORCE(ctx->HasInput("Left"), "Input(Left) shouldn't be null"); @@ -90,7 +90,7 @@ class RankLossGradOp : public framework::OperatorWithKernel { : OperatorWithKernel(type, inputs, outputs, attrs) {} protected: - void InferShape(framework::InferShapeContextBase *ctx) const override { + void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) shouldn't be null."); PADDLE_ENFORCE(ctx->HasInput("Left"), "Input(Left) shouldn't be null."); PADDLE_ENFORCE(ctx->HasInput("Right"), "Input(Right) shouldn't be null."); diff --git a/paddle/operators/reduce_op.cc b/paddle/operators/reduce_op.cc index 3ef443d1c7..12081ee6f0 100644 --- a/paddle/operators/reduce_op.cc +++ b/paddle/operators/reduce_op.cc @@ -24,7 +24,7 @@ class ReduceOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase *ctx) const override { + void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of ReduceOp should not be null."); PADDLE_ENFORCE(ctx->HasOutput("Out"), @@ -58,7 +58,7 @@ class ReduceGradOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase *ctx) const override { + void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null."); PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), "Input(Out@GRAD) should not be null."); diff --git a/paddle/operators/reshape_op.cc b/paddle/operators/reshape_op.cc index a3c3fa2716..3cd54930a0 100644 --- a/paddle/operators/reshape_op.cc +++ b/paddle/operators/reshape_op.cc @@ -26,7 +26,7 @@ class ReshapeOp : public framework::OperatorWithKernel { : OperatorWithKernel(type, inputs, outputs, attrs) {} protected: - void InferShape(framework::InferShapeContextBase *ctx) const override { + void InferShape(framework::InferShapeContext *ctx) const override { // input check PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of ReshapeOp should not be null."); @@ -94,7 +94,7 @@ class ReshapeGradOp : public framework::OperatorWithKernel { : OperatorWithKernel(type, inputs, outputs, attrs) {} protected: - void InferShape(framework::InferShapeContextBase *ctx) const override { + void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) shouldn't be null."); PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), "Input(Out@GRAD) shouldn't be null."); diff --git a/paddle/operators/rmsprop_op.cc b/paddle/operators/rmsprop_op.cc index 8f61c7fdda..ada6f2bc3c 100644 --- a/paddle/operators/rmsprop_op.cc +++ b/paddle/operators/rmsprop_op.cc @@ -22,7 +22,7 @@ class RmspropOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase *ctx) const override { + void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("Param"), "Input(Param) of RmspropOp should not be null."); PADDLE_ENFORCE(ctx->HasInput("MeanSquare"), diff --git a/paddle/operators/scale_op.cc b/paddle/operators/scale_op.cc index e225aecc27..ac297da6b7 100644 --- a/paddle/operators/scale_op.cc +++ b/paddle/operators/scale_op.cc @@ -26,7 +26,7 @@ class ScaleOp : public framework::OperatorWithKernel { : OperatorWithKernel(type, inputs, outputs, attrs) {} protected: - void InferShape(framework::InferShapeContextBase *ctx) const override { + void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of ScaleOp should not be null."); PADDLE_ENFORCE(ctx->HasOutput("Out"), diff --git a/paddle/operators/scatter_op.cc b/paddle/operators/scatter_op.cc index d15ba15153..fbea01a8db 100644 --- a/paddle/operators/scatter_op.cc +++ b/paddle/operators/scatter_op.cc @@ -23,7 +23,7 @@ class ScatterOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("Ref"), "Input(Ref) of ScatterOp should not be null."); PADDLE_ENFORCE(ctx->HasInput("Index"), @@ -60,7 +60,7 @@ class ScatterGradOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { ctx->SetOutputDim(framework::GradVarName("Updates"), ctx->GetInputDim("Updates")); ctx->SetOutputDim(framework::GradVarName("Ref"), ctx->GetInputDim("Ref")); diff --git a/paddle/operators/sequence_pool_op.cc b/paddle/operators/sequence_pool_op.cc index bc4af2f704..06c00d31ea 100644 --- a/paddle/operators/sequence_pool_op.cc +++ b/paddle/operators/sequence_pool_op.cc @@ -22,7 +22,7 @@ class SequencePoolOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of SequencePoolOp should not be null."); PADDLE_ENFORCE(ctx->HasOutput("Out"), @@ -74,7 +74,7 @@ class SequencePoolGradOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), "Gradient of Out should not be null."); PADDLE_ENFORCE(ctx->HasInput("X"), "The input X should not be null."); diff --git a/paddle/operators/sequence_softmax_op.cc b/paddle/operators/sequence_softmax_op.cc index 621779ab61..ea217ba459 100644 --- a/paddle/operators/sequence_softmax_op.cc +++ b/paddle/operators/sequence_softmax_op.cc @@ -22,7 +22,7 @@ class SequenceSoftmaxOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of SequenceSoftmaxOp should not be null."); PADDLE_ENFORCE(ctx->HasOutput("Out"), @@ -67,7 +67,7 @@ class SequenceSoftmaxGradOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("Out"), "Input(Out) of SequenceSoftmaxGradOp should not be null."); PADDLE_ENFORCE( diff --git a/paddle/operators/sgd_op.cc b/paddle/operators/sgd_op.cc index 31d491f130..2a6a162a02 100644 --- a/paddle/operators/sgd_op.cc +++ b/paddle/operators/sgd_op.cc @@ -22,7 +22,7 @@ class SGDOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase *ctx) const override { + void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("Param"), "Input(Param) of SGDOp should not be null."); PADDLE_ENFORCE(ctx->HasInput("Grad"), diff --git a/paddle/operators/sigmoid_cross_entropy_with_logits_op.cc b/paddle/operators/sigmoid_cross_entropy_with_logits_op.cc index ede458e011..b6653e1cc7 100644 --- a/paddle/operators/sigmoid_cross_entropy_with_logits_op.cc +++ b/paddle/operators/sigmoid_cross_entropy_with_logits_op.cc @@ -24,7 +24,7 @@ class SigmoidCrossEntropyWithLogitsOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should be not null."); PADDLE_ENFORCE(ctx->HasInput("Labels"), "Input(Labels) should be not null."); @@ -53,7 +53,7 @@ class SigmoidCrossEntropyWithLogitsGradOp using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should be not null."); PADDLE_ENFORCE(ctx->HasInput("Labels"), "Input(Labels) should be not null."); diff --git a/paddle/operators/smooth_l1_loss_op.cc b/paddle/operators/smooth_l1_loss_op.cc index 2d197e3b1b..91391dc945 100644 --- a/paddle/operators/smooth_l1_loss_op.cc +++ b/paddle/operators/smooth_l1_loss_op.cc @@ -22,7 +22,7 @@ class SmoothL1LossOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "X must be initialized."); PADDLE_ENFORCE(ctx->HasInput("Y"), "Y must be initialized."); @@ -94,7 +94,7 @@ class SmoothL1LossGradOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { auto in_dims = ctx->GetInputDim("X"); auto out_dims = ctx->GetInputDim(framework::GradVarName("Out")); diff --git a/paddle/operators/softmax_op.cc b/paddle/operators/softmax_op.cc index e353afee3e..4c131ed44d 100644 --- a/paddle/operators/softmax_op.cc +++ b/paddle/operators/softmax_op.cc @@ -22,7 +22,7 @@ class SoftmaxOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of SoftmaxOp should not be null."); PADDLE_ENFORCE(ctx->HasOutput("Y"), @@ -69,7 +69,7 @@ class SoftmaxOpGrad : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) should be not null."); PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Y")), "Input(Y@GRAD) should be not null."); diff --git a/paddle/operators/softmax_with_cross_entropy_op.cc b/paddle/operators/softmax_with_cross_entropy_op.cc index 42c1ba6fdf..5431a1657c 100644 --- a/paddle/operators/softmax_with_cross_entropy_op.cc +++ b/paddle/operators/softmax_with_cross_entropy_op.cc @@ -83,7 +83,7 @@ class SoftmaxWithCrossEntropyOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("Logits"), "Input(Logits) should be not null."); PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) should be not null."); @@ -128,7 +128,7 @@ class SoftmaxWithCrossEntropyOpGrad : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Loss")), "Input(Loss@Grad) should not be null."); PADDLE_ENFORCE(ctx->HasInput("Softmax"), diff --git a/paddle/operators/split_op.cc b/paddle/operators/split_op.cc index 5f4b5539af..d5dd4df2a2 100644 --- a/paddle/operators/split_op.cc +++ b/paddle/operators/split_op.cc @@ -24,7 +24,7 @@ class SplitOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase *ctx) const override { + void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of SplitOp should not be null."); PADDLE_ENFORCE_GE(ctx->Outputs("Out").size(), 1UL, diff --git a/paddle/operators/squared_l2_distance_op.cc b/paddle/operators/squared_l2_distance_op.cc index 5a0cb59600..cce4e527c3 100644 --- a/paddle/operators/squared_l2_distance_op.cc +++ b/paddle/operators/squared_l2_distance_op.cc @@ -22,7 +22,7 @@ class SquaredL2DistanceOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of SquaredL2DistanceOp should not be null."); PADDLE_ENFORCE(ctx->HasInput("Y"), @@ -86,7 +86,7 @@ class SquaredL2DistanceGradOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), "Gradient of Out should not be null"); auto out_dims = ctx->GetInputDim(framework::GradVarName("Out")); diff --git a/paddle/operators/sum_op.cc b/paddle/operators/sum_op.cc index c701ee8dde..ffb0cb9211 100644 --- a/paddle/operators/sum_op.cc +++ b/paddle/operators/sum_op.cc @@ -22,7 +22,7 @@ class SumOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInputs("X"), "Inputs(X) should not be null"); auto x_dims = ctx->GetInputsDim("X"); PADDLE_ENFORCE(ctx->HasOutput("Out"), diff --git a/paddle/operators/top_k_op.cc b/paddle/operators/top_k_op.cc index 5f22bf1df8..c954819912 100644 --- a/paddle/operators/top_k_op.cc +++ b/paddle/operators/top_k_op.cc @@ -22,7 +22,7 @@ class TopkOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase *ctx) const override { + void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of TopkOp should not be null."); PADDLE_ENFORCE(ctx->HasOutput("Out"), diff --git a/paddle/operators/transpose_op.cc b/paddle/operators/transpose_op.cc index 0672f9342d..1101bbe3ef 100644 --- a/paddle/operators/transpose_op.cc +++ b/paddle/operators/transpose_op.cc @@ -24,7 +24,7 @@ class TransposeOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null"); PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) should not be null"); auto x_dims = ctx->GetInputDim("X"); @@ -93,7 +93,7 @@ class TransposeOpGrad : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null"); PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), "Input(Out@GRAD) should not be null"); diff --git a/paddle/operators/uniform_random_op.cc b/paddle/operators/uniform_random_op.cc index 97b1d0bed4..e330877fc4 100644 --- a/paddle/operators/uniform_random_op.cc +++ b/paddle/operators/uniform_random_op.cc @@ -47,7 +47,7 @@ class UniformRandomOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) of UniformRandomOp should not be null."); From 005f15b4957fcce594e1a3b8a27be1c1723ab0fc Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Sat, 7 Oct 2017 21:46:00 +0000 Subject: [PATCH 257/342] FeedOp and FetchOp unit test --- paddle/framework/executor.cc | 6 ++-- paddle/framework/executor_test.cc | 56 +++++++++++-------------------- 2 files changed, 22 insertions(+), 40 deletions(-) diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc index 4f217277d0..9391e18ded 100644 --- a/paddle/framework/executor.cc +++ b/paddle/framework/executor.cc @@ -69,12 +69,10 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope) { } std::vector should_run = Preprocess(pdesc); - PADDLE_ENFORCE(should_run.size() == block.ops_size(), - "should_run.size() != block.ops_size()"); - for (int i = 0; i < should_run.size(); ++i) { + PADDLE_ENFORCE(should_run.size() == block.ops_size()); + for (size_t i = 0; i < should_run.size(); ++i) { if (should_run[i]) { auto op = paddle::framework::OpRegistry::CreateOp(block.ops(i)); - std::cout << op->DebugString() << std::endl; op->Run(*scope, *device); } } diff --git a/paddle/framework/executor_test.cc b/paddle/framework/executor_test.cc index 6a4b2e3d1a..b198fa143c 100644 --- a/paddle/framework/executor_test.cc +++ b/paddle/framework/executor_test.cc @@ -127,10 +127,11 @@ void add_fetch_op(string var_name, std::vector& dim, int index, std::once_flag set_variable_flag; +// Tensors in feed value variable will only be in CPUPlace +// So we can memcpy the data from vector to feed_value template void set_feed_variable(const std::vector>& inputs) { typedef std::vector FeedInputs; - // Tensors in feed value variable will only be in CPUPlace Variable* g_feed_value = GetGlobalScope()->FindVar("feed_value"); FeedInputs& feed_inputs = *(g_feed_value->GetMutable()); auto size = inputs.size(); @@ -142,10 +143,11 @@ void set_feed_variable(const std::vector>& inputs) { } } +// Tensors in fetch value variable will only be in CPUPlace +// So we can memcpy the data from fetch_value to vector template std::vector> get_fetch_variable() { typedef std::vector FetchOutputs; - // Tensors in fetch value variable will only be in CPUPlace Variable* g_fetch_value = GetGlobalScope()->FindVar("fetch_value"); FetchOutputs& fetch_outputs = *(g_fetch_value->GetMutable()); @@ -159,6 +161,7 @@ std::vector> get_fetch_variable() { fetch_outputs[i].numel() * sizeof(T)); result.push_back(tmp); } + return result; } @@ -197,7 +200,7 @@ class ExecutorTesterRandom : public ::testing::Test { ProgramDesc pdesc_; }; -class ExecutorTesterFeed : public ::testing::Test { +class ExecutorTesterFeedAndFetch : public ::testing::Test { public: virtual void SetUp() override { auto root_block = pdesc_.add_blocks(); @@ -208,26 +211,8 @@ class ExecutorTesterFeed : public ::testing::Test { add_feed_op("a", dim, 0, root_block); add_feed_op("b", dim, 1, root_block); - - auto c = root_block->add_vars(); - c->set_name("c"); - auto c_lt = c->mutable_lod_tensor(); - c_lt->set_data_type(paddle::framework::DataType::FP32); - - auto op = root_block->add_ops(); - op->set_type("elementwise_add"); - auto X = op->add_inputs(); - X->set_parameter("X"); - X->add_arguments("a"); - auto Y = op->add_inputs(); - Y->set_parameter("Y"); - Y->add_arguments("b"); - auto Out = op->add_outputs(); - Out->set_parameter("Out"); - Out->add_arguments("c"); - add_fetch_op("a", dim, 0, root_block); - add_fetch_op("c", dim, 0, root_block); + add_fetch_op("b", dim, 1, root_block); std::vector vec1 = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0}; std::vector vec2 = {4.0, 5.0, 6.0, 7.0, 8.0, 9.0}; @@ -255,6 +240,7 @@ TEST_F(ExecutorTesterRandom, CPU) { Executor* executor = new Executor(places); executor->Run(pdesc_, GetGlobalScope()); std::vector> result = get_fetch_variable(); + for (auto& vec : result) { for (auto& num : vec) { std::cout << num << " "; @@ -264,7 +250,7 @@ TEST_F(ExecutorTesterRandom, CPU) { delete executor; } -TEST_F(ExecutorTesterFeed, CPU) { +TEST_F(ExecutorTesterFeedAndFetch, CPU) { std::vector places; CPUPlace cpu_place; places.push_back(cpu_place); @@ -279,16 +265,15 @@ TEST_F(ExecutorTesterFeed, CPU) { // 3 mini-batch for (int i = 0; i < 3; i++) { - // need to set feed variable before Executor::Run - std::cout << "start mini-batch " << i << std::endl; set_feed_variable(inputs_); executor->Run(pdesc_, GetGlobalScope()); std::vector> result = get_fetch_variable(); - for (auto& vec : result) { - for (auto& num : vec) { - std::cout << num << " "; + PADDLE_ENFORCE_EQ(result.size(), inputs_.size()); + for (size_t i = 0; i < result.size(); ++i) { + PADDLE_ENFORCE_EQ(result[i].size(), inputs_[i].size()); + for (size_t j = 0; j < result[i].size(); ++j) { + PADDLE_ENFORCE_EQ(result[i][j], inputs_[i][j]); } - std::cout << std::endl; } } @@ -314,7 +299,7 @@ TEST_F(ExecutorTesterRandom, GPU) { delete executor; } -TEST_F(ExecutorTesterFeed, GPU) { +TEST_F(ExecutorTesterFeedAndFetch, GPU) { std::vector places; GPUPlace gpu_place(0); places.push_back(gpu_place); @@ -331,16 +316,15 @@ TEST_F(ExecutorTesterFeed, GPU) { // 3 mini-batch for (int i = 0; i < 3; i++) { - // need to set feed variable before Executor::Run - std::cout << "start mini-batch " << i << std::endl; set_feed_variable(inputs_); executor->Run(pdesc_, GetGlobalScope()); std::vector> result = get_fetch_variable(); - for (auto& vec : result) { - for (auto& num : vec) { - std::cout << num << " "; + PADDLE_ENFORCE_EQ(result.size(), inputs_.size()); + for (size_t i = 0; i < result.size(); ++i) { + PADDLE_ENFORCE_EQ(result[i].size(), inputs_[i].size()); + for (size_t j = 0; j < result[i].size(); ++j) { + PADDLE_ENFORCE_EQ(result[i][j], inputs_[i][j]); } - std::cout << std::endl; } } delete executor; From a67e8ea3eb8475a17f6285e5cfbe1bf231e0bd28 Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Sun, 8 Oct 2017 04:49:10 +0000 Subject: [PATCH 258/342] Add AddOp --- paddle/framework/executor_test.cc | 147 +++++++++++++++++++++++++----- 1 file changed, 125 insertions(+), 22 deletions(-) diff --git a/paddle/framework/executor_test.cc b/paddle/framework/executor_test.cc index b198fa143c..cf1752f6d8 100644 --- a/paddle/framework/executor_test.cc +++ b/paddle/framework/executor_test.cc @@ -16,7 +16,9 @@ limitations under the License. */ #include #include "gtest/gtest.h" #include "paddle/framework/attribute.h" +#include "paddle/framework/block_desc.h" #include "paddle/framework/grad_op_builder.h" +#include "paddle/framework/op_desc.h" #include "paddle/framework/op_registry.h" #include "paddle/framework/operator.h" @@ -24,6 +26,7 @@ USE_OP(elementwise_add); USE_OP(gaussian_random); USE_OP(feed); USE_OP(fetch); +USE_OP(mul); using std::string; using namespace paddle::platform; @@ -32,7 +35,71 @@ using namespace paddle::framework; typedef paddle::framework::BlockDesc proto_block; typedef paddle::framework::OpDesc proto_op; -void add_gaussian_random_op(string var_name, std::vector& dim, +struct SetAttrDescVisitor : public boost::static_visitor { + explicit SetAttrDescVisitor(OpDesc::Attr* attr) : attr_(attr) {} + mutable OpDesc::Attr* attr_; + void operator()(int v) const { attr_->set_i(v); } + void operator()(float v) const { attr_->set_f(v); } + void operator()(const std::string& v) const { attr_->set_s(v); } + void operator()(bool b) const { attr_->set_b(b); } + + void operator()(const std::vector& v) const { + VectorToRepeated(v, attr_->mutable_ints()); + } + void operator()(const std::vector& v) const { + VectorToRepeated(v, attr_->mutable_floats()); + } + void operator()(const std::vector& v) const { + VectorToRepeated(v, attr_->mutable_strings()); + } + void operator()(const std::vector& v) const { + VectorToRepeated(v, attr_->mutable_bools()); + } + void operator()(BlockDesc* desc) const { attr_->set_block_idx(desc->idx()); } + void operator()(boost::blank) const { PADDLE_THROW("Unexpected branch"); } +}; + +void AddOp(const std::string& type, const VariableNameMap& inputs, + const VariableNameMap& outputs, AttributeMap attrs, + proto_block* block) { + // insert output + for (auto kv : outputs) { + for (auto v : kv.second) { + auto var = block->add_vars(); + var->set_name(v); + auto var_lt = var->mutable_lod_tensor(); + var_lt->set_data_type(paddle::framework::DataType::FP32); + } + } + + // insert op + auto op = block->add_ops(); + op->set_type(type); + for (auto kv : inputs) { + auto X = op->add_inputs(); + X->set_parameter(kv.first); + for (auto argu : kv.second) { + X->add_arguments(argu); + } + } + for (auto kv : outputs) { + auto X = op->add_outputs(); + X->set_parameter(kv.first); + for (auto argu : kv.second) { + X->add_arguments(argu); + } + } + for (auto& attr : attrs) { + auto* attr_desc = op->add_attrs(); + attr_desc->set_name(attr.first); + attr_desc->set_type( + static_cast(attr.second.which() - 1)); + SetAttrDescVisitor visitor(attr_desc); + boost::apply_visitor(visitor, attr.second); + } +} + +void add_gaussian_random_op(string var_name, std::vector dim, proto_block* block) { // insert variable auto a = block->add_vars(); @@ -91,7 +158,7 @@ void add_feed_op(string var_name, std::vector& dim, int index, Out->add_arguments(var_name); } -void add_fetch_op(string var_name, std::vector& dim, int index, +void add_fetch_op(string var_name, std::vector dim, int index, proto_block* block) { // insert variable auto a = block->add_vars(); @@ -125,6 +192,28 @@ void add_fetch_op(string var_name, std::vector& dim, int index, Out->add_arguments(var_name); } +void add_mul_op(string X_str, string Y_str, string Out_str, + proto_block* block) { + // insert variable + auto a = block->add_vars(); + a->set_name(Out_str); + auto a_lt = a->mutable_lod_tensor(); + a_lt->set_data_type(paddle::framework::DataType::FP32); + + // insert op + auto op = block->add_ops(); + op->set_type("mul"); + auto X = op->add_inputs(); + X->set_parameter("X"); + X->add_arguments(X_str); + auto Y = op->add_inputs(); + Y->set_parameter("Y"); + Y->add_arguments(Y_str); + auto Out = op->add_outputs(); + Out->set_parameter("Out"); + Out->add_arguments(Out_str); +} + std::once_flag set_variable_flag; // Tensors in feed value variable will only be in CPUPlace @@ -168,36 +257,37 @@ std::vector> get_fetch_variable() { class ExecutorTesterRandom : public ::testing::Test { public: virtual void SetUp() override { + int input_dim = 5, batch_size = 2, embed_dim = 5; + + // init pdesc + auto init_root_block = init_pdesc_.add_blocks(); + init_root_block->set_idx(0); + init_root_block->set_parent_idx(-1); + AddOp("gaussian_random", {}, {{"Out", {"w1"}}}, + {{"dims", std::vector{input_dim, embed_dim}}}, init_root_block); + AddOp("gaussian_random", {}, {{"Out", {"w2"}}}, + {{"dims", std::vector{embed_dim, input_dim}}}, init_root_block); + AddOp("fetch", {{"Input", {"w1"}}}, {}, + {{"dims", std::vector{input_dim, embed_dim}}}, init_root_block); + AddOp("fetch", {{"Input", {"w2"}}}, {}, + {{"dims", std::vector{embed_dim, input_dim}}}, init_root_block); + + // run pdesc auto root_block = pdesc_.add_blocks(); root_block->set_idx(0); root_block->set_parent_idx(-1); - std::vector dim{2, 3}; - add_gaussian_random_op("a", dim, root_block); - add_gaussian_random_op("b", dim, root_block); + add_gaussian_random_op("a", {batch_size, input_dim}, root_block); - auto c = root_block->add_vars(); - c->set_name("c"); - auto c_lt = c->mutable_lod_tensor(); - c_lt->set_data_type(paddle::framework::DataType::FP32); + add_mul_op("a", "w1", "b", root_block); + add_mul_op("b", "w2", "a_out", root_block); - auto op = root_block->add_ops(); - op->set_type("elementwise_add"); - auto X = op->add_inputs(); - X->set_parameter("X"); - X->add_arguments("a"); - auto Y = op->add_inputs(); - Y->set_parameter("Y"); - Y->add_arguments("b"); - auto Out = op->add_outputs(); - Out->set_parameter("Out"); - Out->add_arguments("c"); - - add_fetch_op("c", dim, 0, root_block); + add_fetch_op("a_out", {input_dim, batch_size}, 0, root_block); } protected: ProgramDesc pdesc_; + ProgramDesc init_pdesc_; }; class ExecutorTesterFeedAndFetch : public ::testing::Test { @@ -238,6 +328,7 @@ TEST_F(ExecutorTesterRandom, CPU) { paddle::memory::Used(cpu_place); Executor* executor = new Executor(places); + executor->Run(init_pdesc_, GetGlobalScope()); executor->Run(pdesc_, GetGlobalScope()); std::vector> result = get_fetch_variable(); @@ -295,7 +386,19 @@ TEST_F(ExecutorTesterRandom, GPU) { paddle::memory::Used(gpu_place); Executor* executor = new Executor(places); + + LOG(INFO) << "Run Init"; + executor->Run(init_pdesc_, GetGlobalScope()); + LOG(INFO) << "Run"; executor->Run(pdesc_, GetGlobalScope()); + std::vector> result = get_fetch_variable(); + + for (auto& vec : result) { + for (auto& num : vec) { + std::cout << num << " "; + } + std::cout << std::endl; + } delete executor; } From c83ea1cdca1b751b93a1c63ea8fa58706131951b Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Sun, 8 Oct 2017 05:11:40 +0000 Subject: [PATCH 259/342] remove hardcode add_XX_op --- paddle/framework/executor_test.cc | 147 +++++------------------------- 1 file changed, 21 insertions(+), 126 deletions(-) diff --git a/paddle/framework/executor_test.cc b/paddle/framework/executor_test.cc index cf1752f6d8..e8ea09b77d 100644 --- a/paddle/framework/executor_test.cc +++ b/paddle/framework/executor_test.cc @@ -99,121 +99,6 @@ void AddOp(const std::string& type, const VariableNameMap& inputs, } } -void add_gaussian_random_op(string var_name, std::vector dim, - proto_block* block) { - // insert variable - auto a = block->add_vars(); - a->set_name(var_name); - auto a_lt = a->mutable_lod_tensor(); - a_lt->set_data_type(paddle::framework::DataType::FP32); - for (int i : dim) { - a_lt->add_dims(i); - } - - // insert operation - auto op = block->add_ops(); - op->set_type("gaussian_random"); - auto dims = op->add_attrs(); - dims->set_name("dims"); - dims->set_type(paddle::framework::AttrType::INTS); - for (int i : dim) { - dims->add_ints(i); - } - auto Out = op->add_outputs(); - Out->set_parameter("Out"); - Out->add_arguments(var_name); -} - -void add_feed_op(string var_name, std::vector& dim, int index, - proto_block* block) { - // insert variable - auto a = block->add_vars(); - a->set_name(var_name); - auto a_lt = a->mutable_lod_tensor(); - a_lt->set_data_type(paddle::framework::DataType::FP32); - for (int i : dim) { - a_lt->add_dims(i); - } - - // insert operation - auto op = block->add_ops(); - op->set_type("feed"); - - // set dims attr - auto dims = op->add_attrs(); - dims->set_name("dims"); - dims->set_type(paddle::framework::AttrType::INTS); - for (int i : dim) { - dims->add_ints(i); - } - - // set col attr - auto col = op->add_attrs(); - col->set_name("col"); - col->set_type(paddle::framework::AttrType::INT); - col->set_i(index); - - auto Out = op->add_outputs(); - Out->set_parameter("Out"); - Out->add_arguments(var_name); -} - -void add_fetch_op(string var_name, std::vector dim, int index, - proto_block* block) { - // insert variable - auto a = block->add_vars(); - a->set_name(var_name); - auto a_lt = a->mutable_lod_tensor(); - a_lt->set_data_type(paddle::framework::DataType::FP32); - for (int i : dim) { - a_lt->add_dims(i); - } - - // insert operation - auto op = block->add_ops(); - op->set_type("fetch"); - - // set dims attr - auto dims = op->add_attrs(); - dims->set_name("dims"); - dims->set_type(paddle::framework::AttrType::INTS); - for (int i : dim) { - dims->add_ints(i); - } - - // set col attr - auto col = op->add_attrs(); - col->set_name("col"); - col->set_type(paddle::framework::AttrType::INT); - col->set_i(index); - - auto Out = op->add_inputs(); - Out->set_parameter("Input"); - Out->add_arguments(var_name); -} - -void add_mul_op(string X_str, string Y_str, string Out_str, - proto_block* block) { - // insert variable - auto a = block->add_vars(); - a->set_name(Out_str); - auto a_lt = a->mutable_lod_tensor(); - a_lt->set_data_type(paddle::framework::DataType::FP32); - - // insert op - auto op = block->add_ops(); - op->set_type("mul"); - auto X = op->add_inputs(); - X->set_parameter("X"); - X->add_arguments(X_str); - auto Y = op->add_inputs(); - Y->set_parameter("Y"); - Y->add_arguments(Y_str); - auto Out = op->add_outputs(); - Out->set_parameter("Out"); - Out->add_arguments(Out_str); -} - std::once_flag set_variable_flag; // Tensors in feed value variable will only be in CPUPlace @@ -268,21 +153,27 @@ class ExecutorTesterRandom : public ::testing::Test { AddOp("gaussian_random", {}, {{"Out", {"w2"}}}, {{"dims", std::vector{embed_dim, input_dim}}}, init_root_block); AddOp("fetch", {{"Input", {"w1"}}}, {}, - {{"dims", std::vector{input_dim, embed_dim}}}, init_root_block); + {{"dims", std::vector{input_dim, embed_dim}}, {"col", 0}}, + init_root_block); AddOp("fetch", {{"Input", {"w2"}}}, {}, - {{"dims", std::vector{embed_dim, input_dim}}}, init_root_block); + {{"dims", std::vector{embed_dim, input_dim}}, {"col", 1}}, + init_root_block); // run pdesc auto root_block = pdesc_.add_blocks(); root_block->set_idx(0); root_block->set_parent_idx(-1); - add_gaussian_random_op("a", {batch_size, input_dim}, root_block); - - add_mul_op("a", "w1", "b", root_block); - add_mul_op("b", "w2", "a_out", root_block); + AddOp("gaussian_random", {}, {{"Out", {"a"}}}, + {{"dims", std::vector{batch_size, input_dim}}}, root_block); + AddOp("mul", {{"X", {"a"}}, {"Y", {"w1"}}}, {{"Out", {"b"}}}, {}, + root_block); + AddOp("mul", {{"X", {"b"}}, {"Y", {"w2"}}}, {{"Out", {"a_out"}}}, {}, + root_block); - add_fetch_op("a_out", {input_dim, batch_size}, 0, root_block); + AddOp("fetch", {{"Input", {"a_out"}}}, {}, + {{"dims", std::vector{input_dim, batch_size}}, {"col", 1}}, + root_block); } protected: @@ -299,10 +190,14 @@ class ExecutorTesterFeedAndFetch : public ::testing::Test { std::vector dim{6}; - add_feed_op("a", dim, 0, root_block); - add_feed_op("b", dim, 1, root_block); - add_fetch_op("a", dim, 0, root_block); - add_fetch_op("b", dim, 1, root_block); + AddOp("feed", {}, {{"Out", {"a"}}}, {{"dims", dim}, {"col", 0}}, + root_block); + AddOp("feed", {}, {{"Out", {"b"}}}, {{"dims", dim}, {"col", 1}}, + root_block); + AddOp("fetch", {{"Input", {"a"}}}, {}, {{"dims", dim}, {"col", 0}}, + root_block); + AddOp("fetch", {{"Input", {"b"}}}, {}, {{"dims", dim}, {"col", 1}}, + root_block); std::vector vec1 = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0}; std::vector vec2 = {4.0, 5.0, 6.0, 7.0, 8.0, 9.0}; From 6e7666f199ab1849e37c4f2e1e2570316dcf5c04 Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Sun, 8 Oct 2017 05:36:19 +0000 Subject: [PATCH 260/342] before backward --- paddle/framework/CMakeLists.txt | 2 +- paddle/framework/executor_test.cc | 14 +++++++++++--- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt index d8812d7743..7dc9d5c804 100644 --- a/paddle/framework/CMakeLists.txt +++ b/paddle/framework/CMakeLists.txt @@ -44,7 +44,7 @@ add_custom_command(TARGET framework_py_proto POST_BUILD cc_library(backward SRCS backward.cc DEPS net_op) cc_test(backward_test SRCS backward_test.cc DEPS backward recurrent_op device_context) -cc_library(executor SRCS executor.cc DEPS op_registry device_context scope framework_proto ${GLOB_OP_LIB}) +cc_library(executor SRCS executor.cc DEPS op_registry device_context scope framework_proto backward ${GLOB_OP_LIB}) if(WITH_GPU) nv_test(executor_test SRCS executor_test.cc DEPS executor) else() diff --git a/paddle/framework/executor_test.cc b/paddle/framework/executor_test.cc index e8ea09b77d..7ce472ed2f 100644 --- a/paddle/framework/executor_test.cc +++ b/paddle/framework/executor_test.cc @@ -16,6 +16,7 @@ limitations under the License. */ #include #include "gtest/gtest.h" #include "paddle/framework/attribute.h" +#include "paddle/framework/backward.h" #include "paddle/framework/block_desc.h" #include "paddle/framework/grad_op_builder.h" #include "paddle/framework/op_desc.h" @@ -27,6 +28,7 @@ USE_OP(gaussian_random); USE_OP(feed); USE_OP(fetch); USE_OP(mul); +USE_OP(squared_l2_distance); using std::string; using namespace paddle::platform; @@ -170,10 +172,16 @@ class ExecutorTesterRandom : public ::testing::Test { root_block); AddOp("mul", {{"X", {"b"}}, {"Y", {"w2"}}}, {{"Out", {"a_out"}}}, {}, root_block); - - AddOp("fetch", {{"Input", {"a_out"}}}, {}, - {{"dims", std::vector{input_dim, batch_size}}, {"col", 1}}, + AddOp("squared_l2_distance", {{"X", {"a"}}, {"Y", {"a_out"}}}, + {{"Out", {"l2_distance"}}, {"sub_result", {"l2_distance_sub"}}}, {}, root_block); + + AppendBackward(pdesc_, {}); + // AddOp("fetch", {{"Input", {"sub_result"}}}, {}, + // {{"dims", std::vector{input_dim, batch_size}}, {"col", 0}}, + // root_block); + AddOp("fetch", {{"Input", {"l2_distance"}}}, {}, + {{"dims", std::vector{batch_size}}, {"col", 1}}, root_block); } protected: From ba791f7b3f0b4f2b43f4391f7ccc10cdf7b0d06c Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Wed, 27 Sep 2017 23:16:35 +0800 Subject: [PATCH 261/342] Add vol2col functor and unit test --- paddle/operators/math/CMakeLists.txt | 7 +- paddle/operators/math/vol2col.cc | 155 +++++++++++++++++++ paddle/operators/math/vol2col.cu | 204 ++++++++++++++++++++++++++ paddle/operators/math/vol2col.h | 78 ++++++++++ paddle/operators/math/vol2col_test.cc | 156 ++++++++++++++++++++ 5 files changed, 597 insertions(+), 3 deletions(-) create mode 100644 paddle/operators/math/vol2col.cc create mode 100644 paddle/operators/math/vol2col.cu create mode 100644 paddle/operators/math/vol2col.h create mode 100644 paddle/operators/math/vol2col_test.cc diff --git a/paddle/operators/math/CMakeLists.txt b/paddle/operators/math/CMakeLists.txt index 91ae3d49f1..176d357f2e 100644 --- a/paddle/operators/math/CMakeLists.txt +++ b/paddle/operators/math/CMakeLists.txt @@ -1,16 +1,17 @@ if(WITH_GPU) nv_library(math_function SRCS math_function.cc math_function.cu im2col.cc - im2col.cu DEPS cblas device_context operator) + im2col.cu vol2col.cc vol2col.cu DEPS cblas device_context operator) nv_library(softmax_function SRCS softmax.cc softmax.cu DEPS operator) nv_library(cross_entropy_function SRCS cross_entropy.cc cross_entropy.cu DEPS operator) else() - cc_library(math_function SRCS math_function.cc im2col.cc - DEPS cblas device_context operator) + cc_library(math_function SRCS math_function.cc im2col.cc vol2col.cc + DEPS cblas device_context operator) cc_library(softmax_function SRCS softmax.cc DEPS operator) cc_library(cross_entropy_function SRCS cross_entropy.cc DEPS operator) endif() nv_test(math_function_test SRCS math_function_test.cc DEPS math_function tensor) cc_test(im2col_test SRCS im2col_test.cc DEPS math_function tensor) +cc_test(vol2col_test SRCS vol2col_test.cc DEPS math_function tensor) diff --git a/paddle/operators/math/vol2col.cc b/paddle/operators/math/vol2col.cc new file mode 100644 index 0000000000..5bad2e8073 --- /dev/null +++ b/paddle/operators/math/vol2col.cc @@ -0,0 +1,155 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/math/vol2col.h" + +namespace paddle { +namespace operators { +namespace math { + +/* + * vol = [input_channels, input_depth, input_height, input_width] + * col = + * [input_channels, filter_depth, filter_height, filter_width, + * output_depth, output_height, output_width] + */ +template +class Vol2ColFunctor { + public: + void operator()(const platform::DeviceContext& context, + const framework::Tensor& vol, framework::Tensor& col, + int stride_depth, int stride_height, int stride_width, + int padding_depth, int padding_height, + int padding_width) const { + PADDLE_ENFORCE(vol.dims().size() == 4); + PADDLE_ENFORCE(col.dims().size() == 7); + + int input_channels = vol.dims()[0]; + int input_depth = vol.dims()[1]; + int input_height = vol.dims()[2]; + int input_width = vol.dims()[3]; + int filter_depth = col.dims()[1]; + int filter_height = col.dims()[2]; + int filter_width = col.dims()[3]; + int output_depth = col.dims()[4]; + int output_height = col.dims()[5]; + int output_width = col.dims()[6]; + int channels_col = + input_channels * filter_depth * filter_height * filter_width; + + const T* vol_data = vol.data(); + T* col_data = col.data(); + + for (int c = 0; c < channels_col; ++c) { + int w_offset = c % filter_width; + int h_offset = (c / filter_width) % filter_height; + int d_offset = (c / filter_width / filter_height) % filter_depth; + int c_in = c / filter_width / filter_height / filter_depth; + for (int d = 0; d < output_depth; ++d) { + int d_pad = d * stride_depth - padding_depth + d_offset; + for (int h = 0; h < output_height; ++h) { + int h_pad = h * stride_height - padding_height + h_offset; + for (int w = 0; w < output_width; ++w) { + int w_pad = w * stride_width - padding_width + w_offset; + + int col_idx = + ((c * output_depth + d) * output_height + h) * output_width + w; + if (h_pad < 0 || h_pad >= input_height || w_pad < 0 || + w_pad >= input_width || d_pad < 0 || d_pad >= input_depth) { + col_data[col_idx] = T(0); + } else { + int vol_idx = + ((c_in * input_depth + d_pad) * input_height + h_pad) * + input_width + + w_pad; + col_data[col_idx] = vol_data[vol_idx]; + } + } + } + } + } + } +}; + +/* + * vol = [input_channels,input_depth, input_height, input_width] + * col = + * [input_channels, filter_depth, filter_height, filter_width, + * output_depth, output_height, output_width] + */ +template +class Col2VolFunctor { + public: + void operator()(const platform::DeviceContext& context, + framework::Tensor& vol, const framework::Tensor& col, + int stride_depth, int stride_height, int stride_width, + int padding_depth, int padding_height, + int padding_width) const { + PADDLE_ENFORCE(vol.dims().size() == 4); + PADDLE_ENFORCE(col.dims().size() == 7); + + int input_channels = vol.dims()[0]; + int input_depth = vol.dims()[1]; + int input_height = vol.dims()[2]; + int input_width = vol.dims()[3]; + int filter_depth = col.dims()[1]; + int filter_height = col.dims()[2]; + int filter_width = col.dims()[3]; + int output_depth = col.dims()[4]; + int output_height = col.dims()[5]; + int output_width = col.dims()[6]; + int channels_col = + input_channels * filter_depth * filter_height * filter_width; + + T* vol_data = vol.data(); + const T* col_data = col.data(); + + for (int c = 0; c < channels_col; ++c) { + int w_offset = c % filter_width; + int h_offset = (c / filter_width) % filter_height; + int d_offset = (c / filter_width / filter_height) % filter_depth; + int cIm = c / filter_width / filter_height / filter_depth; + for (int d = 0; d < output_depth; ++d) { + int d_pad = d * stride_depth - padding_depth + d_offset; + for (int h = 0; h < output_height; ++h) { + int h_pad = h * stride_height - padding_height + h_offset; + for (int w = 0; w < output_width; ++w) { + int w_pad = w * stride_width - padding_width + w_offset; + + if (h_pad >= 0 && h_pad < input_height && w_pad >= 0 && + w_pad < input_width && d_pad >= 0 && d_pad < input_depth) { + int vol_idx = + ((cIm * input_depth + d_pad) * input_height + h_pad) * + input_width + + w_pad; + int col_idx = + ((c * output_depth + d) * output_height + h) * output_width + + w; + vol_data[vol_idx] += col_data[col_idx]; + } + } + } + } + } + } +}; + +template class Vol2ColFunctor; +template class Vol2ColFunctor; +template class Col2VolFunctor; +template class Col2VolFunctor; + +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/math/vol2col.cu b/paddle/operators/math/vol2col.cu new file mode 100644 index 0000000000..27b11fb237 --- /dev/null +++ b/paddle/operators/math/vol2col.cu @@ -0,0 +1,204 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/math/vol2col.h" +#include "paddle/platform/cuda_helper.h" + +namespace paddle { +namespace operators { +namespace math { + +template +__global__ void vol2col(int num_kernels, const T* data_vol, int depth, + int height, int width, int filter_depth, + int filter_height, int filter_width, int stride_depth, + int stride_height, int stride_width, int padding_depth, + int padding_height, int padding_width, int output_detph, + int output_height, int output_width, T* data_col) { + for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < num_kernels; + index += blockDim.x * gridDim.x) { + int w_out = index % output_width; + int h_out = (index / output_width) % output_height; + int d_out = (index / output_width / output_height) % output_detph; + int channel_in = index / output_width / output_height / output_detph; + int channel_out = channel_in * filter_depth * filter_height * filter_width; + int w_in = w_out * stride_width - padding_width; + int h_in = h_out * stride_height - padding_height; + int d_in = d_out * stride_depth - padding_depth; + + data_col += ((channel_out * output_detph + d_out) * output_height + h_out) * + output_width + + w_out; + data_vol += ((channel_in * depth + d_in) * height + h_in) * width + w_in; + for (int k = 0; k < filter_depth; ++k) { + for (int i = 0; i < filter_height; ++i) { + for (int j = 0; j < filter_width; ++j) { + int d = d_in + k; + int h = h_in + i; + int w = w_in + j; + *data_col = (d >= 0 && d < depth && h >= 0 && h < height && w >= 0 && + w < width) + ? data_vol[(k * height + i) * width + j] + : 0; + data_col += output_detph * output_height * output_width; + } + } + } + } +} + +/* + * im = [input_channels,intpu_depth, input_height, input_width] + * col = + * [input_channels, filter_depth, filter_height, filter_width, + * output_depth, output_height, output_width] + */ +template +class Vol2ColFunctor { + public: + void operator()(const platform::DeviceContext& context, + const framework::Tensor& vol, framework::Tensor& col, + int stride_depth, int stride_height, int stride_width, + int padding_depth, int padding_height, + int padding_width) const { + PADDLE_ENFORCE(vol.dims().size() == 4); + PADDLE_ENFORCE(col.dims().size() == 7); + + int input_channels = vol.dims()[0]; + int input_depth = vol.dims()[1]; + int input_height = vol.dims()[2]; + int input_width = vol.dims()[3]; + int filter_depth = col.dims()[1]; + int filter_height = col.dims()[2]; + int filter_width = col.dims()[3]; + int output_depth = col.dims()[4]; + int output_height = col.dims()[5]; + int output_width = col.dims()[6]; + + int num_outputs = + input_channels * output_depth * output_height * output_width; + + const int threads = 1024; + const int blocks = (num_outputs + 1024 - 1) / 1024; + vol2col<<(context) + .stream()>>>( + num_outputs, vol.data(), input_depth, input_height, input_width, + filter_depth, filter_height, filter_width, stride_depth, stride_height, + stride_width, padding_depth, padding_height, padding_width, + output_depth, output_height, output_width, col.data()); + } +}; + +template +__global__ void col2vol(int num_kernels, const T* data_col, int depth, + int height, int width, int filter_depth, + int filter_height, int filter_width, int stride_depth, + int stride_height, int stride_width, int padding_depth, + int padding_height, int padding_width, int output_detph, + int output_height, int output_width, T* data_vol) { + for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < num_kernels; + index += blockDim.x * gridDim.x) { + T src_val = 0; + int w = index % width + padding_width; + int h = (index / width) % height + padding_height; + int d = (index / width / height) % depth + padding_depth; + int c = index / width / height / depth; + // compute the start and end of the output + int w_col_start = + (w < filter_width) ? 0 : (w - filter_width) / stride_width + 1; + int w_col_end = min(w / stride_width + 1, output_width); + int h_col_start = + (h < filter_height) ? 0 : (h - filter_height) / stride_height + 1; + int h_col_end = min(h / stride_height + 1, output_height); + int d_col_start = + (d < filter_depth) ? 0 : (d - filter_depth) / stride_depth + 1; + int d_col_end = min(d / stride_depth + 1, output_detph); + + int offset = (c * filter_depth * filter_height * filter_width + + d * filter_width * filter_height + h * filter_width + w) * + output_detph * output_height * output_width; + + int coeff_d_col = + (1 - stride_depth * filter_width * filter_height * output_detph) * + output_height * output_width; + int coeff_h_col = + (1 - stride_height * filter_width * output_detph * output_height) * + output_width; + int coeff_w_col = + (1 - stride_width * output_detph * output_height * output_width); + + for (int d_col = d_col_start; d_col < d_col_end; ++d_col) { + for (int h_col = h_col_start; h_col < h_col_end; ++h_col) { + for (int w_col = w_col_start; w_col < w_col_end; ++w_col) { + src_val += data_col[offset + d_col * coeff_d_col + + h_col * coeff_h_col + w_col * coeff_w_col]; + } + } + } + data_vol[index] = src_val; + } +} + +/* + * im = [input_channels, input_depth, input_height, input_width] + * col = + * [input_channels, filter_depth, filter_height, filter_width, + * output_depth, output_height, output_width] + */ +template +class Col2VolFunctor { + public: + void operator()(const platform::DeviceContext& context, + framework::Tensor& vol, const framework::Tensor& col, + int stride_depth, int stride_height, int stride_width, + int padding_depth, int padding_height, + int padding_width) const { + PADDLE_ENFORCE(vol.dims().size() == 4); + PADDLE_ENFORCE(col.dims().size() == 7); + + int input_channels = vol.dims()[0]; + int input_depth = vol.dims()[1]; + int input_height = vol.dims()[2]; + int input_width = vol.dims()[3]; + int filter_depth = col.dims()[1]; + int filter_height = col.dims()[2]; + int filter_width = col.dims()[3]; + int output_depth = col.dims()[4]; + int output_height = col.dims()[5]; + int output_width = col.dims()[6]; + + int num_kernels = input_channels * input_depth * input_height * input_width; + + const int threads = 1024; + const int blocks = (num_kernels + 1024 - 1) / 1024; + + col2vol<<(context) + .stream()>>>( + num_kernels, col.data(), input_depth, input_height, input_width, + filter_depth, filter_height, filter_width, stride_depth, stride_height, + stride_width, padding_depth, padding_height, padding_width, + output_depth, output_height, output_width, vol.data()); + } +}; + +template class Vol2ColFunctor; +template class Vol2ColFunctor; +template class Col2VolFunctor; +template class Col2VolFunctor; + +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/math/vol2col.h b/paddle/operators/math/vol2col.h new file mode 100644 index 0000000000..f022365a16 --- /dev/null +++ b/paddle/operators/math/vol2col.h @@ -0,0 +1,78 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include "paddle/framework/tensor.h" +#include "paddle/platform/device_context.h" + +namespace paddle { +namespace operators { +namespace math { +/* + * \brief Converts the feature data of four dimensions(CDHW) into a colData of + * seven dimensions in the Vol2ColFunctor calculation, + * And in the Col2VolFunctor calculation, it is reversed. + * + * \param volData Vol data. + * \param volShape The shape of volData, + * [input_channels, input_depth, input_height, input_width]. + * \param colData Column data. + * \param colShape The shape of colData. + * + * The shape of colData is: + * [input_channels, filter_depth, filter_height, filter_width, output_depth, + * output_height, output_width] + * So, it is easy to reshape into a convolution matrix for convolution + * calculation based on matrix multiplication. + * The shape of convolution matrix is [height, width], where the height is equal + * input_channels * filter_depth * filter_height * filter_width, and the width + * is equal output_depth * output_height * output_width. + * + * Reshape: + * shape of colData shape of convolution matrix + * [input_channels, + * filter_depth, + * filter_height, + * filter_width, ======> [height, width] + * output_depth, + * output_height, + * output_width] + * + * \note The caller needs to ensure that volShape.inputChannels is equal to + * colShape.inputChannels. + */ +template +class Vol2ColFunctor { + public: + void operator()(const platform::DeviceContext& context, + const framework::Tensor& vol, framework::Tensor& col, + int stride_depth, int stride_height, int stride_width, + int padding_depth, int padding_height, + int padding_width) const; +}; + +template +class Col2VolFunctor { + public: + void operator()(const platform::DeviceContext& context, + framework::Tensor& vol, const framework::Tensor& col, + int stride_depth, int stride_height, int stride_width, + int padding_depth, int padding_height, + int padding_width) const; +}; + +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/math/vol2col_test.cc b/paddle/operators/math/vol2col_test.cc new file mode 100644 index 0000000000..107a94511f --- /dev/null +++ b/paddle/operators/math/vol2col_test.cc @@ -0,0 +1,156 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/math/vol2col.h" +#include +#include + +template +void testVol2col() { + paddle::framework::Tensor input_tmp; + paddle::framework::Tensor input; + paddle::framework::Tensor output_cfo; + paddle::framework::Tensor output_ocf; + paddle::framework::Tensor output_tmp; + + auto* place = new Place(); + paddle::platform::DeviceContext* context; + if (paddle::platform::is_cpu_place(*place)) { + context = + new paddle::platform::CPUDeviceContext(paddle::platform::CPUPlace()); + } else { +#ifndef PADDLE_ONLY_CPU + context = + new paddle::platform::CUDADeviceContext(paddle::platform::GPUPlace()); +#else + PADDLE_THROW("no GPU support"); +#endif // PADDLE_ONLY_CPU + } + + /** + * input = [[0, 1, 2, + * 3, 4, 5] + * [6, 7, 8, + * 9, 10, 11]] + * + * output_cfo = [0, 1 + * 1, 2 + * 3, 4 + * 4, 5 + * 6, 7 + * 7, 8 + * 9, 10 + * 10, 11] + * + * col2vol = [[0, 2, 2, + * 3, 8, 5] + * [6, 14, 8, + * 9, 20, 11]] + * + */ + int input_depth = 2; + int input_height = 2; + int input_width = 3; + int filter_size = 2; + int stride = 1; + int padding = 0; + int output_depth = (input_depth - filter_size + 2 * padding) / stride + 1; + int output_height = (input_height - filter_size + 2 * padding) / stride + 1; + int output_width = (input_width - filter_size + 2 * padding) / stride + 1; + + // Vol2Col test + float* input_ptr = + input_tmp.mutable_data({1, input_depth, input_height, input_width}, + paddle::platform::CPUPlace()); + float arr[12] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}; + memcpy(input_ptr, arr, 12 * sizeof(float)); + + if (paddle::platform::is_cpu_place(*place)) { + input = input_tmp; + } else { + input.CopyFrom(input_tmp, *place); + } + output_cfo.mutable_data({1, filter_size, filter_size, filter_size, + output_depth, output_height, output_width}, + *place); + + paddle::operators::math::Vol2ColFunctor vol2col; + vol2col(*context, input, output_cfo, stride, stride, stride, padding, padding, + padding); + + float* out_cfo_ptr; + if (paddle::platform::is_cpu_place(*place)) { + out_cfo_ptr = output_cfo.data(); + } else { + output_tmp.CopyFrom(output_cfo, paddle::platform::CPUPlace()); + out_cfo_ptr = output_tmp.data(); + } + + EXPECT_EQ(out_cfo_ptr[0], 0); + EXPECT_EQ(out_cfo_ptr[1], 1); + EXPECT_EQ(out_cfo_ptr[2], 1); + EXPECT_EQ(out_cfo_ptr[3], 2); + EXPECT_EQ(out_cfo_ptr[4], 3); + EXPECT_EQ(out_cfo_ptr[5], 4); + EXPECT_EQ(out_cfo_ptr[6], 4); + EXPECT_EQ(out_cfo_ptr[7], 5); + EXPECT_EQ(out_cfo_ptr[8], 6); + EXPECT_EQ(out_cfo_ptr[9], 7); + EXPECT_EQ(out_cfo_ptr[10], 7); + EXPECT_EQ(out_cfo_ptr[11], 8); + EXPECT_EQ(out_cfo_ptr[12], 9); + EXPECT_EQ(out_cfo_ptr[13], 10); + EXPECT_EQ(out_cfo_ptr[14], 10); + EXPECT_EQ(out_cfo_ptr[15], 11); + + // Col2Vol test + memset(input_ptr, 0, 12 * sizeof(float)); + if (paddle::platform::is_cpu_place(*place)) { + input = input_tmp; + } else { + input.CopyFrom(input_tmp, *place); + } + + paddle::operators::math::Col2VolFunctor col2vol; + col2vol(*context, input, output_cfo, stride, stride, stride, padding, padding, + padding); + + float* in_cfo_ptr; + if (paddle::platform::is_cpu_place(*place)) { + in_cfo_ptr = input.data(); + } else { + input_tmp.CopyFrom(input, paddle::platform::CPUPlace()); + in_cfo_ptr = input_tmp.data(); + } + + EXPECT_EQ(in_cfo_ptr[0], 0); + EXPECT_EQ(in_cfo_ptr[1], 2); + EXPECT_EQ(in_cfo_ptr[2], 2); + EXPECT_EQ(in_cfo_ptr[3], 3); + EXPECT_EQ(in_cfo_ptr[4], 8); + EXPECT_EQ(in_cfo_ptr[5], 5); + EXPECT_EQ(in_cfo_ptr[6], 6); + EXPECT_EQ(in_cfo_ptr[7], 14); + EXPECT_EQ(in_cfo_ptr[8], 8); + EXPECT_EQ(in_cfo_ptr[9], 9); + EXPECT_EQ(in_cfo_ptr[10], 20); + EXPECT_EQ(in_cfo_ptr[11], 11); +} + +TEST(math, vol2col) { + testVol2col(); +#ifndef PADDLE_ONLY_CPU + testVol2col(); +#endif +} From adad8d9ed2cd722e6ac45b18596099b31fdb9929 Mon Sep 17 00:00:00 2001 From: hedaoyuan Date: Mon, 9 Oct 2017 11:20:09 +0800 Subject: [PATCH 262/342] Open WITH_TESTING option. --- CMakeLists.txt | 4 -- paddle/capi/tests/CMakeLists.txt | 17 ++--- paddle/gserver/tests/CMakeLists.txt | 70 +++++++++++-------- paddle/gserver/tests/LayerGradUtil.h | 1 - paddle/gserver/tests/test_ActivationGrad.cpp | 1 - paddle/gserver/tests/test_BatchNorm.cpp | 1 - paddle/gserver/tests/test_CRFLayerGrad.cpp | 1 - paddle/gserver/tests/test_ConvTrans.cpp | 1 - paddle/gserver/tests/test_ConvUnify.cpp | 1 - .../tests/test_CrossEntropyOverBeamGrad.cpp | 1 - paddle/gserver/tests/test_KmaxSeqScore.cpp | 1 - paddle/gserver/tests/test_LayerGrad.cpp | 1 - .../gserver/tests/test_SelectiveFCLayer.cpp | 1 - .../gserver/tests/test_SeqSliceLayerGrad.cpp | 1 - 14 files changed, 48 insertions(+), 54 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 7d549b864b..4783095194 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -94,10 +94,6 @@ if(ANDROID OR IOS) endif() set(MOBILE_INFERENCE ON) add_definitions(-DPADDLE_MOBILE_INFERENCE) - - # TODO: Need Open the WITH_TESTING - set(WITH_TESTING OFF CACHE STRING "Disable TESTING when cross-compiling - for Android and iOS" FORCE) endif() set(THIRD_PARTY_PATH "${CMAKE_BINARY_DIR}/third_party" CACHE STRING diff --git a/paddle/capi/tests/CMakeLists.txt b/paddle/capi/tests/CMakeLists.txt index 8208808b94..bb38ace628 100644 --- a/paddle/capi/tests/CMakeLists.txt +++ b/paddle/capi/tests/CMakeLists.txt @@ -4,11 +4,12 @@ add_unittest(capi_test_mats test_Vector.cpp target_include_directories(capi_test_mats PUBLIC ${PADDLE_CAPI_INC_PATH}) target_link_libraries(capi_test_mats paddle_capi) - -add_unittest_without_exec(capi_test_gradientMachine test_GradientMachine.cpp) -target_include_directories(capi_test_gradientMachine PUBLIC - ${PADDLE_CAPI_INC_PATH}) -target_link_libraries(capi_test_gradientMachine paddle_capi) -add_test(NAME capi_test_gradientMachine - COMMAND ${PADDLE_SOURCE_DIR}/paddle/.set_python_path.sh -d ${PADDLE_SOURCE_DIR}/python ${CMAKE_CURRENT_BINARY_DIR}/capi_test_gradientMachine - WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle/capi/tests) +if(NOT MOBILE_INFERENCE) + add_unittest_without_exec(capi_test_gradientMachine test_GradientMachine.cpp) + target_include_directories(capi_test_gradientMachine PUBLIC + ${PADDLE_CAPI_INC_PATH}) + target_link_libraries(capi_test_gradientMachine paddle_capi) + add_test(NAME capi_test_gradientMachine + COMMAND ${PADDLE_SOURCE_DIR}/paddle/.set_python_path.sh -d ${PADDLE_SOURCE_DIR}/python ${CMAKE_CURRENT_BINARY_DIR}/capi_test_gradientMachine + WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle/capi/tests) +endif() diff --git a/paddle/gserver/tests/CMakeLists.txt b/paddle/gserver/tests/CMakeLists.txt index de9b8e63df..fcee19415c 100644 --- a/paddle/gserver/tests/CMakeLists.txt +++ b/paddle/gserver/tests/CMakeLists.txt @@ -1,15 +1,17 @@ # gserver pacakge unittests +if(NOT MOBILE_INFERENCE) ################### test_ProtoDataProvider ############ -add_unittest_without_exec(test_ProtoDataProvider - test_ProtoDataProvider.cpp) - -# test_ProtoDataProvider will mkdir as same name, -# so if WORKING_DIRECTORY is default directory, then -# mkdir will get error. -add_test(NAME test_ProtoDataProvider - COMMAND ${CMAKE_CURRENT_BINARY_DIR}/test_ProtoDataProvider - WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle) + add_unittest_without_exec(test_ProtoDataProvider + test_ProtoDataProvider.cpp) + + # test_ProtoDataProvider will mkdir as same name, + # so if WORKING_DIRECTORY is default directory, then + # mkdir will get error. + add_test(NAME test_ProtoDataProvider + COMMAND ${CMAKE_CURRENT_BINARY_DIR}/test_ProtoDataProvider + WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle) +endif() ################# test_LayerGrad ####################### add_unittest_without_exec(test_LayerGrad @@ -98,9 +100,11 @@ add_unittest_without_exec(test_KmaxSeqScore add_test(NAME test_KmaxSeqScore COMMAND test_KmaxSeqScore) +if(NOT MOBILE_INFERENCE) ################## test_Evaluator ####################### -add_unittest(test_Evaluator - test_Evaluator.cpp) + add_unittest(test_Evaluator + test_Evaluator.cpp) +endif() ################ test_LinearChainCRF #################### add_simple_unittest(test_LinearChainCRF) @@ -131,27 +135,31 @@ if(NOT WITH_DOUBLE) WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle) endif() +if(NOT MOBILE_INFERENCE) ############### test_RecurrentGradientMachine ############### -# TODO(yuyang18): There is some bug in test_RecurrentGradientMachine -# I will fix it. -add_unittest_without_exec(test_RecurrentGradientMachine - test_RecurrentGradientMachine.cpp) -add_test(NAME test_RecurrentGradientMachine - COMMAND .set_python_path.sh -d - ${PADDLE_SOURCE_DIR}/python:${PADDLE_SOURCE_DIR}/paddle/gserver/tests - ${CMAKE_CURRENT_BINARY_DIR}/test_RecurrentGradientMachine - WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle) - -add_unittest_without_exec(test_NetworkCompare - test_NetworkCompare.cpp) -if(WITH_GPU) - add_test(NAME test_NetworkCompare - COMMAND .set_python_path.sh -d ${PADDLE_SOURCE_DIR}/python ${CMAKE_CURRENT_BINARY_DIR}/test_NetworkCompare --use_gpu=true - WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle) -else() - add_test(NAME test_NetworkCompare - COMMAND .set_python_path.sh -d ${PADDLE_SOURCE_DIR}/python ${CMAKE_CURRENT_BINARY_DIR}/test_NetworkCompare --use_gpu=false - WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle) + # TODO(yuyang18): There is some bug in test_RecurrentGradientMachine + # I will fix it. + add_unittest_without_exec(test_RecurrentGradientMachine + test_RecurrentGradientMachine.cpp) + add_test(NAME test_RecurrentGradientMachine + COMMAND .set_python_path.sh -d + ${PADDLE_SOURCE_DIR}/python:${PADDLE_SOURCE_DIR}/paddle/gserver/tests + ${CMAKE_CURRENT_BINARY_DIR}/test_RecurrentGradientMachine + WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle) +endif() + +if(NOT MOBILE_INFERENCE) + add_unittest_without_exec(test_NetworkCompare + test_NetworkCompare.cpp) + if(WITH_GPU) + add_test(NAME test_NetworkCompare + COMMAND .set_python_path.sh -d ${PADDLE_SOURCE_DIR}/python ${CMAKE_CURRENT_BINARY_DIR}/test_NetworkCompare --use_gpu=true + WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle) + else() + add_test(NAME test_NetworkCompare + COMMAND .set_python_path.sh -d ${PADDLE_SOURCE_DIR}/python ${CMAKE_CURRENT_BINARY_DIR}/test_NetworkCompare --use_gpu=false + WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle) + endif() endif() diff --git a/paddle/gserver/tests/LayerGradUtil.h b/paddle/gserver/tests/LayerGradUtil.h index 88e831f78b..e10a27eedf 100644 --- a/paddle/gserver/tests/LayerGradUtil.h +++ b/paddle/gserver/tests/LayerGradUtil.h @@ -15,7 +15,6 @@ limitations under the License. */ #pragma once #include "ModelConfig.pb.h" #include "paddle/gserver/layers/DataLayer.h" -#include "paddle/trainer/Trainer.h" #include "paddle/testing/TestUtil.h" using namespace std; // NOLINT diff --git a/paddle/gserver/tests/test_ActivationGrad.cpp b/paddle/gserver/tests/test_ActivationGrad.cpp index de93972a58..f4c2a07c44 100644 --- a/paddle/gserver/tests/test_ActivationGrad.cpp +++ b/paddle/gserver/tests/test_ActivationGrad.cpp @@ -17,7 +17,6 @@ limitations under the License. */ #include #include "ModelConfig.pb.h" #include "paddle/gserver/layers/DataLayer.h" -#include "paddle/trainer/Trainer.h" #include "LayerGradUtil.h" #include "paddle/testing/TestUtil.h" diff --git a/paddle/gserver/tests/test_BatchNorm.cpp b/paddle/gserver/tests/test_BatchNorm.cpp index 659eefa31b..38bcbb880d 100644 --- a/paddle/gserver/tests/test_BatchNorm.cpp +++ b/paddle/gserver/tests/test_BatchNorm.cpp @@ -17,7 +17,6 @@ limitations under the License. */ #include #include "ModelConfig.pb.h" #include "paddle/gserver/layers/DataLayer.h" -#include "paddle/trainer/Trainer.h" #include "paddle/utils/GlobalConstants.h" #include "LayerGradUtil.h" diff --git a/paddle/gserver/tests/test_CRFLayerGrad.cpp b/paddle/gserver/tests/test_CRFLayerGrad.cpp index df14449291..f010066ebc 100644 --- a/paddle/gserver/tests/test_CRFLayerGrad.cpp +++ b/paddle/gserver/tests/test_CRFLayerGrad.cpp @@ -16,7 +16,6 @@ limitations under the License. */ #include "ModelConfig.pb.h" #include "paddle/gserver/layers/DataLayer.h" #include "paddle/gserver/layers/LinearChainCRF.h" -#include "paddle/trainer/Trainer.h" #include "LayerGradUtil.h" #include "paddle/testing/TestUtil.h" diff --git a/paddle/gserver/tests/test_ConvTrans.cpp b/paddle/gserver/tests/test_ConvTrans.cpp index 6035a866b4..5f2f966547 100644 --- a/paddle/gserver/tests/test_ConvTrans.cpp +++ b/paddle/gserver/tests/test_ConvTrans.cpp @@ -18,7 +18,6 @@ limitations under the License. */ #include "ModelConfig.pb.h" #include "paddle/gserver/layers/DataLayer.h" #include "paddle/math/MathUtils.h" -#include "paddle/trainer/Trainer.h" #include "paddle/utils/GlobalConstants.h" #include "LayerGradUtil.h" diff --git a/paddle/gserver/tests/test_ConvUnify.cpp b/paddle/gserver/tests/test_ConvUnify.cpp index e7325e0cc3..bcc10a6197 100644 --- a/paddle/gserver/tests/test_ConvUnify.cpp +++ b/paddle/gserver/tests/test_ConvUnify.cpp @@ -18,7 +18,6 @@ limitations under the License. */ #include "ModelConfig.pb.h" #include "paddle/gserver/layers/DataLayer.h" #include "paddle/math/MathUtils.h" -#include "paddle/trainer/Trainer.h" #include "paddle/utils/GlobalConstants.h" #include "LayerGradUtil.h" diff --git a/paddle/gserver/tests/test_CrossEntropyOverBeamGrad.cpp b/paddle/gserver/tests/test_CrossEntropyOverBeamGrad.cpp index c922237d33..477638426f 100644 --- a/paddle/gserver/tests/test_CrossEntropyOverBeamGrad.cpp +++ b/paddle/gserver/tests/test_CrossEntropyOverBeamGrad.cpp @@ -18,7 +18,6 @@ limitations under the License. */ #include #include "ModelConfig.pb.h" #include "paddle/gserver/layers/DataLayer.h" -#include "paddle/trainer/Trainer.h" #include "LayerGradUtil.h" #include "paddle/testing/TestUtil.h" diff --git a/paddle/gserver/tests/test_KmaxSeqScore.cpp b/paddle/gserver/tests/test_KmaxSeqScore.cpp index 308abe6816..483e382f6d 100644 --- a/paddle/gserver/tests/test_KmaxSeqScore.cpp +++ b/paddle/gserver/tests/test_KmaxSeqScore.cpp @@ -18,7 +18,6 @@ limitations under the License. */ #include #include "ModelConfig.pb.h" #include "paddle/gserver/layers/DataLayer.h" -#include "paddle/trainer/Trainer.h" #include "paddle/utils/GlobalConstants.h" #include "LayerGradUtil.h" diff --git a/paddle/gserver/tests/test_LayerGrad.cpp b/paddle/gserver/tests/test_LayerGrad.cpp index 090bde7b20..876a935fb0 100644 --- a/paddle/gserver/tests/test_LayerGrad.cpp +++ b/paddle/gserver/tests/test_LayerGrad.cpp @@ -21,7 +21,6 @@ limitations under the License. */ #include "ModelConfig.pb.h" #include "paddle/gserver/layers/DataLayer.h" #include "paddle/math/MathUtils.h" -#include "paddle/trainer/Trainer.h" #include "LayerGradUtil.h" #include "paddle/testing/TestUtil.h" diff --git a/paddle/gserver/tests/test_SelectiveFCLayer.cpp b/paddle/gserver/tests/test_SelectiveFCLayer.cpp index ab23d00a2c..1da935dfc9 100644 --- a/paddle/gserver/tests/test_SelectiveFCLayer.cpp +++ b/paddle/gserver/tests/test_SelectiveFCLayer.cpp @@ -24,7 +24,6 @@ limitations under the License. */ #include "paddle/gserver/layers/Layer.h" #include "paddle/gserver/layers/SelectiveFullyConnectedLayer.h" #include "paddle/math/CpuSparseMatrix.h" -#include "paddle/trainer/Trainer.h" using namespace paddle; // NOLINT using namespace std; // NOLINT diff --git a/paddle/gserver/tests/test_SeqSliceLayerGrad.cpp b/paddle/gserver/tests/test_SeqSliceLayerGrad.cpp index e1d4ae1617..8e04ccd16a 100644 --- a/paddle/gserver/tests/test_SeqSliceLayerGrad.cpp +++ b/paddle/gserver/tests/test_SeqSliceLayerGrad.cpp @@ -15,7 +15,6 @@ limitations under the License. */ #include #include "ModelConfig.pb.h" #include "paddle/gserver/layers/DataLayer.h" -#include "paddle/trainer/Trainer.h" #include "LayerGradUtil.h" #include "paddle/testing/TestUtil.h" From 089cc11df48c8b29b34eda8ea19328a090d4c9f6 Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Mon, 9 Oct 2017 03:30:53 +0000 Subject: [PATCH 263/342] clean up && fix #4624 --- paddle/framework/block_desc.cc | 6 ++ paddle/framework/executor.cc | 37 +++------ paddle/framework/executor_test.cc | 129 ++++++++++++------------------ 3 files changed, 68 insertions(+), 104 deletions(-) diff --git a/paddle/framework/block_desc.cc b/paddle/framework/block_desc.cc index 01f50e1393..509aa235d3 100644 --- a/paddle/framework/block_desc.cc +++ b/paddle/framework/block_desc.cc @@ -74,6 +74,12 @@ void BlockDescBind::Sync() { for (auto &op_desc : ops_) { op_field.AddAllocated(op_desc->Proto()); } + auto &var_field = *this->desc_->mutable_vars(); + var_field.Clear(); + var_field.Reserve(static_cast(vars_.size())); + for (auto &var_desc : vars_) { + var_field.AddAllocated(var_desc.second->Proto()); + } need_update_ = false; } } diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc index 9391e18ded..c6c9d13469 100644 --- a/paddle/framework/executor.cc +++ b/paddle/framework/executor.cc @@ -54,39 +54,33 @@ Executor::~Executor() { void Executor::Run(const ProgramDesc& pdesc, Scope* scope) { // TODO(tonyyang-svail): - // - only runs the first block - // - only runs on the first device - // - test on gpu + // - only runs the first block (i.e. no RNN support) + // - only runs on the first device (i.e. no interdevice communication) auto& block = pdesc.blocks(0); auto& device = device_contexts_[0]; - // TODO(tonyyang-svail): - // - runs on a new local scope - // Scope& local_scope = scope->NewScope(); - + // Instantiate all the vars in the global scope for (auto& var : block.vars()) { scope->NewVar(var.name()); } + Scope& local_scope = scope->NewScope(); + std::vector should_run = Preprocess(pdesc); PADDLE_ENFORCE(should_run.size() == block.ops_size()); for (size_t i = 0; i < should_run.size(); ++i) { if (should_run[i]) { + for (auto var : block.ops(i).outputs()) { + for (auto argu : var.arguments()) { + if (local_scope.FindVar(argu) == nullptr) { + local_scope.NewVar(argu); + } + } + } auto op = paddle::framework::OpRegistry::CreateOp(block.ops(i)); - op->Run(*scope, *device); + op->Run(local_scope, *device); } } - - // // print tensor value - // for (auto& var : block.vars()) { - // std::cout << var.name() << std::endl; - // auto v = scope->FindVar(var.name()); - // const LoDTensor& t = v->Get(); - // for (int i = 0; i < t.numel(); ++i) { - // std::cout << t.data()[i] << " "; - // } - // std::cout << std::endl; - // } } std::vector Executor::Preprocess(const ProgramDesc& pdesc) { @@ -125,7 +119,6 @@ std::vector Executor::Preprocess(const ProgramDesc& pdesc) { } } - // TODO(tonyyang-svail): add VLOG here for debugging if (op_desc.type() == "fetch" || found_dependent_vars) { // erase its output to the dependency graph for (auto& var : op_desc.outputs()) { @@ -141,13 +134,9 @@ std::vector Executor::Preprocess(const ProgramDesc& pdesc) { } } - // this op should be executed should_run.push_back(true); - LOG(INFO) << "Yes " << op_desc.type(); } else { - // this op should NOT be executed should_run.push_back(false); - LOG(INFO) << "No " << op_desc.type(); } } diff --git a/paddle/framework/executor_test.cc b/paddle/framework/executor_test.cc index 7ce472ed2f..99f80d04e8 100644 --- a/paddle/framework/executor_test.cc +++ b/paddle/framework/executor_test.cc @@ -18,7 +18,7 @@ limitations under the License. */ #include "paddle/framework/attribute.h" #include "paddle/framework/backward.h" #include "paddle/framework/block_desc.h" -#include "paddle/framework/grad_op_builder.h" +// #include "paddle/framework/grad_op_builder.h" #include "paddle/framework/op_desc.h" #include "paddle/framework/op_registry.h" #include "paddle/framework/operator.h" @@ -37,68 +37,27 @@ using namespace paddle::framework; typedef paddle::framework::BlockDesc proto_block; typedef paddle::framework::OpDesc proto_op; -struct SetAttrDescVisitor : public boost::static_visitor { - explicit SetAttrDescVisitor(OpDesc::Attr* attr) : attr_(attr) {} - mutable OpDesc::Attr* attr_; - void operator()(int v) const { attr_->set_i(v); } - void operator()(float v) const { attr_->set_f(v); } - void operator()(const std::string& v) const { attr_->set_s(v); } - void operator()(bool b) const { attr_->set_b(b); } - - void operator()(const std::vector& v) const { - VectorToRepeated(v, attr_->mutable_ints()); - } - void operator()(const std::vector& v) const { - VectorToRepeated(v, attr_->mutable_floats()); - } - void operator()(const std::vector& v) const { - VectorToRepeated(v, attr_->mutable_strings()); - } - void operator()(const std::vector& v) const { - VectorToRepeated(v, attr_->mutable_bools()); - } - void operator()(BlockDesc* desc) const { attr_->set_block_idx(desc->idx()); } - void operator()(boost::blank) const { PADDLE_THROW("Unexpected branch"); } -}; - void AddOp(const std::string& type, const VariableNameMap& inputs, const VariableNameMap& outputs, AttributeMap attrs, - proto_block* block) { + paddle::framework::BlockDescBind* block) { // insert output for (auto kv : outputs) { for (auto v : kv.second) { - auto var = block->add_vars(); - var->set_name(v); - auto var_lt = var->mutable_lod_tensor(); - var_lt->set_data_type(paddle::framework::DataType::FP32); + auto var = block->NewVar(v); + var->SetDataType(paddle::framework::DataType::FP32); } } // insert op - auto op = block->add_ops(); - op->set_type(type); + auto op = block->AppendOp(); + op->SetType(type); for (auto kv : inputs) { - auto X = op->add_inputs(); - X->set_parameter(kv.first); - for (auto argu : kv.second) { - X->add_arguments(argu); - } + op->SetInput(kv.first, kv.second); } for (auto kv : outputs) { - auto X = op->add_outputs(); - X->set_parameter(kv.first); - for (auto argu : kv.second) { - X->add_arguments(argu); - } - } - for (auto& attr : attrs) { - auto* attr_desc = op->add_attrs(); - attr_desc->set_name(attr.first); - attr_desc->set_type( - static_cast(attr.second.which() - 1)); - SetAttrDescVisitor visitor(attr_desc); - boost::apply_visitor(visitor, attr.second); + op->SetOutput(kv.first, kv.second); } + op->SetAttrMap(attrs); } std::once_flag set_variable_flag; @@ -146,10 +105,16 @@ class ExecutorTesterRandom : public ::testing::Test { virtual void SetUp() override { int input_dim = 5, batch_size = 2, embed_dim = 5; - // init pdesc - auto init_root_block = init_pdesc_.add_blocks(); - init_root_block->set_idx(0); - init_root_block->set_parent_idx(-1); + // init pdesc ----------------------------------------- + auto temp_init_root_block = init_pdesc_.add_blocks(); + temp_init_root_block->set_idx(0); + temp_init_root_block->set_parent_idx(-1); + + // wrap to BlockDescBind + paddle::framework::ProgramDescBind& init_program = + paddle::framework::ProgramDescBind::Instance(&init_pdesc_); + paddle::framework::BlockDescBind* init_root_block = init_program.Block(0); + AddOp("gaussian_random", {}, {{"Out", {"w1"}}}, {{"dims", std::vector{input_dim, embed_dim}}}, init_root_block); AddOp("gaussian_random", {}, {{"Out", {"w2"}}}, @@ -160,11 +125,18 @@ class ExecutorTesterRandom : public ::testing::Test { AddOp("fetch", {{"Input", {"w2"}}}, {}, {{"dims", std::vector{embed_dim, input_dim}}, {"col", 1}}, init_root_block); + // flush + init_program.Proto(); + + // run pdesc ----------------------------------------- + auto temp_root_block = pdesc_.add_blocks(); + temp_root_block->set_idx(0); + temp_root_block->set_parent_idx(-1); - // run pdesc - auto root_block = pdesc_.add_blocks(); - root_block->set_idx(0); - root_block->set_parent_idx(-1); + // wrap to BlockDescBind + paddle::framework::ProgramDescBind& program = + paddle::framework::ProgramDescBind::Instance(&pdesc_); + paddle::framework::BlockDescBind* root_block = program.Block(0); AddOp("gaussian_random", {}, {{"Out", {"a"}}}, {{"dims", std::vector{batch_size, input_dim}}}, root_block); @@ -175,13 +147,16 @@ class ExecutorTesterRandom : public ::testing::Test { AddOp("squared_l2_distance", {{"X", {"a"}}, {"Y", {"a_out"}}}, {{"Out", {"l2_distance"}}, {"sub_result", {"l2_distance_sub"}}}, {}, root_block); - - AppendBackward(pdesc_, {}); - // AddOp("fetch", {{"Input", {"sub_result"}}}, {}, - // {{"dims", std::vector{input_dim, batch_size}}, {"col", 0}}, - // root_block); AddOp("fetch", {{"Input", {"l2_distance"}}}, {}, {{"dims", std::vector{batch_size}}, {"col", 1}}, root_block); + // flush + program.Proto(); + + // TODO(tonyyang-svail): + // - Test with Backward + // AddOp("gaussian_random", {}, {{"Out", {"l2_distance@GRAD"}}}, + // {{"dims", std::vector{batch_size, 1}}}, root_block); + // AppendBackward(program, {}); } protected: @@ -192,9 +167,14 @@ class ExecutorTesterRandom : public ::testing::Test { class ExecutorTesterFeedAndFetch : public ::testing::Test { public: virtual void SetUp() override { - auto root_block = pdesc_.add_blocks(); - root_block->set_idx(0); - root_block->set_parent_idx(-1); + auto temp_root_block = pdesc_.add_blocks(); + temp_root_block->set_idx(0); + temp_root_block->set_parent_idx(-1); + + // wrap to BlockDescBind + paddle::framework::ProgramDescBind& program = + paddle::framework::ProgramDescBind::Instance(&pdesc_); + paddle::framework::BlockDescBind* root_block = program.Block(0); std::vector dim{6}; @@ -207,6 +187,9 @@ class ExecutorTesterFeedAndFetch : public ::testing::Test { AddOp("fetch", {{"Input", {"b"}}}, {}, {{"dims", dim}, {"col", 1}}, root_block); + // flush + program.Proto(); + std::vector vec1 = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0}; std::vector vec2 = {4.0, 5.0, 6.0, 7.0, 8.0, 9.0}; inputs_.push_back(vec1); @@ -235,12 +218,6 @@ TEST_F(ExecutorTesterRandom, CPU) { executor->Run(pdesc_, GetGlobalScope()); std::vector> result = get_fetch_variable(); - for (auto& vec : result) { - for (auto& num : vec) { - std::cout << num << " "; - } - std::cout << std::endl; - } delete executor; } @@ -290,18 +267,10 @@ TEST_F(ExecutorTesterRandom, GPU) { Executor* executor = new Executor(places); - LOG(INFO) << "Run Init"; executor->Run(init_pdesc_, GetGlobalScope()); - LOG(INFO) << "Run"; executor->Run(pdesc_, GetGlobalScope()); std::vector> result = get_fetch_variable(); - for (auto& vec : result) { - for (auto& num : vec) { - std::cout << num << " "; - } - std::cout << std::endl; - } delete executor; } From 5b862fedf1feb78b7dc63451e5219cef2fde33a3 Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Mon, 9 Oct 2017 11:31:13 +0800 Subject: [PATCH 264/342] remove debug log in interp_op.cc --- paddle/operators/interp_op.cc | 1 - 1 file changed, 1 deletion(-) diff --git a/paddle/operators/interp_op.cc b/paddle/operators/interp_op.cc index 04bcb9ade8..fc8b9a11b8 100644 --- a/paddle/operators/interp_op.cc +++ b/paddle/operators/interp_op.cc @@ -58,7 +58,6 @@ class InterpOp : public NetOp { {{"Out", {Output("Out")}}}, {})); CompleteAddOp(false); - LOG(INFO) << DebugString(); } }; From 8f2c48cf4c46b3b3d1b58fb8fec13a9c95d2327c Mon Sep 17 00:00:00 2001 From: hedaoyuan Date: Mon, 9 Oct 2017 11:32:17 +0800 Subject: [PATCH 265/342] Fix Layer.cpp --- paddle/gserver/layers/Layer.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/paddle/gserver/layers/Layer.cpp b/paddle/gserver/layers/Layer.cpp index 075e8166ef..01f2aae6cf 100644 --- a/paddle/gserver/layers/Layer.cpp +++ b/paddle/gserver/layers/Layer.cpp @@ -15,11 +15,14 @@ limitations under the License. */ #include "paddle/utils/Util.h" #include "CostLayer.h" -#include "ValidationLayer.h" #include "paddle/math/SparseMatrix.h" #include "paddle/utils/Error.h" #include "paddle/utils/Logging.h" +#ifndef PADDLE_MOBILE_INFERENCE +#include "ValidationLayer.h" +#endif + DEFINE_bool(log_error_clipping, false, "enable log error clipping or not"); namespace paddle { From 707d144c93aa6053cd02c58bc92bf1d7306c95c3 Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Mon, 9 Oct 2017 14:45:01 +0800 Subject: [PATCH 266/342] Unify Reduce functions and simplify register code --- paddle/operators/activation_op.cc | 8 +++---- paddle/operators/activation_op.cu | 10 ++++----- paddle/operators/reduce_op.cc | 34 +++++++++-------------------- paddle/operators/reduce_op.cu | 36 ++++++++----------------------- paddle/operators/reduce_op.h | 6 ++++++ 5 files changed, 33 insertions(+), 61 deletions(-) diff --git a/paddle/operators/activation_op.cc b/paddle/operators/activation_op.cc index 66e9d2c401..2afa8a68b0 100644 --- a/paddle/operators/activation_op.cc +++ b/paddle/operators/activation_op.cc @@ -285,11 +285,9 @@ REGISTER_OP(stanh, ops::ActivationOp, ops::STanhOpMaker, stanh_grad, #define REGISTER_ACTIVATION_CPU_KERNEL(act_type, functor, grad_functor) \ REGISTER_OP_CPU_KERNEL( \ act_type, \ - paddle::operators::ActivationKernel>); \ + ops::ActivationKernel>); \ REGISTER_OP_CPU_KERNEL(act_type##_grad, \ - paddle::operators::ActivationGradKernel< \ - paddle::platform::CPUPlace, \ - paddle::operators::grad_functor>); + ops::ActivationGradKernel>); FOR_EACH_KERNEL_FUNCTOR(REGISTER_ACTIVATION_CPU_KERNEL); diff --git a/paddle/operators/activation_op.cu b/paddle/operators/activation_op.cu index 93e9f1c694..7b7644519d 100644 --- a/paddle/operators/activation_op.cu +++ b/paddle/operators/activation_op.cu @@ -15,14 +15,14 @@ #define EIGEN_USE_GPU #include "paddle/operators/activation_op.h" +namespace ops = paddle::operators; + #define REGISTER_ACTIVATION_GPU_KERNEL(act_type, functor, grad_functor) \ REGISTER_OP_GPU_KERNEL( \ act_type, \ - paddle::operators::ActivationKernel>); \ + ops::ActivationKernel>); \ REGISTER_OP_GPU_KERNEL(act_type##_grad, \ - paddle::operators::ActivationGradKernel< \ - paddle::platform::GPUPlace, \ - paddle::operators::grad_functor>); + ops::ActivationGradKernel>); FOR_EACH_KERNEL_FUNCTOR(REGISTER_ACTIVATION_GPU_KERNEL); diff --git a/paddle/operators/reduce_op.cc b/paddle/operators/reduce_op.cc index 3ef443d1c7..87f66e1e93 100644 --- a/paddle/operators/reduce_op.cc +++ b/paddle/operators/reduce_op.cc @@ -168,36 +168,22 @@ namespace ops = paddle::operators; REGISTER_OP(reduce_sum, ops::ReduceOp, ops::ReduceSumOpMaker, reduce_sum_grad, ops::ReduceGradOp); -REGISTER_OP_CPU_KERNEL( - reduce_sum, - ops::ReduceKernel); -REGISTER_OP_CPU_KERNEL(reduce_sum_grad, - ops::ReduceGradKernel); REGISTER_OP(reduce_mean, ops::ReduceOp, ops::ReduceMeanOpMaker, reduce_mean_grad, ops::ReduceGradOp); -REGISTER_OP_CPU_KERNEL( - reduce_mean, - ops::ReduceKernel); -REGISTER_OP_CPU_KERNEL(reduce_mean_grad, - ops::ReduceGradKernel); REGISTER_OP(reduce_max, ops::ReduceOp, ops::ReduceMaxOpMaker, reduce_max_grad, ops::ReduceGradOp); -REGISTER_OP_CPU_KERNEL( - reduce_max, - ops::ReduceKernel); -REGISTER_OP_CPU_KERNEL(reduce_max_grad, - ops::ReduceGradKernel); REGISTER_OP(reduce_min, ops::ReduceOp, ops::ReduceMaxOpMaker, reduce_min_grad, ops::ReduceGradOp); -REGISTER_OP_CPU_KERNEL( - reduce_min, - ops::ReduceKernel); -REGISTER_OP_CPU_KERNEL(reduce_min_grad, - ops::ReduceGradKernel); + +#define REGISTER_REDUCE_CPU_KERNEL(reduce_type, functor, grad_functor) \ + REGISTER_OP_CPU_KERNEL( \ + reduce_type, \ + ops::ReduceKernel); \ + REGISTER_OP_CPU_KERNEL(reduce_type##_grad, \ + ops::ReduceGradKernel); + +FOR_EACH_KERNEL_FUNCTOR(REGISTER_REDUCE_CPU_KERNEL); diff --git a/paddle/operators/reduce_op.cu b/paddle/operators/reduce_op.cu index 595127b858..d306e1a240 100644 --- a/paddle/operators/reduce_op.cu +++ b/paddle/operators/reduce_op.cu @@ -17,30 +17,12 @@ namespace ops = paddle::operators; -REGISTER_OP_GPU_KERNEL( - reduce_sum, - ops::ReduceKernel); -REGISTER_OP_GPU_KERNEL(reduce_sum_grad, - ops::ReduceGradKernel); - -REGISTER_OP_GPU_KERNEL( - reduce_mean, - ops::ReduceKernel); -REGISTER_OP_GPU_KERNEL(reduce_mean_grad, - ops::ReduceGradKernel); - -REGISTER_OP_GPU_KERNEL( - reduce_max, - ops::ReduceKernel); -REGISTER_OP_GPU_KERNEL(reduce_max_grad, - ops::ReduceGradKernel); - -REGISTER_OP_GPU_KERNEL( - reduce_min, - ops::ReduceKernel); -REGISTER_OP_GPU_KERNEL(reduce_min_grad, - ops::ReduceGradKernel); +#define REGISTER_REDUCE_GPU_KERNEL(reduce_type, functor, grad_functor) \ + REGISTER_OP_GPU_KERNEL( \ + reduce_type, \ + ops::ReduceKernel); \ + REGISTER_OP_GPU_KERNEL(reduce_type##_grad, \ + ops::ReduceGradKernel); + +FOR_EACH_KERNEL_FUNCTOR(REGISTER_REDUCE_GPU_KERNEL); diff --git a/paddle/operators/reduce_op.h b/paddle/operators/reduce_op.h index ba3f3db81d..45043c440b 100644 --- a/paddle/operators/reduce_op.h +++ b/paddle/operators/reduce_op.h @@ -198,3 +198,9 @@ class ReduceGradKernel : public framework::OpKernel { } // namespace operators } // namespace paddle + +#define FOR_EACH_KERNEL_FUNCTOR(__macro) \ + __macro(reduce_sum, SumFunctor, SumGradFunctor); \ + __macro(reduce_mean, MeanFunctor, MeanGradFunctor); \ + __macro(reduce_max, MaxFunctor, MaxOrMinGradFunctor); \ + __macro(reduce_min, MinFunctor, MaxOrMinGradFunctor); From 3f874143fe62062607f341f2559840fc23f4bbd7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=AD=A6=E6=AF=85?= Date: Mon, 9 Oct 2017 14:55:03 +0800 Subject: [PATCH 267/342] fix grad debug event (#4536) --- python/paddle/v2/event.py | 14 +++++++++++++- python/paddle/v2/trainer.py | 9 +++++++-- 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/python/paddle/v2/event.py b/python/paddle/v2/event.py index e66bf67d79..a0ffd31c54 100644 --- a/python/paddle/v2/event.py +++ b/python/paddle/v2/event.py @@ -10,7 +10,8 @@ There are: * EndPass """ __all__ = [ - 'EndIteration', 'BeginIteration', 'BeginPass', 'EndPass', 'TestResult' + 'EndIteration', 'BeginIteration', 'BeginPass', 'EndPass', 'TestResult', + 'EndForwardBackward' ] @@ -73,6 +74,17 @@ class BeginIteration(object): self.batch_id = batch_id +class EndForwardBackward(object): + """ + Event On One Batch ForwardBackward Complete. + """ + + def __init__(self, pass_id, batch_id, gm): + self.pass_id = pass_id + self.batch_id = batch_id + self.gm = gm + + class EndIteration(WithMetric): """ Event On One Batch Training Complete. diff --git a/python/paddle/v2/trainer.py b/python/paddle/v2/trainer.py index ca95ef13bd..076e755939 100644 --- a/python/paddle/v2/trainer.py +++ b/python/paddle/v2/trainer.py @@ -164,11 +164,18 @@ class SGD(object): pass_type) self.__gradient_machine__.eval(pass_evaluator) self.__gradient_machine__.eval(batch_evaluator) + event_handler( + v2_event.EndForwardBackward( + pass_id=pass_id, + batch_id=batch_id, + gm=self.__gradient_machine__)) for each_param in self.__gradient_machine__.getNonStaticParameters( ): self.__parameter_updater__.update(each_param) cost_sum = out_args.sum() cost = cost_sum / len(data_batch) + self.__parameter_updater__.finishBatch(cost) + batch_evaluator.finish() event_handler( v2_event.EndIteration( pass_id=pass_id, @@ -176,8 +183,6 @@ class SGD(object): cost=cost, evaluator=batch_evaluator, gm=self.__gradient_machine__)) - self.__parameter_updater__.finishBatch(cost) - batch_evaluator.finish() self.__parameter_updater__.finishPass() pass_evaluator.finish() From fcfce48421650f983b484af9fe20d2e843dc042b Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Mon, 9 Oct 2017 19:02:24 +0800 Subject: [PATCH 268/342] follow coments --- paddle/operators/CMakeLists.txt | 3 +- paddle/operators/math/pooling.h | 42 +++++++++++++++++-- paddle/operators/pool_with_index_op.cc | 20 ++++----- paddle/operators/pool_with_index_op.cu | 8 ++-- .../v2/framework/tests/test_pool_max_op.py | 21 +++++----- 5 files changed, 65 insertions(+), 29 deletions(-) diff --git a/paddle/operators/CMakeLists.txt b/paddle/operators/CMakeLists.txt index 49da132049..39af318ca5 100644 --- a/paddle/operators/CMakeLists.txt +++ b/paddle/operators/CMakeLists.txt @@ -75,10 +75,11 @@ function(op_library TARGET) file(APPEND ${pybind_file} "USE_OP(reduce_sum);\n") endif() + # pool_with_index_op contains several operators if ("${TARGET}" STREQUAL "pool_with_index_op") set(pybind_flag 1) # It's enough to just adding one operator to pybind - file(APPEND ${pybind_file} "USE_OP(maxPool2dWithIndex);\n") + file(APPEND ${pybind_file} "USE_OP(max_pool2d_with_index);\n") endif() # pybind USE_NO_KERNEL_OP diff --git a/paddle/operators/math/pooling.h b/paddle/operators/math/pooling.h index d819e5986e..f15ddca69a 100644 --- a/paddle/operators/math/pooling.h +++ b/paddle/operators/math/pooling.h @@ -21,15 +21,26 @@ limitations under the License. */ namespace paddle { namespace operators { namespace math { -////////////////////// -#define FLT_MAX __FLT_MAX__ // +#define FLT_MAX \ + __FLT_MAX__ // It might need to be placed in another file, but I'm still + // wondering where to put it + +/* + * \brief Extracting simple operations from pooling. + * Both MaxPool and AvgPool need initial, compute and finalize operation. + * MaxPool initializes temp variable to the negative maximum to find the + * maximum value in the pooling field. + * AvgPool initializes temp variable to the zero to accumulate all values + * in pool pooling, and takes the average. + * MaxPoolGrad and AvgPoolGrad are gradient operations respectively. + */ template class MaxPool { public: DEVICE inline T initial() { return static_cast(-FLT_MAX); } DEVICE inline void compute(T& y, const T& x) { y = y > x ? y : x; } - DEVICE inline void finalize(T& y, const T& poo_size) {} + DEVICE inline void finalize(T& y, const T& pool_field) {} }; template @@ -37,8 +48,9 @@ class AvgPool { public: DEVICE inline T initial() { return static_cast(0); } DEVICE inline void compute(T& y, const T& x) { y += x; } - DEVICE inline void finalize(T& y, const T& poo_size) { y /= poo_size; } + DEVICE inline void finalize(T& y, const T& pool_field) { y /= pool_field; } }; + template class MaxPoolGrad { public: @@ -57,6 +69,20 @@ class AvgPoolGrad { } }; +/* + * \brief Getting pooling results, and calculating gradient. + * + * In pool2d, all tensors are in NCHW format. In pool3d, all tensors are in + * NCDHW format. + * + * In max pooling, it is possible that the pooling region has multiple maximum + * elements. + * In this case, we should compute the gradient of the first maximum element. + * This is different from average pooling. So we rewrite the max_pool_grad: + * MaxPool2dGradFunctor, MaxPool3dGradFunctor. + * + */ + template class Pool2dFunctor { public: @@ -117,6 +143,14 @@ class MaxPool3dGradFunctor { std::vector& strides, std::vector& paddings); }; +/* + * \brief Getting max pooling results and corresponding max index, and + * calculating gradient. + * In sub-sampling-pooling, it is necessary to know max element index. + * In pool2d, all tensors are in NCHW format. In pool3d, all tensors are in + * NCDHW format. + * + */ template class MaxPool2dWithIndexFunctor { public: diff --git a/paddle/operators/pool_with_index_op.cc b/paddle/operators/pool_with_index_op.cc index c51145b923..2e6a5f2555 100644 --- a/paddle/operators/pool_with_index_op.cc +++ b/paddle/operators/pool_with_index_op.cc @@ -17,8 +17,8 @@ limitations under the License. */ namespace paddle { namespace operators { -int OutputSizeMaxPool(int input_size, int filter_size, int padding, - int stride) { +inline int OutputSizeMaxPool(int input_size, int filter_size, int padding, + int stride) { int output_size = (input_size - filter_size + 2 * padding) / stride + 1; return output_size; } @@ -194,24 +194,24 @@ the input and ksize, strides, paddings parameters. namespace ops = paddle::operators; -REGISTER_OP(maxPool2dWithIndex, ops::MaxPoolWithIndexOp, - ops::MaxPool2dWithIndexOpMaker, maxPool2dWithIndex_grad, +REGISTER_OP(max_pool2d_with_index, ops::MaxPoolWithIndexOp, + ops::MaxPool2dWithIndexOpMaker, max_pool2d_with_index_grad, ops::MaxPoolWithIndexOpGrad); REGISTER_OP_CPU_KERNEL( - maxPool2dWithIndex, + max_pool2d_with_index, ops::MaxPoolWithIndexKernel); REGISTER_OP_CPU_KERNEL( - maxPool2dWithIndex_grad, + max_pool2d_with_index_grad, ops::MaxPoolWithIndexGradKernel) -REGISTER_OP(maxPool3dWithIndex, ops::MaxPoolWithIndexOp, - ops::MaxPool3dWithIndexOpMaker, maxPool3dWithIndex_grad, +REGISTER_OP(max_pool3d_with_index, ops::MaxPoolWithIndexOp, + ops::MaxPool3dWithIndexOpMaker, max_pool3d_with_index_grad, ops::MaxPoolWithIndexOpGrad); REGISTER_OP_CPU_KERNEL( - maxPool3dWithIndex, + max_pool3d_with_index, ops::MaxPoolWithIndexKernel); REGISTER_OP_CPU_KERNEL( - maxPool3dWithIndex_grad, + max_pool3d_with_index_grad, ops::MaxPoolWithIndexGradKernel) diff --git a/paddle/operators/pool_with_index_op.cu b/paddle/operators/pool_with_index_op.cu index 8007fc7ccf..287657d4b1 100644 --- a/paddle/operators/pool_with_index_op.cu +++ b/paddle/operators/pool_with_index_op.cu @@ -17,15 +17,15 @@ limitations under the License. */ namespace ops = paddle::operators; REGISTER_OP_GPU_KERNEL( - maxPool2dWithIndex, + max_pool2d_with_index, ops::MaxPoolWithIndexKernel); REGISTER_OP_GPU_KERNEL( - maxPool2dWithIndex_grad, + max_pool2d_with_index_grad, ops::MaxPoolWithIndexGradKernel) REGISTER_OP_GPU_KERNEL( - maxPool3dWithIndex, + max_pool3d_with_index, ops::MaxPoolWithIndexKernel); REGISTER_OP_GPU_KERNEL( - maxPool3dWithIndex_grad, + max_pool3d_with_index_grad, ops::MaxPoolWithIndexGradKernel) diff --git a/python/paddle/v2/framework/tests/test_pool_max_op.py b/python/paddle/v2/framework/tests/test_pool_max_op.py index 17028c3bf6..f0f8aa6089 100644 --- a/python/paddle/v2/framework/tests/test_pool_max_op.py +++ b/python/paddle/v2/framework/tests/test_pool_max_op.py @@ -100,7 +100,8 @@ class TestMaxPoolWithIndex_Op(OpTest): def initTestCase(self): self.global_pool = True - self.op_type = "maxPool3dWithIndex" + self.index = "max_pool3d_with_index" + self.op_type = "%s" % self.index self.pool_forward_naive = max_pool3D_forward_naive self.shape = [2, 3, 5, 5, 5] self.ksize = [3, 3, 3] @@ -111,7 +112,7 @@ class TestMaxPoolWithIndex_Op(OpTest): class TestCase1(TestMaxPoolWithIndex_Op): def initTestCase(self): self.global_pool = True - self.op_type = "maxPool3dWithIndex" + self.op_type = "max_pool3d_with_index" self.pool_forward_naive = max_pool3D_forward_naive self.shape = [2, 3, 5, 5, 5] self.ksize = [3, 3, 3] @@ -122,7 +123,7 @@ class TestCase1(TestMaxPoolWithIndex_Op): class TestCase2(TestMaxPoolWithIndex_Op): def initTestCase(self): self.global_pool = False - self.op_type = "maxPool3dWithIndex" + self.op_type = "max_pool3d_with_index" self.pool_forward_naive = max_pool3D_forward_naive self.shape = [2, 3, 7, 7, 7] self.ksize = [3, 3, 3] @@ -133,7 +134,7 @@ class TestCase2(TestMaxPoolWithIndex_Op): class TestCase3(TestMaxPoolWithIndex_Op): def initTestCase(self): self.global_pool = False - self.op_type = "maxPool3dWithIndex" + self.op_type = "max_pool3d_with_index" self.pool_forward_naive = max_pool3D_forward_naive self.shape = [2, 3, 7, 7, 7] self.ksize = [3, 3, 3] @@ -144,7 +145,7 @@ class TestCase3(TestMaxPoolWithIndex_Op): class TestCase4(TestMaxPoolWithIndex_Op): def initTestCase(self): self.global_pool = True - self.op_type = "maxPool3dWithIndex" + self.op_type = "max_pool3d_with_index" self.pool_forward_naive = max_pool3D_forward_naive self.shape = [2, 3, 5, 5, 5] self.ksize = [3, 3, 3] @@ -155,7 +156,7 @@ class TestCase4(TestMaxPoolWithIndex_Op): class TestCase5(TestMaxPoolWithIndex_Op): def initTestCase(self): self.global_pool = True - self.op_type = "maxPool3dWithIndex" + self.op_type = "max_pool3d_with_index" self.pool_forward_naive = max_pool3D_forward_naive self.shape = [2, 3, 5, 5, 5] self.ksize = [3, 3, 3] @@ -166,7 +167,7 @@ class TestCase5(TestMaxPoolWithIndex_Op): class TestCase6(TestMaxPoolWithIndex_Op): def initTestCase(self): self.global_pool = False - self.op_type = "maxPool2dWithIndex" + self.op_type = "max_pool2d_with_index" self.pool_forward_naive = max_pool2D_forward_naive self.shape = [2, 3, 7, 7] self.ksize = [3, 3] @@ -177,7 +178,7 @@ class TestCase6(TestMaxPoolWithIndex_Op): class TestCase7(TestMaxPoolWithIndex_Op): def initTestCase(self): self.global_pool = False - self.op_type = "maxPool2dWithIndex" + self.op_type = "max_pool2d_with_index" self.pool_forward_naive = max_pool2D_forward_naive self.shape = [2, 3, 7, 7] self.ksize = [3, 3] @@ -188,7 +189,7 @@ class TestCase7(TestMaxPoolWithIndex_Op): class TestCase8(TestMaxPoolWithIndex_Op): def initTestCase(self): self.global_pool = True - self.op_type = "maxPool2dWithIndex" + self.op_type = "max_pool2d_with_index" self.pool_forward_naive = max_pool2D_forward_naive self.shape = [2, 3, 5, 5] self.ksize = [3, 3] @@ -199,7 +200,7 @@ class TestCase8(TestMaxPoolWithIndex_Op): class TestCase9(TestMaxPoolWithIndex_Op): def initTestCase(self): self.global_pool = True - self.op_type = "maxPool2dWithIndex" + self.op_type = "max_pool2d_with_index" self.pool_forward_naive = max_pool2D_forward_naive self.shape = [2, 3, 5, 5] self.ksize = [3, 3] From a06f099d9f54b47ce4df7d1ae32c928fb8d7593e Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Mon, 9 Oct 2017 16:34:05 +0800 Subject: [PATCH 269/342] refine comment of interp_op --- paddle/operators/interp_op.cc | 43 +++++++++++-------- .../v2/framework/tests/test_interp_op.py | 6 +-- 2 files changed, 28 insertions(+), 21 deletions(-) diff --git a/paddle/operators/interp_op.cc b/paddle/operators/interp_op.cc index fc8b9a11b8..d02b01c3f3 100644 --- a/paddle/operators/interp_op.cc +++ b/paddle/operators/interp_op.cc @@ -30,27 +30,26 @@ class InterpOp : public NetOp { "Input(Y) of InterpOp should not be null."); PADDLE_ENFORCE_NE(Input("W"), framework::kEmptyVarName, "Input(W) of InterpOp should not be null."); - PADDLE_ENFORCE_NE(Output("MinusOut"), framework::kEmptyVarName, - "Output(MinusOut) of InterpOp should not be null."); + PADDLE_ENFORCE_NE(Output("SubOut"), framework::kEmptyVarName, + "Output(SubOut) of InterpOp should not be null."); PADDLE_ENFORCE_NE(Output("MulOut"), framework::kEmptyVarName, "Output(MulOut) of InterpOp should not be null."); PADDLE_ENFORCE_NE(Output("Out"), framework::kEmptyVarName, "Output(Out) of InterpOp should not be null."); - // MinusOut = X - Y + // SubOut = X - Y auto x = Input("X"); auto y = Input("Y"); - auto minus_out = Output("MinusOut"); - AppendOp(framework::OpRegistry::CreateOp("elementwise_sub", - {{"X", {x}}, {"Y", {y}}}, - {{"Out", {minus_out}}}, {})); + auto sub_out = Output("SubOut"); + AppendOp(framework::OpRegistry::CreateOp( + "elementwise_sub", {{"X", {x}}, {"Y", {y}}}, {{"Out", {sub_out}}}, {})); - // MulOut = MinusOut * W = (X - Y) * W + // MulOut = SubOut * W = (X - Y) * W auto w = Input("W"); auto mul_out = Output("MulOut"); AppendOp(framework::OpRegistry::CreateOp( - "elementwise_mul", {{"X", {minus_out}}, {"Y", {w}}}, - {{"Out", {mul_out}}}, {{"axis", 0}})); + "elementwise_mul", {{"X", {sub_out}}, {"Y", {w}}}, {{"Out", {mul_out}}}, + {{"axis", 0}})); // Out = MulOut + Y = (X - Y) * W + Y = X * W + Y * (1 - W) AppendOp(framework::OpRegistry::CreateOp("elementwise_add", @@ -65,18 +64,26 @@ class InterpOpMaker : public framework::OpProtoAndCheckerMaker { public: InterpOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "A 2-D Tensor, the first input of interp_op"); - AddInput("Y", "A 2-D Tensor, the second input of interp_op"); - AddInput("W", "A 1-D Tensor, the interpolated values"); - AddOutput("MinusOut", - "A 2-D Tensor, the intermediate outputs, saving X - Y.") + AddInput("X", + "(Tensor), 2-D Matrix of shape [batch_size, data_dim]" + "containing data samples, the first input of interp_op"); + AddInput("Y", + "(Tensor), 2-D Matrix of shape `[batch_size, data_dim]`" + "containing data samples, the second input of interp_op"); + AddInput("W", + "(Tensor), 1-D Vector of shape [batch_size]," + "the interpolated values in the half-open interval [0.0, 1.0)"); + AddOutput("SubOut", + "(Tensor), the intermediate subtraction outputs, saving X - Y.") .AsIntermediate(); AddOutput("MulOut", - "A 2-D Tensor, the intermediate outputs," - "saving the mul mul of (X - Y) and W") + "(Tensor), the intermediate multiplication outputs," + "saving the elementwise multiplication of (X - Y) and W.") .AsIntermediate(); AddOutput("Out", - "A 2-D Tensor, the output of interp_op, same shape with X"); + "(Tensor), the output of interp_op, same shape with X," + "returns the first-dimensional piecewise linear interpolant " + "between X and Y"); AddComment(R"DOC( Linear Interpolation with two inputs, used in NEURAL TURING MACHINE. diff --git a/python/paddle/v2/framework/tests/test_interp_op.py b/python/paddle/v2/framework/tests/test_interp_op.py index f82dcc7f50..066569b96c 100644 --- a/python/paddle/v2/framework/tests/test_interp_op.py +++ b/python/paddle/v2/framework/tests/test_interp_op.py @@ -10,12 +10,12 @@ class TestInterpOp(OpTest): y = np.random.random((2, 3)).astype("float32") w = np.random.random(2).astype("float32") - minus_out = x - y - mul_out = minus_out * w.reshape(2, 1) + sub_out = x - y + mul_out = sub_out * w.reshape(2, 1) out = mul_out + y self.inputs = {'X': x, 'Y': y, 'W': w} - self.outputs = {'Out': out, 'MinusOut': minus_out, 'MulOut': mul_out} + self.outputs = {'Out': out, 'SubOut': sub_out, 'MulOut': mul_out} def test_check_output(self): self.check_output() From 597299074efb2e926954219c4afac9a6b189904d Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Mon, 9 Oct 2017 20:11:01 +0800 Subject: [PATCH 270/342] fix bug in REGISTER_OP(reduce_min) --- paddle/operators/reduce_op.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/operators/reduce_op.cc b/paddle/operators/reduce_op.cc index 87f66e1e93..55f294a9be 100644 --- a/paddle/operators/reduce_op.cc +++ b/paddle/operators/reduce_op.cc @@ -175,7 +175,7 @@ REGISTER_OP(reduce_mean, ops::ReduceOp, ops::ReduceMeanOpMaker, REGISTER_OP(reduce_max, ops::ReduceOp, ops::ReduceMaxOpMaker, reduce_max_grad, ops::ReduceGradOp); -REGISTER_OP(reduce_min, ops::ReduceOp, ops::ReduceMaxOpMaker, reduce_min_grad, +REGISTER_OP(reduce_min, ops::ReduceOp, ops::ReduceMinOpMaker, reduce_min_grad, ops::ReduceGradOp); #define REGISTER_REDUCE_CPU_KERNEL(reduce_type, functor, grad_functor) \ From b14c122a949402169678aa1a154349b2fbd8ddc0 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Mon, 9 Oct 2017 10:28:26 -0700 Subject: [PATCH 271/342] Fix bug --- paddle/framework/block_desc.cc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/paddle/framework/block_desc.cc b/paddle/framework/block_desc.cc index 01f50e1393..509aa235d3 100644 --- a/paddle/framework/block_desc.cc +++ b/paddle/framework/block_desc.cc @@ -74,6 +74,12 @@ void BlockDescBind::Sync() { for (auto &op_desc : ops_) { op_field.AddAllocated(op_desc->Proto()); } + auto &var_field = *this->desc_->mutable_vars(); + var_field.Clear(); + var_field.Reserve(static_cast(vars_.size())); + for (auto &var_desc : vars_) { + var_field.AddAllocated(var_desc.second->Proto()); + } need_update_ = false; } } From 3c39df197e2fbb0e8666bd8bb20e2a60e5a47d9b Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Mon, 9 Oct 2017 10:30:20 -0700 Subject: [PATCH 272/342] Init Python API Following the design * https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/python_api.md Just written `Program`, `Block` and unittest of program. --- python/paddle/v2/framework/graph.py | 45 +++++++++++++++++++ .../paddle/v2/framework/tests/test_program.py | 36 +++++++++++++++ 2 files changed, 81 insertions(+) create mode 100644 python/paddle/v2/framework/graph.py create mode 100644 python/paddle/v2/framework/tests/test_program.py diff --git a/python/paddle/v2/framework/graph.py b/python/paddle/v2/framework/graph.py new file mode 100644 index 0000000000..5211b0f166 --- /dev/null +++ b/python/paddle/v2/framework/graph.py @@ -0,0 +1,45 @@ +import paddle.v2.framework.core as core + + +class Block(object): + def __init__(self, program, idx): + self.proto = program.proto.block(idx) + self.vars = dict() # var_name --> var + self.ops = list() # operator list + self.program = program + + @property + def parent_idx(self): + return self.proto.parent + + @property + def idx(self): + return self.proto.id + + +class Program(object): + def __init__(self): + self.proto = core.ProgramDesc.instance() + assert self.proto.num_blocks() == 1 + self.blocks = [Block(self, 0)] + self.current_block_idx = 0 + + def global_block(self): + return self.blocks[0] + + def current_block(self): + return self.blocks[self.current_block_idx] + + def create_block(self): + new_block_idx = len(self.blocks) + self.proto.append_block(self.current_block().proto) + self.current_block_idx = new_block_idx + self.blocks.append(Block(self, self.current_block_idx)) + return self.current_block() + + def rollback(self): + self.current_block_idx = self.current_block().parent_idx + + +# program is a global instance. +g_program = Program() diff --git a/python/paddle/v2/framework/tests/test_program.py b/python/paddle/v2/framework/tests/test_program.py new file mode 100644 index 0000000000..b82d1760d6 --- /dev/null +++ b/python/paddle/v2/framework/tests/test_program.py @@ -0,0 +1,36 @@ +import unittest +from paddle.v2.framework.graph import g_program + + +class TestProgram(unittest.TestCase): + def test_program(self): + b = g_program.current_block() + self.assertEqual(-1, b.parent_idx) + self.assertEqual(0, b.idx) + + b = g_program.create_block() + self.assertEqual(1, b.idx) + self.assertEqual(0, b.parent_idx) + + b = g_program.create_block() + self.assertEqual(2, b.idx) + self.assertEqual(1, b.parent_idx) + + g_program.rollback() + + b = g_program.current_block() + self.assertEqual(1, b.idx) + self.assertEqual(0, b.parent_idx) + + b = g_program.create_block() + self.assertEqual(3, b.idx) + self.assertEqual(1, b.parent_idx) + + g_program.rollback() + b = g_program.current_block() + self.assertEqual(1, b.idx) + self.assertEqual(0, b.parent_idx) + + +if __name__ == '__main__': + unittest.main() From ee545e47ccfa79a793bb0c7adabe6f0e852afc13 Mon Sep 17 00:00:00 2001 From: zchen0211 Date: Mon, 9 Oct 2017 10:34:59 -0700 Subject: [PATCH 273/342] gan api --- doc/design/gan_api.md | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/design/gan_api.md b/doc/design/gan_api.md index d0f8b47ca3..0db18f92a7 100644 --- a/doc/design/gan_api.md +++ b/doc/design/gan_api.md @@ -15,6 +15,7 @@ In our GAN design, we wrap it as a user-friendly easily customized python API to | batch norm (missing) | Zhuoyuan, Jiayi | Y | | cudnn batch norm (missing)| Zhuoyuan, Jiayi | N | | max-pooling (done) | ? | Y | +| cudnn-max-pool (missing) | Chengduo | Y | | fc (done) | ? | Y | | softmax loss (done) | ? | Y | | reshape op (done) | ? | Y | From bedcf074a2c497afeb057cb8a1ecfaa3eb39a7dd Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Mon, 9 Oct 2017 10:37:31 -0700 Subject: [PATCH 274/342] Implementation singleton --- python/paddle/v2/framework/graph.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/python/paddle/v2/framework/graph.py b/python/paddle/v2/framework/graph.py index 5211b0f166..7468279438 100644 --- a/python/paddle/v2/framework/graph.py +++ b/python/paddle/v2/framework/graph.py @@ -18,9 +18,18 @@ class Block(object): class Program(object): + @classmethod + def instance(cls): + # From https://stackoverflow.com/questions/8212053 + # Making Program as a Singleton class. + if not hasattr(cls, '_instance'): + cls._instance = cls() + return cls._instance + def __init__(self): + assert not hasattr(self.__class__, + '_instance'), 'Do not call constructor directly!' self.proto = core.ProgramDesc.instance() - assert self.proto.num_blocks() == 1 self.blocks = [Block(self, 0)] self.current_block_idx = 0 @@ -42,4 +51,4 @@ class Program(object): # program is a global instance. -g_program = Program() +g_program = Program.instance() From f30a1f42f0b90b17c2664d7e9a65070ee1c3a473 Mon Sep 17 00:00:00 2001 From: kavyasrinet Date: Mon, 9 Oct 2017 10:49:21 -0700 Subject: [PATCH 275/342] Adding relu6 activation function (#4607) --- paddle/operators/activation_op.cc | 16 ++++++++++ paddle/operators/activation_op.h | 31 +++++++++++++++++++ .../v2/framework/tests/test_activation_op.py | 19 +++++++----- 3 files changed, 59 insertions(+), 7 deletions(-) diff --git a/paddle/operators/activation_op.cc b/paddle/operators/activation_op.cc index 2afa8a68b0..43081d2326 100644 --- a/paddle/operators/activation_op.cc +++ b/paddle/operators/activation_op.cc @@ -201,6 +201,19 @@ class SoftReluOpMaker : public framework::OpProtoAndCheckerMaker { } }; +template +class Relu6OpMaker : public framework::OpProtoAndCheckerMaker { + public: + Relu6OpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "Input of Relu6 operator"); + AddOutput("Y", "Output of Relu6 operator"); + AddComment("Relu6 activation operator, relu6 = min(max(0, x), 6)"); + AddAttr("threshold", "The threshold value of Relu6") + .SetDefault(static_cast(6)); + } +}; + template class PowOpMaker : public framework::OpProtoAndCheckerMaker { public: @@ -276,6 +289,9 @@ REGISTER_OP(leaky_relu, ops::ActivationOp, ops::LeakyReluOpMaker, REGISTER_OP(soft_relu, ops::ActivationOp, ops::SoftReluOpMaker, soft_relu_grad, ops::ActivationOpGrad); +REGISTER_OP(relu6, ops::ActivationOp, ops::Relu6OpMaker, relu6_grad, + ops::ActivationOpGrad); + REGISTER_OP(pow, ops::ActivationOp, ops::PowOpMaker, pow_grad, ops::ActivationOpGrad); diff --git a/paddle/operators/activation_op.h b/paddle/operators/activation_op.h index 2450601742..f127468125 100644 --- a/paddle/operators/activation_op.h +++ b/paddle/operators/activation_op.h @@ -280,6 +280,36 @@ struct BReluGradFunctor : public BaseActivationFunctor { } }; +// relu6(x) = min(max(0, x), 6) +template +struct Relu6Functor : public BaseActivationFunctor { + float threshold; + + // NOTE: Explicit hides the `BaseActivationFunctor::GetAttrs` + // not polymorphism for speed. + typename BaseActivationFunctor::AttrPair GetAttrs() { + return {{"threshold", &threshold}}; + } + + template + void operator()(Device d, X x, Y y) const { + y.device(d) = x.cwiseMax(static_cast(0)).cwiseMin(threshold); + } +}; + +template +struct Relu6GradFunctor : public BaseActivationFunctor { + float threshold; + typename BaseActivationFunctor::AttrPair GetAttrs() { + return {{"threshold", &threshold}}; + } + template + void operator()(Device d, X x, Y y, dY dy, dX dx) const { + dx.device(d) = + dy * ((x > static_cast(0)) * (x < threshold)).template cast(); + } +}; + // softsign(x) = x / (1 + |x|) template struct SoftsignFunctor : public BaseActivationFunctor { @@ -425,5 +455,6 @@ struct STanhGradFunctor : public BaseActivationFunctor { __macro(pow, PowFunctor, PowGradFunctor); \ __macro(stanh, STanhFunctor, STanhGradFunctor); \ __macro(softsign, SoftsignFunctor, SoftsignGradFunctor); \ + __macro(relu6, Relu6Functor, Relu6GradFunctor); \ __macro(leaky_relu, LeakyReluFunctor, LeakyReluGradFunctor); \ __macro(tanh_shrink, TanhShrinkFunctor, TanhShrinkGradFunctor) diff --git a/python/paddle/v2/framework/tests/test_activation_op.py b/python/paddle/v2/framework/tests/test_activation_op.py index 701e1a1aee..8b76decaec 100644 --- a/python/paddle/v2/framework/tests/test_activation_op.py +++ b/python/paddle/v2/framework/tests/test_activation_op.py @@ -137,21 +137,26 @@ class TestBRelu(OpTest): self.check_grad(['X'], 'Y', max_relative_error=0.02) -class TestLeakyRelu(OpTest): +class TestRelu6(OpTest): def setUp(self): - self.op_type = "leaky_relu" - alpha = 0.02 - self.attrs = {'alpha': alpha} - self.inputs = {'X': np.random.uniform(-3, 3, [4, 4]).astype("float32")} + self.op_type = "relu6" + x = np.random.uniform(-1, 1, [4, 10]).astype("float32") + threshold = 6.0 + # The same with TestAbs + x[np.abs(x) < 0.005] = 0.02 + x[np.abs(x - threshold) < 0.005] = threshold + 0.02 + + self.inputs = {'X': x} + self.attrs = {'threshold': threshold} self.outputs = { - 'Y': np.maximum(self.inputs['X'], alpha * self.inputs['X']) + 'Y': np.minimum(np.maximum(self.inputs['X'], 0), threshold) } def test_check_output(self): self.check_output() def test_check_grad(self): - self.check_grad(['X'], 'Y', max_relative_error=0.007) + self.check_grad(['X'], 'Y', max_relative_error=0.02) class TestSoftRelu(OpTest): From 8f4771be226e19593e0434db2293cc6bbcbbdc69 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Mon, 9 Oct 2017 10:49:53 -0700 Subject: [PATCH 276/342] Add skeleton of Variable --- python/paddle/v2/framework/graph.py | 35 +++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/python/paddle/v2/framework/graph.py b/python/paddle/v2/framework/graph.py index 7468279438..5b93115b3e 100644 --- a/python/paddle/v2/framework/graph.py +++ b/python/paddle/v2/framework/graph.py @@ -1,5 +1,37 @@ import paddle.v2.framework.core as core +__all__ = ['Block', 'Variable', 'Program'] + + +class Variable(object): + def __init__(self, block, name=None, shape=None, dtype=None, + lod_level=None): + self.block = block + + if name is None: + name = Variable._unique_var_name_() + self.proto = self.block.proto.new_var(name) + + if shape is not None: + self.proto.set_shape(shape) + + if dtype is not None: + # TODO(yuyang18): Convert dtype from numpy.dtype + self.proto.set_data_type(dtype) + + if lod_level is not None: + # TODO(yuyang18): set_lod_level is not defined. + self.proto.set_lod_level(lod_level) + + self.block.vars[name] = self + + # TODO(yuyang18): Get methods + + @staticmethod + def _unique_var_name_(): + uid = core.unique_integer() # unique during whole process. + return "_generated_var_%d" % uid + class Block(object): def __init__(self, program, idx): @@ -16,6 +48,9 @@ class Block(object): def idx(self): return self.proto.id + def create_var(self, *args, **kwargs): + return Variable(self, *args, **kwargs) + class Program(object): @classmethod From 4cb5bd90218082998f990d0977f05acef8da61e7 Mon Sep 17 00:00:00 2001 From: Abhinav Arora Date: Mon, 9 Oct 2017 10:56:56 -0700 Subject: [PATCH 277/342] Implementing the Adamax optimizer operator (#4538) * Implementing the Adamax optimizer step operator * Adding unit tests for adamax_op * Changing learning rate and time step to inputs from attributes * Changing learning rate and time step to input(tensors) * Making the Adamax operator conform to naming convention * Removing Tensor from comments * Rectifying the Adamax implementation * Changing Unit Test values and adding comments * Changing Unit Test to test multiple steps --- paddle/operators/adamax_op.cc | 139 ++++++++++++++ paddle/operators/adamax_op.cu | 20 ++ paddle/operators/adamax_op.h | 72 +++++++ .../v2/framework/tests/test_adamax_op.py | 178 ++++++++++++++++++ 4 files changed, 409 insertions(+) create mode 100644 paddle/operators/adamax_op.cc create mode 100644 paddle/operators/adamax_op.cu create mode 100644 paddle/operators/adamax_op.h create mode 100644 python/paddle/v2/framework/tests/test_adamax_op.py diff --git a/paddle/operators/adamax_op.cc b/paddle/operators/adamax_op.cc new file mode 100644 index 0000000000..c348e0a0b2 --- /dev/null +++ b/paddle/operators/adamax_op.cc @@ -0,0 +1,139 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/adamax_op.h" + +namespace paddle { +namespace operators { + +class AdamaxOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(framework::InferShapeContextBase *ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("Param"), + "Input(Param) of AdamaxOp should not be null."); + PADDLE_ENFORCE(ctx->HasInput("Grad"), + "Input(Grad) of AdamaxOp should not be null."); + PADDLE_ENFORCE(ctx->HasInput("Moment"), + "Input(Moment) of AdamaxOp should not be null."); + PADDLE_ENFORCE(ctx->HasInput("InfNorm"), + "Input(InfNorm) of AdamaxOp should not be null."); + PADDLE_ENFORCE(ctx->HasInput("LearningRate"), + "Input(LearningRate) of AdamaxOp should not be null."); + PADDLE_ENFORCE(ctx->HasInput("Beta1Pow"), + "Input(Beta1Pow) of AdamaxOp should not be null."); + + PADDLE_ENFORCE(ctx->HasOutput("ParamOut"), + "Output(ParamOut) of AdamaxOp should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("MomentOut"), + "Output(MomentOut) of AdamaxOp should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("InfNormOut"), + "Output(InfNormOut) of AdamaxOp should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("Beta1PowOut"), + "Output(Beta1PowOut) of AdamaxOp should not be null."); + + auto lr_dims = ctx->GetInputDim("LearningRate"); + PADDLE_ENFORCE_EQ(framework::product(lr_dims), 1, + "Learning rate should have 1 dimension"); + auto beta1_pow_dims = ctx->GetInputDim("Beta1Pow"); + PADDLE_ENFORCE_EQ(framework::product(beta1_pow_dims), 1, + "Beta1 power accumulator should have 1 dimension"); + auto param_dims = ctx->GetInputDim("Param"); + PADDLE_ENFORCE_EQ( + param_dims, ctx->GetInputDim("Grad"), + "Param and Grad input of AdamaxOp should have same dimension"); + PADDLE_ENFORCE_EQ( + param_dims, ctx->GetInputDim("Moment"), + "Param and Moment input of AdamaxOp should have same dimension"); + PADDLE_ENFORCE_EQ( + param_dims, ctx->GetInputDim("InfNorm"), + "Param and InfNorm input of AdamaxOp should have same dimension"); + + ctx->SetOutputDim("ParamOut", param_dims); + ctx->SetOutputDim("MomentOut", param_dims); + ctx->SetOutputDim("InfNormOut", param_dims); + ctx->SetOutputDim("Beta1PowOut", beta1_pow_dims); + } +}; + +class AdamaxOpMaker : public framework::OpProtoAndCheckerMaker { + public: + AdamaxOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("Param", "(Tensor) Input parameter"); + AddInput("Grad", "(Tensor) Input gradient"); + AddInput("LearningRate", "(Tensor) Learning rate"); + AddInput("Moment", "(Tensor) First moment"); + AddInput("InfNorm", + "(Tensor) " + "Input exponentially weighted infinity norm"); + AddInput("Beta1Pow", "(Tensor) Input beta1 power accumulator"); + + AddOutput("ParamOut", "(Tensor) Output parameter"); + AddOutput("MomentOut", "(Tensor) Output first moment"); + AddOutput("InfNormOut", + "(Tensor) " + "Output exponentially weighted infinity norm"); + AddOutput("Beta1PowOut", "(Tensor) Output beta1 power accumulator"); + + AddAttr("beta1", + "(float, default 0.9) " + "Exponential decay rate for the " + "1st moment estimates.") + .SetDefault(0.9f); + AddAttr("beta2", + "(float, default 0.999) " + "exponential decay rate for the weighted " + "infinity norm estimates.") + .SetDefault(0.999f); + AddAttr("epsilon", + "(float, default 1.0e-8) " + "Constant for numerical stability") + .SetDefault(1.0e-8f); + AddComment(R"DOC( +Adamax Updates Operator. + +This implements the Adamax optimizer from Section 7 of the Adam +paper[1]. Adamax is a variant of the +Adam algorithm based on the infinity norm. + +Adamax updates: + +moment_out = beta1 * moment + (1 - beta1) * grad +inf_norm_out = max(beta2 * inf_norm + epsilon, abs(grad)) +beta1_pow_out = beta1_pow * beta1 +learning_rate_t = learning_rate/(1 - beta1_pow_out) +param_out = param - learning_rate_t * moment_out/inf_norm_out + +The original paper does not have an epsilon attribute. +However, it is added here for numerical stability +by preventing divide by 0. + +References: + [1] Adam: A Method for Stochastic Optimization + (https://arxiv.org/abs/1412.6980) + +)DOC"); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP_WITHOUT_GRADIENT(adamax, ops::AdamaxOp, ops::AdamaxOpMaker); +REGISTER_OP_CPU_KERNEL(adamax, + ops::AdamaxOpKernel); diff --git a/paddle/operators/adamax_op.cu b/paddle/operators/adamax_op.cu new file mode 100644 index 0000000000..fee3b6fc6b --- /dev/null +++ b/paddle/operators/adamax_op.cu @@ -0,0 +1,20 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#define EIGEN_USE_GPU +#include "paddle/operators/adamax_op.h" + +namespace ops = paddle::operators; +REGISTER_OP_GPU_KERNEL(adamax, + ops::AdamaxOpKernel); diff --git a/paddle/operators/adamax_op.h b/paddle/operators/adamax_op.h new file mode 100644 index 0000000000..9677b1bb78 --- /dev/null +++ b/paddle/operators/adamax_op.h @@ -0,0 +1,72 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once +#include "paddle/framework/eigen.h" +#include "paddle/framework/op_registry.h" + +namespace paddle { +namespace operators { + +template +class AdamaxOpKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto param_out_tensor = ctx.Output("ParamOut"); + auto moment_out_tensor = ctx.Output("MomentOut"); + auto inf_norm_out_tensor = ctx.Output("InfNormOut"); + auto beta1_pow_out_tensor = ctx.Output("Beta1PowOut"); + + param_out_tensor->mutable_data(ctx.GetPlace()); + moment_out_tensor->mutable_data(ctx.GetPlace()); + inf_norm_out_tensor->mutable_data(ctx.GetPlace()); + beta1_pow_out_tensor->mutable_data(ctx.GetPlace()); + + float beta1 = ctx.Attr("beta1"); + float beta2 = ctx.Attr("beta2"); + float epsilon = ctx.Attr("epsilon"); + + auto param = framework::EigenVector::Flatten( + *ctx.Input("Param")); + auto grad = framework::EigenVector::Flatten( + *ctx.Input("Grad")); + auto moment = framework::EigenVector::Flatten( + *ctx.Input("Moment")); + auto inf_norm = framework::EigenVector::Flatten( + *ctx.Input("InfNorm")); + auto lr = framework::EigenVector::Flatten( + *ctx.Input("LearningRate")); + auto beta1_pow = framework::EigenVector::Flatten( + *ctx.Input("Beta1Pow")); + auto param_out = framework::EigenVector::Flatten(*param_out_tensor); + auto moment_out = framework::EigenVector::Flatten(*moment_out_tensor); + auto inf_norm_out = + framework::EigenVector::Flatten(*inf_norm_out_tensor); + auto beta1_pow_out = + framework::EigenVector::Flatten(*beta1_pow_out_tensor); + auto place = ctx.GetEigenDevice(); + + moment_out.device(place) = beta1 * moment + (1 - beta1) * grad; + inf_norm_out.device(place) = + grad.abs().cwiseMax((beta2 * inf_norm) + epsilon); + beta1_pow_out.device(place) = beta1_pow * beta1; + auto lr_t = lr / (1 - beta1_pow_out); + Eigen::DSizes m_dsize(moment_out_tensor->numel()); + param_out.device(place) = + param - lr_t.broadcast(m_dsize) * (moment_out / inf_norm_out); + } +}; + +} // namespace operators +} // namespace paddle diff --git a/python/paddle/v2/framework/tests/test_adamax_op.py b/python/paddle/v2/framework/tests/test_adamax_op.py new file mode 100644 index 0000000000..af81075d6a --- /dev/null +++ b/python/paddle/v2/framework/tests/test_adamax_op.py @@ -0,0 +1,178 @@ +import unittest +import numpy as np +from op_test import OpTest + + +class TestAdamaxOp1(OpTest): + def setUp(self): + '''Test Adamax Operator with supplied attributes + ''' + self.op_type = "adamax" + param = np.random.uniform(-1, 1, (102, 105)).astype("float32") + grad = np.random.uniform(-1, 1, (102, 105)).astype("float32") + moment = np.random.uniform(-1, 1, (102, 105)).astype("float32") + # The infinity norm is positive + inf_norm = np.random.random((102, 105)).astype("float32") + + learning_rate = 0.002 + beta1 = 0.78 + beta2 = 0.899 + epsilon = 1e-5 + beta1_pow = beta1**10 + + self.inputs = { + 'Param': param, + 'Grad': grad, + 'Moment': moment, + 'InfNorm': inf_norm, + 'LearningRate': np.array([learning_rate]).astype("float32"), + 'Beta1Pow': np.array([beta1_pow]).astype("float32") + } + + self.attrs = {'beta1': beta1, 'beta2': beta2, 'epsilon': epsilon} + + param_out, moment_out, inf_norm_out, beta1_pow_out = adamax_step( + self.inputs, self.attrs) + + self.outputs = { + 'ParamOut': param_out, + 'MomentOut': moment_out, + 'InfNormOut': inf_norm_out, + 'Beta1PowOut': beta1_pow_out + } + + def test_check_output(self): + self.check_output() + + +class TestAdamaxOp2(OpTest): + '''Test Adamax Operator with default attributes + ''' + + def setUp(self): + self.op_type = "adamax" + param = np.random.uniform(-1, 1, (102, 105)).astype("float32") + grad = np.random.uniform(-1, 1, (102, 105)).astype("float32") + moment = np.random.uniform(-1, 1, (102, 105)).astype("float32") + # The infinity norm is positive + inf_norm = np.random.random((102, 105)).astype("float32") + + learning_rate = 0.002 + beta1 = 0.9 + beta2 = 0.999 + epsilon = 1e-8 + beta1_pow = beta1**8 + + self.inputs = { + 'Param': param, + 'Grad': grad, + 'Moment': moment, + 'InfNorm': inf_norm, + 'LearningRate': np.array([learning_rate]).astype("float32"), + 'Beta1Pow': np.array([beta1_pow]).astype("float32") + } + + attrs = {'beta1': beta1, 'beta2': beta2, 'epsilon': epsilon} + param_out, moment_out, inf_norm_out, beta1_pow_out = adamax_step( + self.inputs, attrs) + + self.outputs = { + 'ParamOut': param_out, + 'MomentOut': moment_out, + 'InfNormOut': inf_norm_out, + 'Beta1PowOut': beta1_pow_out + } + + def test_check_output(self): + self.check_output() + + +class TestAdamaxOpMultipleSteps(OpTest): + def setUp(self): + '''Test Adamax Operator with supplied attributes + ''' + self.op_type = "adamax" + self.num_steps = 10 + + param = np.random.uniform(-1, 1, (102, 105)).astype("float32") + grad = np.random.uniform(-1, 1, (102, 105)).astype("float32") + moment = np.random.uniform(-1, 1, (102, 105)).astype("float32") + # The infinity norm is positive + inf_norm = np.random.random((102, 105)).astype("float32") + + learning_rate = 0.002 + beta1 = 0.8 + beta2 = 0.99 + epsilon = 1e-5 + beta1_pow = 1 + + self.inputs = { + 'Param': param, + 'Grad': grad, + 'Moment': moment, + 'InfNorm': inf_norm, + 'LearningRate': np.array([learning_rate]).astype("float32"), + 'Beta1Pow': np.array([beta1_pow]).astype("float32") + } + + self.attrs = {'beta1': beta1, 'beta2': beta2, 'epsilon': epsilon} + + param_out, moment_out, inf_norm_out, beta1_pow_out = adamax_step( + self.inputs, self.attrs) + + def test_check_output(self): + for _ in range(self.num_steps): + param_out, moment_out, inf_norm_out, beta1_pow_out = adamax_step( + self.inputs, self.attrs) + + self.outputs = { + 'ParamOut': param_out, + 'MomentOut': moment_out, + 'InfNormOut': inf_norm_out, + 'Beta1PowOut': beta1_pow_out + } + + # Verify output for this step + self.check_output() + + # Output of this step becomes input for next step + self.inputs['Param'] = param_out + self.inputs['Moment'] = moment_out + self.inputs['InfNorm'] = inf_norm_out + self.inputs['Beta1Pow'] = beta1_pow_out + + # Randomize gradient for next step + self.inputs['Grad'] = np.random.uniform( + -1, 1, (102, 105)).astype("float32") + + +def adamax_step(inputs, attributes): + ''' + Simulate one step of the adamax optimizer + :param inputs: dict of inputs + :param attributes: dict of attributes + :return tuple: tuple of output param, moment, inf_norm and + beta1 power accumulator + ''' + param = inputs['Param'] + grad = inputs['Grad'] + moment = inputs['Moment'] + inf_norm = inputs['InfNorm'] + lr = inputs['LearningRate'] + beta1_pow = inputs['Beta1Pow'] + + beta1 = attributes['beta1'] + beta2 = attributes['beta2'] + epsilon = attributes['epsilon'] + + moment_out = beta1 * moment + (1 - beta1) * grad + inf_norm_out = np.maximum(beta2 * inf_norm + epsilon, np.abs(grad)) + beta1_pow_out = beta1_pow * beta1 + lr_t = (lr / (1 - beta1_pow_out)) + param_out = param - lr_t * np.divide(moment_out, inf_norm_out) + + return param_out, moment_out, inf_norm_out, beta1_pow_out + + +if __name__ == "__main__": + unittest.main() From 61a5181e31a073a2b23cc76028fc24119d4970c7 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Mon, 9 Oct 2017 11:05:25 -0700 Subject: [PATCH 278/342] Add skeleton of Operator --- python/paddle/v2/framework/graph.py | 44 +++++++++++++++++++++++++++-- 1 file changed, 42 insertions(+), 2 deletions(-) diff --git a/python/paddle/v2/framework/graph.py b/python/paddle/v2/framework/graph.py index 5b93115b3e..6f2a76a983 100644 --- a/python/paddle/v2/framework/graph.py +++ b/python/paddle/v2/framework/graph.py @@ -1,6 +1,7 @@ import paddle.v2.framework.core as core +import collections -__all__ = ['Block', 'Variable', 'Program'] +__all__ = ['Block', 'Variable', 'Program', 'Operator'] class Variable(object): @@ -24,6 +25,7 @@ class Variable(object): self.proto.set_lod_level(lod_level) self.block.vars[name] = self + self.op = None # TODO(yuyang18): Get methods @@ -33,11 +35,37 @@ class Variable(object): return "_generated_var_%d" % uid +class Operator(object): + def __init__(self, + block, + proto, + type=None, + inputs=None, + outputs=None, + attrs=None): + self.block = block + self.proto = proto + if type is not None: + # TODO. + pass + if inputs is not None: + # TODO + pass + if outputs is not None: + # TODO + pass + if attrs is not None: + # TODO + pass + + # TODO: Getters + + class Block(object): def __init__(self, program, idx): self.proto = program.proto.block(idx) self.vars = dict() # var_name --> var - self.ops = list() # operator list + self.ops = collections.deque() # operator list self.program = program @property @@ -51,6 +79,18 @@ class Block(object): def create_var(self, *args, **kwargs): return Variable(self, *args, **kwargs) + def append_op(self, *args, **kwargs): + op_proto = self.proto.append_op() + op = Operator(self, op_proto, *args, **kwargs) + self.ops.append(op) + return op + + def prepend_op(self, *args, **kwargs): + op_proto = self.proto.prepend_op() + op = Operator(self, op_proto, *args, **kwargs) + self.ops.appendleft(op) + return op + class Program(object): @classmethod From c464ec21d8b0a1e7ad6da7115b78cd047d9a2041 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Mon, 9 Oct 2017 12:09:39 -0700 Subject: [PATCH 279/342] Fix bug of foward default attribute not passed to backward --- paddle/framework/backward.cc | 2 +- paddle/framework/op_desc.h | 5 +++++ paddle/framework/op_registry.cc | 11 ++++++++--- paddle/framework/op_registry.h | 2 +- 4 files changed, 15 insertions(+), 5 deletions(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index c970e01dd1..0a4688db9c 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -302,7 +302,7 @@ std::vector> MakeOpGrad( return grad_op_descs; // empty vector } - grad_op_descs = OpRegistry::CreateGradOpDescs(*op_desc); + grad_op_descs = OpRegistry::CreateGradOpDescs(op_desc.get()); std::list> pending_fill_zeros_ops; for (auto& desc : grad_op_descs) { diff --git a/paddle/framework/op_desc.h b/paddle/framework/op_desc.h index b39808dad1..b729029412 100644 --- a/paddle/framework/op_desc.h +++ b/paddle/framework/op_desc.h @@ -97,6 +97,11 @@ class OpDescBind { const VariableNameMap &Outputs() const { return outputs_; } + AttributeMap *MutableAttrMap() { + this->need_update_ = true; + return &this->attrs_; + } + private: template static std::vector MapKeys(const MapType &map) { diff --git a/paddle/framework/op_registry.cc b/paddle/framework/op_registry.cc index 66043f6e04..b118edae17 100644 --- a/paddle/framework/op_registry.cc +++ b/paddle/framework/op_registry.cc @@ -60,9 +60,14 @@ std::unique_ptr OpRegistry::CreateOp(const OpDescBind& op_desc) { } std::vector> OpRegistry::CreateGradOpDescs( - const OpDescBind& op_desc) { - auto& info = OpInfoMap::Instance().Get(op_desc.Type()); - return info.grad_op_maker_(op_desc); + OpDescBind* op_desc) { + auto& info = OpInfoMap::Instance().Get(op_desc->Type()); + + if (info.Checker() != nullptr) { + info.Checker()->Check(*op_desc->MutableAttrMap()); + } + + return info.grad_op_maker_(*op_desc); } } // namespace framework diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h index cce3605fd4..5ca3af52a6 100644 --- a/paddle/framework/op_registry.h +++ b/paddle/framework/op_registry.h @@ -80,7 +80,7 @@ class OpRegistry { static std::unique_ptr CreateOp(const OpDesc& op_desc); static std::vector> CreateGradOpDescs( - const OpDescBind& op_desc); + OpDescBind* op_desc); static std::unique_ptr CreateOp(const OpDescBind& op_desc); }; From 49ca0b4831037a3faa955ecf45ebedbee21ea833 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Mon, 9 Oct 2017 13:10:04 -0700 Subject: [PATCH 280/342] Stash --- paddle/framework/var_desc.cc | 8 ++++++++ paddle/framework/var_desc.h | 4 ++++ paddle/pybind/protobuf.cc | 4 +++- 3 files changed, 15 insertions(+), 1 deletion(-) diff --git a/paddle/framework/var_desc.cc b/paddle/framework/var_desc.cc index 13b9c5f3cd..a88e813b5e 100644 --- a/paddle/framework/var_desc.cc +++ b/paddle/framework/var_desc.cc @@ -32,5 +32,13 @@ std::vector VarDescBind::Shape() const { DataType VarDescBind::GetDataType() const { return desc_.lod_tensor().data_type(); } + +void VarDescBind::SetLoDLevel(int32_t lod_level) { + desc_.mutable_lod_tensor()->set_lod_level(lod_level); +} + +int32_t VarDescBind::GetLodLevel() const { + return desc_.lod_tensor().lod_level(); +} } // namespace framework } // namespace paddle diff --git a/paddle/framework/var_desc.h b/paddle/framework/var_desc.h index 4763bf09d0..464fece85f 100644 --- a/paddle/framework/var_desc.h +++ b/paddle/framework/var_desc.h @@ -66,6 +66,10 @@ class VarDescBind { DataType GetDataType() const; + void SetLoDLevel(int32_t lod_level); + + int32_t GetLodLevel() const; + private: VarDesc desc_; }; diff --git a/paddle/pybind/protobuf.cc b/paddle/pybind/protobuf.cc index 218821b35b..7e18bf1c07 100644 --- a/paddle/pybind/protobuf.cc +++ b/paddle/pybind/protobuf.cc @@ -167,7 +167,9 @@ void BindVarDsec(py::module &m) { .def("set_shape", &VarDescBind::SetShape) .def("set_data_type", &VarDescBind::SetDataType) .def("shape", &VarDescBind::Shape, py::return_value_policy::reference) - .def("data_type", &VarDescBind::GetDataType); + .def("data_type", &VarDescBind::GetDataType) + .def("lod_level", &VarDescBind::GetLodLevel) + .def("set_lod_level", &VarDescBind::SetLoDLevel); } void BindOpDesc(py::module &m) { From 92add2a29b4d0e5e5d4abe44d86e3a7c7af23645 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Mon, 9 Oct 2017 13:20:08 -0700 Subject: [PATCH 281/342] Fix compile error in develop branch --- paddle/operators/adamax_op.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/operators/adamax_op.cc b/paddle/operators/adamax_op.cc index c348e0a0b2..5cf727742c 100644 --- a/paddle/operators/adamax_op.cc +++ b/paddle/operators/adamax_op.cc @@ -22,7 +22,7 @@ class AdamaxOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase *ctx) const override { + void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("Param"), "Input(Param) of AdamaxOp should not be null."); PADDLE_ENFORCE(ctx->HasInput("Grad"), From 1e41a675d4111a826ffac45cbd197054d193d72e Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Mon, 9 Oct 2017 13:39:38 -0700 Subject: [PATCH 282/342] Convert np.dtype to core.DataType --- python/paddle/v2/framework/graph.py | 25 +++++++++++++++++-- .../v2/framework/tests/test_variable.py | 22 ++++++++++++++++ 2 files changed, 45 insertions(+), 2 deletions(-) create mode 100644 python/paddle/v2/framework/tests/test_variable.py diff --git a/python/paddle/v2/framework/graph.py b/python/paddle/v2/framework/graph.py index 6f2a76a983..a7a3ca62c7 100644 --- a/python/paddle/v2/framework/graph.py +++ b/python/paddle/v2/framework/graph.py @@ -1,5 +1,6 @@ import paddle.v2.framework.core as core import collections +import numpy as np __all__ = ['Block', 'Variable', 'Program', 'Operator'] @@ -17,11 +18,11 @@ class Variable(object): self.proto.set_shape(shape) if dtype is not None: - # TODO(yuyang18): Convert dtype from numpy.dtype + if not isinstance(dtype, core.DataType): + dtype = Variable._convert_np_dtype_to_dtype_(dtype) self.proto.set_data_type(dtype) if lod_level is not None: - # TODO(yuyang18): set_lod_level is not defined. self.proto.set_lod_level(lod_level) self.block.vars[name] = self @@ -34,6 +35,26 @@ class Variable(object): uid = core.unique_integer() # unique during whole process. return "_generated_var_%d" % uid + @staticmethod + def _convert_np_dtype_to_dtype_(np_dtype): + dtype = np.dtype(np_dtype) + if dtype == np.float32: + return core.DataType.FP32 + elif dtype == np.float64: + return core.DataType.FP64 + elif dtype == np.float16: + return core.DataType.FP16 + elif dtype == np.int32: + return core.DataType.INT32 + elif dtype == np.int16: + return core.DataType.INT16 + elif dtype == np.int64: + return core.DataType.INT64 + elif dtype == np.bool: + return core.DataType.BOOL + else: + raise ValueError("Not supported numpy dtype " + str(dtype)) + class Operator(object): def __init__(self, diff --git a/python/paddle/v2/framework/tests/test_variable.py b/python/paddle/v2/framework/tests/test_variable.py new file mode 100644 index 0000000000..dd23eac0cd --- /dev/null +++ b/python/paddle/v2/framework/tests/test_variable.py @@ -0,0 +1,22 @@ +import unittest +from paddle.v2.framework.graph import Variable +import paddle.v2.framework.core as core +import numpy as np + + +class TestVariable(unittest.TestCase): + def test_np_dtype_convert(self): + DT = core.DataType + convert = Variable._convert_np_dtype_to_dtype_ + self.assertEqual(DT.FP32, convert(np.float32)) + self.assertEqual(DT.FP16, convert("float16")) + self.assertEqual(DT.FP64, convert("float64")) + self.assertEqual(DT.INT32, convert("int32")) + self.assertEqual(DT.INT16, convert("int16")) + self.assertEqual(DT.INT64, convert("int64")) + self.assertEqual(DT.BOOL, convert("bool")) + self.assertRaises(ValueError, lambda: convert("int8")) + + +if __name__ == '__main__': + unittest.main() From 569616b329db71bfc4739021d55e0a74179732e2 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Mon, 9 Oct 2017 14:04:36 -0700 Subject: [PATCH 283/342] Complete Variable for Python API --- python/paddle/v2/framework/graph.py | 59 ++++++++++++++++--- .../v2/framework/tests/test_variable.py | 20 ++++++- 2 files changed, 71 insertions(+), 8 deletions(-) diff --git a/python/paddle/v2/framework/graph.py b/python/paddle/v2/framework/graph.py index a7a3ca62c7..a66e7a9d73 100644 --- a/python/paddle/v2/framework/graph.py +++ b/python/paddle/v2/framework/graph.py @@ -12,23 +12,68 @@ class Variable(object): if name is None: name = Variable._unique_var_name_() - self.proto = self.block.proto.new_var(name) + try: + self.proto = self.block.proto.var(name) + is_new_var = False + except core.EnforceNotMet: + self.proto = self.block.proto.new_var(name) + is_new_var = True if shape is not None: - self.proto.set_shape(shape) - + if is_new_var: + self.proto.set_shape(shape) + else: + old_shape = self.shape + shape = tuple(shape) + if shape != old_shape: + raise ValueError( + "Variable {0} has been created before. the previous " + "shape is {1}; the new shape is {2}. They are not " + "matched.".format(self.name, old_shape, shape)) if dtype is not None: if not isinstance(dtype, core.DataType): dtype = Variable._convert_np_dtype_to_dtype_(dtype) - self.proto.set_data_type(dtype) + if is_new_var: + self.proto.set_data_type(dtype) + else: + old_dtype = self.data_type() + if dtype != old_shape: + raise ValueError("Variable {0} has been created before. " + "The previous data type is {1}; the new " + "data type is {2}. They are not " + "matched.".format(self.name, old_dtype, + dtype)) if lod_level is not None: - self.proto.set_lod_level(lod_level) + if is_new_var: + self.proto.set_lod_level(lod_level) + else: + if lod_level != self.lod_level: + raise ValueError("Variable {0} has been created before. " + "The previous lod_level is {1}; the new " + "lod_level is {2}. They are not " + "matched".format(self.name, self.lod_level, + lod_level)) self.block.vars[name] = self self.op = None - # TODO(yuyang18): Get methods + @property + def name(self): + return self.proto.name() + + @property + def shape(self): + # convert to tuple, make it as same as numpy API. + return tuple(self.proto.shape()) + + @property + def data_type(self): + return self.proto.data_type() + + @property + def lod_level(self): + return self.proto.lod_level() @staticmethod def _unique_var_name_(): @@ -79,7 +124,7 @@ class Operator(object): # TODO pass - # TODO: Getters + # TODO: Getters class Block(object): diff --git a/python/paddle/v2/framework/tests/test_variable.py b/python/paddle/v2/framework/tests/test_variable.py index dd23eac0cd..8ea1083ff6 100644 --- a/python/paddle/v2/framework/tests/test_variable.py +++ b/python/paddle/v2/framework/tests/test_variable.py @@ -1,5 +1,5 @@ import unittest -from paddle.v2.framework.graph import Variable +from paddle.v2.framework.graph import Variable, g_program import paddle.v2.framework.core as core import numpy as np @@ -17,6 +17,24 @@ class TestVariable(unittest.TestCase): self.assertEqual(DT.BOOL, convert("bool")) self.assertRaises(ValueError, lambda: convert("int8")) + def test_var(self): + b = g_program.current_block() + w = b.create_var( + dtype="float64", shape=[784, 100], lod_level=0, name="fc.w") + self.assertEqual(core.DataType.FP64, w.data_type) + self.assertEqual((784, 100), w.shape) + self.assertEqual("fc.w", w.name) + self.assertEqual(0, w.lod_level) + + w = b.create_var(name='fc.w') + self.assertEqual(core.DataType.FP64, w.data_type) + self.assertEqual((784, 100), w.shape) + self.assertEqual("fc.w", w.name) + self.assertEqual(0, w.lod_level) + + self.assertRaises(ValueError, + lambda: b.create_var(name="fc.w", shape=(24, 100))) + if __name__ == '__main__': unittest.main() From dcb09e932d57701b553a5308aaab5b16bf214910 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Mon, 9 Oct 2017 14:21:58 -0700 Subject: [PATCH 284/342] Use PROTO_LITE when refactoring Paddle It will significantly reduce binary size. It is useful for mobile deployment. --- paddle/framework/framework.proto | 1 + paddle/framework/op_desc.h | 2 -- paddle/framework/program_desc.h | 2 -- paddle/operators/net_op.h | 1 + paddle/pybind/protobuf.cc | 3 --- 5 files changed, 2 insertions(+), 7 deletions(-) diff --git a/paddle/framework/framework.proto b/paddle/framework/framework.proto index ac2827e547..b7a63f9ba1 100644 --- a/paddle/framework/framework.proto +++ b/paddle/framework/framework.proto @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto2"; +option optimize_for = LITE_RUNTIME; package paddle.framework; enum AttrType { diff --git a/paddle/framework/op_desc.h b/paddle/framework/op_desc.h index b729029412..d0c314771c 100644 --- a/paddle/framework/op_desc.h +++ b/paddle/framework/op_desc.h @@ -52,8 +52,6 @@ class OpDescBind { void SetOutput(const std::string ¶m_name, const std::vector &args); - std::string DebugString() { return this->Proto()->DebugString(); } - bool HasAttr(const std::string &name) const { return attrs_.find(name) != attrs_.end(); } diff --git a/paddle/framework/program_desc.h b/paddle/framework/program_desc.h index 9b34a06aef..d684b08d16 100644 --- a/paddle/framework/program_desc.h +++ b/paddle/framework/program_desc.h @@ -31,8 +31,6 @@ class ProgramDescBind { BlockDescBind *Block(size_t idx) { return blocks_[idx].get(); } - std::string DebugString() { return Proto()->DebugString(); } - size_t Size() const { return blocks_.size(); } ProgramDesc *Proto(); diff --git a/paddle/operators/net_op.h b/paddle/operators/net_op.h index 2388b094d2..ebeb262d96 100644 --- a/paddle/operators/net_op.h +++ b/paddle/operators/net_op.h @@ -14,6 +14,7 @@ limitations under the License. */ #pragma once +#include #include "paddle/framework/framework.pb.h" #include "paddle/framework/op_registry.h" diff --git a/paddle/pybind/protobuf.cc b/paddle/pybind/protobuf.cc index 218821b35b..47bd7bc3bb 100644 --- a/paddle/pybind/protobuf.cc +++ b/paddle/pybind/protobuf.cc @@ -117,7 +117,6 @@ void BindProgramDesc(py::module &m) { .def("append_block", &ProgramDescBind::AppendBlock, py::return_value_policy::reference) .def("block", &ProgramDescBind::Block, py::return_value_policy::reference) - .def("__str__", &ProgramDescBind::DebugString) .def("num_blocks", &ProgramDescBind::Size); } @@ -191,8 +190,6 @@ void BindOpDesc(py::module &m) { .def("output", &OpDescBind::Output) .def("output_names", &OpDescBind::OutputNames) .def("set_output", &OpDescBind::SetOutput) - .def("__str__", &OpDescBind::DebugString) - .def("__repr__", &OpDescBind::DebugString) .def("has_attr", &OpDescBind::HasAttr) .def("attr_type", &OpDescBind::GetAttrType) .def("attr_names", &OpDescBind::AttrNames) From 383faaf71907605b92feef2ffc91f955623fa82d Mon Sep 17 00:00:00 2001 From: Abhinav Arora Date: Mon, 9 Oct 2017 14:25:38 -0700 Subject: [PATCH 285/342] Adding implementation for copying a vector to a tensor (#4635) * Adding implementation for copying a vector to tensor * Changing Tensor test to access gpu memory indirectly --- paddle/framework/tensor.h | 13 +++++ paddle/framework/tensor_impl.h | 23 +++++++++ paddle/framework/tensor_test.cc | 87 +++++++++++++++++++++++++++++++++ 3 files changed, 123 insertions(+) diff --git a/paddle/framework/tensor.h b/paddle/framework/tensor.h index 80a3f0a393..ba82127d9c 100644 --- a/paddle/framework/tensor.h +++ b/paddle/framework/tensor.h @@ -95,6 +95,19 @@ class Tensor { template inline void CopyFrom(const Tensor& src, const platform::Place& dst_place); + /** + * @brief Copy the content of an external vector to a tensor. + * + * @param[in] src The external vector. + * @param[in] ctx The device context contains place where to store. + * + * * @note CopyFromVector assumes that the tensor has been resized + * before invoking. + */ + template + inline void CopyFromVector(const std::vector& src, + const platform::Place& dst_place); + /** * @brief Return the slice of the tensor. * diff --git a/paddle/framework/tensor_impl.h b/paddle/framework/tensor_impl.h index 379eac94f9..8ee9941982 100644 --- a/paddle/framework/tensor_impl.h +++ b/paddle/framework/tensor_impl.h @@ -123,6 +123,29 @@ inline void Tensor::CopyFrom(const Tensor& src, #endif } +template +inline void Tensor::CopyFromVector(const std::vector& src, + const platform::Place& dst_place) { + auto src_ptr = static_cast(src.data()); + platform::CPUPlace src_place; + auto dst_ptr = static_cast(mutable_data(dst_place)); + auto size = src.size() * sizeof(T); + + if (platform::is_cpu_place(dst_place)) { + memory::Copy(boost::get(dst_place), dst_ptr, src_place, + src_ptr, size); + } +#ifdef PADDLE_WITH_CUDA + else if (platform::is_gpu_place(dst_place)) { + memory::Copy(boost::get(dst_place), dst_ptr, src_place, + src_ptr, size, 0); + } + PADDLE_ENFORCE(cudaStreamSynchronize(0), + "cudaStreamSynchronize failed in Tensor CopyFromVector"); + +#endif +} + template inline Tensor Tensor::Slice(const int& begin_idx, const int& end_idx) const { check_memory_size(); diff --git a/paddle/framework/tensor_test.cc b/paddle/framework/tensor_test.cc index 58cf0fc3cb..492eba69e1 100644 --- a/paddle/framework/tensor_test.cc +++ b/paddle/framework/tensor_test.cc @@ -263,6 +263,93 @@ TEST(Tensor, CopyFrom) { #endif } +TEST(Tensor, CopyFromVector) { + using namespace paddle::framework; + using namespace paddle::platform; + { + std::vector src_vec = {1, 2, 3, 4, 5, 6, 7, 8, 9}; + Tensor cpu_tensor; + + // Copy to CPU Tensor + cpu_tensor.Resize(make_ddim({3, 3})); + auto cpu_place = new paddle::platform::CPUPlace(); + cpu_tensor.CopyFromVector(src_vec, *cpu_place); + + // Compare Tensors + const int* cpu_ptr = cpu_tensor.data(); + const int* src_ptr = src_vec.data(); + ASSERT_NE(src_ptr, cpu_ptr); + for (size_t i = 0; i < 9; ++i) { + EXPECT_EQ(src_ptr[i], cpu_ptr[i]); + } + + src_vec.erase(src_vec.begin(), src_vec.begin() + 5); + cpu_tensor.Resize(make_ddim({2, 2})); + cpu_tensor.CopyFromVector(src_vec, *cpu_place); + cpu_ptr = cpu_tensor.data(); + src_ptr = src_vec.data(); + ASSERT_NE(src_ptr, cpu_ptr); + for (size_t i = 0; i < 5; ++i) { + EXPECT_EQ(src_ptr[i], cpu_ptr[i]); + } + + delete cpu_place; + } + +#ifdef PADDLE_WITH_CUDA + { + std::vector src_vec = {1, 2, 3, 4, 5, 6, 7, 8, 9}; + Tensor cpu_tensor; + Tensor gpu_tensor; + Tensor dst_tensor; + + // Copy to CPU Tensor + cpu_tensor.Resize(make_ddim({3, 3})); + auto cpu_place = new paddle::platform::CPUPlace(); + cpu_tensor.CopyFromVector(src_vec, *cpu_place); + + // Copy to GPUTensor + gpu_tensor.Resize(make_ddim({3, 3})); + auto gpu_place = new paddle::platform::GPUPlace(); + gpu_tensor.CopyFromVector(src_vec, *gpu_place); + // Copy from GPU to CPU tensor for comparison + dst_tensor.CopyFrom(gpu_tensor, *cpu_place); + + // Compare Tensors + const int* src_ptr = src_vec.data(); + const int* cpu_ptr = cpu_tensor.data(); + const int* dst_ptr = dst_tensor.data(); + ASSERT_NE(src_ptr, cpu_ptr); + ASSERT_NE(src_ptr, dst_ptr); + for (size_t i = 0; i < 9; ++i) { + EXPECT_EQ(src_ptr[i], cpu_ptr[i]); + EXPECT_EQ(src_ptr[i], dst_ptr[i]); + } + + src_vec.erase(src_vec.begin(), src_vec.begin() + 5); + + cpu_tensor.Resize(make_ddim({2, 2})); + cpu_tensor.CopyFromVector(src_vec, *cpu_place); + gpu_tensor.Resize(make_ddim({2, 2})); + gpu_tensor.CopyFromVector(src_vec, *gpu_place); + dst_tensor.CopyFrom(gpu_tensor, *cpu_place); + + src_ptr = src_vec.data(); + cpu_ptr = cpu_tensor.data(); + dst_ptr = dst_tensor.data(); + ASSERT_NE(src_ptr, cpu_ptr); + ASSERT_NE(src_ptr, dst_ptr); + for (size_t i = 0; i < 5; ++i) { + EXPECT_EQ(src_ptr[i], cpu_ptr[i]); + EXPECT_EQ(src_ptr[i], dst_ptr[i]); + } + + delete cpu_place; + delete gpu_place; + } +#endif +} + TEST(Tensor, ReshapeToMatrix) { using namespace paddle::framework; using namespace paddle::platform; From 5984cbca47a4663b47b16390fc028829dbc9f183 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Mon, 9 Oct 2017 14:30:31 -0700 Subject: [PATCH 286/342] Add Attr test --- paddle/framework/backward_test.cc | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index 30225a4a99..05ebf356ba 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -440,6 +440,25 @@ TEST(Backward, simple_single_op) { std::vector({f::GradVarName("b")})); } +TEST(Backward, default_attribute) { + f::ProgramDesc *program_desc = GetNewProgramDesc(); + f::ProgramDescBind &program = f::ProgramDescBind::Instance(program_desc); + f::BlockDescBind *block = program.Block(0); + f::OpDescBind *op = block->AppendOp(); + op->SetType("mul"); + op->SetInput("X", {"x"}); + op->SetInput("Y", {"y"}); + op->SetOutput("Out", {"out"}); + + AppendBackward(program, {}); + + ASSERT_EQ(block->AllOps().size(), 2UL); + f::OpDescBind *grad_op = block->AllOps()[1]; + ASSERT_EQ(grad_op->Type(), "mul_grad"); + EXPECT_EQ(boost::get(grad_op->GetAttr("x_num_col_dims")), 1); + EXPECT_EQ(boost::get(grad_op->GetAttr("y_num_col_dims")), 1); +} + TEST(Backward, simple_mult_op) { f::ProgramDesc *program_desc = GetNewProgramDesc(); f::ProgramDescBind &program = f::ProgramDescBind::Instance(program_desc); From 4238b9b95cda29618828a9a477afecb3bbed984e Mon Sep 17 00:00:00 2001 From: zchen0211 Date: Mon, 9 Oct 2017 14:35:45 -0700 Subject: [PATCH 287/342] gan_api --- doc/design/gan_api.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/design/gan_api.md b/doc/design/gan_api.md index 0db18f92a7..4fcff8b70a 100644 --- a/doc/design/gan_api.md +++ b/doc/design/gan_api.md @@ -1,10 +1,10 @@ # Design for GAN -GAN (General Adversarial Net) is an important model for unsupervised learning and widely used in many areas. +GAN (General Adversarial Net [https://arxiv.org/abs/1406.2661]) is an important model for unsupervised learning and widely used in many areas. -It contains several important machine learning concepts, including building and running subgraphs, dependency tracing, different optimizers in one executor and so forth. +It applies several important concepts in machine learning system design, including building and running subgraphs, dependency tracing, different optimizers in one executor and so forth. -In our GAN design, we wrap it as a user-friendly easily customized python API to design different models. We take the conditional DC-GAN as an example due to its good performance on image generation. +In our GAN design, we wrap it as a user-friendly easily customized python API to design different models. We take the conditional DC-GAN (Unsupervised Representation Learning with Deep Convolutional Generative Adversarial Networks [https://arxiv.org/abs/1511.06434]) as an example due to its good performance on image generation. | important building blocks | People in Charge | Required | |---------------------------|-------------------|----------| @@ -51,7 +51,7 @@ Returns a 0/1 binary label. build the whole GAN model, define training loss for both generator and discrimator. ## Discussion on Engine Functions required to build GAN -- Trace the ternsor and variable dependency in the engine executor. (Very critical, otherwise GAN can'be be trained correctly) +- Trace the tensor and variable dependency in the engine executor. (Very critical, otherwise GAN can'be be trained correctly) - Different optimizers responsible for optimizing different loss. To be more detailed, we introduce our design of DCGAN as following: From ce901b1186b671781cd86b91ce530e2be3408f37 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Mon, 9 Oct 2017 15:16:34 -0700 Subject: [PATCH 288/342] Refine unit test --- paddle/framework/backward_test.cc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index 05ebf356ba..3b7cbcd989 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -58,6 +58,8 @@ class MulOpMaker : public OpProtoAndCheckerMaker { AddInput("X", "A"); AddInput("Y", "B"); AddOutput("Out", "Out"); + AddAttr("x_num_col_dims", "").SetDefault(1).EqualGreaterThan(1); + AddAttr("y_num_col_dims", "").SetDefault(1).EqualGreaterThan(1); AddComment("Mul"); } }; @@ -453,6 +455,9 @@ TEST(Backward, default_attribute) { AppendBackward(program, {}); ASSERT_EQ(block->AllOps().size(), 2UL); + EXPECT_EQ(boost::get(op->GetAttr("x_num_col_dims")), 1); + EXPECT_EQ(boost::get(op->GetAttr("y_num_col_dims")), 1); + f::OpDescBind *grad_op = block->AllOps()[1]; ASSERT_EQ(grad_op->Type(), "mul_grad"); EXPECT_EQ(boost::get(grad_op->GetAttr("x_num_col_dims")), 1); From e51557130e91383afb0e54dee00710664c9bf555 Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Mon, 9 Oct 2017 22:57:11 +0000 Subject: [PATCH 289/342] clean up for review --- paddle/framework/executor.cc | 40 ++++++++++++++------- paddle/framework/executor.h | 2 +- paddle/framework/executor_test.cc | 60 +++++++++++++------------------ paddle/framework/scope.cc | 1 + paddle/operators/feed_op.cc | 1 + paddle/operators/fetch_op.cc | 1 + paddle/platform/gpu_info.cc | 2 +- 7 files changed, 56 insertions(+), 51 deletions(-) diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc index c6c9d13469..3ac752388f 100644 --- a/paddle/framework/executor.cc +++ b/paddle/framework/executor.cc @@ -13,11 +13,13 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/framework/executor.h" + #include #include #include #include #include + #include "paddle/framework/lod_tensor.h" #include "paddle/framework/op_registry.h" #include "paddle/framework/scope.h" @@ -27,7 +29,11 @@ limitations under the License. */ namespace paddle { namespace framework { +const std::string kFeedOpType = "feed"; +const std::string kFetchOpType = "fetch"; + Executor::Executor(const std::vector& places) { + PADDLE_ENFORCE_GT(places.size(), 0); device_contexts_.resize(places.size()); for (size_t i = 0; i < places.size(); i++) { if (platform::is_cpu_place(places[i])) { @@ -46,9 +52,7 @@ Executor::Executor(const std::vector& places) { Executor::~Executor() { for (auto& device_context : device_contexts_) { - if (device_context) { - delete device_context; - } + delete device_context; } } @@ -56,6 +60,8 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope) { // TODO(tonyyang-svail): // - only runs the first block (i.e. no RNN support) // - only runs on the first device (i.e. no interdevice communication) + // - will change to use multiple blocks for RNN op and Cond Op + PADDLE_ENFORCE_GT(pdesc.blocks_size(), 0); auto& block = pdesc.blocks(0); auto& device = device_contexts_[0]; @@ -66,12 +72,12 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope) { Scope& local_scope = scope->NewScope(); - std::vector should_run = Preprocess(pdesc); - PADDLE_ENFORCE(should_run.size() == block.ops_size()); + std::vector should_run = Prune(pdesc); + PADDLE_ENFORCE_EQ(should_run.size(), block.ops_size()); for (size_t i = 0; i < should_run.size(); ++i) { if (should_run[i]) { - for (auto var : block.ops(i).outputs()) { - for (auto argu : var.arguments()) { + for (auto& var : block.ops(i).outputs()) { + for (auto& argu : var.arguments()) { if (local_scope.FindVar(argu) == nullptr) { local_scope.NewVar(argu); } @@ -81,28 +87,32 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope) { op->Run(local_scope, *device); } } + + // TODO(tonyyang-svail): + // - Destroy local_scope } -std::vector Executor::Preprocess(const ProgramDesc& pdesc) { +std::vector Executor::Prune(const ProgramDesc& pdesc) { // TODO(tonyyang-svail): // - only runs the first block + // - will change to use multiple blocks for RNN op and Cond Op auto& block = pdesc.blocks(0); auto& ops = block.ops(); bool expect_feed = true; for (auto& op_desc : ops) { - PADDLE_ENFORCE(op_desc.type() != "feed" || expect_feed, + PADDLE_ENFORCE(op_desc.type() != kFeedOpType || expect_feed, "All FeedOps are at the beginning of the ProgramDesc"); - expect_feed = (op_desc.type() == "feed"); + expect_feed = (op_desc.type() == kFeedOpType); } bool expect_fetch = true; for (auto op_iter = ops.rbegin(); op_iter != ops.rend(); ++op_iter) { auto& op_desc = *op_iter; - PADDLE_ENFORCE(op_desc.type() != "fetch" || expect_fetch, + PADDLE_ENFORCE(op_desc.type() != kFetchOpType || expect_fetch, "All FetchOps must at the end of the ProgramDesc"); - expect_fetch = (op_desc.type() == "fetch"); + expect_fetch = (op_desc.type() == kFetchOpType); } std::set dependent_vars; @@ -119,7 +129,7 @@ std::vector Executor::Preprocess(const ProgramDesc& pdesc) { } } - if (op_desc.type() == "fetch" || found_dependent_vars) { + if (op_desc.type() == kFetchOpType || found_dependent_vars) { // erase its output to the dependency graph for (auto& var : op_desc.outputs()) { for (auto& argu : var.arguments()) { @@ -140,6 +150,10 @@ std::vector Executor::Preprocess(const ProgramDesc& pdesc) { } } + // TODO(tonyyang-svail): + // - check this after integration of Init + // PADDLE_ENFORCE(dependent_vars.empty()); + // since we are traversing the ProgramDesc in reverse order // we reverse the should_run vector std::reverse(should_run.begin(), should_run.end()); diff --git a/paddle/framework/executor.h b/paddle/framework/executor.h index 75cb5939ff..f832b0d7d6 100644 --- a/paddle/framework/executor.h +++ b/paddle/framework/executor.h @@ -46,7 +46,7 @@ class Executor { * @return * vector Same size as ops. Indicates whether an op should be run. */ - std::vector Preprocess(const ProgramDesc& pdesc); + std::vector Prune(const ProgramDesc& pdesc); private: std::vector device_contexts_; diff --git a/paddle/framework/executor_test.cc b/paddle/framework/executor_test.cc index 99f80d04e8..f28651e809 100644 --- a/paddle/framework/executor_test.cc +++ b/paddle/framework/executor_test.cc @@ -13,12 +13,14 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/framework/executor.h" + +#include #include + #include "gtest/gtest.h" #include "paddle/framework/attribute.h" #include "paddle/framework/backward.h" #include "paddle/framework/block_desc.h" -// #include "paddle/framework/grad_op_builder.h" #include "paddle/framework/op_desc.h" #include "paddle/framework/op_registry.h" #include "paddle/framework/operator.h" @@ -34,9 +36,6 @@ using std::string; using namespace paddle::platform; using namespace paddle::framework; -typedef paddle::framework::BlockDesc proto_block; -typedef paddle::framework::OpDesc proto_op; - void AddOp(const std::string& type, const VariableNameMap& inputs, const VariableNameMap& outputs, AttributeMap attrs, paddle::framework::BlockDescBind* block) { @@ -51,10 +50,10 @@ void AddOp(const std::string& type, const VariableNameMap& inputs, // insert op auto op = block->AppendOp(); op->SetType(type); - for (auto kv : inputs) { + for (auto& kv : inputs) { op->SetInput(kv.first, kv.second); } - for (auto kv : outputs) { + for (auto& kv : outputs) { op->SetOutput(kv.first, kv.second); } op->SetAttrMap(attrs); @@ -65,11 +64,11 @@ std::once_flag set_variable_flag; // Tensors in feed value variable will only be in CPUPlace // So we can memcpy the data from vector to feed_value template -void set_feed_variable(const std::vector>& inputs) { +void SetFeedVariable(const std::vector>& inputs) { typedef std::vector FeedInputs; Variable* g_feed_value = GetGlobalScope()->FindVar("feed_value"); FeedInputs& feed_inputs = *(g_feed_value->GetMutable()); - auto size = inputs.size(); + size_t size = inputs.size(); feed_inputs.resize(size); for (size_t i = 0; i < size; i++) { T* dst = feed_inputs[i].mutable_data( @@ -81,12 +80,12 @@ void set_feed_variable(const std::vector>& inputs) { // Tensors in fetch value variable will only be in CPUPlace // So we can memcpy the data from fetch_value to vector template -std::vector> get_fetch_variable() { +std::vector> GetFetchVariable() { typedef std::vector FetchOutputs; Variable* g_fetch_value = GetGlobalScope()->FindVar("fetch_value"); FetchOutputs& fetch_outputs = *(g_fetch_value->GetMutable()); - auto size = fetch_outputs.size(); + size_t size = fetch_outputs.size(); std::vector> result; result.reserve(size); for (size_t i = 0; i < size; i++) { @@ -105,7 +104,7 @@ class ExecutorTesterRandom : public ::testing::Test { virtual void SetUp() override { int input_dim = 5, batch_size = 2, embed_dim = 5; - // init pdesc ----------------------------------------- + // init pdesc auto temp_init_root_block = init_pdesc_.add_blocks(); temp_init_root_block->set_idx(0); temp_init_root_block->set_parent_idx(-1); @@ -128,7 +127,7 @@ class ExecutorTesterRandom : public ::testing::Test { // flush init_program.Proto(); - // run pdesc ----------------------------------------- + // run pdesc auto temp_root_block = pdesc_.add_blocks(); temp_root_block->set_idx(0); temp_root_block->set_parent_idx(-1); @@ -154,9 +153,6 @@ class ExecutorTesterRandom : public ::testing::Test { // TODO(tonyyang-svail): // - Test with Backward - // AddOp("gaussian_random", {}, {{"Out", {"l2_distance@GRAD"}}}, - // {{"dims", std::vector{batch_size, 1}}}, root_block); - // AppendBackward(program, {}); } protected: @@ -213,12 +209,11 @@ TEST_F(ExecutorTesterRandom, CPU) { // "pointer being freed was not allocated" error will appear. paddle::memory::Used(cpu_place); - Executor* executor = new Executor(places); + std::unique_ptr executor(new Executor(places)); + executor->Run(init_pdesc_, GetGlobalScope()); executor->Run(pdesc_, GetGlobalScope()); - std::vector> result = get_fetch_variable(); - - delete executor; + std::vector> result = GetFetchVariable(); } TEST_F(ExecutorTesterFeedAndFetch, CPU) { @@ -232,13 +227,12 @@ TEST_F(ExecutorTesterFeedAndFetch, CPU) { // "pointer being freed was not allocated" error will appear. paddle::memory::Used(cpu_place); - Executor* executor = new Executor(places); + std::unique_ptr executor(new Executor(places)); - // 3 mini-batch - for (int i = 0; i < 3; i++) { - set_feed_variable(inputs_); + for (int batch_id = 0; batch_id < 3; batch_id++) { + SetFeedVariable(inputs_); executor->Run(pdesc_, GetGlobalScope()); - std::vector> result = get_fetch_variable(); + std::vector> result = GetFetchVariable(); PADDLE_ENFORCE_EQ(result.size(), inputs_.size()); for (size_t i = 0; i < result.size(); ++i) { PADDLE_ENFORCE_EQ(result[i].size(), inputs_[i].size()); @@ -247,8 +241,6 @@ TEST_F(ExecutorTesterFeedAndFetch, CPU) { } } } - - delete executor; } #else TEST_F(ExecutorTesterRandom, GPU) { @@ -265,13 +257,11 @@ TEST_F(ExecutorTesterRandom, GPU) { paddle::memory::Used(CPUPlace()); paddle::memory::Used(gpu_place); - Executor* executor = new Executor(places); + std::unique_ptr executor(new Executor(places)); executor->Run(init_pdesc_, GetGlobalScope()); executor->Run(pdesc_, GetGlobalScope()); - std::vector> result = get_fetch_variable(); - - delete executor; + std::vector> result = GetFetchVariable(); } TEST_F(ExecutorTesterFeedAndFetch, GPU) { @@ -287,13 +277,12 @@ TEST_F(ExecutorTesterFeedAndFetch, GPU) { paddle::memory::Used(CPUPlace()); paddle::memory::Used(gpu_place); - Executor* executor = new Executor(places); + std::unique_ptr executor(new Executor(places)); - // 3 mini-batch - for (int i = 0; i < 3; i++) { - set_feed_variable(inputs_); + for (int batch_id = 0; batch_id < 3; batch_id++) { + SetFeedVariable(inputs_); executor->Run(pdesc_, GetGlobalScope()); - std::vector> result = get_fetch_variable(); + std::vector> result = GetFetchVariable(); PADDLE_ENFORCE_EQ(result.size(), inputs_.size()); for (size_t i = 0; i < result.size(); ++i) { PADDLE_ENFORCE_EQ(result[i].size(), inputs_[i].size()); @@ -302,6 +291,5 @@ TEST_F(ExecutorTesterFeedAndFetch, GPU) { } } } - delete executor; } #endif diff --git a/paddle/framework/scope.cc b/paddle/framework/scope.cc index 2a0d9bbf33..c9e53a0d85 100644 --- a/paddle/framework/scope.cc +++ b/paddle/framework/scope.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/framework/scope.h" + #include // for unique_ptr #include // for call_once #include "paddle/string/printf.h" diff --git a/paddle/operators/feed_op.cc b/paddle/operators/feed_op.cc index b9e43be966..dcd5f7fb77 100644 --- a/paddle/operators/feed_op.cc +++ b/paddle/operators/feed_op.cc @@ -31,6 +31,7 @@ class FeedOp : public framework::OperatorWithKernel { const FeedInputs& tensors = g_feed_variable->Get(); + PADDLE_ENFORCE_GT(tensors.size(), col); auto in_dim = tensors[col].dims(); ctx->SetOutputDim("Out", in_dim); // TODO(qijun): need to handle LodTensor later diff --git a/paddle/operators/fetch_op.cc b/paddle/operators/fetch_op.cc index 7bde4953cd..5adb83144a 100644 --- a/paddle/operators/fetch_op.cc +++ b/paddle/operators/fetch_op.cc @@ -35,6 +35,7 @@ class FetchOp : public framework::OperatorWithKernel { } auto input_dim = ctx->GetInputDim("Input"); + PADDLE_ENFORCE_GT(tensors->size(), col); (*tensors)[col].Resize(input_dim); // TODO(qijun): need to handle LodTensor later diff --git a/paddle/platform/gpu_info.cc b/paddle/platform/gpu_info.cc index aa76bb209d..0cab5ffc56 100644 --- a/paddle/platform/gpu_info.cc +++ b/paddle/platform/gpu_info.cc @@ -44,7 +44,7 @@ int GetCurrentDeviceId() { void SetDeviceId(int id) { // TODO(qijun): find a better way to cache the cuda device count - PADDLE_ENFORCE(id < GetCUDADeviceCount(), "id must less than GPU count"); + PADDLE_ENFORCE_LT(id, GetCUDADeviceCount(), "id must less than GPU count"); PADDLE_ENFORCE(cudaSetDevice(id), "cudaSetDevice failed in paddle::platform::SetDeviceId"); } From d2217ffa39f30ec719e93cd2cb6b9449467bcbc1 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Mon, 9 Oct 2017 16:03:39 -0700 Subject: [PATCH 290/342] Fix CI --- paddle/framework/block_desc.h | 1 + paddle/framework/program_desc.h | 1 + 2 files changed, 2 insertions(+) diff --git a/paddle/framework/block_desc.h b/paddle/framework/block_desc.h index 2de270f60e..3437e89923 100644 --- a/paddle/framework/block_desc.h +++ b/paddle/framework/block_desc.h @@ -15,6 +15,7 @@ limitations under the License. */ #pragma once #include +#include #include #include #include "paddle/framework/op_desc.h" diff --git a/paddle/framework/program_desc.h b/paddle/framework/program_desc.h index d684b08d16..f29b1c54e7 100644 --- a/paddle/framework/program_desc.h +++ b/paddle/framework/program_desc.h @@ -14,6 +14,7 @@ limitations under the License. */ #pragma once +#include #include #include "paddle/framework/framework.pb.h" #include "paddle/platform/macros.h" From 4767fb6719694ee400d3a6c9344aa21edde8bd36 Mon Sep 17 00:00:00 2001 From: zchen0211 Date: Mon, 9 Oct 2017 16:05:14 -0700 Subject: [PATCH 291/342] gan api modified --- doc/design/gan_api.md | 67 ++++++++++++++++++++++++++++++++++--------- 1 file changed, 53 insertions(+), 14 deletions(-) diff --git a/doc/design/gan_api.md b/doc/design/gan_api.md index 4fcff8b70a..77c867bac7 100644 --- a/doc/design/gan_api.md +++ b/doc/design/gan_api.md @@ -139,10 +139,10 @@ class DCGAN(object): - Define data readers as placeholders to hold the data; - Build generator and discriminators; - Define two training losses for discriminator and generator, respectively. +If we have execution dependency engine to back-trace all tensors, the module building our GAN model will be like this: ```python class DCGAN(object): def build_model(self): - # input data if self.y_dim: self.y = pd.data(pd.float32, [self.batch_size, self.y_dim]) self.images = pd.data(pd.float32, [self.batch_size, self.im_size, self.im_size]) @@ -151,17 +151,17 @@ class DCGAN(object): # step 1: generate images by generator, classify real/fake images with discriminator if self.y_dim: # if conditional GAN, includes label - self.G = self.generator(self.z, self.y) - self.D_t = self.discriminator(self.images) - # generated fake images - self.sampled = self.sampler(self.z, self.y) - self.D_f = self.discriminator(self.images) + self.G = self.generator(self.z, self.y) + self.D_t = self.discriminator(self.images) + # generated fake images + self.sampled = self.sampler(self.z, self.y) + self.D_f = self.discriminator(self.G) else: # original version of GAN - self.G = self.generator(self.z) - self.D_t = self.discriminator(self.images) - # generate fake images - self.sampled = self.sampler(self.z) - self.D_f = self.discriminator(self.images) + self.G = self.generator(self.z) + self.D_t = self.discriminator(self.images) + # generate fake images + self.sampled = self.sampler(self.z) + self.D_f = self.discriminator(self.images) # step 2: define the two losses self.d_loss_real = pd.reduce_mean(pd.cross_entropy(self.D_t, np.ones(self.batch_size)) @@ -171,6 +171,44 @@ class DCGAN(object): self.g_loss = pd.reduce_mean(pd.cross_entropy(self.D_f, np.ones(self.batch_szie)) ``` +If we do not have dependency engine but blocks, the module building our GAN model will be like this: +```python +class DCGAN(object): + def build_model(self, default_block): + # input data in the default block + if self.y_dim: + self.y = pd.data(pd.float32, [self.batch_size, self.y_dim]) + self.images = pd.data(pd.float32, [self.batch_size, self.im_size, self.im_size]) + # self.faked_images = pd.data(pd.float32, [self.batch_size, self.im_size, self.im_size]) + self.z = pd.data(tf.float32, [None, self.z_size]) + + # step 1: generate images by generator, classify real/fake images with discriminator + with pd.default_block().g_block(): + if self.y_dim: # if conditional GAN, includes label + self.G = self.generator(self.z, self.y) + self.D_g = self.discriminator(self.G, self.y) + else: # original version of GAN + self.G = self.generator(self.z) + self.D_g = self.discriminator(self.G, self.y) + self.g_loss = pd.reduce_mean(pd.cross_entropy(self.D_g, np.ones(self.batch_szie)) + + with pd.default_block().d_block(): + if self.y_dim: # if conditional GAN, includes label + self.D_t = self.discriminator(self.images, self.y) + self.D_f = self.discriminator(self.G, self.y) + else: # original version of GAN + self.D_t = self.discriminator(self.images) + self.D_f = self.discriminator(self.G) + + # step 2: define the two losses + self.d_loss_real = pd.reduce_mean(pd.cross_entropy(self.D_t, np.ones(self.batch_size)) + self.d_loss_fake = pd.reduce_mean(pd.cross_entropy(self.D_f, np.zeros(self.batch_size)) + self.d_loss = self.d_loss_real + self.d_loss_fake +``` +Some small confusion and problems with this design: +- D\_g and D\_f are actually the same thing, but has to be written twice; +- Requires ability to create a block anytime, rather than in if-else or rnn only; + ## Main function for the demo: Generally, the user of GAN just need to the following things: - Define an object as DCGAN class; @@ -183,9 +221,10 @@ import numpy as np import logging if __name__ == "__main__": - # dcgan - dcgan = DCGAN() - dcgan.build_model() + # dcgan class in the default graph/block + with pd.block() as def_block: + dcgan = DCGAN() + dcgan.build_model(def_block) # load mnist data data_X, data_y = self.load_mnist() From 35a5b9b99756188f2782ed19b4eaca57cb44ceea Mon Sep 17 00:00:00 2001 From: zchen0211 Date: Mon, 9 Oct 2017 16:22:49 -0700 Subject: [PATCH 292/342] gan api --- doc/design/gan_api.md | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/doc/design/gan_api.md b/doc/design/gan_api.md index 77c867bac7..ed7622920b 100644 --- a/doc/design/gan_api.md +++ b/doc/design/gan_api.md @@ -222,6 +222,10 @@ import logging if __name__ == "__main__": # dcgan class in the default graph/block + # if we use dependency engine as tensorflow + # the codes, will be slightly different like: + # dcgan = DCGAN() + # dcgan.build_model() with pd.block() as def_block: dcgan = DCGAN() dcgan.build_model(def_block) @@ -230,8 +234,12 @@ if __name__ == "__main__": data_X, data_y = self.load_mnist() # Two subgraphs required!!! - d_optim = pd.train.Adam(lr = .001, beta= .1).minimize(dcgan.d_loss, dcgan.theta_D) - g_optim = pd.train.Adam(lr = .001, beta= .1).minimize(dcgan.g_loss, dcgan.theta_G) + with pd.block().d_block(): + d_optim = pd.train.Adam(lr = .001, beta= .1) + d_step = d_optim.minimize(dcgan.d_loss, dcgan.theta_D) + with pd.block.g_block(): + g_optim = pd.train.Adam(lr = .001, beta= .1) + g_step = pd.minimize(dcgan.g_loss, dcgan.theta_G) # executor sess = pd.executor() @@ -246,11 +254,11 @@ if __name__ == "__main__": batch_z = np.random.uniform(-1., 1., [batch_size, z_dim]) if batch_id % 2 == 0: - sess.run(d_optim, + sess.run(d_step, feed_dict = {dcgan.images: batch_im, dcgan.y: batch_label, dcgan.z: batch_z}) else: - sess.run(g_optim, + sess.run(g_step, feed_dict = {dcgan.z: batch_z}) ``` From 67c77b57fc8ad8eff7e9e46e842aa54b1e39047a Mon Sep 17 00:00:00 2001 From: zchen0211 Date: Mon, 9 Oct 2017 16:47:05 -0700 Subject: [PATCH 293/342] gan api --- doc/design/gan_api.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/design/gan_api.md b/doc/design/gan_api.md index ed7622920b..689f359564 100644 --- a/doc/design/gan_api.md +++ b/doc/design/gan_api.md @@ -206,7 +206,7 @@ class DCGAN(object): self.d_loss = self.d_loss_real + self.d_loss_fake ``` Some small confusion and problems with this design: -- D\_g and D\_f are actually the same thing, but has to be written twice; +- D\_g and D\_f are actually the same thing, but has to be written twice; i.e., if we want to run two sub-graphs conceptually, the same codes have to be written twice if they are shared by the graph. - Requires ability to create a block anytime, rather than in if-else or rnn only; ## Main function for the demo: From 340d21d4ed7d8f0f2cc511b6480771965234570e Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Tue, 10 Oct 2017 00:02:47 +0000 Subject: [PATCH 294/342] Init at block[0]; Run at block[1] --- paddle/framework/executor.cc | 16 ++++---- paddle/framework/executor.h | 4 +- paddle/framework/executor_test.cc | 63 +++++++++++++------------------ 3 files changed, 36 insertions(+), 47 deletions(-) diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc index 3ac752388f..bbc7f77a94 100644 --- a/paddle/framework/executor.cc +++ b/paddle/framework/executor.cc @@ -56,13 +56,12 @@ Executor::~Executor() { } } -void Executor::Run(const ProgramDesc& pdesc, Scope* scope) { +void Executor::Run(const ProgramDesc& pdesc, Scope* scope, int block_id) { // TODO(tonyyang-svail): - // - only runs the first block (i.e. no RNN support) // - only runs on the first device (i.e. no interdevice communication) // - will change to use multiple blocks for RNN op and Cond Op - PADDLE_ENFORCE_GT(pdesc.blocks_size(), 0); - auto& block = pdesc.blocks(0); + PADDLE_ENFORCE_GT(pdesc.blocks_size(), block_id); + auto& block = pdesc.blocks(block_id); auto& device = device_contexts_[0]; // Instantiate all the vars in the global scope @@ -72,7 +71,7 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope) { Scope& local_scope = scope->NewScope(); - std::vector should_run = Prune(pdesc); + std::vector should_run = Prune(pdesc, block_id); PADDLE_ENFORCE_EQ(should_run.size(), block.ops_size()); for (size_t i = 0; i < should_run.size(); ++i) { if (should_run[i]) { @@ -92,12 +91,11 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope) { // - Destroy local_scope } -std::vector Executor::Prune(const ProgramDesc& pdesc) { +std::vector Executor::Prune(const ProgramDesc& pdesc, int block_id) { // TODO(tonyyang-svail): - // - only runs the first block // - will change to use multiple blocks for RNN op and Cond Op - auto& block = pdesc.blocks(0); + auto& block = pdesc.blocks(block_id); auto& ops = block.ops(); bool expect_feed = true; @@ -144,8 +142,10 @@ std::vector Executor::Prune(const ProgramDesc& pdesc) { } } + LOG(INFO) << "1 " << op_desc.type(); should_run.push_back(true); } else { + LOG(INFO) << "0 " << op_desc.type(); should_run.push_back(false); } } diff --git a/paddle/framework/executor.h b/paddle/framework/executor.h index f832b0d7d6..7fac4f4f46 100644 --- a/paddle/framework/executor.h +++ b/paddle/framework/executor.h @@ -34,7 +34,7 @@ class Executor { * ProgramDesc * Scope */ - void Run(const ProgramDesc&, Scope*); + void Run(const ProgramDesc&, Scope*, int); protected: /* @Brief @@ -46,7 +46,7 @@ class Executor { * @return * vector Same size as ops. Indicates whether an op should be run. */ - std::vector Prune(const ProgramDesc& pdesc); + std::vector Prune(const ProgramDesc& pdesc, int block_id); private: std::vector device_contexts_; diff --git a/paddle/framework/executor_test.cc b/paddle/framework/executor_test.cc index f28651e809..b64ba1c98f 100644 --- a/paddle/framework/executor_test.cc +++ b/paddle/framework/executor_test.cc @@ -104,50 +104,40 @@ class ExecutorTesterRandom : public ::testing::Test { virtual void SetUp() override { int input_dim = 5, batch_size = 2, embed_dim = 5; - // init pdesc - auto temp_init_root_block = init_pdesc_.add_blocks(); - temp_init_root_block->set_idx(0); - temp_init_root_block->set_parent_idx(-1); - - // wrap to BlockDescBind - paddle::framework::ProgramDescBind& init_program = - paddle::framework::ProgramDescBind::Instance(&init_pdesc_); - paddle::framework::BlockDescBind* init_root_block = init_program.Block(0); + auto temp_root_block = pdesc_.add_blocks(); + temp_root_block->set_idx(0); + temp_root_block->set_parent_idx(-1); + paddle::framework::ProgramDescBind& program = + paddle::framework::ProgramDescBind::Instance(&pdesc_); + paddle::framework::BlockDescBind* root_block = program.Block(0); + // block[0] AddOp("gaussian_random", {}, {{"Out", {"w1"}}}, - {{"dims", std::vector{input_dim, embed_dim}}}, init_root_block); + {{"dims", std::vector{input_dim, embed_dim}}}, root_block); AddOp("gaussian_random", {}, {{"Out", {"w2"}}}, - {{"dims", std::vector{embed_dim, input_dim}}}, init_root_block); + {{"dims", std::vector{embed_dim, input_dim}}}, root_block); AddOp("fetch", {{"Input", {"w1"}}}, {}, {{"dims", std::vector{input_dim, embed_dim}}, {"col", 0}}, - init_root_block); + root_block); AddOp("fetch", {{"Input", {"w2"}}}, {}, {{"dims", std::vector{embed_dim, input_dim}}, {"col", 1}}, - init_root_block); - // flush - init_program.Proto(); - - // run pdesc - auto temp_root_block = pdesc_.add_blocks(); - temp_root_block->set_idx(0); - temp_root_block->set_parent_idx(-1); - - // wrap to BlockDescBind - paddle::framework::ProgramDescBind& program = - paddle::framework::ProgramDescBind::Instance(&pdesc_); - paddle::framework::BlockDescBind* root_block = program.Block(0); + root_block); + // block[1] + paddle::framework::BlockDescBind* run_block = + program.AppendBlock(*root_block); AddOp("gaussian_random", {}, {{"Out", {"a"}}}, - {{"dims", std::vector{batch_size, input_dim}}}, root_block); + {{"dims", std::vector{batch_size, input_dim}}}, run_block); AddOp("mul", {{"X", {"a"}}, {"Y", {"w1"}}}, {{"Out", {"b"}}}, {}, - root_block); + run_block); AddOp("mul", {{"X", {"b"}}, {"Y", {"w2"}}}, {{"Out", {"a_out"}}}, {}, - root_block); + run_block); AddOp("squared_l2_distance", {{"X", {"a"}}, {"Y", {"a_out"}}}, {{"Out", {"l2_distance"}}, {"sub_result", {"l2_distance_sub"}}}, {}, - root_block); + run_block); AddOp("fetch", {{"Input", {"l2_distance"}}}, {}, - {{"dims", std::vector{batch_size}}, {"col", 1}}, root_block); + {{"dims", std::vector{batch_size}}, {"col", 1}}, run_block); + // flush program.Proto(); @@ -157,7 +147,6 @@ class ExecutorTesterRandom : public ::testing::Test { protected: ProgramDesc pdesc_; - ProgramDesc init_pdesc_; }; class ExecutorTesterFeedAndFetch : public ::testing::Test { @@ -211,8 +200,8 @@ TEST_F(ExecutorTesterRandom, CPU) { std::unique_ptr executor(new Executor(places)); - executor->Run(init_pdesc_, GetGlobalScope()); - executor->Run(pdesc_, GetGlobalScope()); + executor->Run(pdesc_, GetGlobalScope(), 0); + executor->Run(pdesc_, GetGlobalScope(), 1); std::vector> result = GetFetchVariable(); } @@ -231,7 +220,7 @@ TEST_F(ExecutorTesterFeedAndFetch, CPU) { for (int batch_id = 0; batch_id < 3; batch_id++) { SetFeedVariable(inputs_); - executor->Run(pdesc_, GetGlobalScope()); + executor->Run(pdesc_, GetGlobalScope(), 0); std::vector> result = GetFetchVariable(); PADDLE_ENFORCE_EQ(result.size(), inputs_.size()); for (size_t i = 0; i < result.size(); ++i) { @@ -259,8 +248,8 @@ TEST_F(ExecutorTesterRandom, GPU) { std::unique_ptr executor(new Executor(places)); - executor->Run(init_pdesc_, GetGlobalScope()); - executor->Run(pdesc_, GetGlobalScope()); + executor->Run(pdesc_, GetGlobalScope(), 0); + executor->Run(pdesc_, GetGlobalScope(), 1); std::vector> result = GetFetchVariable(); } @@ -281,7 +270,7 @@ TEST_F(ExecutorTesterFeedAndFetch, GPU) { for (int batch_id = 0; batch_id < 3; batch_id++) { SetFeedVariable(inputs_); - executor->Run(pdesc_, GetGlobalScope()); + executor->Run(pdesc_, GetGlobalScope(), 0); std::vector> result = GetFetchVariable(); PADDLE_ENFORCE_EQ(result.size(), inputs_.size()); for (size_t i = 0; i < result.size(); ++i) { From 63912dcc198729b5e29e9080da6d76e649fd9394 Mon Sep 17 00:00:00 2001 From: zchen0211 Date: Mon, 9 Oct 2017 17:09:04 -0700 Subject: [PATCH 295/342] gan design --- doc/design/gan_api.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/doc/design/gan_api.md b/doc/design/gan_api.md index 689f359564..1a7d0df116 100644 --- a/doc/design/gan_api.md +++ b/doc/design/gan_api.md @@ -262,3 +262,7 @@ if __name__ == "__main__": sess.run(g_step, feed_dict = {dcgan.z: batch_z}) ``` + +# More thinking about dependency engine v.s. block design: +- What if we just want to run an intermediate result? Do we need to run the whole block/graph? +- Should we call eval() to get the fake images in the first stage? And then train the discriminator in the second stage? From 6efacc14d857bd117d5918bf02afc9cca702bd78 Mon Sep 17 00:00:00 2001 From: Abhinav Arora Date: Mon, 9 Oct 2017 17:24:21 -0700 Subject: [PATCH 296/342] Implementing the fill constant op for the executor --- paddle/operators/fill_constant_op.cc | 68 +++++++++++++++++++ paddle/operators/fill_constant_op.cu | 22 ++++++ paddle/operators/fill_constant_op.h | 37 ++++++++++ .../framework/tests/test_fill_constant_op.py | 35 ++++++++++ 4 files changed, 162 insertions(+) create mode 100644 paddle/operators/fill_constant_op.cc create mode 100644 paddle/operators/fill_constant_op.cu create mode 100644 paddle/operators/fill_constant_op.h create mode 100644 python/paddle/v2/framework/tests/test_fill_constant_op.py diff --git a/paddle/operators/fill_constant_op.cc b/paddle/operators/fill_constant_op.cc new file mode 100644 index 0000000000..65d03d5fa4 --- /dev/null +++ b/paddle/operators/fill_constant_op.cc @@ -0,0 +1,68 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/fill_constant_op.h" + +namespace paddle { +namespace operators { + +class FillConstantOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(framework::InferShapeContext *ctx) const override { + PADDLE_ENFORCE(ctx->HasOutput("Out"), + "Output(Out) of FillConstantOp should not be null."); + auto &shape = ctx->Attrs().Get>("shape"); + std::vector shape_int64(shape.size(), 0); + std::transform(shape.begin(), shape.end(), shape_int64.begin(), + [](int a) { return static_cast(a); }); + auto dims = framework::make_ddim(shape_int64); + ctx->SetOutputDim("Out", dims); + } + + framework::DataType IndicateDataType( + const framework::ExecutionContext &ctx) const override { + return static_cast(ctx.Attr("dataType")); + } +}; + +class FillConstantOpMaker : public framework::OpProtoAndCheckerMaker { + public: + FillConstantOpMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) + : framework::OpProtoAndCheckerMaker(proto, op_checker) { + AddAttr("dataType", + "(int, default 5 (FP32)) " + "Output data type") + .SetDefault(framework::DataType::FP32); + AddAttr>("shape", "(vector) The shape of the output"); + AddAttr("value", "(float, default 0) The value to be filled") + .SetDefault(0.0f); + AddOutput("Out", + "(Tensor) Tensor of specified shape will be filled " + "with the specified value"); + AddComment(R"DOC(Fill up a variable with specified constant value.)DOC"); + } +}; +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP_WITHOUT_GRADIENT(fill_constant, ops::FillConstantOp, + ops::FillConstantOpMaker); +REGISTER_OP_CPU_KERNEL( + fill_constant, + ops::FillConstantOpKernel); diff --git a/paddle/operators/fill_constant_op.cu b/paddle/operators/fill_constant_op.cu new file mode 100644 index 0000000000..eef8fcbd7f --- /dev/null +++ b/paddle/operators/fill_constant_op.cu @@ -0,0 +1,22 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#define EIGEN_USE_GPU +#include "paddle/framework/op_registry.h" +#include "paddle/operators/fill_constant_op.h" + +namespace ops = paddle::operators; +REGISTER_OP_GPU_KERNEL( + fill_constant, + ops::FillConstantOpKernel); diff --git a/paddle/operators/fill_constant_op.h b/paddle/operators/fill_constant_op.h new file mode 100644 index 0000000000..53b8b548ec --- /dev/null +++ b/paddle/operators/fill_constant_op.h @@ -0,0 +1,37 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once +#include "paddle/framework/eigen.h" +#include "paddle/framework/op_registry.h" + +namespace paddle { +namespace operators { + +template +class FillConstantOpKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto* out = ctx.Output("Out"); + out->mutable_data(ctx.GetPlace()); + auto value = ctx.Attr("value"); + + auto out_eigen = framework::EigenVector::Flatten(*out); + auto place = ctx.GetEigenDevice(); + out_eigen.device(place) = out_eigen.constant(static_cast(value)); + } +}; + +} // namespace operators +} // namespace paddle diff --git a/python/paddle/v2/framework/tests/test_fill_constant_op.py b/python/paddle/v2/framework/tests/test_fill_constant_op.py new file mode 100644 index 0000000000..dff7b615aa --- /dev/null +++ b/python/paddle/v2/framework/tests/test_fill_constant_op.py @@ -0,0 +1,35 @@ +import unittest +import numpy as np +from op_test import OpTest + + +class TestFillConstantOp1(OpTest): + def setUp(self): + '''Test fill_constant op with specified value + ''' + self.op_type = "fill_constant" + + self.inputs = {} + self.attrs = {'shape': [123, 92], 'value': 3.8} + self.outputs = {'Out': np.full((123, 92), 3.8)} + + def test_check_output(self): + self.check_output() + + +class TestFillConstantOp2(OpTest): + def setUp(self): + '''Test fill_constant op with default value + ''' + self.op_type = "fill_constant" + + self.inputs = {} + self.attrs = {'shape': [123, 92]} + self.outputs = {'Out': np.full((123, 92), 0.0)} + + def test_check_output(self): + self.check_output() + + +if __name__ == "__main__": + unittest.main() From c876cacc7b7936f356b0c712dff8f32534eab2ae Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Tue, 10 Oct 2017 10:24:18 +0800 Subject: [PATCH 297/342] =?UTF-8?q?fix=20compiler=20error:=20=E2=80=98uniq?= =?UTF-8?q?ue=5Fptr=E2=80=99=20is=20not=20a=20member=20of=20=E2=80=98std?= =?UTF-8?q?=E2=80=99?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- paddle/framework/type_defs.h | 1 + 1 file changed, 1 insertion(+) diff --git a/paddle/framework/type_defs.h b/paddle/framework/type_defs.h index a5b9472213..6f65a942ba 100644 --- a/paddle/framework/type_defs.h +++ b/paddle/framework/type_defs.h @@ -15,6 +15,7 @@ #pragma once #include #include +#include #include "paddle/platform/variant.h" namespace paddle { From d350cdbc973a81d3a1e1b3cd90aaaddf2bdfac2f Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Mon, 9 Oct 2017 19:59:38 -0700 Subject: [PATCH 298/342] Polish CompileTime InferShape 1. InferShape should be a method for `OpDescBind` not `Operator`, since `OpDescBind` is a compile-time concept. 2. Pre-create operators and store them into a map. Make InferShape faster --- paddle/framework/op_desc.cc | 35 +++++++++++++++++++ paddle/framework/op_desc.h | 2 ++ paddle/pybind/protobuf.cc | 3 +- paddle/pybind/pybind.cc | 15 -------- .../v2/framework/tests/test_infer_shape.py | 6 ++-- 5 files changed, 42 insertions(+), 19 deletions(-) diff --git a/paddle/framework/op_desc.cc b/paddle/framework/op_desc.cc index 02aa74a842..6ce453cc99 100644 --- a/paddle/framework/op_desc.cc +++ b/paddle/framework/op_desc.cc @@ -13,7 +13,10 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/framework/op_desc.h" +#include +#include #include "paddle/framework/block_desc.h" +#include "paddle/framework/operator.h" namespace paddle { namespace framework { @@ -184,5 +187,37 @@ void OpDescBind::Sync() { need_update_ = false; } } + +using InferShapeFuncMap = + std::unordered_map>; + +static InferShapeFuncMap &InferShapeFuncs() { + static InferShapeFuncMap *g_map = nullptr; + if (g_map == nullptr) { + g_map = new InferShapeFuncMap(); + auto &info_map = OpInfoMap::Instance(); + // all registered kernels + for (auto &pair : OperatorWithKernel::AllOpKernels()) { + auto &info = info_map.Get(pair.first); + auto op = + static_cast(info.Creator()("", {}, {}, {})); + g_map->insert( + {pair.first, [op](InferShapeContext *ctx) { op->InferShape(ctx); }}); + } + } + return *g_map; +} + +void OpDescBind::InferShape(const BlockDescBind &block) const { + auto &funcs = InferShapeFuncs(); + auto it = funcs.find(this->Type()); + if (it == funcs.end()) { + PADDLE_THROW("Operator %s has not been registered", this->Type()); + } + CompileTimeInferShapeContext ctx(*this, block); + it->second(&ctx); +} + } // namespace framework } // namespace paddle diff --git a/paddle/framework/op_desc.h b/paddle/framework/op_desc.h index d0c314771c..81c4225041 100644 --- a/paddle/framework/op_desc.h +++ b/paddle/framework/op_desc.h @@ -100,6 +100,8 @@ class OpDescBind { return &this->attrs_; } + void InferShape(const BlockDescBind &block) const; + private: template static std::vector MapKeys(const MapType &map) { diff --git a/paddle/pybind/protobuf.cc b/paddle/pybind/protobuf.cc index 47bd7bc3bb..6333cc332e 100644 --- a/paddle/pybind/protobuf.cc +++ b/paddle/pybind/protobuf.cc @@ -196,7 +196,8 @@ void BindOpDesc(py::module &m) { .def("set_attr", &OpDescBind::SetAttr) .def("attr", &OpDescBind::GetAttr) .def("set_block_attr", &OpDescBind::SetBlockAttr) - .def("get_block_attr", &OpDescBind::GetBlockAttr); + .def("get_block_attr", &OpDescBind::GetBlockAttr) + .def("infer_shape", &OpDescBind::InferShape); } } // namespace pybind diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc index 356c4986e2..0f6e3101e2 100644 --- a/paddle/pybind/pybind.cc +++ b/paddle/pybind/pybind.cc @@ -231,21 +231,6 @@ All parameter, weight, gradient are variables in Paddle. desc.InitializationErrorString()); return OpRegistry::CreateOp(desc); }) - .def_static("infer_shape", - [](OpDescBind &op_desc, BlockDescBind &block) { - auto op = OpRegistry::CreateOp(*op_desc.Proto()); - auto *op_with_kernel = - dynamic_cast(op.get()); - if (op_with_kernel != nullptr) { - auto ctx = CompileTimeInferShapeContext(op_desc, block); - op_with_kernel->InferShape(&ctx); - } else { - PADDLE_THROW( - "OP(%s) is not type of OperatorWithKernel, " - "should not call this function", - op_desc.Type()); - } - }) .def("backward", [](const OperatorBase &forwardOp, const std::unordered_set &no_grad_vars) { diff --git a/python/paddle/v2/framework/tests/test_infer_shape.py b/python/paddle/v2/framework/tests/test_infer_shape.py index b38ec9c037..99562890fd 100644 --- a/python/paddle/v2/framework/tests/test_infer_shape.py +++ b/python/paddle/v2/framework/tests/test_infer_shape.py @@ -1,6 +1,6 @@ import unittest + import paddle.v2.framework.core as core -from paddle.v2.framework.op import Operator class TestInferShape(unittest.TestCase): @@ -26,7 +26,7 @@ class TestInferShape(unittest.TestCase): sum_op_desc.set_input("X", ["x1", "x2"]) sum_op_desc.set_output("Out", ["out"]) - core.Operator.infer_shape(sum_op_desc, block) + sum_op_desc.infer_shape(block) self.assertEqual(out.shape(), shape) def test_mul_op(self): @@ -55,7 +55,7 @@ class TestInferShape(unittest.TestCase): mul_op_desc.set_attr("x_num_col_dims", 1) mul_op_desc.set_attr("y_num_col_dims", 1) - core.Operator.infer_shape(mul_op_desc, block) + mul_op_desc.infer_shape(block) self.assertEqual(out.shape(), [x_shape[0], y_shape[1]]) From 932402c16b1ad41851a307e2fcb432e674609071 Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Tue, 10 Oct 2017 02:59:49 +0000 Subject: [PATCH 299/342] debug for sum --- paddle/framework/backward.cc | 1 + paddle/framework/executor.cc | 13 +++++- paddle/framework/executor_test.cc | 69 +++++++++++++++++++++---------- paddle/operators/feed_op.cc | 2 +- paddle/operators/fetch_op.cc | 2 +- 5 files changed, 62 insertions(+), 25 deletions(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index 0a4688db9c..9a5c4e9cf0 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -378,6 +378,7 @@ std::vector> MakeBlockBackward( backward_descs[dup_op[i]]->Rename(out_name, new_name); sum_op_inputs.emplace_back(new_name); } + LOG(INFO) << "fuck " << sum_op_inputs.size(); std::unique_ptr sum_op(new OpDescBind( "sum", {{"X", sum_op_inputs}}, {{"Out", {out_name}}}, {})); pending_sum_ops.push_back({dup_op.back(), std::move(sum_op)}); diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc index bbc7f77a94..ee6243a9bf 100644 --- a/paddle/framework/executor.cc +++ b/paddle/framework/executor.cc @@ -74,7 +74,8 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope, int block_id) { std::vector should_run = Prune(pdesc, block_id); PADDLE_ENFORCE_EQ(should_run.size(), block.ops_size()); for (size_t i = 0; i < should_run.size(); ++i) { - if (should_run[i]) { + // if (should_run[i]) { + if (true) { for (auto& var : block.ops(i).outputs()) { for (auto& argu : var.arguments()) { if (local_scope.FindVar(argu) == nullptr) { @@ -82,7 +83,17 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope, int block_id) { } } } + LOG(INFO) << block.ops(i).type(); + if (block.ops(i).type() == "sum") { + LOG(INFO) << "Here"; + for (auto& var : block.ops(i).inputs()) { + for (auto& argu : var.arguments()) { + LOG(INFO) << var.parameter() << " " << argu; + } + } + } auto op = paddle::framework::OpRegistry::CreateOp(block.ops(i)); + LOG(INFO) << op->DebugString(); op->Run(local_scope, *device); } } diff --git a/paddle/framework/executor_test.cc b/paddle/framework/executor_test.cc index b64ba1c98f..12be79d01b 100644 --- a/paddle/framework/executor_test.cc +++ b/paddle/framework/executor_test.cc @@ -30,6 +30,7 @@ USE_OP(gaussian_random); USE_OP(feed); USE_OP(fetch); USE_OP(mul); +USE_OP(sum); USE_OP(squared_l2_distance); using std::string; @@ -104,40 +105,63 @@ class ExecutorTesterRandom : public ::testing::Test { virtual void SetUp() override { int input_dim = 5, batch_size = 2, embed_dim = 5; - auto temp_root_block = pdesc_.add_blocks(); - temp_root_block->set_idx(0); - temp_root_block->set_parent_idx(-1); - paddle::framework::ProgramDescBind& program = - paddle::framework::ProgramDescBind::Instance(&pdesc_); - paddle::framework::BlockDescBind* root_block = program.Block(0); + auto temp_init_root_block = init_pdesc_.add_blocks(); + temp_init_root_block->set_idx(0); + temp_init_root_block->set_parent_idx(-1); + paddle::framework::ProgramDescBind& init_program = + paddle::framework::ProgramDescBind::Instance(&init_pdesc_); + paddle::framework::BlockDescBind* init_root_block = init_program.Block(0); - // block[0] AddOp("gaussian_random", {}, {{"Out", {"w1"}}}, - {{"dims", std::vector{input_dim, embed_dim}}}, root_block); + {{"dims", std::vector{input_dim, embed_dim}}}, init_root_block); AddOp("gaussian_random", {}, {{"Out", {"w2"}}}, - {{"dims", std::vector{embed_dim, input_dim}}}, root_block); + {{"dims", std::vector{embed_dim, input_dim}}}, init_root_block); AddOp("fetch", {{"Input", {"w1"}}}, {}, {{"dims", std::vector{input_dim, embed_dim}}, {"col", 0}}, - root_block); + init_root_block); AddOp("fetch", {{"Input", {"w2"}}}, {}, {{"dims", std::vector{embed_dim, input_dim}}, {"col", 1}}, - root_block); + init_root_block); + + // flush + init_program.Proto(); + + auto temp_root_block = pdesc_.add_blocks(); + temp_root_block->set_idx(0); + temp_root_block->set_parent_idx(-1); + paddle::framework::ProgramDescBind& program = + paddle::framework::ProgramDescBind::Instance(&pdesc_); + paddle::framework::BlockDescBind* root_block = program.Block(0); - // block[1] - paddle::framework::BlockDescBind* run_block = - program.AppendBlock(*root_block); AddOp("gaussian_random", {}, {{"Out", {"a"}}}, - {{"dims", std::vector{batch_size, input_dim}}}, run_block); + {{"dims", std::vector{batch_size, input_dim}}}, root_block); AddOp("mul", {{"X", {"a"}}, {"Y", {"w1"}}}, {{"Out", {"b"}}}, {}, - run_block); + root_block); AddOp("mul", {{"X", {"b"}}, {"Y", {"w2"}}}, {{"Out", {"a_out"}}}, {}, - run_block); + root_block); AddOp("squared_l2_distance", {{"X", {"a"}}, {"Y", {"a_out"}}}, {{"Out", {"l2_distance"}}, {"sub_result", {"l2_distance_sub"}}}, {}, - run_block); - AddOp("fetch", {{"Input", {"l2_distance"}}}, {}, - {{"dims", std::vector{batch_size}}, {"col", 1}}, run_block); + root_block); + AddOp("gaussian_random", {}, {{"Out", {"l2_distance@GRAD"}}}, + {{"dims", std::vector{batch_size, 1}}}, root_block); + AppendBackward(program, {}); + + program.Proto(); + + for (auto& op : pdesc_.blocks(0).ops()) { + if (op.type() == "sum") { + LOG(INFO) << "Here"; + for (auto& var : op.inputs()) { + for (auto& argu : var.arguments()) { + LOG(INFO) << var.parameter() << " " << argu; + } + } + } + } + + AddOp("fetch", {{"Input", {"l2_distance"}}}, {}, + {{"dims", std::vector{batch_size}}, {"col", 1}}, root_block); // flush program.Proto(); @@ -146,6 +170,7 @@ class ExecutorTesterRandom : public ::testing::Test { } protected: + ProgramDesc init_pdesc_; ProgramDesc pdesc_; }; @@ -200,8 +225,8 @@ TEST_F(ExecutorTesterRandom, CPU) { std::unique_ptr executor(new Executor(places)); + executor->Run(init_pdesc_, GetGlobalScope(), 0); executor->Run(pdesc_, GetGlobalScope(), 0); - executor->Run(pdesc_, GetGlobalScope(), 1); std::vector> result = GetFetchVariable(); } @@ -248,8 +273,8 @@ TEST_F(ExecutorTesterRandom, GPU) { std::unique_ptr executor(new Executor(places)); + executor->Run(init_pdesc_, GetGlobalScope(), 0); executor->Run(pdesc_, GetGlobalScope(), 0); - executor->Run(pdesc_, GetGlobalScope(), 1); std::vector> result = GetFetchVariable(); } diff --git a/paddle/operators/feed_op.cc b/paddle/operators/feed_op.cc index dcd5f7fb77..b15bc86ae1 100644 --- a/paddle/operators/feed_op.cc +++ b/paddle/operators/feed_op.cc @@ -22,7 +22,7 @@ class FeedOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { typedef std::vector FeedInputs; PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output should be not null."); int col = ctx->Attrs().Get("col"); diff --git a/paddle/operators/fetch_op.cc b/paddle/operators/fetch_op.cc index 5adb83144a..7ca3762c36 100644 --- a/paddle/operators/fetch_op.cc +++ b/paddle/operators/fetch_op.cc @@ -22,7 +22,7 @@ class FetchOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { typedef std::vector FetchOutputs; PADDLE_ENFORCE(ctx->HasInput("Input"), "Input should be not null."); int col = ctx->Attrs().Get("col"); From 6c6474cbd8514011b1c63d3439d49bd4700e46c8 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Tue, 10 Oct 2017 10:32:19 +0800 Subject: [PATCH 300/342] follow coments --- paddle/operators/CMakeLists.txt | 15 +++---- paddle/operators/math/pooling.h | 23 ++++++----- paddle/operators/pool_with_index_op.cc | 57 +++++++++++++++----------- 3 files changed, 54 insertions(+), 41 deletions(-) diff --git a/paddle/operators/CMakeLists.txt b/paddle/operators/CMakeLists.txt index 39af318ca5..31ae4b2cc1 100644 --- a/paddle/operators/CMakeLists.txt +++ b/paddle/operators/CMakeLists.txt @@ -55,12 +55,20 @@ function(op_library TARGET) set(pybind_flag 1) endif() + # pool_op contains several operators if ("${TARGET}" STREQUAL "pool_op") set(pybind_flag 1) # It's enough to just adding one operator to pybind file(APPEND ${pybind_file} "USE_OP(pool2d);\n") endif() + # pool_with_index_op contains several operators + if ("${TARGET}" STREQUAL "pool_with_index_op") + set(pybind_flag 1) + # It's enough to just adding one operator to pybind + file(APPEND ${pybind_file} "USE_OP(max_pool2d_with_index);\n") + endif() + # activation_op contains several operators if ("${TARGET}" STREQUAL "activation_op") set(pybind_flag 1) @@ -75,13 +83,6 @@ function(op_library TARGET) file(APPEND ${pybind_file} "USE_OP(reduce_sum);\n") endif() - # pool_with_index_op contains several operators - if ("${TARGET}" STREQUAL "pool_with_index_op") - set(pybind_flag 1) - # It's enough to just adding one operator to pybind - file(APPEND ${pybind_file} "USE_OP(max_pool2d_with_index);\n") - endif() - # pybind USE_NO_KERNEL_OP file(READ ${TARGET}.cc TARGET_CONTENT) string(REGEX MATCH "OperatorWithKernel" regex_result "${TARGET_CONTENT}") diff --git a/paddle/operators/math/pooling.h b/paddle/operators/math/pooling.h index f15ddca69a..c50c57b5c5 100644 --- a/paddle/operators/math/pooling.h +++ b/paddle/operators/math/pooling.h @@ -24,15 +24,16 @@ namespace math { #define FLT_MAX \ __FLT_MAX__ // It might need to be placed in another file, but I'm still - // wondering where to put it + // wondering where to put it. /* * \brief Extracting simple operations from pooling. - * Both MaxPool and AvgPool need initial, compute and finalize operation. + * Both MaxPool and AvgPool need "initial", "compute" and "finalize" + * operation. * MaxPool initializes temp variable to the negative maximum to find the * maximum value in the pooling field. * AvgPool initializes temp variable to the zero to accumulate all values - * in pool pooling, and takes the average. + * in pool pooling, and finally takes the average. * MaxPoolGrad and AvgPoolGrad are gradient operations respectively. */ template @@ -72,17 +73,17 @@ class AvgPoolGrad { /* * \brief Getting pooling results, and calculating gradient. * - * In pool2d, all tensors are in NCHW format. In pool3d, all tensors are in - * NCDHW format. + * In pool2d, all tensors are in NCHW format. Where N is batch size, C is the + * number of channels, H and W is the height and width of feature. + * In pool3d, all tensors are in NCDHW format. Where N is batch size, C is the + * number of channels, D, H and W is the depth, height and width of feature. * * In max pooling, it is possible that the pooling region has multiple maximum - * elements. - * In this case, we should compute the gradient of the first maximum element. + * elements. In this case, we should compute the gradient of the first maximum + * element. * This is different from average pooling. So we rewrite the max_pool_grad: * MaxPool2dGradFunctor, MaxPool3dGradFunctor. - * */ - template class Pool2dFunctor { public: @@ -146,10 +147,9 @@ class MaxPool3dGradFunctor { /* * \brief Getting max pooling results and corresponding max index, and * calculating gradient. - * In sub-sampling-pooling, it is necessary to know max element index. + * In up-sampling-pooling, it is necessary to know max element index. * In pool2d, all tensors are in NCHW format. In pool3d, all tensors are in * NCDHW format. - * */ template class MaxPool2dWithIndexFunctor { @@ -188,6 +188,7 @@ class MaxPool3dWithIndexGradFunctor { const framework::Tensor& mask, std::vector& ksize, std::vector& strides, std::vector& paddings); }; + } // namespace math } // namespace operators } // namespace paddle diff --git a/paddle/operators/pool_with_index_op.cc b/paddle/operators/pool_with_index_op.cc index 2e6a5f2555..ab933a3400 100644 --- a/paddle/operators/pool_with_index_op.cc +++ b/paddle/operators/pool_with_index_op.cc @@ -34,7 +34,7 @@ class MaxPoolWithIndexOp : public framework::OperatorWithKernel { PADDLE_ENFORCE(ctx->HasOutput("Out"), "Out(Output) of Pooling should not be null."); PADDLE_ENFORCE(ctx->HasOutput("Mask"), - "Out(Output) of Pooling should not be null."); + "Mask(Output) of Pooling should not be null."); auto in_x_dims = ctx->GetInputDim("X"); @@ -52,13 +52,11 @@ class MaxPoolWithIndexOp : public framework::OperatorWithKernel { } PADDLE_ENFORCE(in_x_dims.size() - ksize.size() == 2U, - "Pooling intput size and pooling size should be consistent"); - PADDLE_ENFORCE(ksize.size() == 2 || ksize.size() == 3, - "Pooling size size should be 2 elements. or 3 elements."); + "Intput size and pooling size should be consistent."); PADDLE_ENFORCE_EQ(ksize.size(), strides.size(), - "strides size and pooling size should be the same."); + "Strides size and pooling size should be the same."); PADDLE_ENFORCE_EQ(ksize.size(), paddings.size(), - "paddings size and pooling size should be the same."); + "Paddings size and pooling size should be the same."); std::vector output_shape({in_x_dims[0], in_x_dims[1]}); for (size_t i = 0; i < ksize.size(); ++i) { @@ -76,11 +74,9 @@ class MaxPoolWithIndexOpGrad : public framework::OperatorWithKernel { protected: void InferShape(framework::InferShapeContextBase *ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("X"), - "X(Input) of Pooling should not be null."); - PADDLE_ENFORCE( - ctx->HasOutput(framework::GradVarName("X")), - "X@GRAD(Input@GRAD) of MaxPoolWithIndexOpGrad should not be null."); + PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) must not be null."); + PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")), + "Input(X@GRAD) should not be null."); ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X")); } }; @@ -110,9 +106,10 @@ class MaxPool2dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker { AddAttr>( "ksize", - "Pooling size(height, width) of pooling operator." + "The pooling size(height, width) of pooling operator." "If globalPooling = true, ksize is ignored and need not be " - "specified."); // TODO(Add checker) + "specified."); // TODO(Chengduo): Add checker. (Currently, + // TypedAttrChecker don't support vector type.) AddAttr( "globalPooling", "Whether to use the globalPooling." @@ -123,15 +120,21 @@ class MaxPool2dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker { AddAttr>("strides", "Strides(height, width) of pooling operator." "Default {1,1}.") - .SetDefault({1, 1}); // TODO(Add checker) + .SetDefault({1, 1}); // TODO(Chengduo): Add checker. (Currently, + // TypedAttrChecker don't support vector type.) AddAttr>("paddings", "Paddings(height, width) of pooling operator." "Default {0,0}.") - .SetDefault({0, 0}); // TODO(Add checker) + .SetDefault({0, 0}); // TODO(Chengduo): Add checker. (Currently, + // TypedAttrChecker don't support vector type.) AddComment(R"DOC( -The maxPooling2d with index operation calculates the output and the mask based on -the input and ksize, strides, paddings parameters. +The maxPooling2d with index operation calculates the output and the mask +based on the input and ksize, strides, paddings parameters. Input(X) and +output(Out, Mask) are in NCHW format. Where N is batch size, C is the +number of channels, H and W is the height and width of feature. +Parameters(ksize, strides, paddings) are two elements. +These two elements represent height and width, respectively. )DOC"); } }; @@ -162,9 +165,10 @@ class MaxPool3dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker { AddAttr>( "ksize", - "Pooling size(depth, height, width) of pooling operator." + "The pooling size(depth, height, width) of pooling operator." "If globalPooling = true, ksize is ignored and need not be " - "specified."); // TODO(Add checker) + "specified."); // TODO(Chengduo): Add checker. (Currently, + // TypedAttrChecker don't support vector type.) AddAttr( "globalPooling", "Whether to use the globalPooling." @@ -176,19 +180,26 @@ class MaxPool3dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker { "strides", "Strides(depth, height, width) of pooling operator." "Default {1,1,1}.") - .SetDefault({1, 1, 1}); // TODO(Add checker) + .SetDefault({1, 1, 1}); // TODO(Chengduo): Add checker. (Currently, + // TypedAttrChecker don't support vector type.) AddAttr>( "paddings", "Paddings(depth, height, width) of pooling operator." "Default {0,0,0}.") - .SetDefault({0, 0, 0}); // TODO(Add checker) + .SetDefault({0, 0, 0}); // TODO(Chengduo): Add checker. (Currently, + // TypedAttrChecker don't support vector type.) AddComment(R"DOC( -The maxpooling3d with index operation calculates the output and the mask based on -the input and ksize, strides, paddings parameters. +The maxpooling3d with index operation calculates the output and the mask +based on the input and ksize, strides, paddings parameters. +Input(X) and output(Out, Mask) are in NCDHW format. Where N is batch +size, C is the number of channels, D, H and W is the depth, height and +width of feature. Parameters(ksize, strides, paddings) are three elements. +These three elements represent depth, height and width, respectively. )DOC"); } }; + } // namespace operators } // namespace paddle From 15400748ae6d21facb0b8e656b4298e1ae83df89 Mon Sep 17 00:00:00 2001 From: qijun Date: Mon, 9 Oct 2017 20:42:29 -0700 Subject: [PATCH 301/342] follow comments and refine codes --- paddle/framework/backward.cc | 2 +- paddle/framework/executor_test.cc | 44 +++++++++++++++---------------- paddle/operators/feed_op.cc | 6 ++--- paddle/operators/feed_op.h | 4 +-- paddle/operators/fetch_op.cc | 4 +-- paddle/operators/fetch_op.h | 4 +-- 6 files changed, 31 insertions(+), 33 deletions(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index 9a5c4e9cf0..774d8e4918 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -378,7 +378,7 @@ std::vector> MakeBlockBackward( backward_descs[dup_op[i]]->Rename(out_name, new_name); sum_op_inputs.emplace_back(new_name); } - LOG(INFO) << "fuck " << sum_op_inputs.size(); + LOG(INFO) << "sum_op_inputs size " << sum_op_inputs.size(); std::unique_ptr sum_op(new OpDescBind( "sum", {{"X", sum_op_inputs}}, {{"Out", {out_name}}}, {})); pending_sum_ops.push_back({dup_op.back(), std::move(sum_op)}); diff --git a/paddle/framework/executor_test.cc b/paddle/framework/executor_test.cc index 12be79d01b..0515fb2216 100644 --- a/paddle/framework/executor_test.cc +++ b/paddle/framework/executor_test.cc @@ -60,15 +60,13 @@ void AddOp(const std::string& type, const VariableNameMap& inputs, op->SetAttrMap(attrs); } -std::once_flag set_variable_flag; - // Tensors in feed value variable will only be in CPUPlace -// So we can memcpy the data from vector to feed_value +// So we can memcpy the data from vector to feed_value template void SetFeedVariable(const std::vector>& inputs) { - typedef std::vector FeedInputs; Variable* g_feed_value = GetGlobalScope()->FindVar("feed_value"); - FeedInputs& feed_inputs = *(g_feed_value->GetMutable()); + auto& feed_inputs = + *(g_feed_value->GetMutable>()); size_t size = inputs.size(); feed_inputs.resize(size); for (size_t i = 0; i < size; i++) { @@ -82,9 +80,9 @@ void SetFeedVariable(const std::vector>& inputs) { // So we can memcpy the data from fetch_value to vector template std::vector> GetFetchVariable() { - typedef std::vector FetchOutputs; Variable* g_fetch_value = GetGlobalScope()->FindVar("fetch_value"); - FetchOutputs& fetch_outputs = *(g_fetch_value->GetMutable()); + auto& fetch_outputs = + *(g_fetch_value->GetMutable>()); size_t size = fetch_outputs.size(); std::vector> result; @@ -143,22 +141,22 @@ class ExecutorTesterRandom : public ::testing::Test { {{"Out", {"l2_distance"}}, {"sub_result", {"l2_distance_sub"}}}, {}, root_block); - AddOp("gaussian_random", {}, {{"Out", {"l2_distance@GRAD"}}}, - {{"dims", std::vector{batch_size, 1}}}, root_block); - AppendBackward(program, {}); - - program.Proto(); - - for (auto& op : pdesc_.blocks(0).ops()) { - if (op.type() == "sum") { - LOG(INFO) << "Here"; - for (auto& var : op.inputs()) { - for (auto& argu : var.arguments()) { - LOG(INFO) << var.parameter() << " " << argu; - } - } - } - } + // AddOp("gaussian_random", {}, {{"Out", {"l2_distance@GRAD"}}}, + // {{"dims", std::vector{batch_size, 1}}}, root_block); + // AppendBackward(program, {}); + + // program.Proto(); + + // for (auto& op : pdesc_.blocks(0).ops()) { + // if (op.type() == "sum") { + // LOG(INFO) << "Here"; + // for (auto& var : op.inputs()) { + // for (auto& argu : var.arguments()) { + // LOG(INFO) << var.parameter() << " " << argu; + // } + // } + // } + // } AddOp("fetch", {{"Input", {"l2_distance"}}}, {}, {{"dims", std::vector{batch_size}}, {"col", 1}}, root_block); diff --git a/paddle/operators/feed_op.cc b/paddle/operators/feed_op.cc index b15bc86ae1..29e128ce7e 100644 --- a/paddle/operators/feed_op.cc +++ b/paddle/operators/feed_op.cc @@ -23,15 +23,15 @@ class FeedOp : public framework::OperatorWithKernel { protected: void InferShape(framework::InferShapeContext* ctx) const override { - typedef std::vector FeedInputs; PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output should be not null."); int col = ctx->Attrs().Get("col"); framework::Variable* g_feed_variable = framework::GetGlobalScope()->FindVar("feed_value"); - const FeedInputs& tensors = g_feed_variable->Get(); + const auto& tensors = + g_feed_variable->Get>(); - PADDLE_ENFORCE_GT(tensors.size(), col); + PADDLE_ENFORCE_GT(tensors.size(), static_cast(col)); auto in_dim = tensors[col].dims(); ctx->SetOutputDim("Out", in_dim); // TODO(qijun): need to handle LodTensor later diff --git a/paddle/operators/feed_op.h b/paddle/operators/feed_op.h index de8ec6ff61..96e3bf52bd 100644 --- a/paddle/operators/feed_op.h +++ b/paddle/operators/feed_op.h @@ -23,13 +23,13 @@ template class FeedKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { - typedef std::vector FeedInputs; framework::Tensor* out = ctx.Output("Out"); out->mutable_data(ctx.GetPlace()); framework::Variable* g_feed_variable = framework::GetGlobalScope()->FindVar("feed_value"); int col = ctx.template Attr("col"); - const FeedInputs& tensors = g_feed_variable->Get(); + const auto& tensors = + g_feed_variable->Get>(); out->CopyFrom(tensors[col], ctx.GetPlace()); } }; diff --git a/paddle/operators/fetch_op.cc b/paddle/operators/fetch_op.cc index 7ca3762c36..77e3450a73 100644 --- a/paddle/operators/fetch_op.cc +++ b/paddle/operators/fetch_op.cc @@ -23,13 +23,13 @@ class FetchOp : public framework::OperatorWithKernel { protected: void InferShape(framework::InferShapeContext* ctx) const override { - typedef std::vector FetchOutputs; PADDLE_ENFORCE(ctx->HasInput("Input"), "Input should be not null."); int col = ctx->Attrs().Get("col"); framework::Variable* g_fetch_variable = framework::GetGlobalScope()->FindVar("fetch_value"); - FetchOutputs* tensors = g_fetch_variable->GetMutable(); + auto* tensors = + g_fetch_variable->GetMutable>(); if (tensors->size() < static_cast(col + 1)) { tensors->resize(col + 1); } diff --git a/paddle/operators/fetch_op.h b/paddle/operators/fetch_op.h index 3bec9c9974..fd98552055 100644 --- a/paddle/operators/fetch_op.h +++ b/paddle/operators/fetch_op.h @@ -23,12 +23,12 @@ template class FetchKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { - typedef std::vector FetchOutputs; const framework::Tensor* input = ctx.Input("Input"); int col = ctx.template Attr("col"); framework::Variable* g_fetch_variable = framework::GetGlobalScope()->FindVar("fetch_value"); - FetchOutputs* tensors = g_fetch_variable->GetMutable(); + auto* tensors = + g_fetch_variable->GetMutable>(); (*tensors)[col].mutable_data(platform::CPUPlace()); (*tensors)[col].CopyFrom(*input, platform::CPUPlace()); } From 0e1f21a57050df474a4bbf47f538ddf1b4ef4f61 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Mon, 9 Oct 2017 20:43:29 -0700 Subject: [PATCH 302/342] Fix bug --- paddle/framework/op_desc.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/paddle/framework/op_desc.cc b/paddle/framework/op_desc.cc index 02aa74a842..c2e796b7c1 100644 --- a/paddle/framework/op_desc.cc +++ b/paddle/framework/op_desc.cc @@ -25,6 +25,7 @@ OpDescBind::OpDescBind(const std::string &type, const VariableNameMap &inputs, inputs_ = inputs; outputs_ = outputs; attrs_ = attrs; + need_update_ = true; } OpDesc *OpDescBind::Proto() { From cb2ef7d97de9b011f2dc9de060e0a641794ccbe1 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Mon, 9 Oct 2017 20:46:44 -0700 Subject: [PATCH 303/342] Add comment --- paddle/framework/op_desc.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/paddle/framework/op_desc.cc b/paddle/framework/op_desc.cc index 6ce453cc99..c4dcb2210d 100644 --- a/paddle/framework/op_desc.cc +++ b/paddle/framework/op_desc.cc @@ -200,6 +200,7 @@ static InferShapeFuncMap &InferShapeFuncs() { // all registered kernels for (auto &pair : OperatorWithKernel::AllOpKernels()) { auto &info = info_map.Get(pair.first); + // use empty type here to avoid runtime checks. auto op = static_cast(info.Creator()("", {}, {}, {})); g_map->insert( From 32cb74be3ebea9c9c59602576f45086934308789 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Mon, 9 Oct 2017 21:06:01 -0700 Subject: [PATCH 304/342] Removed unreached code --- paddle/framework/data_type.h | 1 - 1 file changed, 1 deletion(-) diff --git a/paddle/framework/data_type.h b/paddle/framework/data_type.h index 55e3931f87..649899d425 100644 --- a/paddle/framework/data_type.h +++ b/paddle/framework/data_type.h @@ -28,7 +28,6 @@ inline DataType ToDataType(std::type_index type) { return DataType::INT32; } else { PADDLE_THROW("Not supported"); - return static_cast(-1); } } From e3161bb61a4686d96588bc1eb86c3edc0e26e6ee Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Tue, 10 Oct 2017 04:49:45 +0000 Subject: [PATCH 305/342] pass simple backward --- paddle/framework/executor_test.cc | 51 ++++++++++++++++++------------- 1 file changed, 29 insertions(+), 22 deletions(-) diff --git a/paddle/framework/executor_test.cc b/paddle/framework/executor_test.cc index 0515fb2216..9f8a6f8593 100644 --- a/paddle/framework/executor_test.cc +++ b/paddle/framework/executor_test.cc @@ -32,6 +32,8 @@ USE_OP(fetch); USE_OP(mul); USE_OP(sum); USE_OP(squared_l2_distance); +USE_OP(fill_constant); +USE_OP(sgd); using std::string; using namespace paddle::platform; @@ -124,6 +126,7 @@ class ExecutorTesterRandom : public ::testing::Test { // flush init_program.Proto(); + // run block auto temp_root_block = pdesc_.add_blocks(); temp_root_block->set_idx(0); temp_root_block->set_parent_idx(-1); @@ -131,6 +134,7 @@ class ExecutorTesterRandom : public ::testing::Test { paddle::framework::ProgramDescBind::Instance(&pdesc_); paddle::framework::BlockDescBind* root_block = program.Block(0); + // forward AddOp("gaussian_random", {}, {{"Out", {"a"}}}, {{"dims", std::vector{batch_size, input_dim}}}, root_block); AddOp("mul", {{"X", {"a"}}, {"Y", {"w1"}}}, {{"Out", {"b"}}}, {}, @@ -141,30 +145,33 @@ class ExecutorTesterRandom : public ::testing::Test { {{"Out", {"l2_distance"}}, {"sub_result", {"l2_distance_sub"}}}, {}, root_block); - // AddOp("gaussian_random", {}, {{"Out", {"l2_distance@GRAD"}}}, - // {{"dims", std::vector{batch_size, 1}}}, root_block); - // AppendBackward(program, {}); - - // program.Proto(); - - // for (auto& op : pdesc_.blocks(0).ops()) { - // if (op.type() == "sum") { - // LOG(INFO) << "Here"; - // for (auto& var : op.inputs()) { - // for (auto& argu : var.arguments()) { - // LOG(INFO) << var.parameter() << " " << argu; - // } - // } - // } - // } - - AddOp("fetch", {{"Input", {"l2_distance"}}}, {}, - {{"dims", std::vector{batch_size}}, {"col", 1}}, root_block); + // backward + AddOp("fill_constant", {}, {{"Out", {"l2_distance@GRAD"}}}, + {{"shape", std::vector{batch_size, 1}}, {"value", float(1.0)}}, + root_block); + AppendBackward(program, {}); + + // update + AddOp("fill_constant", {}, {{"Out", {"learning_rate"}}}, + {{"shape", std::vector{1}}, {"value", float(1.0)}}, root_block); + AddOp("sgd", {{"Param", {"w1"}}, + {"LearningRate", {"learning_rate"}}, + {"Grad", {"w1@GRAD"}}}, + {{"ParamOut", {"w1"}}}, {}, root_block); + AddOp("sgd", {{"Param", {"w2"}}, + {"LearningRate", {"learning_rate"}}, + {"Grad", {"w2@GRAD"}}}, + {{"ParamOut", {"w2"}}}, {}, root_block); + + AddOp("fetch", {{"Input", {"w1"}}}, {}, + {{"dims", std::vector{input_dim, embed_dim}}, {"col", 0}}, + root_block); + AddOp("fetch", {{"Input", {"w2"}}}, {}, + {{"dims", std::vector{embed_dim, input_dim}}, {"col", 1}}, + root_block); + // flush program.Proto(); - - // TODO(tonyyang-svail): - // - Test with Backward } protected: From 2fc7fc7a18fb8cbb78d380caf51947097138597c Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Tue, 10 Oct 2017 05:33:11 +0000 Subject: [PATCH 306/342] pass multiple forward backward --- paddle/framework/executor_test.cc | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/paddle/framework/executor_test.cc b/paddle/framework/executor_test.cc index 9f8a6f8593..259205f7c1 100644 --- a/paddle/framework/executor_test.cc +++ b/paddle/framework/executor_test.cc @@ -279,8 +279,10 @@ TEST_F(ExecutorTesterRandom, GPU) { std::unique_ptr executor(new Executor(places)); executor->Run(init_pdesc_, GetGlobalScope(), 0); - executor->Run(pdesc_, GetGlobalScope(), 0); - std::vector> result = GetFetchVariable(); + for (int batch_id = 0; batch_id < 3; batch_id++) { + executor->Run(pdesc_, GetGlobalScope(), 0); + std::vector> result = GetFetchVariable(); + } } TEST_F(ExecutorTesterFeedAndFetch, GPU) { From e21e5646a574b9e2fa299bacb3a8ee85472e84b5 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Tue, 10 Oct 2017 13:55:27 +0800 Subject: [PATCH 307/342] fix atomicAdd -> CudaAtomicAdd --- paddle/operators/math/pooling.cu | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/paddle/operators/math/pooling.cu b/paddle/operators/math/pooling.cu index 06263737a9..4d50121de4 100644 --- a/paddle/operators/math/pooling.cu +++ b/paddle/operators/math/pooling.cu @@ -144,7 +144,7 @@ __global__ void KernelMaxPool2DGrad( if (maxIndex != -1) { // atomic add - atomicAdd(input_grad + maxIndex, output_grad[index]); + platform::CudaAtomicAdd(input_grad + maxIndex, output_grad[index]); } } } @@ -278,9 +278,7 @@ class MaxPool2dGradFunctor { }; template class MaxPool2dGradFunctor; -// template class MaxPool2dGradFunctor; // The -// 64-bit floating-point version of atomicAdd() is only supported by devices of -// compute capability 6.x and higher. +template class MaxPool2dGradFunctor; template class Pool2dFunctor, float>; @@ -453,7 +451,7 @@ __global__ void KernelMaxPool3DGrad( } if (maxIdx != -1) { // atomic add - atomicAdd(input_grad + maxIdx, output_grad[index]); + platform::CudaAtomicAdd(input_grad + maxIdx, output_grad[index]); } } } @@ -609,9 +607,7 @@ class MaxPool3dGradFunctor { }; template class MaxPool3dGradFunctor; -// template class MaxPool3dGradFunctor; // The -// 64-bit floating-point version of atomicAdd() is only supported by devices of -// compute capability 6.x and higher. +template class MaxPool3dGradFunctor; template class Pool3dFunctor, float>; From 975a51294e20c122e7143a232261d4fd49ac5643 Mon Sep 17 00:00:00 2001 From: qijun Date: Mon, 9 Oct 2017 23:55:35 -0700 Subject: [PATCH 308/342] infer feed operator output variable shape with dims attribute --- paddle/operators/feed_op.cc | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/paddle/operators/feed_op.cc b/paddle/operators/feed_op.cc index 29e128ce7e..1d65c2bb46 100644 --- a/paddle/operators/feed_op.cc +++ b/paddle/operators/feed_op.cc @@ -32,8 +32,12 @@ class FeedOp : public framework::OperatorWithKernel { g_feed_variable->Get>(); PADDLE_ENFORCE_GT(tensors.size(), static_cast(col)); - auto in_dim = tensors[col].dims(); - ctx->SetOutputDim("Out", in_dim); + + auto& shape = ctx->Attrs().Get>("dims"); + std::vector shape_int64(shape.size(), 0); + std::transform(shape.begin(), shape.end(), shape_int64.begin(), + [](int a) { return static_cast(a); }); + ctx->SetOutputDim("Out", framework::make_ddim(shape_int64)); // TODO(qijun): need to handle LodTensor later } From 871a3f6e76f57432d64b0410f49277a6e4f7d477 Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Tue, 10 Oct 2017 15:18:02 +0800 Subject: [PATCH 309/342] remove unused PADDLE_ONLY_CPU comment --- paddle/math/tests/test_GpuProfiler.cpp | 2 +- paddle/memory/detail/buddy_allocator.cc | 2 +- paddle/memory/detail/system_allocator.cc | 2 +- paddle/memory/detail/system_allocator.h | 2 +- paddle/memory/detail/system_allocator_test.cc | 2 +- paddle/memory/memcpy.cc | 2 +- paddle/memory/memcpy.h | 2 +- paddle/memory/memory.cc | 2 +- paddle/memory/memory_test.cc | 2 +- paddle/platform/device_context.cc | 2 +- paddle/platform/enforce.h | 2 +- paddle/platform/gpu_info.h | 2 +- 12 files changed, 12 insertions(+), 12 deletions(-) diff --git a/paddle/math/tests/test_GpuProfiler.cpp b/paddle/math/tests/test_GpuProfiler.cpp index 9402bd3ec4..d9f146f0d1 100644 --- a/paddle/math/tests/test_GpuProfiler.cpp +++ b/paddle/math/tests/test_GpuProfiler.cpp @@ -162,4 +162,4 @@ int main(int argc, char** argv) { return RUN_ALL_TESTS(); } -#endif /* PADDLE_ONLY_CPU */ +#endif diff --git a/paddle/memory/detail/buddy_allocator.cc b/paddle/memory/detail/buddy_allocator.cc index fdc5ed19dc..e212f7737a 100644 --- a/paddle/memory/detail/buddy_allocator.cc +++ b/paddle/memory/detail/buddy_allocator.cc @@ -182,7 +182,7 @@ BuddyAllocator::PoolSet::iterator BuddyAllocator::RefillPool() { max_chunk_size_ = platform::GpuMaxChunkSize(); } } -#endif // PADDLE_ONLY_CPU +#endif // Allocate a new maximum sized block size_t index = 0; diff --git a/paddle/memory/detail/system_allocator.cc b/paddle/memory/detail/system_allocator.cc index 6c9a46dd09..33166d9ce2 100644 --- a/paddle/memory/detail/system_allocator.cc +++ b/paddle/memory/detail/system_allocator.cc @@ -134,7 +134,7 @@ void GPUAllocator::Free(void* p, size_t size, size_t index) { bool GPUAllocator::UseGpu() const { return true; } -#endif // PADDLE_ONLY_CPU +#endif } // namespace detail } // namespace memory diff --git a/paddle/memory/detail/system_allocator.h b/paddle/memory/detail/system_allocator.h index ee9b012f91..552cab4f96 100644 --- a/paddle/memory/detail/system_allocator.h +++ b/paddle/memory/detail/system_allocator.h @@ -51,7 +51,7 @@ class GPUAllocator : public SystemAllocator { size_t gpu_alloc_size_ = 0; size_t fallback_alloc_size_ = 0; }; -#endif // PADDLE_ONLY_CPU +#endif } // namespace detail } // namespace memory diff --git a/paddle/memory/detail/system_allocator_test.cc b/paddle/memory/detail/system_allocator_test.cc index cd563844e7..6a8558937b 100644 --- a/paddle/memory/detail/system_allocator_test.cc +++ b/paddle/memory/detail/system_allocator_test.cc @@ -62,4 +62,4 @@ TEST(GPUAllocator, Alloc) { TestAllocator(a, 2048); TestAllocator(a, 0); } -#endif // PADDLE_ONLY_CPU +#endif diff --git a/paddle/memory/memcpy.cc b/paddle/memory/memcpy.cc index 790420a8ab..1df88a6da9 100644 --- a/paddle/memory/memcpy.cc +++ b/paddle/memory/memcpy.cc @@ -89,7 +89,7 @@ void Copy(platform::GPUPlace dst_place, platform::GpuMemcpySync(dst, src, num, cudaMemcpyDeviceToDevice); } -#endif // PADDLE_ONLY_CPU +#endif } // namespace memory } // namespace paddle diff --git a/paddle/memory/memcpy.h b/paddle/memory/memcpy.h index 0bccee58c3..9b36182c2b 100644 --- a/paddle/memory/memcpy.h +++ b/paddle/memory/memcpy.h @@ -53,7 +53,7 @@ template void Copy(DstPlace, void* dst, SrcPlace, const void* src, size_t num, cudaStream_t stream); -#endif // PADDLE_ONLY_CPU +#endif } // namespace memory } // namespace paddle diff --git a/paddle/memory/memory.cc b/paddle/memory/memory.cc index 30ce8a82e1..5087c02385 100644 --- a/paddle/memory/memory.cc +++ b/paddle/memory/memory.cc @@ -111,7 +111,7 @@ size_t Used(platform::GPUPlace place) { return GetGPUBuddyAllocator(place.device)->Used(); } -#endif // PADDLE_ONLY_CPU +#endif } // namespace memory } // namespace paddle diff --git a/paddle/memory/memory_test.cc b/paddle/memory/memory_test.cc index 0d402038a0..2444931e26 100644 --- a/paddle/memory/memory_test.cc +++ b/paddle/memory/memory_test.cc @@ -135,4 +135,4 @@ TEST(BuddyAllocator, GPUMultAlloc) { } } -#endif // PADDLE_ONLY_CPU +#endif diff --git a/paddle/platform/device_context.cc b/paddle/platform/device_context.cc index a9b6b79903..36450e9268 100644 --- a/paddle/platform/device_context.cc +++ b/paddle/platform/device_context.cc @@ -136,7 +136,7 @@ cudnnHandle_t CUDADeviceContext::cudnn_handle() const { return cudnn_handle_; } cudaStream_t CUDADeviceContext::stream() const { return stream_; } -#endif // PADDLE_ONLY_CPU +#endif } // namespace platform } // namespace paddle diff --git a/paddle/platform/enforce.h b/paddle/platform/enforce.h index 15d8446cd8..cd906c3fa9 100644 --- a/paddle/platform/enforce.h +++ b/paddle/platform/enforce.h @@ -41,7 +41,7 @@ limitations under the License. */ #include #include -#endif // PADDLE_ONLY_CPU +#endif namespace paddle { namespace platform { diff --git a/paddle/platform/gpu_info.h b/paddle/platform/gpu_info.h index fb33db07bd..37665b97d7 100644 --- a/paddle/platform/gpu_info.h +++ b/paddle/platform/gpu_info.h @@ -63,4 +63,4 @@ void GpuMemcpyPeer(void *dst, int dst_device, const void *src, int src_device, } // namespace platform } // namespace paddle -#endif // PADDLE_ONLY_CPU +#endif From 36da82550af759fcfcaec571921851dd04bc4a3b Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Tue, 10 Oct 2017 16:28:24 +0800 Subject: [PATCH 310/342] Add code comments --- paddle/operators/math/pooling.cc | 54 ++++++++++++++++++++++++++++++-- paddle/operators/math/pooling.cu | 50 +++++++++++++++++++++++++++++ 2 files changed, 102 insertions(+), 2 deletions(-) diff --git a/paddle/operators/math/pooling.cc b/paddle/operators/math/pooling.cc index 5accde8b07..50cfb88bb5 100644 --- a/paddle/operators/math/pooling.cc +++ b/paddle/operators/math/pooling.cc @@ -18,6 +18,11 @@ namespace paddle { namespace operators { namespace math { +/* + * All tensors are in NCHW format. + * Ksize, strides, paddings are two elements. These two elements represent + * height and width, respectively. + */ template class Pool2dFunctor { public: @@ -73,6 +78,11 @@ class Pool2dFunctor { } }; +/* +* All tensors are in NCHW format. +* Ksize, strides, paddings are two elements. These two elements represent height +* and width, respectively. +*/ template class Pool2dGradFunctor { public: @@ -135,6 +145,11 @@ class Pool2dGradFunctor { } }; +/* + * All tensors are in NCHW format. + * Ksize, strides, paddings are two elements. These two elements represent + * height and width, respectively. + */ template class MaxPool2dGradFunctor { public: @@ -197,7 +212,7 @@ class MaxPool2dGradFunctor { }; template class MaxPool2dGradFunctor; -// template class MaxPool2dGradFunctor; +template class MaxPool2dGradFunctor; template class Pool2dFunctor, float>; @@ -216,6 +231,11 @@ template class Pool2dGradFunctor< template class Pool2dGradFunctor< platform::CPUPlace, paddle::operators::math::AvgPoolGrad, double>; +/* + * All tensors are in NCDHW format. + * Ksize, strides, paddings are three elements. These three elements represent + * depth, height and width, respectively. + */ template class Pool3dFunctor { public: @@ -286,6 +306,11 @@ class Pool3dFunctor { } }; +/* + * All tensors are in NCDHW format. + * Ksize, strides, paddings are three elements. These three elements represent + * depth, height and width, respectively. + */ template class Pool3dGradFunctor { public: @@ -364,6 +389,11 @@ class Pool3dGradFunctor { } }; +/* + * All tensors are in NCDHW format. + * Ksize, strides, paddings are three elements. These three elements represent + * depth, height and width, respectively. + */ template class MaxPool3dGradFunctor { public: @@ -440,7 +470,7 @@ class MaxPool3dGradFunctor { }; template class MaxPool3dGradFunctor; -// template class MaxPool3dGradFunctor; +template class MaxPool3dGradFunctor; template class Pool3dFunctor, float>; @@ -459,6 +489,11 @@ template class Pool3dGradFunctor< template class Pool3dGradFunctor< platform::CPUPlace, paddle::operators::math::AvgPoolGrad, double>; +/* + * All tensors are in NCHW format. + * Ksize, strides, paddings are two elements. These two elements represent + * height and width, respectively. + */ template class MaxPool2dWithIndexFunctor { public: @@ -519,6 +554,11 @@ class MaxPool2dWithIndexFunctor { } }; +/* + * All tensors are in NCHW format. + * Ksize, strides, paddings are two elements. These two elements represent + * height and width, respectively. + */ template class MaxPool2dWithIndexGradFunctor { public: @@ -563,6 +603,11 @@ template class MaxPool2dWithIndexGradFunctor; template class MaxPool2dWithIndexFunctor; template class MaxPool2dWithIndexGradFunctor; +/* + * All tensors are in NCDHW format. + * Ksize, strides, paddings are three elements. These three elements represent + * depth, height and width, respectively. + */ template class MaxPool3dWithIndexFunctor { public: @@ -637,6 +682,11 @@ class MaxPool3dWithIndexFunctor { } }; +/* + * All tensors are in NCDHW format. + * Ksize, strides, paddings are three elements. These three elements represent + * depth, height and width, respectively. + */ template class MaxPool3dWithIndexGradFunctor { public: diff --git a/paddle/operators/math/pooling.cu b/paddle/operators/math/pooling.cu index 4d50121de4..736327f4b7 100644 --- a/paddle/operators/math/pooling.cu +++ b/paddle/operators/math/pooling.cu @@ -149,6 +149,11 @@ __global__ void KernelMaxPool2DGrad( } } +/* + * All tensors are in NCHW format. + * Ksize, strides, paddings are two elements. These two elements represent + * height and width, respectively. + */ template class Pool2dFunctor { public: @@ -190,6 +195,11 @@ class Pool2dFunctor { } }; +/* + * All tensors are in NCHW format. + * Ksize, strides, paddings are two elements. These two elements represent + * height and width, respectively. + */ template class Pool2dGradFunctor { public: @@ -234,6 +244,11 @@ class Pool2dGradFunctor { } }; +/* + * All tensors are in NCHW format. + * Ksize, strides, paddings are two elements. These two elements represent + * height and width, respectively. + */ template class MaxPool2dGradFunctor { public: @@ -456,6 +471,11 @@ __global__ void KernelMaxPool3DGrad( } } +/* + * All tensors are in NCDHW format. + * Ksize, strides, paddings are three elements. These three elements represent + * depth, height and width, respectively. + */ template class Pool3dFunctor { public: @@ -504,6 +524,11 @@ class Pool3dFunctor { } }; +/* + * All tensors are in NCDHW format. + * Ksize, strides, paddings are three elements. These three elements represent + * depth, height and width, respectively. + */ template class Pool3dGradFunctor { public: @@ -556,6 +581,11 @@ class Pool3dGradFunctor { } }; +/* + * All tensors are in NCDHW format. + * Ksize, strides, paddings are three elements. These three elements represent + * depth, height and width, respectively. + */ template class MaxPool3dGradFunctor { public: @@ -709,6 +739,11 @@ __global__ void KernelMaxPool2DWithIdxGrad( } } +/* + * All tensors are in NCHW format. + * Ksize, strides, paddings are two elements. These two elements represent + * height and width, respectively. + */ template class MaxPool2dWithIndexFunctor { public: @@ -750,6 +785,11 @@ class MaxPool2dWithIndexFunctor { } }; +/* + * All tensors are in NCHW format. + * Ksize, strides, paddings are two elements. These two elements represent + * height and width, respectively. + */ template class MaxPool2dWithIndexGradFunctor { public: @@ -903,6 +943,11 @@ __global__ void KernelMaxPool3DWithIdxGrad( } } +/* + * All tensors are in NCDHW format. + * Ksize, strides, paddings are three elements. These three elements represent + * depth, height and width, respectively. + */ template class MaxPool3dWithIndexFunctor { public: @@ -951,6 +996,11 @@ class MaxPool3dWithIndexFunctor { } }; +/* + * All tensors are in NCDHW format. + * Ksize, strides, paddings are three elements. These three elements represent + * depth, height and width, respectively. + */ template class MaxPool3dWithIndexGradFunctor { public: From f2e7cf21415fbdc0ae2f34b88b6cf307b37966f0 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Tue, 10 Oct 2017 22:08:33 +0800 Subject: [PATCH 311/342] fix InferShapeContextBase to InferShapeContext --- paddle/operators/pool_with_index_op.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/paddle/operators/pool_with_index_op.cc b/paddle/operators/pool_with_index_op.cc index ab933a3400..7b6afcfd1f 100644 --- a/paddle/operators/pool_with_index_op.cc +++ b/paddle/operators/pool_with_index_op.cc @@ -28,7 +28,7 @@ class MaxPoolWithIndexOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase *ctx) const override { + void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "X(Input) of Pooling should not be null."); PADDLE_ENFORCE(ctx->HasOutput("Out"), @@ -73,7 +73,7 @@ class MaxPoolWithIndexOpGrad : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase *ctx) const override { + void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) must not be null."); PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")), "Input(X@GRAD) should not be null."); From a308ff29af714be50e321c65fdcd88729a505ebe Mon Sep 17 00:00:00 2001 From: qijun Date: Tue, 10 Oct 2017 10:25:01 -0700 Subject: [PATCH 312/342] make infershape of feedop and fetchop compatible with compile-time design --- paddle/framework/executor_test.cc | 22 ++++++---------------- paddle/operators/feed_op.cc | 13 ++----------- paddle/operators/feed_op.h | 3 ++- paddle/operators/fetch_op.cc | 20 ++------------------ paddle/operators/fetch_op.h | 8 +++++++- 5 files changed, 19 insertions(+), 47 deletions(-) diff --git a/paddle/framework/executor_test.cc b/paddle/framework/executor_test.cc index 259205f7c1..0710eb5779 100644 --- a/paddle/framework/executor_test.cc +++ b/paddle/framework/executor_test.cc @@ -116,12 +116,8 @@ class ExecutorTesterRandom : public ::testing::Test { {{"dims", std::vector{input_dim, embed_dim}}}, init_root_block); AddOp("gaussian_random", {}, {{"Out", {"w2"}}}, {{"dims", std::vector{embed_dim, input_dim}}}, init_root_block); - AddOp("fetch", {{"Input", {"w1"}}}, {}, - {{"dims", std::vector{input_dim, embed_dim}}, {"col", 0}}, - init_root_block); - AddOp("fetch", {{"Input", {"w2"}}}, {}, - {{"dims", std::vector{embed_dim, input_dim}}, {"col", 1}}, - init_root_block); + AddOp("fetch", {{"Input", {"w1"}}}, {}, {{"col", 0}}, init_root_block); + AddOp("fetch", {{"Input", {"w2"}}}, {}, {{"col", 1}}, init_root_block); // flush init_program.Proto(); @@ -163,12 +159,8 @@ class ExecutorTesterRandom : public ::testing::Test { {"Grad", {"w2@GRAD"}}}, {{"ParamOut", {"w2"}}}, {}, root_block); - AddOp("fetch", {{"Input", {"w1"}}}, {}, - {{"dims", std::vector{input_dim, embed_dim}}, {"col", 0}}, - root_block); - AddOp("fetch", {{"Input", {"w2"}}}, {}, - {{"dims", std::vector{embed_dim, input_dim}}, {"col", 1}}, - root_block); + AddOp("fetch", {{"Input", {"w1"}}}, {}, {{"col", 0}}, root_block); + AddOp("fetch", {{"Input", {"w2"}}}, {}, {{"col", 1}}, root_block); // flush program.Proto(); @@ -197,10 +189,8 @@ class ExecutorTesterFeedAndFetch : public ::testing::Test { root_block); AddOp("feed", {}, {{"Out", {"b"}}}, {{"dims", dim}, {"col", 1}}, root_block); - AddOp("fetch", {{"Input", {"a"}}}, {}, {{"dims", dim}, {"col", 0}}, - root_block); - AddOp("fetch", {{"Input", {"b"}}}, {}, {{"dims", dim}, {"col", 1}}, - root_block); + AddOp("fetch", {{"Input", {"a"}}}, {}, {{"col", 0}}, root_block); + AddOp("fetch", {{"Input", {"b"}}}, {}, {{"col", 1}}, root_block); // flush program.Proto(); diff --git a/paddle/operators/feed_op.cc b/paddle/operators/feed_op.cc index 1d65c2bb46..fa325bb282 100644 --- a/paddle/operators/feed_op.cc +++ b/paddle/operators/feed_op.cc @@ -24,15 +24,6 @@ class FeedOp : public framework::OperatorWithKernel { protected: void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output should be not null."); - int col = ctx->Attrs().Get("col"); - framework::Variable* g_feed_variable = - framework::GetGlobalScope()->FindVar("feed_value"); - - const auto& tensors = - g_feed_variable->Get>(); - - PADDLE_ENFORCE_GT(tensors.size(), static_cast(col)); - auto& shape = ctx->Attrs().Get>("dims"); std::vector shape_int64(shape.size(), 0); std::transform(shape.begin(), shape.end(), shape_int64.begin(), @@ -43,7 +34,7 @@ class FeedOp : public framework::OperatorWithKernel { framework::DataType IndicateDataType( const framework::ExecutionContext& ctx) const override { - return static_cast(Attr("data_type")); + return static_cast(Attr("dataType")); } }; @@ -51,7 +42,7 @@ class FeedOpMaker : public framework::OpProtoAndCheckerMaker { public: FeedOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddAttr("data_type", "output data type") + AddAttr("dataType", "output data type") .SetDefault(framework::DataType::FP32); AddAttr("col", "The col in global feed variable").SetDefault(0); AddAttr>("dims", "The dimension of feed tensor."); diff --git a/paddle/operators/feed_op.h b/paddle/operators/feed_op.h index 96e3bf52bd..47344e309c 100644 --- a/paddle/operators/feed_op.h +++ b/paddle/operators/feed_op.h @@ -27,9 +27,10 @@ class FeedKernel : public framework::OpKernel { out->mutable_data(ctx.GetPlace()); framework::Variable* g_feed_variable = framework::GetGlobalScope()->FindVar("feed_value"); - int col = ctx.template Attr("col"); const auto& tensors = g_feed_variable->Get>(); + int col = ctx.template Attr("col"); + PADDLE_ENFORCE_GT(tensors.size(), static_cast(col)); out->CopyFrom(tensors[col], ctx.GetPlace()); } }; diff --git a/paddle/operators/fetch_op.cc b/paddle/operators/fetch_op.cc index 77e3450a73..90737c8c55 100644 --- a/paddle/operators/fetch_op.cc +++ b/paddle/operators/fetch_op.cc @@ -24,26 +24,11 @@ class FetchOp : public framework::OperatorWithKernel { protected: void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("Input"), "Input should be not null."); - int col = ctx->Attrs().Get("col"); - framework::Variable* g_fetch_variable = - framework::GetGlobalScope()->FindVar("fetch_value"); - - auto* tensors = - g_fetch_variable->GetMutable>(); - if (tensors->size() < static_cast(col + 1)) { - tensors->resize(col + 1); - } - - auto input_dim = ctx->GetInputDim("Input"); - PADDLE_ENFORCE_GT(tensors->size(), col); - (*tensors)[col].Resize(input_dim); - - // TODO(qijun): need to handle LodTensor later } framework::DataType IndicateDataType( const framework::ExecutionContext& ctx) const override { - return static_cast(Attr("data_type")); + return static_cast(Attr("dataType")); } }; @@ -51,10 +36,9 @@ class FetchOpMaker : public framework::OpProtoAndCheckerMaker { public: FetchOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddAttr("data_type", "output data type") + AddAttr("dataType", "output data type") .SetDefault(framework::DataType::FP32); AddAttr("col", "The col in global fetch variable").SetDefault(0); - AddAttr>("dims", "The dimension of fetch tensor."); AddInput("Input", "The output of fetch op."); AddComment(R"DOC(Fetch data to global fetch variable)DOC"); } diff --git a/paddle/operators/fetch_op.h b/paddle/operators/fetch_op.h index fd98552055..6fee8b0589 100644 --- a/paddle/operators/fetch_op.h +++ b/paddle/operators/fetch_op.h @@ -24,13 +24,19 @@ class FetchKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { const framework::Tensor* input = ctx.Input("Input"); - int col = ctx.template Attr("col"); framework::Variable* g_fetch_variable = framework::GetGlobalScope()->FindVar("fetch_value"); auto* tensors = g_fetch_variable->GetMutable>(); + int col = ctx.template Attr("col"); + if (tensors->size() < static_cast(col + 1)) { + tensors->resize(col + 1); + } + PADDLE_ENFORCE_GT(tensors->size(), static_cast(col)); + (*tensors)[col].Resize(input->dims()); (*tensors)[col].mutable_data(platform::CPUPlace()); (*tensors)[col].CopyFrom(*input, platform::CPUPlace()); + // TODO(qijun): need to handle LodTensor later } }; From a281b38393597e9c6342d365b3e0b7371194b97e Mon Sep 17 00:00:00 2001 From: Markus Kliegl Date: Tue, 10 Oct 2017 10:53:02 -0700 Subject: [PATCH 313/342] Conv Shift Operator (#4591) * conv_shift_op: initial implementation using Eigen Limitations: - both gradient outputs must be specified and are always computed - explicit for loops => could be optimized in various ways (e.g., different memory layout) * conv shift - gradient fixes fix case when not all output gradients desired * conv shift: minor cleanup * conv shift - more minor cleanup * conv shift: clean up & initial GPU implementation * fix rebase issue --- paddle/operators/conv_shift_op.cc | 206 ++++++++++++++++++ paddle/operators/conv_shift_op.cu | 194 +++++++++++++++++ paddle/operators/conv_shift_op.h | 33 +++ .../v2/framework/tests/test_conv_shift_op.py | 47 ++++ 4 files changed, 480 insertions(+) create mode 100644 paddle/operators/conv_shift_op.cc create mode 100644 paddle/operators/conv_shift_op.cu create mode 100644 paddle/operators/conv_shift_op.h create mode 100644 python/paddle/v2/framework/tests/test_conv_shift_op.py diff --git a/paddle/operators/conv_shift_op.cc b/paddle/operators/conv_shift_op.cc new file mode 100644 index 0000000000..e1e321ed5f --- /dev/null +++ b/paddle/operators/conv_shift_op.cc @@ -0,0 +1,206 @@ +/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#include "paddle/operators/conv_shift_op.h" +#include "paddle/framework/eigen.h" + +namespace paddle { +namespace operators { + +using framework::Tensor; +template +using EigenMatrix = framework::EigenMatrix; + +class ConvShiftOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(framework::InferShapeContext *ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should be not null."); + PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) should be not null."); + PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) should be not null."); + + auto x_dims = ctx->GetInputDim("X"); + auto y_dims = ctx->GetInputDim("Y"); + PADDLE_ENFORCE_EQ(x_dims.size(), 2, "Input(X)'s rank should be 2."); + PADDLE_ENFORCE_EQ(y_dims.size(), 2, "Input(Y)'s rank should be 2."); + PADDLE_ENFORCE_EQ(x_dims[0], y_dims[0], + "The 1st dimension of Input(X) and Input(Y) should " + "be equal."); + PADDLE_ENFORCE_EQ(y_dims[1] % 2, 1, + "The 2nd dimension of Input(Y) should be odd."); + PADDLE_ENFORCE_LE(y_dims[1], x_dims[1], + "The 2nd dimension of Input(Y) should be less than or " + "equal to the 2nd dimension of Input(X)."); + ctx->SetOutputDim("Out", x_dims); + ctx->ShareLoD("X", /*->*/ "Out"); + } +}; + +class ConvShiftGradOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(framework::InferShapeContext *ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should be not null."); + PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) should be not null."); + PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), + "Input(Out@GRAD) should be not null."); + + auto x_grad_name = framework::GradVarName("X"); + if (ctx->HasOutput(x_grad_name)) { + auto x_dims = ctx->GetInputDim("X"); + ctx->SetOutputDim(x_grad_name, x_dims); + } + + auto y_grad_name = framework::GradVarName("Y"); + if (ctx->HasOutput(y_grad_name)) { + auto y_dims = ctx->GetInputDim("Y"); + ctx->SetOutputDim(y_grad_name, y_dims); + } + } +}; + +class ConvShiftOpMaker : public framework::OpProtoAndCheckerMaker { + public: + ConvShiftOpMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) + : framework::OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", + "(Tensor, default Tensor), a 2-D tensor with shape B x M, " + "where B is the batch size and M is the data dimension."); + AddInput("Y", + "(Tensor, default Tensor), a 2-D tensor with shape B x N, " + "where B is the batch size and N is the data dimension. N must " + "be odd."); + AddOutput("Out", + "(Tensor, default Tensor), a 2-D tensor with shape B x M, " + "i.e., the same shape as X."); + AddComment(R"DOC( +ConvShift Operator. + +A layer for circular convolution of two vectors, +as used in the Neural Turing Machine: https://arxiv.org/abs/1410.5401 + +The equation is: + + \f[ + Out[i] = \sum_{j=-(N-1)/2}^{(N-1)/2} X_{i+j} * Y_{j} + \f] + +where X's index is computed modulo M, and b's index is computed modulo N. + +Both of the input `X` and `Y` can carry LoD (Level of Details) information. +However, the output only shares the LoD information with input `X`. +)DOC"); + } +}; + +template +class ConvShiftKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext &context) const override { + auto *X = context.Input("X"); + auto *Y = context.Input("Y"); + auto *Out = context.Output("Out"); + Out->mutable_data(context.GetPlace()); + + auto x = EigenMatrix::From(*X); + auto y = EigenMatrix::From(*Y); + auto out = EigenMatrix::From(*Out); + out.setZero(); + + size_t batch_size = X->dims()[0]; + size_t x_width = X->dims()[1]; + size_t y_width = Y->dims()[1]; + size_t y_half_width = (y_width - 1) / 2; + + for (size_t k = 0; k < batch_size; ++k) { + for (size_t i = 0; i < x_width; ++i) { + for (size_t j = 0; j < y_width; ++j) { + int index = (i + j - y_half_width + x_width) % x_width; + out(k, i) += x(k, index) * y(k, j); + } + } + } + } +}; + +template +class ConvShiftGradKernel + : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext &context) const override { + auto *X = context.Input("X"); + auto *Y = context.Input("Y"); + auto *dOut = context.Input(framework::GradVarName("Out")); + auto *dX = context.Output(framework::GradVarName("X")); + auto *dY = context.Output(framework::GradVarName("Y")); + + auto x = EigenMatrix::From(*X); + auto y = EigenMatrix::From(*Y); + auto dout = EigenMatrix::From(*dOut); + + auto x_dims = X->dims(); + auto y_dims = Y->dims(); + size_t batch_size = x_dims[0]; + size_t x_width = x_dims[1]; + size_t y_width = y_dims[1]; + size_t y_half_width = (y_width - 1) / 2; + + // The below trades code duplication for efficiency (keeping the if + // statement outside of the loop). + if (dX) { + dX->mutable_data(context.GetPlace()); + auto dx = EigenMatrix::From(*dX); + dx.setZero(); + for (size_t k = 0; k < batch_size; ++k) { + for (size_t i = 0; i < x_width; ++i) { + for (size_t j = 0; j < y_width; ++j) { + int index = (i + j - y_half_width + x_width) % x_width; + dx(k, index) += dout(k, i) * y(k, j); + } + } + } + } + + if (dY) { + dY->mutable_data(context.GetPlace()); + auto dy = EigenMatrix::From(*dY); + dy.setZero(); + for (size_t k = 0; k < batch_size; ++k) { + for (size_t i = 0; i < x_width; ++i) { + for (size_t j = 0; j < y_width; ++j) { + int index = (i + j - y_half_width + x_width) % x_width; + dy(k, j) += x(k, index) * dout(k, i); + } + } + } + } + } +}; +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP(conv_shift, ops::ConvShiftOp, ops::ConvShiftOpMaker, + conv_shift_grad, ops::ConvShiftGradOp); +REGISTER_OP_CPU_KERNEL(conv_shift, + ops::ConvShiftKernel); +REGISTER_OP_CPU_KERNEL( + conv_shift_grad, + ops::ConvShiftGradKernel); diff --git a/paddle/operators/conv_shift_op.cu b/paddle/operators/conv_shift_op.cu new file mode 100644 index 0000000000..145e966fe9 --- /dev/null +++ b/paddle/operators/conv_shift_op.cu @@ -0,0 +1,194 @@ +/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#include "paddle/operators/conv_shift_op.h" +#include "paddle/platform/cuda_helper.h" + +namespace paddle { +namespace operators { + +using framework::Tensor; + +namespace { + +inline int div_up(int x, int y) { return (x + y - 1) / y; } + +// Some notes on the design: +// +// Each thread is responsible for computing a single output out[k, i]. +// Thread blocks are based on tiles of x with height 1 in the batch dimension. +// +// This design is based on the typical use case where the filter +// y is fairly small. For large y, it would probably be more efficient +// to also tile across y. +template +__global__ void conv_shift_forward(const T *x, const T *y, T *out, int x_width, + int y_width, int y_half_width, + int batch_size) { + extern __shared__ T mem[]; + + int tx = threadIdx.x; + int i = blockIdx.x * blockDim.x + tx; // global x index + int k = blockIdx.y; // batch index + + // Check if we are in a boundary block with fewer x's to process than + // blockDim.x. + int num_x = + (blockIdx.x == gridDim.x - 1) ? (x_width % blockDim.x) : blockDim.x; + + T *sx = mem; + T *sx_pad = &mem[num_x]; + T *sy = &mem[blockDim.x + y_width]; + + // Collaboratively load y[k, :] and length-y padding of x into shared memory. + int pad_start = blockIdx.x * blockDim.x + num_x + x_width - y_half_width; + for (int j = tx; j < y_width; j += blockDim.x) { + sy[j] = y[k * y_width + j]; + sx_pad[j] = x[k * x_width + (pad_start + j) % x_width]; + } + + // Load a cyclically shifted slice of x into shared memory. + if (tx < num_x) { + int load_i = (i - y_half_width + x_width) % x_width; + sx[tx] = x[k * x_width + load_i]; + } else { + return; + } + __syncthreads(); + + // Compute dot product of sx[tx:tx + y_width] and sy. + T sum = 0; + for (int j = 0; j < y_width; ++j) { + sum += sx[tx + j] * sy[j]; + } + + // Save to out[k, i]. + out[k * x_width + i] = sum; +} + +// Compute x gradient - initial naive implementation with atomic add. +template +__global__ void conv_shift_dx(const T *dout, const T *y, T *dx, int x_width, + int y_width, int y_half_width, int batch_size) { + int i = blockIdx.x * blockDim.x + threadIdx.x; // x index + int j = blockIdx.y; // y index + int k = blockIdx.z; // batch index + + if (i < x_width) { + int index = (i + j - y_half_width + x_width) % x_width; + atomicAdd(&dx[k * x_width + index], + dout[k * x_width + i] * y[k * y_width + j]); + } +} + +// Compute y gradient - initial naive implementation with atomic add. +template +__global__ void conv_shift_dy(const T *x, const T *dout, T *dy, int x_width, + int y_width, int y_half_width, int batch_size) { + int i = blockIdx.x * blockDim.x + threadIdx.x; // x index + int j = blockIdx.y; // y index + int k = blockIdx.z; // batch index + + if (i < x_width) { + int index = (i + j - y_half_width + x_width) % x_width; + atomicAdd(&dy[k * y_width + j], + x[k * x_width + index] * dout[k * x_width + i]); + } +} +} // namespace + +template +class ConvShiftKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext &context) const override { + const Tensor *X = context.Input("X"); + const Tensor *Y = context.Input("Y"); + Tensor *Out = context.Output("Out"); + const T *x_data = X->data(); + const T *y_data = Y->data(); + T *out_data = Out->mutable_data(context.GetPlace()); + + int batch_size = X->dims()[0]; + int x_width = X->dims()[1]; + int y_width = Y->dims()[1]; + int y_half_width = (y_width - 1) / 2; + + const int x_per_block = 256; + int num_x_blocks = div_up(x_width, x_per_block); + int mem_per_block = (x_per_block + 2 * y_width) * sizeof(T); + + dim3 grid_dim(num_x_blocks, batch_size); + + auto stream = reinterpret_cast( + context.device_context()) + .stream(); + + conv_shift_forward<<>>( + x_data, y_data, out_data, x_width, y_width, y_half_width, batch_size); + } +}; + +template +class ConvShiftGradKernel + : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext &context) const override { + const Tensor *X = context.Input("X"); + const Tensor *Y = context.Input("Y"); + const Tensor *dOut = context.Input(framework::GradVarName("Out")); + const T *x_data = X->data(); + const T *y_data = Y->data(); + const T *dout_data = dOut->data(); + + Tensor *dX = context.Output(framework::GradVarName("X")); + Tensor *dY = context.Output(framework::GradVarName("Y")); + + int batch_size = X->dims()[0]; + int x_width = X->dims()[1]; + int y_width = Y->dims()[1]; + int y_half_width = (y_width - 1) / 2; + + auto stream = reinterpret_cast( + context.device_context()) + .stream(); + + const int x_per_block = 256; + int num_x_blocks = div_up(x_width, x_per_block); + dim3 grid_dim(num_x_blocks, y_width, batch_size); + + if (dX) { + T *dx_data = dX->mutable_data(context.GetPlace()); + cudaMemsetAsync(dx_data, 0, dX->numel() * sizeof(T), stream); + conv_shift_dx<<>>( + dout_data, y_data, dx_data, x_width, y_width, y_half_width, + batch_size); + } + if (dY) { + T *dy_data = dY->mutable_data(context.GetPlace()); + cudaMemsetAsync(dy_data, 0, dY->numel() * sizeof(T), stream); + conv_shift_dy<<>>( + x_data, dout_data, dy_data, x_width, y_width, y_half_width, + batch_size); + } + } +}; +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP_GPU_KERNEL(conv_shift, + ops::ConvShiftKernel); +REGISTER_OP_GPU_KERNEL( + conv_shift_grad, + ops::ConvShiftGradKernel); diff --git a/paddle/operators/conv_shift_op.h b/paddle/operators/conv_shift_op.h new file mode 100644 index 0000000000..5a160b0f16 --- /dev/null +++ b/paddle/operators/conv_shift_op.h @@ -0,0 +1,33 @@ +/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#pragma once +#include "paddle/framework/op_registry.h" + +namespace paddle { +namespace operators { + +template +class ConvShiftKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext &context) const override; +}; + +template +class ConvShiftGradKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext &context) const override; +}; +} // namespace operators +} // namespace paddle diff --git a/python/paddle/v2/framework/tests/test_conv_shift_op.py b/python/paddle/v2/framework/tests/test_conv_shift_op.py new file mode 100644 index 0000000000..b9ab21a06a --- /dev/null +++ b/python/paddle/v2/framework/tests/test_conv_shift_op.py @@ -0,0 +1,47 @@ +import unittest +import numpy as np +from op_test import OpTest + + +def conv_shift_forward(x, y): + out = np.zeros_like(x) + M = x.shape[1] + N = y.shape[1] + y_half_width = (N - 1) / 2 + for i in xrange(M): + for j in xrange(N): + out[:, i] += x[:, (i + j + M - y_half_width) % M] * y[:, j] + return out + + +class TestConvShiftOp(OpTest): + def setUp(self): + self.op_type = "conv_shift" + + batch_size = 4 + x_dim = 17 + y_dim = 3 # must be odd and <= x_dim + x = np.random.random((batch_size, x_dim)).astype("float32") + y = np.random.random((batch_size, y_dim)).astype("float32") + self.inputs = {'X': x, 'Y': y} + + out = conv_shift_forward(x, y) + self.outputs = {'Out': out} + + def test_check_output(self): + self.check_output() + + def test_check_grad_normal(self): + self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.05) + + def test_check_grad_ignore_x(self): + self.check_grad( + ['Y'], 'Out', max_relative_error=0.05, no_grad_set=set("X")) + + def test_check_grad_ignore_y(self): + self.check_grad( + ['X'], 'Out', max_relative_error=0.05, no_grad_set=set('Y')) + + +if __name__ == '__main__': + unittest.main() From 805639b16c5eb8af6f689a3f5a311d389a88df07 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 11 Oct 2017 01:59:48 +0800 Subject: [PATCH 314/342] Fix compile error in linux --- paddle/framework/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt index 3e0e0f5903..1bf80b3e58 100644 --- a/paddle/framework/CMakeLists.txt +++ b/paddle/framework/CMakeLists.txt @@ -19,7 +19,7 @@ cc_test(scope_test SRCS scope_test.cc DEPS scope) proto_library(framework_proto SRCS framework.proto) cc_library(attribute SRCS attribute.cc DEPS framework_proto) -cc_library(proto_desc SRCS var_desc.cc op_desc.cc block_desc.cc program_desc.cc DEPS attribute) +cc_library(proto_desc SRCS var_desc.cc op_desc.cc block_desc.cc program_desc.cc DEPS attribute ddim) cc_library(op_proto_maker SRCS op_proto_maker.cc DEPS framework_proto attribute) cc_test(op_proto_maker_test SRCS op_proto_maker_test.cc DEPS op_proto_maker) cc_library(op_info SRCS op_info.cc DEPS attribute framework_proto proto_desc) From 3f9e247a7358ae7824c3ce63a7231b54b31944a3 Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Tue, 10 Oct 2017 18:53:54 +0000 Subject: [PATCH 315/342] set variable support dim --- paddle/framework/executor.cc | 3 +-- paddle/framework/executor_test.cc | 30 ++++++++++++++++++++++-------- 2 files changed, 23 insertions(+), 10 deletions(-) diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc index ee6243a9bf..f4cc37cfa6 100644 --- a/paddle/framework/executor.cc +++ b/paddle/framework/executor.cc @@ -74,8 +74,7 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope, int block_id) { std::vector should_run = Prune(pdesc, block_id); PADDLE_ENFORCE_EQ(should_run.size(), block.ops_size()); for (size_t i = 0; i < should_run.size(); ++i) { - // if (should_run[i]) { - if (true) { + if (should_run[i]) { for (auto& var : block.ops(i).outputs()) { for (auto& argu : var.arguments()) { if (local_scope.FindVar(argu) == nullptr) { diff --git a/paddle/framework/executor_test.cc b/paddle/framework/executor_test.cc index 0710eb5779..ce8b599e0e 100644 --- a/paddle/framework/executor_test.cc +++ b/paddle/framework/executor_test.cc @@ -65,15 +65,15 @@ void AddOp(const std::string& type, const VariableNameMap& inputs, // Tensors in feed value variable will only be in CPUPlace // So we can memcpy the data from vector to feed_value template -void SetFeedVariable(const std::vector>& inputs) { +void SetFeedVariable(const std::vector>& inputs, + const std::vector>& dims) { Variable* g_feed_value = GetGlobalScope()->FindVar("feed_value"); auto& feed_inputs = *(g_feed_value->GetMutable>()); size_t size = inputs.size(); feed_inputs.resize(size); for (size_t i = 0; i < size; i++) { - T* dst = feed_inputs[i].mutable_data( - make_ddim({static_cast(inputs[i].size())}), CPUPlace()); + T* dst = feed_inputs[i].mutable_data(make_ddim(dims[i]), CPUPlace()); memcpy(dst, inputs[i].data(), inputs[i].size() * sizeof(T)); } } @@ -103,7 +103,7 @@ std::vector> GetFetchVariable() { class ExecutorTesterRandom : public ::testing::Test { public: virtual void SetUp() override { - int input_dim = 5, batch_size = 2, embed_dim = 5; + int input_dim = 3, batch_size = 2, embed_dim = 5; auto temp_init_root_block = init_pdesc_.add_blocks(); temp_init_root_block->set_idx(0); @@ -130,9 +130,16 @@ class ExecutorTesterRandom : public ::testing::Test { paddle::framework::ProgramDescBind::Instance(&pdesc_); paddle::framework::BlockDescBind* root_block = program.Block(0); + // feed data + inputs_.push_back({1.0, 2.0, 3.0, 4.0, 5.0, 6.0}); + dims_.push_back({batch_size, input_dim}); + AddOp("feed", {}, {{"Out", {"a"}}}, + {{"dims", std::vector{batch_size, input_dim}}, {"col", 0}}, + root_block); + // forward - AddOp("gaussian_random", {}, {{"Out", {"a"}}}, - {{"dims", std::vector{batch_size, input_dim}}}, root_block); + // AddOp("gaussian_random", {}, {{"Out", {"a"}}}, + // {{"dims", std::vector{batch_size, input_dim}}}, root_block); AddOp("mul", {{"X", {"a"}}, {"Y", {"w1"}}}, {{"Out", {"b"}}}, {}, root_block); AddOp("mul", {{"X", {"b"}}, {"Y", {"w2"}}}, {{"Out", {"a_out"}}}, {}, @@ -161,6 +168,7 @@ class ExecutorTesterRandom : public ::testing::Test { AddOp("fetch", {{"Input", {"w1"}}}, {}, {{"col", 0}}, root_block); AddOp("fetch", {{"Input", {"w2"}}}, {}, {{"col", 1}}, root_block); + AddOp("fetch", {{"Input", {"l2_distance"}}}, {}, {{"col", 0}}, root_block); // flush program.Proto(); @@ -169,6 +177,8 @@ class ExecutorTesterRandom : public ::testing::Test { protected: ProgramDesc init_pdesc_; ProgramDesc pdesc_; + std::vector> inputs_; + std::vector> dims_; }; class ExecutorTesterFeedAndFetch : public ::testing::Test { @@ -199,11 +209,14 @@ class ExecutorTesterFeedAndFetch : public ::testing::Test { std::vector vec2 = {4.0, 5.0, 6.0, 7.0, 8.0, 9.0}; inputs_.push_back(vec1); inputs_.push_back(vec2); + dims_.push_back({static_cast(vec1.size())}); + dims_.push_back({static_cast(vec2.size())}); } protected: ProgramDesc pdesc_; std::vector> inputs_; + std::vector> dims_; }; #ifndef PADDLE_WITH_CUDA @@ -239,7 +252,7 @@ TEST_F(ExecutorTesterFeedAndFetch, CPU) { std::unique_ptr executor(new Executor(places)); for (int batch_id = 0; batch_id < 3; batch_id++) { - SetFeedVariable(inputs_); + SetFeedVariable(inputs_, dims_); executor->Run(pdesc_, GetGlobalScope(), 0); std::vector> result = GetFetchVariable(); PADDLE_ENFORCE_EQ(result.size(), inputs_.size()); @@ -270,6 +283,7 @@ TEST_F(ExecutorTesterRandom, GPU) { executor->Run(init_pdesc_, GetGlobalScope(), 0); for (int batch_id = 0; batch_id < 3; batch_id++) { + SetFeedVariable(inputs_, dims_); executor->Run(pdesc_, GetGlobalScope(), 0); std::vector> result = GetFetchVariable(); } @@ -291,7 +305,7 @@ TEST_F(ExecutorTesterFeedAndFetch, GPU) { std::unique_ptr executor(new Executor(places)); for (int batch_id = 0; batch_id < 3; batch_id++) { - SetFeedVariable(inputs_); + SetFeedVariable(inputs_, dims_); executor->Run(pdesc_, GetGlobalScope(), 0); std::vector> result = GetFetchVariable(); PADDLE_ENFORCE_EQ(result.size(), inputs_.size()); From 293a7d1e75d14a744852523383bdbef1663887be Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Tue, 10 Oct 2017 18:55:16 +0000 Subject: [PATCH 316/342] add feed infershape todo --- paddle/operators/feed_op.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/paddle/operators/feed_op.h b/paddle/operators/feed_op.h index 47344e309c..e406d22209 100644 --- a/paddle/operators/feed_op.h +++ b/paddle/operators/feed_op.h @@ -31,6 +31,9 @@ class FeedKernel : public framework::OpKernel { g_feed_variable->Get>(); int col = ctx.template Attr("col"); PADDLE_ENFORCE_GT(tensors.size(), static_cast(col)); + // TODO(qijun): + // check tensors[col].dims() with attribute, + // except the first dimenson. out->CopyFrom(tensors[col], ctx.GetPlace()); } }; From 062ff4d77b61fc72b0654064911b193714cfb18f Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Tue, 10 Oct 2017 19:07:21 +0000 Subject: [PATCH 317/342] clean up --- paddle/framework/executor.cc | 14 +------------- paddle/framework/executor_test.cc | 8 +++----- 2 files changed, 4 insertions(+), 18 deletions(-) diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc index f4cc37cfa6..def1d1fd06 100644 --- a/paddle/framework/executor.cc +++ b/paddle/framework/executor.cc @@ -72,7 +72,7 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope, int block_id) { Scope& local_scope = scope->NewScope(); std::vector should_run = Prune(pdesc, block_id); - PADDLE_ENFORCE_EQ(should_run.size(), block.ops_size()); + PADDLE_ENFORCE_EQ(should_run.size(), static_cast(block.ops_size())); for (size_t i = 0; i < should_run.size(); ++i) { if (should_run[i]) { for (auto& var : block.ops(i).outputs()) { @@ -82,17 +82,7 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope, int block_id) { } } } - LOG(INFO) << block.ops(i).type(); - if (block.ops(i).type() == "sum") { - LOG(INFO) << "Here"; - for (auto& var : block.ops(i).inputs()) { - for (auto& argu : var.arguments()) { - LOG(INFO) << var.parameter() << " " << argu; - } - } - } auto op = paddle::framework::OpRegistry::CreateOp(block.ops(i)); - LOG(INFO) << op->DebugString(); op->Run(local_scope, *device); } } @@ -152,10 +142,8 @@ std::vector Executor::Prune(const ProgramDesc& pdesc, int block_id) { } } - LOG(INFO) << "1 " << op_desc.type(); should_run.push_back(true); } else { - LOG(INFO) << "0 " << op_desc.type(); should_run.push_back(false); } } diff --git a/paddle/framework/executor_test.cc b/paddle/framework/executor_test.cc index ce8b599e0e..5ad5b98e7b 100644 --- a/paddle/framework/executor_test.cc +++ b/paddle/framework/executor_test.cc @@ -131,15 +131,13 @@ class ExecutorTesterRandom : public ::testing::Test { paddle::framework::BlockDescBind* root_block = program.Block(0); // feed data - inputs_.push_back({1.0, 2.0, 3.0, 4.0, 5.0, 6.0}); + inputs_.push_back({1.0, 1.0, 1.0, 1.0, 1.0, 1.0}); dims_.push_back({batch_size, input_dim}); AddOp("feed", {}, {{"Out", {"a"}}}, {{"dims", std::vector{batch_size, input_dim}}, {"col", 0}}, root_block); // forward - // AddOp("gaussian_random", {}, {{"Out", {"a"}}}, - // {{"dims", std::vector{batch_size, input_dim}}}, root_block); AddOp("mul", {{"X", {"a"}}, {"Y", {"w1"}}}, {{"Out", {"b"}}}, {}, root_block); AddOp("mul", {{"X", {"b"}}, {"Y", {"w2"}}}, {{"Out", {"a_out"}}}, {}, @@ -156,7 +154,8 @@ class ExecutorTesterRandom : public ::testing::Test { // update AddOp("fill_constant", {}, {{"Out", {"learning_rate"}}}, - {{"shape", std::vector{1}}, {"value", float(1.0)}}, root_block); + {{"shape", std::vector{1}}, {"value", float(0.001)}}, + root_block); AddOp("sgd", {{"Param", {"w1"}}, {"LearningRate", {"learning_rate"}}, {"Grad", {"w1@GRAD"}}}, @@ -285,7 +284,6 @@ TEST_F(ExecutorTesterRandom, GPU) { for (int batch_id = 0; batch_id < 3; batch_id++) { SetFeedVariable(inputs_, dims_); executor->Run(pdesc_, GetGlobalScope(), 0); - std::vector> result = GetFetchVariable(); } } From fb2ad4c9490925d49a2f0d9a641472137e308876 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Tue, 10 Oct 2017 13:10:58 -0700 Subject: [PATCH 318/342] Change PythonAPI `.proto` to `.desc` --- doc/design/python_api.md | 12 ++++++------ python/paddle/v2/framework/graph.py | 30 ++++++++++++++--------------- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/doc/design/python_api.md b/doc/design/python_api.md index 6213da65c8..c4665e44fc 100644 --- a/doc/design/python_api.md +++ b/doc/design/python_api.md @@ -22,7 +22,7 @@ Whenever we create a block, we need to set its parent block to the current block ```python class Program(objects): def __init__(self): - self.proto = core.NewProgram() # a C++ ProgramDesc pointer. + self.desc = core.NewProgram() # a C++ ProgramDesc pointer. self.blocks = vector() self.blocks.append(Block(self, -1)) # the global block self.current_block = 0 # initialized to the global block @@ -57,7 +57,7 @@ A [Block](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/block.m ```python class Block(objects): def __init__(self, program, parent_idx): - self.proto = core.NewBlock(program.proto) + self.desc = core.NewBlock(program.desc) self.program = program self.vars = map() self.ops = vector() @@ -98,11 +98,11 @@ class Operator(object): outputs,# dict attrs # dict ): - self.proto = core.NewOpDesc(block.proto, type, inputs, outputs, attrs) - core.infer_shape(self.proto, inputs, outputs) + self.desc = core.NewOpDesc(block.desc, type, inputs, outputs, attrs) + core.infer_shape(self.desc, inputs, outputs) def type(self): - return self.proto.type() + return self.desc.type() ``` `Operator` creates the `OpDesc` message in C++ space, so that it can call the `InferShape` function, which is in C++. @@ -124,7 +124,7 @@ class Variable(object): name = unique_name_generator() self.name = name self.block = block - self.proto = core.NewVarDesc(block.proto, name, shape, lod_level) + self.desc = core.NewVarDesc(block.desc, name, shape, lod_level) self.writer = None ``` diff --git a/python/paddle/v2/framework/graph.py b/python/paddle/v2/framework/graph.py index 6f2a76a983..7fb72c3638 100644 --- a/python/paddle/v2/framework/graph.py +++ b/python/paddle/v2/framework/graph.py @@ -11,18 +11,18 @@ class Variable(object): if name is None: name = Variable._unique_var_name_() - self.proto = self.block.proto.new_var(name) + self.desc = self.block.desc.new_var(name) if shape is not None: - self.proto.set_shape(shape) + self.desc.set_shape(shape) if dtype is not None: # TODO(yuyang18): Convert dtype from numpy.dtype - self.proto.set_data_type(dtype) + self.desc.set_data_type(dtype) if lod_level is not None: # TODO(yuyang18): set_lod_level is not defined. - self.proto.set_lod_level(lod_level) + self.desc.set_lod_level(lod_level) self.block.vars[name] = self self.op = None @@ -38,13 +38,13 @@ class Variable(object): class Operator(object): def __init__(self, block, - proto, + desc, type=None, inputs=None, outputs=None, attrs=None): self.block = block - self.proto = proto + self.desc = desc if type is not None: # TODO. pass @@ -63,31 +63,31 @@ class Operator(object): class Block(object): def __init__(self, program, idx): - self.proto = program.proto.block(idx) + self.desc = program.desc.block(idx) self.vars = dict() # var_name --> var self.ops = collections.deque() # operator list self.program = program @property def parent_idx(self): - return self.proto.parent + return self.desc.parent @property def idx(self): - return self.proto.id + return self.desc.id def create_var(self, *args, **kwargs): return Variable(self, *args, **kwargs) def append_op(self, *args, **kwargs): - op_proto = self.proto.append_op() - op = Operator(self, op_proto, *args, **kwargs) + op_desc = self.desc.append_op() + op = Operator(self, op_desc, *args, **kwargs) self.ops.append(op) return op def prepend_op(self, *args, **kwargs): - op_proto = self.proto.prepend_op() - op = Operator(self, op_proto, *args, **kwargs) + op_desc = self.desc.prepend_op() + op = Operator(self, op_desc, *args, **kwargs) self.ops.appendleft(op) return op @@ -104,7 +104,7 @@ class Program(object): def __init__(self): assert not hasattr(self.__class__, '_instance'), 'Do not call constructor directly!' - self.proto = core.ProgramDesc.instance() + self.desc = core.ProgramDesc.instance() self.blocks = [Block(self, 0)] self.current_block_idx = 0 @@ -116,7 +116,7 @@ class Program(object): def create_block(self): new_block_idx = len(self.blocks) - self.proto.append_block(self.current_block().proto) + self.desc.append_block(self.current_block().desc) self.current_block_idx = new_block_idx self.blocks.append(Block(self, self.current_block_idx)) return self.current_block() From ef4132051c0fc88394d75dc7c482a024fc70663f Mon Sep 17 00:00:00 2001 From: zchen0211 Date: Tue, 10 Oct 2017 13:43:49 -0700 Subject: [PATCH 319/342] gan design with graph --- doc/design/gan_api.md | 7 ++++++- doc/design/test.dot.png | Bin 0 -> 59401 bytes 2 files changed, 6 insertions(+), 1 deletion(-) create mode 100644 doc/design/test.dot.png diff --git a/doc/design/gan_api.md b/doc/design/gan_api.md index 1a7d0df116..a1626e50d7 100644 --- a/doc/design/gan_api.md +++ b/doc/design/gan_api.md @@ -27,10 +27,15 @@ In our GAN design, we wrap it as a user-friendly easily customized python API to | Concat op (done) | ? | N (Cond) | | Repmat op (done) | ? | N (Cond) | +

+
+The overall running logic of GAN. The black solid arrows indicate the forward pass; the green dashed arrows indicate the backward pass of generator training; the red dashed arrows indicate the backward pass of the discriminator training. The BP pass of the green (red) arrow should only update the parameters in the green (red) boxes. +

+


-Borrow this photo from the original DC-GAN paper. +Photo borrowed from the original DC-GAN paper.

## The Conditional-GAN might be a class. diff --git a/doc/design/test.dot.png b/doc/design/test.dot.png new file mode 100644 index 0000000000000000000000000000000000000000..768e5cac4b14201f378896bb3bcde9e6ee540414 GIT binary patch literal 59401 zcmc$`byQVf*EW1mLRuuHJ%oaiD$<}HTDp;v5|9#*E(0m)5-I6Y5Cj1!0g;vxknV1f zu5a%9e%|r^^Zxma@r`?o+n>jC_Fj9fx#qm)bzN%*sVK=16VMW%P$*(KS&SMAh2@Gu zT}-@$3!mf;)pNrixF!lR7}PoPzmGLJ(I^x%N)B^h!!2ob%w1n&?hJ3Ui%5hZkQrV0 zah60cSfP@~N}Ee6L#+V%Mz*_7^*H9y zQaEoDHa1APqc7dK_;kL5UfkZO`rU1(lm!vvRQGwI1yPyn=nH6cFGbBT&SkR5ZJKoq zhW4k!b#(7X0;Y9|(|_b8B_)5jUkA#nQVNWclap`#Bv=0Ex-outZ zO8(Khqve}-Y=rm&iMvG=)A!A?&eR&aY zx+RijaiUJ5_WaEAlYZruo7#n#o%zmU^LDB-1vWHiJOTN7fp(GfSBp-E&B^;eo0^_X zHTdW1SC$-Z-G7Zc^|#p6ZL=P)CqstxWGD3;V_}gUOi4}_n>^oGV_eEeN$HPu5tmt9 zoY4SAvsc+)pxx|yer$x?>e*nSl#ULK%ob)g;ts?5KHcZ|HyLhdN9PYjt_spCY*RN< z0_hXx8)`mf>_$0IC}YB8Z%-Bew5(x89wnm(Q*l#Fj|U(5{SA8I(a%Ev^OsBX0;9Hf z|N9rSE_&|KewzPYxkQ_Ua@{(7K%qfZlxⅅF7B+?f?ACbr+`fJ>mcP4e6@DD8rLd z!z*FaL9(_kSJ%S?{`)VPR8<77GK~NIjoRz6H&t5yGfo@1U5CNx{~6lnT-tT(&;f66 zZ!&oP=QF>aj8;6)HLMZ-=OMPYHu7}077MQ2f{Vck3JT&k{(sW;0Fh>aRuioD?JO#EFK%Lh=h*ib&kwiFz0Obf7yEL;xvIF)!osu=89!^i zkGs>Q2sNL{J$i&`4rAC_NcA-tEHtnjEKJVdy&P%!^)TrezO;ET6P{sI=ac{YZjnJ1 z)lIEWKSu29K6`vAGOVHPOuQFRZ!D>=PyaeOuub{%A@kjZ5AZei{(Q}7m$l)bdShOD z3FpNK#TAvEzZc1@60M)M=f3zA7&z%qZTJIzUTibOFDN2%5v8WCzO`09p;vQ$K~JJ6 zHSkZ?a$baW(*Ehh`5845Uk(nuRJU~ekTp2ioC*vKl$4b8(>%uI9B@CyWHKf)sc^NH zJsaGd4&j-u+Nf=W-LO@U$MmDUU16<+qReC0`ag54U&#-1+o5tDU7&hO(0@U%)+0LO z1-j&J75@GOy(*7rrxi>lZWW=cBwdDE*g%l1B18=i{R@h&9$~V)&ca3phfk`aGAkG} zo>lQt!oSBiQjnitCm~<=^A4L9(z}lKoK=}J{9!1)MgN&Rvc|E+0q+k(lT)b@X7Bft z#u~rRGXMDp7KZ}|Je%9W3zo@&^g-+mHGXCP{Y6HPM#aUp|6WQatI~l-W&A%koNc*7 zw=OyK-!D;F3Xc-Txcp~SR#N_#Tqno>JKSfh^Gc7t#k_wXsG1@Dv;XG=z$avvlp>QeUoW25xnAb;rnZe?b5-oxtDkqG)uJV1a=jm$`+- zG-N-iZ$=dkY2Ewm(XCo+WMsNh{vYDpm-BKy_ARX+(o<7Yiyp6j{l*a(*dovQr<4c< zJDC8Flo2Dx^Ejt(s;KVlL5;_*&0`v}$XA?yS{P7oNf-#OU(YdN>RZ{h?8_#GJwk8j zPq?3x;*tMT=tYfTc-q5&;R-YwJ*QdWKziwtWR6mDFhfnqB}nR;xxs<5x|H0bSj@u0 z-&>NZqNB;byKaQ}`JrO-(P%X#f1>@C7Fj_NYH>mh46j+s;jD+ce3@I5HaEdQQ}OPc1Fsqj+gUX++(= zv{Az6ISp^Cs;WeAiD;m%YUEw<*kAwbD22hasoK~k*qF`8oLpjgCx6%KMOkVd@hv<( zi6PpRv4H}EFe$x^yUS@Q!{(PGlhtevUzX{F$O`@yns!Gq;_wnmK8t-;+^z0LFfmD2 z*3OI;$@<$OM(U12y}Zb!IYVR|GAyODE+@LAk{S`%vd6ia1o0L3zaL+oLTs$8xds== zRLO|;9R*$2=~LY2uOb8Dd{8{Hs(B}#+u%W}xVQ10Z5LOJnXd*zWymGiMFH0yzC}`( zUt%JivvBeuA;8rj4I37!aCTDeW#rH75Wgu#sRXl^YS137t4R{>*>)drHUgZ-DLV_P+(Y? zbN~s1!@osU)DLPSub1T4m6-YX$TFlu`S)W}Qo``b*^PTMe=th0Y4}{ zc=(V_P*4YI<;IYa74gvo6FNN#immtY9%Zu9bs-@kH2Mk?6Avm=F;M`GE^*y3#8O!u zd`nD+!OXtIV~h})$ausajg5^RSWr;VWSg0pNs+(NNGcHn`%n*qc_*Fm=FOYHuV23& zZdEm&hTa>$jB=c7i+!+m>+!cosFpYQMbyldG?ET5tgDhCYtlleBOj{#$O(gy6r3h|AT7^NE1jw#8D5 z1{cl+yHX{LEIoIZ`>{~s;^JBP34yuQ8pN&{J%OgpVNW*3YYrw~lYX%q6RzGJcB;R+gjNmaw#OeKMWv|3Z=b^LJ?Bkl04p9 z~fS;?Hby1GxgU!^e^z*X0gI5RRb$}_C_{*GQq zt zceX?9nGKg(MI|SDPn^kz;Aub%I-^}X_+lD4KRuvGXTD+JgnpH!5Rc~Iz=r`~M@T3- z&9zaU9IX@~;bJa+L*8F3=sMQyc}GxSaK>H(JjBTQhn(8@8rPs0EM$F{|kkl!{=*Qzs_gp zXyHI2`hE(ik(%4?l8c_7l8w)lU!*&rB=gCuzc_gbOdC zwj%_=(1^ zT2E>iJs5woq65+n9eCPv#ps6q1||D9j9p@Jc|sjF85qTOg-q8!yHSjd4d?9ajB2Bt zVI=Z(5_-g7@|FAh9E=qPqm{Mf(ACo72Nk>NxCu>F=)hGnhPoJMDx@63imu98a;S6P zVdvD$H{0@|`+`;3!nMcuaD#6i6Xqh{sE^JB!!oXZc!F#3 zk>dlWE+biFzk}Yq_$#t^Yk$Kozqz47Nfx6TmO|ARU+mmKaTJnCM#dwK;zKWk}rrX@L$lT$H3L}neMp=u?32w(fp zpE2*_TBW$rmtT(k{j8!1O9m6AZ8Dn@AiCP@lZn<$%ZGJ+T~4lDs9z5oFCr=F9khN>;EYGU*+L5U zJ(pKjmQhk7fgbdCidYCVl6jAL$s#GO*hlLQ=jlXuI&M2WfBrI%nTMz7?}EFV8?%@g z12;FfKp|i%mpe|9FMjx8ZSL;+*Bjs9`@8EQ_Wi}mFXQXz&b~(#Rh5-bpr70)vIBT( zSmQDD1%~{>(n1fnCDpY^J%{9-v}sV6koVCYsNl9buzYJ)7PBNM$kM<)_y%R{fBeXd zD?+Au=>vf)ibll6rg){bqN3s>>_DpWeGc)a9XG?E-oQW`X{V{P7M5^kQPJua;dOU+ zcPNShO=}EIs`@{0vf5bCCHS5GBB<)^wwqGlmYjv1mwKC~zePp7vChrL=wdJe8LbY! zfX-F2ND_gFce|Kl_lU=3ms|kNkpyn@9ha)U&@qXv_xQ0UW&9 zK*;mnzBoQH@p1N{%YW8a?(39chBProTKWP?R7|WVTahkggtsB^P9|W4mMKdQ%@(Bf z1PHM>T6w!rzw%W~48>@*JNeMi5XukW%?^$-RN(HFT?a=;q#yzcVD86eHzIEw!^+CZ znJJ&}c^J+p2F=7D?tqs0JJ0>CA3V4a#v~EuwD3Ej?suw0IIuHlKEACK?N8&;w?Epa z$@KrpzvMtF3Hb0bJ-y9Y)sx1zmVa_1-RI=gCZKudvmY0&K0iB_RaBIMK8M%)=pR3m z(=|s866$q)eEh$)-XcEdr>^uuj@SXDOw!iYoTwjzgXJm*1D|xsA=_;3?A-Q+nXm8f zr$kN8&1qW78ArEblhp$ZibzNZnw>RcWn;SrscEpti09w@b0|WAmqtfN5ghv_ARq(o z>NXoGbD4??_xknm%GNlbDz=86rcRByPBg8Jl=In*3PLMFxKVd5jsRi-+hJo^ipBAo zs^P3FLvO|cJWWi{0(PT;P(w|Cg{W|tx}dMG4;Oq(#ia+aLI32*lS_nzuZxO`I+xp8 zTkBh+uRjMUj69P}7=3;HYrM#~I9zCXL{Byw0mfWp^jzhGv5kBx4E|-zJDF%-TmL6d z9T_?iod6MnvMyY_*x3~n9^MQHr5Jbt)snUtKtVRF|{qXQ?vi`MJew?gWe!%fmV}N-} z#I@=9dG=UN&8Wpi%b^nU5NJ!v6x2tT`f|*-XIs^Blo+s>*M`fYf$np6z z4w$UYv|0e~1-qKO0Zg>C)+vp3&P!4KM=T`*iT<8yYEcjlkN$boU@(}LZ?+>g{bdg; z;0vPXCp*)#v#g|I{~}KIM(nNQGe1oD94kWDvY=+C0+`|;REIxpK;qW@W|M$48PI_I z{Q0v5X2^CxB2vxf$~y2>vCx4lz{HtD#-L9af9T~^?YuIe2-VCCnl(!6{CQv;_$|Ac zfidj;X8RafH|w(5+1Zu`e?qn!H)Nrpvm)s8&mWEcb4&ISciYP}Dx8k{J6~S&5uqJH)s1e9Jy|8c{XyuZT6|O+cCyI5TtkocyT8&olb&1}{SzWcc zDq!8$ib&iBCMG8SDbQA?;?`f}&3T=jnTZglKSxF$LoR2BD@Ki! z+ZPy6sgeZ;9OtR$u=4V%2)nG_SXx?=zi0FWT0=$X!f)x8VAj;yZgFtPu`4CrfZkNC z%EeX}xEyFM%Gx=}xoW6-*`*p!d$SbO3beSappN} z$;imIi+oSp3HtR0VM=~>c6P3le4$FBL&{e{65YCN9gnB*(^O7&sRl82cIoAT&o`l+ zmcOI@vnhl+3yKZ0fY6@CyR&M1;g;?Ea5oF;3RO8BB_&(*b!DyZ`iV4R9;MhgIIR%s zf4G}0v|k8MZ>+2r*!rICYL_Xb6Qqw$r9?%cA){@lo}b7Bc?1J>)SLDY57OYRt<^+` zTUi~Q_nJrL+JIunTwPstzdPqy-*pg+>-{<>EQCQ5yg80H^5KAo*Afw#2adNKRk4xS z>^kB7H1=#w7pRv^=)(mz;%{=q8Q9o-f?cX(?yrz85-T#Ak)A%=Bg3Sb9%n-7m09CB z7i?lXKR4If7JJhiDgZJLGP2gTwy3%G_;z(=F*YQBja9iSK#k0V{;|ZgndERLTw)fc zomxUd0vL)Xk&*GRk)Z8vgA8d72xxkFnG@AvH1Nf;2c4&p7o|9us%lE?{Q0DIJ@eo2 zKaA_m{krKdUw8|CzaGffRPgfh3Kp9&-d`WvK0Dq>tP?*wFohv-YF1Hm>t9CAS9(AS zeEK_y425)z5+|Fl=_Pzdy%MYU$32zpMk}rXkw}5+g#~@E)|o7#4T*{M`t^{&mX3tm zIDYS#Bx=1R{5)^uHc@3*lx-Mh`cDjw#Z(kfsLHi5tRrkY4k6VIRfS?0Mx=4`Vu03?1vFB*}OVz@fP_4pe$3bs2b5KUQj+Bto()`{`) z+K~$fhliATIWpmlm&t5KE67lj>+2+_#o@ANdCy8>07{{lSy&pay*u9XU4*Us5O6}V z^O9Pr-Po|T_gDxrBG|-CDamQU}gRSd-FR<=rw>gI{WIaxBW+t z9zDWFVdLWJeGZ4qnJ)Eay%(^hT^=k7Cu5PG1b#^pNSMz9Ib6A0$DDFrUZUB`sbTQV z3}6}nTzu!W=g;JuV>l{*HmLL~jN$QF*JU;!Kw^YnEXxAElTT7^Hel*hD5yAHA0L26 zLF4@m93T?7En)z@6H@=)CKfdVpaw|7eDP{47`Mr#C(Bmfbs)YL~6~# z!s5aW(X8jkd$vwhI7GM)(-tlwJPqoH<(&`Zok>FEbOJU%06}c_DN8(oAQyxN01(ct z{PrtQ*K?sSfWZ1RAcLi?tqrws5(?B$;1!Eu^ZY3?Vp7jhDqAj9!QwipB>+Z|@%&^# zf($So{Fj7Jy^?&jxN47;qKDP7Fde?FD=^)`dxz^) z8w98wp*~k(&%IBjmOT%8&M+8Z*Y#TfkkqTkO1|IzvHT~`bP&s;U}7-Y-mA>IKT61H z{_iOrlT`qE3i@!a7b^p?pe(R`a1uN}XqLeCgMHU{Qf!2o1uE!2pt9JsnPSZz;8uFy zk{Lup@VA0DA~}qZ4%W8kDF@EHhaB62b6JB8p4Qlo2p|;}5DI61W}__7Vn8t|I5>Q% zsH~K$a#=H*{PD5`qNGW?O%z7#=IJ?ddgm|XVGuT@VbL$iB3;VT?3e_Qlfj53Xln(lr$ceMYyB+80PQ2@;)J9-$nU~ z|KkN9j0U)qXAm{LI(0`iy^4btX+W=By~;5@K8|qBY>9=Ny?uRbX!HZ?!9uQu-^uD# z^dSsl9)IY!k!k{|EZ0D-a&CUU4d7aqMdydej_xmO5V^!_YWfi)} z?Q33MCKcWEgQi>7*}1g8XoH#aW&xMJEyN;#SXeSvVc`!>5mfvMk4lW|FKqq$7X!b3 z<`Vnk?_V9O{`}ZQXJLTF934oBEF66G{sUNKh-O}Tw};{5s#!aQADKa*DraZ4unj3SeMRo3sQQ)kMax{bMt5io-bB?>|edU6rbQnFygx8K@+Qm5eaf>a(z4ft`O+2q4j1B!d z`Y=b~Y(e5Y5}4Fr0QyMN0BxW33jgW@dHG=28V{kJM5-W?)K}TI^jv}SwwkD|(EyI1 z+kF!!hZYwHCqemc>8#!3xWMpm9_M@u(ET-!u9Z-NLjMxLv2L|nF^CKpz_QL8;|8^0 z{OB~wMivZ89ly!XO9%itI67*}l#AwE&3O+g5D6mIJ9kQdE`5g!6d15NPyfUZ3Jarx zxrTf;I4h;4!Iww1jsvA0YN{$yO zY%~WMI4U6_&&4VqLC{DPfjp={IUsL1?t6CPzO#Kv@xz_sf;qCln2#P66C8Yes^UkB zEU@9IQOM<0RaLk9wT*PwW-7`?zrRqJo}M;?UIl6Tu$ZCp^Si9-2!SPFcrI@l3GUUe zUBS`O(T@`Nr2zj*9aG=xm|I)3-D=RM<}r-8T9DG9XU|1%%2BZ z^(5SWnh9wX>0BhvPn;@;kzWOoUk?rlX!ef(^CFWiC@AJ5dF2eFxeF%?%S92_^`B4x zzd#)Z1rG&zesg>K0jLW|9RnN)jo?S%%W}1g7=a+6C~7q0pMt`RNQ=O`2S5^<1V@6P z$G;m87Z*^Lf4^K4c3$*@OQu6lpZmDHaQcN{K9lbW*~7O)_W==X0w|`gudfd-^*lX# z;q|Xa<^b}5N{z>h>aAaNZfA!JQ>&}*fP_M15GL<`WTK*?ygnyx?-)d=*4NjA|0$Xk z(W6H}W*{d+aIV`-=)KL3+g--+qs@Q+G^$g9JedL0$1k8cFVoTn zr-*r)v_`SP7@8r{5t8mlW8=dupP<#qhi^UQdV702GM16wz{Db!DA1gyrl%1i4#xN1 zzQk)~fD8UhGN&&Mno<-Bm>X!xGT^_vUHfN#9OHFAljA9^`a=TKI$s1tC1kq0MuiVs zT3UV}=n-;9M~WC7tVfwgYL0U1FQ_q_K=~L02BW!cLBy$DXtN8W3CeD$5rj%{u$U3f z3)?ef-}|Edq(ODaH_H&Qzo!1nrE9R(f$!d707GdN8HPg8cjr-pwSHUdD5^hB=mwmjIWpQDFz$^aK-xgXe#9u#ZMMS z)Z_(fuUE=6ln95DWKCuEJ0R?jn&K!(e`Oj=B?K-tHNieZ&F=tGGuFW z(t$qbL3GA&Uzd@@J*^Q^bl7QJEO6qtPO+E7E^Ir$NZRL=p*Fta~2Mjh; z4r%C2yQ^I7SBJ|8uU)$avxaHvcCLswpRJ{zn#%JX6gx_{y2r6oam;jYFFfqV-v?9`-OE2|c2N zA}&Jt`+ZN_fuio4wo2fmdHq!|B>((4I#}afqoB<%i&9BK4Jl2*`c`?$k#NAKk5WXh zA`kfJ5z%~CYFB|`5Pe{FW6dp={WKcA#p74I=GN)#`Ssi8MD(g^YFAQxj;xSLP>APl z`r0L4;39`HyQUc6sXITqF+ksWt@~eoi$9VnBO@anUc6`m;uT4g$jb0{|4Z^zQVP?w zE6>q{p1c7d#5;gcPoU5cxuEf~|F6bF2|P+DG+6KB$)HKU=;iI*&m9ujcE7KWUpj+K zNoiQ(K$j7yZe)_7cmCqCrY8!1O6zzGhM@c3m$)e(`?|BEo4{}RCneF-i@HVBD`j3I z+9$|0zWS0;%;O`7f`4l~??E3|1Y0MQS*cxv7tDKZ>y-=tQO$RLi&PKC$Y#*2Q?|ej zc){Wal=u{w<)ndPL}vLD1_mRa#N|X+p7xrA?#unO{6EDYxR+Y>;nvpH3VI#zzD|~% zp;_O5Hsh;s9#{JNUCnLU(D!$&t^m;!4%{(xB|Mi12-K=>p-cD`mD_e1e_`vl>V$$!ik78hMm z@T@Wii7o6`0b5@lXmZG4He0W!egwb4Vs94Ve={K(MQJ?ivoo&gD(_qGJ1Nit|cmf zDx!OTOo|;%b++22l*naTB?+=u3!%)$tlSXumY`~#C=`%FjDSk8Q6LQ>{^9pxo&w?kWsrnkSXgKsCy#Ma zy6vfnoA0Lmi#U0v6 z(2aafkJ3PV5OiL=3^O6Gs6It9+oiiOH%B=H8fvh(_mR%%_?<_MuiaFFdgYx#&OzXf z)Up5#5%7qYh>87EQ&TmcF{5pJ^eY11zDWYVwyzbRSy}6u&g5gYeu7iO z0cyI5gze}OVnhYf_OliqnNeLie~SpCE^7q$?%fL+k$-R{KkY*=+XWN? z=0Jm@_7p!$b{1xEbadRDPqf#|B7u7H9*8?%(J+ftE8M0>Ck7SV< zzx|gmI}Tc!V8BA8Ak0*1E;FOodj8}^xVE*lNE#b6gPDvN1zt>v?k`UshKy_lNMt{u zHT)S8JW%d+=!oc7uIC4YaU6EtAF*&r8QSUtsd=n4lij$OH)mag=FjP(VkMZJCPED5fCZU$vQRW_sy3t~1Q| z&aNA^gY^%L=Q=gR)2Dp~Unj?#an3}-FAl~Qtn?r5KtvLwB#mP^1K0nOsjiL8q`hkD zP)g}IFL^ug`K8D1r#=C)q0sa`{%3<1A|p9tGpfU}{-KPOXC@<2qF5L#OS~W1H^)SG zDK--%?U%_ko&R@ir3G(2 z>xmKn<~It`eQ2AiA-jX$nE|i`f!F{jzK`DX-e2MlQTfjFxL*;INzv3e2EjAa9NrAM z-xw%VF)Dg`A|PY3s%|}4^7*9c`ryHMQubeEv|N{Z*?i0l$;rW1 zU{Fv!pa#U$GXtPxz8RblP~rW7sZRrZI=^Nw@_K0FhRpaV0=F@c~fIxlu(0lSNX^4e;m7JNR94Jn8ZLUczQgq-)hPc9%< zBydJ$04zmpi~#hF@6alEjUm#@32l(9!mQm*?O=+pm8`h0?}J~~>}MdE34a1u0fA_w zxPJY5<<~<(YVJ$_F&nx+wzSNJZu!H$=XUG$IiQiF5)*GG3%`g0^JZX=B?Va|n?l?r zlx~ej86X%1=$nA!N)l2xF*P*@?iadW-Atzc2VJj^LS)9dn*H%;x_fplAU)&w0XLr4 zy;Aj6EV`8yaEMx-8XFe#ZHtZwXj9s^=h|`nK)HCo|E&AtOGGLRl(n-fJUBRz?aNl| z44*jt3av*(cehFczj?6x2=F@4(`x{a#)jAcimEhliQCT6=+2w26+I2avL6qRj|oxG%7%e#3yO9;l()mpMp7IXw)VWoBmw2ew=! z6=g=iOO{Gq)2{mHw~rq`qIcl}AOm!HkK9~5{=M)&c1rh=04ARsPz%ZeXY1^*3l-a*>91{D(Gb8~O+7c`StC?FsCB?gCvu6Bv-(*w36EYpUMj`!3@1;Dz3?%LYf zdFbKsy=e{XAF!Z+UIwdeVRI~wl=C2G0bIx3@j%*YX>Y&TtMw8DOUlRE_5XT-%_7|O z-d|xye^;+;JcU(2`9Y5uvd;h^Nx~%l4#ImHxJ((a)owOX8%;upp!^WUzg)B*w0KhJ zS7c;mFO35y1T4;BN*9mYZBgGdue3EF^zl%zemYr7=-zC{08en?qEHYGrEU}D;~sK&ytr#WzIhC_>AT+KG}6SBe+UK*sAK=Ia*RL=bA z7LnP;Vfk0(8-}yc099x#fA>n{FN%cRVu#mqC$o(@oPH&Ma# zk8TyFLf*kgxlg{t>lguCgzaZ$X7;|EvNRz+-nDCwAVu4OS!Z@b=mcbMGP13`Jt|ZQ zNGQ6+CjJ@)T8gL^&{XC?>_=E0giwUd2x%7PF$?%xH4t(i?5rc~C;*Pc5w4{d@dOK_ zL7YT6kXe+tV)mNDFFqSUx!Q7rVUiix>US(MVa6ch=zO!e3iKL+Dc}YyH*Q=43f^sh z%(d8QL20Mj19lM~*fnj&tFJ<;&?&Vf%hxUngWb){%8CtV9~!}#y#rQfycjyYU%83LK+<4 ztWBUA2s+MQLP|GSc~{4(sDb192>>+`eB8jArh}ozvNsbSvB1EgF0g%h?F~IeT8M8p zLl?lWdP}eL67K7?pMwV01=ftIuU?^Q<~E$D#iFNdHzg?_vfZSxz`@6tTpPC5zK;=5xik9g4=O47yp}C7 zd4$kd$WI8RZ!_vT>+|C-Vc_zGcg4>q%m)gShbb>d5>qe~x-5bd_;6q4g?%QMDIfWJXwz6F!v-L`+j=*m6Q}O?iQBaSjl+TYDT!t z4o^s+#@wdUdfn+B-7t76z9q-=be(%{jGu525@w<)6I$Mo2x}ac5M=xEGQfT>t$rIW ze&i{CL!6O@@4}@^C_`^bLGR)1Jvg5*qJcot)Khdwl;rlOPHsHOKTX`3`=CTTjWipxg(Jiw+ZCpOOOahFyuX^44Lp>5Nh&93M!X*pEOL^3%K&r?yM*% z;fkMJzJ!m3W_h_Xs4yW;rYT6m@SoXa^jN$G44>{dQ!i16WsNCYZtfp#u~~N0w|=8Z z-RC!md5swc(v}Jo`P{e7s)4yXSS~1%H|l#yySY%R4~llY?9AtBp+L$yc*r5y^yz&HeiW#|PxT zRG;yslNyD9qpCh$uX3A<)#e2&%_pO}TIaG*8mu>OsgiwpKK5ZTd7&^e@r7dO{4a3$ z5E{%7(C9KCW5lghJXJo^%xil7yp^ecqM3M95|=;NyWwSKJ*PW#>WySJz;pBBL zRFnP6LkwnrvU`e=p8lPD>@A(aCQIzvGuoC;u0x^L)*s(&_ig5H{eBP-weSnp2}jGA zbWgHq&=iv)k$nSpVn~f*0b0mFQ%54*wmaAN_y2S9_F(&(pi;}70!$iO-2fM?V zNBZ;;A3Qb|Dk^5{utpoNCcV5VPjhy;gOK33@#OKg=E7@Z1>vkVfjeC_aN#$7a=XNwK@iAvBhfQ~0H zePH%PP%gsoLd#2eKKtSBOK`FrGz+`pezU=1^t|PJr|vn+a$L@8=qF8e$e$4w}$Bw ztXU(~7p>G;0y z)1Jj$51?$PKq{9yKRe|r#8Ui#5^XOi{zkK-;&XhPF(yffwoY*o; zjD}BJ=A{<(E%AcG6fS^7A0!nvh(l1}ZD1}v1QBIIlxlbP`Dt+sw8_wePcAJ5fgy|v{mPP? zM-nqP3yW9l?OE2Y@&{}$X@e_UqfTA*X8ccp`$H%_l8&hcWS4{H;BwDVy8(zExDUen zAZknHLds2}*IQdZhQ1yyG(wJ@!|&Zr?*Jk$-TGwP9q04uQsS zV4T-=U4L=cT!+MR`sc~+q>(#LO9%G2m-?YeP)ng8jG@-+Dqv@|s&^&^O`5Cqj6^?B zO~ds3W|zQ+d>PIzLuYWy;5)_drZL%LANEJ6(Xp4sraRQ~o~LK+td}Ea94j0Uf8^NP zlVi(9S&%FR}A!3Gw1v`YJ)uCej%agpT%F4`wI>p0*E#g9>QFX}h z!AaK7ouTtW1rHVBhut&+IQ*W^(b9F~+zJh^F>+MS;M?1`q+LdGQGs_trwTs~w@6Sq z;IoZplqG?!FAzcu2e|8pU^R+}R6X(54+^B`9JL?&?hi<)DmgCi73ZcOUg30p|B>5b zS^%QOC5d@b0q~b{bS&vVLZh4KRHjlQA|%G16u}|ZmzPSdD=tcc83XCRL4>>r%IQyN zem^hhzw!S+UVvdHG9ktQg8f&Q)t&X&1+;pcMs3N~QQ)`+Da{2kWd5<$II0974N+L;XxGLWat#fN=dC}!Rcv5DLHqv_c@hwa5CRjJ0L{H$zmqTG z5?@I148u~+#DcTyKpsDolteiTkDaD|ieg-Rsb%@%{5mHqGcyVytdWytBn-mFuw%`i z-4GN!$DJa>W0ThKt5qt{tB`OXVldd>otIug zn-KdsEk$hSA{f$k1~Z=gAQ!q7K)4SFNXy#4D5RvM=#<+rAT<8Jl|fR4xLX{L9}>Bt z6&-SKYQPb0i+vW?yGWh`Ehks^E4%bNnwQ`Quin$UDlSEVeKHfifOoPzcc?&hIkIuG z#lRHEuxvQK3TBc?P-2>a1$)BTj@6qY_Hv{4Vxb{Uo5$T-WO={R-kQ=^#*YIZg|Gz+ zDV`*Gd3ib|W(elJ^(<4n=!6IDC`fYGQCVJI&1ZHz1MiD%Vu>vrC4Y@)3V0tGG-B~K zv#`iFxI#7**!bocv8yL1e?Dmu<0h$aQ5+IjlP$L!LzKHoI8*?HE?rbqRIupvn>Pso z_1!u)F~gX-(zA~eK9K7neE-ReVPNVqNuI%5T`*dI0ee(tRu)3CRDfIOkPoO(kC(XMi~Jft0kr@1Am@U4Ee!m`$lDlDe#p)T zgA~dSjE#fUK3(2di3T`h11TnXr&7SSj>^?5%WsPvO_ex_e3RbCWKBp!^b^{-V4wux zyaP^A?GdN=@#<~l%&uRnSp#xvaU!A~<-CaMNHCoX4$A!mkD;!vJ_WQY^dfl%ZQhy> z@3JbjqwLT~_}--;mbV_sdDe{S}T!#a-&y zgwdcPapE+yw$6X}ZW!wasZWI;p>@1!I&fd6jcV>GmIis zvI-Xsy~uL)g?zr0=ZEi5}K1fX9HK6}b8S&m5 z^1LVT!ukwHsqrmVVc}tc{CG1EPl!t&*le)-U1X2OUs^hpxXHxC1O*6uL5M&Nev{43 z%^Qscte(*^F>hdBrh)zxXJmihyul$n6z9V|?NT{=HO|NIo zt*nHANADwGHOuj8A%wmKMPSoJ01x=U%hc2XU;{%`2N|HIp;hIx`OAx-K(JF-$LAum z0pSg2Z8U4A*2gIAdktmQfpG)A?4h9{V^VT*q;^BPG5QrS2*%)QKw8_4v0Np*j@0mK zx_ck#cv1G!($iJ1@qo_pMRaYj7+4OMa=>ZqP&l=d4s07JM-L$L!|@FqkPE;yj%ulk zyN`*8jC=`)+VpEYDB;AxFEC&M=|irmmqKZ0G_)2T3C|?1tjqX zAPKh4SI1m+G@6g_HaFH^p?ecAUFxN{|HUMTOIn4`SPs*7OBJJ-o*Tr?Vd%>~|2;FP zhaz2C)wKR6D|;IyMU}}lfe$!YLCg$GtJ`O1lO@^|vHoI7UwKm(rc`xZ<`uTap8QTq z0RH2hHg_h1SBNcAQ^LVaHx8oO;4ME6XP-t9$p`?EOO{b6*z`(35Os4K>|@RH_sYA zHcKfwTWFh@NG&ZbZTb86JuLJRko(GBpB|*C_emvj)rfeGQ3DwmSNMNsK%V-J$ z90plybmqH0QK!tG@D9$mE149( z2j>NckWj*gj;)fpIUBqdAOcJyK^Qb36>v+-gBzI3dLKfGf`&#Gj9TolhTHr5Ilu+y zfBvkgzP%@98eA0{OX>gi?KVS3X8})yi;GL!^!n=RBgj?Pf-uOd6w)DA_`?t<!SoUfH+amkYecJxoWNB? z-Qea{It_Kw7~St?49?OiD{s#NCNm0yPJ{durO&+}Lh^LhQ3wYY7x`>+)1h`|GySZg z68vF~_$F|o;FnUJxtZAw>I;EQCbaNQ1%H44yY6xpk2A@d5S|x%GU48zJ9IKlHKe;~ z;Y+4)gV+h*^JLa5_}(YQ^JaE=lat1vE~JB?yUCEHDeAKWd6Eq7*9)ldFf3a@`Ix>) zzp}x#R4WHA2?7bWAM(1U2|<}AZ`RPv{Y5djrau@ZdwY72L2ol;^~2BMoK*56v?X3L zQ;#exvK>#)Gcx{_lV04IjpqmnwnBz?_l=z^@Y$fv$B$d z$Ok>I2^p=*jYSx38?__6tLOu~xItQ9+Y&!i@Y4)vo0o$yv}HmuD)&e|M}hZIF*FPV zMPAJFd{00g9f`1&;0Z#y&57rFt8nZdPVY+tfZ_!%hk}aA1RTC#0tRevB*D? z8d;wF8iN@Y}~+Yr0${3*Hu?pyModOk1u0;kWw z4HK1>#da@LMuyz{X+g~p>C(exnbSYxg~IS6mv@F4aa zii*Ua4XWbcINicWoqo zgN8FG{f3@Y!9m!T1{f=6*Vli`jevOt+YhpIp;zjIw2xqIjHaij2d_jBcrz3G1qQ2B zJlx#6U*Ql6u4M@6c{cxqSzxDjP5&-t`7SgxFY9(q*YZ8E<7PeL+A2-v*%+;Cbe~vG zCu{PCM&J(U%eAJZR~;*p-o1Ob2xlv&K85F;UhimYLn|mKI66CP9O6cUWY`Vy(N9^d zijYB9>F5Ye=2Y(A$AXE7gm=ndF@rH5(s4{`>ICO$fu1b5pogZX&#HYM7gR_rLuO`; zmCplUNBm7BH$UIHpFY1gaE>f1E33P&Z$?Bq@i|yEC@3lGM@FjpkJrIyhG1mCjg#O( zw%gtT9drs%8%Px-eORD6+<)@q8kpAv*A}dnyBLz;*v2g`u3|(`XAq_UbPk8n{4_ZG z-@l$7A%OQ+w3zXaR=JXbLHTWQG2gh`46%-$9&)6l!sk?Yf{ZQ;5(8|iPZb3{;8QoY zw7dk5V|WAyz&L-J@TI4xe>h1(3@`AG7I3v81m4r3(Xp|#qM~r{S2+&Im_kOm4DZSS z-4bx^3=|y1+k|`wI|`0Dlv}6#LQWQgHsY_r*#s;HVlvbR8(vU&)r%D>kgJVtY>1SV zmFY!X9v$ic#sSF&VF-xm1uuh5;}us}IP56IpbE_(6t@lrM(<#${@&-e9P&mYh8deGhdzTVe$ zUgvq7$8nrXWR+1BqR|$8_bwBpz=TGF>q{B^y?x(`)1fX5f?|X^Yjb@F410Q52BB#P2%cOFDMw(y{J!=nX^CCI2H4ML^A`Pq(_XeP}G7aA8@oo;9FX0b`E< zA-*4 z_rTydlPrZ0^-!)KhH@IzN3!{PA6YxTtEr)eZXghHDP*Tkz`d|xSWv~PAbgu1id21D zdMiHOxp=lJnqNVSRmPs_CU!R?6O;Qlc|pNd($dm|;p(t4DLo~{=R@BG=y}+-Ze5dY z5P~eCrlDaK$ibu^f_%(+`B@vTKBCg1r>BSiU%?TYM`C7W6v_pAd%UPfB=UO?h>H$2 zKi`4iN?;o7bK*oLEF$8oA&8#t0darsmZ*VKMn)MyT+!b{1Osu z4Gj(8#$bQQ;K0O*UwqN;@xF2gBAxKAU14_{_IH{J-q_7RdqBy$t@p|?lJ&;NZ?HB^ zj*p9Dr$G8{H@yO4{U%9E+RIbD_YiUnE`DlljlOjG^8Q1Kr4JtXArDyw7?wEuLkgEV zqTH*R8sE9W!A(m%o4B~xD2T=PvD`7(1XL+VGGO!|gw&Nf+>805Fx@Mu5 zR92Rhl$63bqgFr$y^e$9eNJH5!3z6);MV=cEFXgg#5RQ5VxM z-fih#YA`BB1`79~t2ghm?(1y67u0xDc{Z1 z*5kj{BY6@L5(>n&0G@e!O8AB86H7W$HUl{6(@jXv$_lEfsmVf}0EWrQlj?%8w%R=pCuT5&>a|)VU=FZ#Kix^_-&7a>ZX3A zWu!=iHp5d*H*~S}MMZ_z-Gg$EN|UZ^;k|XtZb4vvAPIFNh4$$kJ=0(4RL0;nqOL$m z(2lUI3vkvJeP8_a^PfMR&q~rLfcInfq5iuqqMR~Zs06L72CvLo3e69ALG44WAmh+G zBzyF(p|P=gxyPK~@#CM2WEm70ApCw+U+)isk%{N;%$9a-r9%I~!G!x>2{{OcEE(D3 zpW2G)eZ?o?sz9646Ta8P6c{_Mv*^rB>*w=pqGDr}XjUrB0B+y+*zIRATp-ViO_o7u8>?3zqJCE&nu=!~VY6WC$o@%z7 zh;6nY6$5is_5Qmo)92JSiJK2-;ZeZIH)CUKUAG)|`_sb4&(Dtw+<+&aS2>a=CN(cl zGaw*xScBqHX}W$HKS=BIyw&et3EN$mmr6Dk7$SVp>dMIZ+DoEOTD^Yr#uoiG z0ghlBq=pyl|LV8(U7YIg-`3bQ@%aj~CKkTJp-+yw#e9ahXu!td&NFk=>JJ`2*2c45 z0(w$0DAfi25?PHi=H?2>NQY!2RI|2jjNBw0f{*<{K1{}`P|#v1XCk96eOy@&@s`wIKw z!P`S_)8>R64m#eDY`iL4Xg&0f4Tz{DH6yotPZK>kz);|FhV_ z-b1nVa&h)sE2u$QOP}04E6WZxL*9Wuj(pC#Z%b)&#^)g=xx~Vr&WJ7h74ZhhI-&LL zTVXX1&L9CAC^pmm)rlAEyW`sdaDaNEF2K=>|JiiVsB)7EJ}9?jJ>@kYRJjn z^4$2}U2Cg3=b#}>($Lf_{pM3Uz@vVBNaa*w;k#SNdfrFzDlt%ICwuO~_-$K|5+ee~ z7@CUP!H=a@K$!v-IqMJZV}osNZ93>|0M|G)6VU^d135+-I5O;%ahUAAI#Y1&^n-}V zNFhWE04r?}>;Igcwg2#K+VloCvrM0-l97>7_vI=JC+}Om>CsX#vCkH+Ae~`kU`WAb z5{oMr%&JxdJO!`^^Wu3$MZ<~ZF}@F| zM@6OVdYjX|Eq#)cA%hps3(8&0jTXZGx8Cb*QdHV!YbuPa#Rk_2DZs8?y-Mrz zePRNl!V_TO%j;gauq*E7&8+lfsEBoOz2+rJT<^BGhm*_KudN{c+kk82d!qGUWVL}E z0XZdrt?Z39Yu?k#PVW5LUF!0@vNh4@7`IQIYS$*PK)zL zQP9<84`M*_4dRgs>RnVraL5Tj2aS#W*RNkuh9`o*Mnsf&f9C>6^u7~)DZ@K>BS=Cy zhBw!b@)j)*JQ&SL%{W0_@H^n671G z$L0%#VDOWyFP=0=4C?5sEpU?1?e6l9JNxJ9nzzzvtMycQ1Kv7;qwH zu`FHe**4z6zK#0IByGp|tqrB9(O_*EiF|(mP7Y)DNB+{cx3}+e?5B{-9_bmhZp7*q z4M`u9C5t571r;a-$r__wF)ADKTCP5`BfLjlk)uzSb{HrxAxWTrWFT*`Lx?`V@Zr+) z>r_!UX^l^0%b$@&Bl!kQEs0zXjNHKM*QxrVxuiM%9B?f?$to&Z-OM9>o*}@Tb!3`b z39>oUu%`tD2Hrn!(EnZXP@$6uymsv_5q3=f_)!h+3j4;5uNXZx1I;p9>tIgp^MosO z*tp4+xmt419wnglcaL|wxcr@)OW3qcgt4Q{@Y!2K3VQ-Q;6BQ6*?mJh302*8> zjQ)yNYLgm+z%^0t;zwtGQliJa{|ZuX5T$(^J~pWaLNdIZYPFKl}1+V@(Q*q;bFA-si-(viMCL>L8!> zzpp4x*WU~8?h>@K`!=HertPELf*0R5VFvXhw(cc+1!QG6gA7;);-%Bip(7wLM@*?} z>}2&XZc4t~?Nb{iQ=6vGyYHvZ zrb%>yxDH$r20TJeBN!(?xPxuaVuce9j(RZ_v-z?Kr}WE)^y6<1r|R^h?VWSzM{9ai zcy=D0+&rbR(Z*{Z3n%z|gpiH=hKN9MX0cY&3e)vwv7_Cw(P8Ga#S6=46KcVRr8q*e z*`9R!)~ER;*-KeV`p1~yt#&aagS!)sUZe{`Owsni!tvMaEcp~=@+oSQ1Ao2TKQ}HP zLC7MFXJiBIzl7MS{`Lf}>g>GyhDYUo*#v{HuTNuR*~{icsxsG6f#RsV)0I4cB^pq0 z5{CE{7d=^_`JC)m{I09Vou3Z$?gJvyt~%+wKP8Rk_FWn|xvFF0?cr^i8reH1KYr}? zUoYXWcpb$n|E^ukD5~mYr(V>$3aC({hJPuUz3=Xi*xEKp5<6m0{zN$YTGCG0z;D)=R-dFO36?sax}4yfHA*~R-0tHHHv zRrTqQN{aU&w!v6#7myYZ8|oQlKltxgj)n}oFCX$cm`zbn^3-$i7g-{~vbK|Mte)nbPOapGGgafX!`=@aKlE6PXOKx5FYqd0KuuqrdO^xxH^| zYiZDw6W+1IV$@Ae6`PqDxuFmvST8h0crlxl`Bf(`Ja~TvpLZiD>h|qM*L8=pA3m%D zt&y-S2=V-3{vMIF|4wKVUPv73RBY7tJ<>BPOP7Uq+m`tHQWM|{&^FA;eSywm^;i&y zPnnv&nc%IsAn$q#@Nh|4Sx#oj-%FUxVfZ3n9VY3|@wvUtk0(E&*|r~UY7 z`MEaQRMM+S)gvus=j5bc_z=--C*ta=pmfWS2kq+Gz%_wDz0q4?K|h8(f5duM)+ZD6 z3FqK@CM+)Qu%Ke->bgyI!rP<5E(pyAZa@zNO5x+@x3Fovc-6{;{6ssa>wc4ceD;Gt z0s$LZpM>Q9EQ`Ba8~0f1;jWF~tx@5H)xU6`$wy*4Ha3Q@YRgZy#~p^t;z#{5w~W1| zP=Fs`V|@BKaM6y5Yol%53t9FrgqXm#Zk=D;&f^2f$A+(bp?sKR@S-Y32>%u;JL5?R ziFh+6AA{6se|=xw{USR_XG`>(VdD}XkW#rqJiNI9t-0gfyQ< zdwgI4Wz4zsPw@6=^cHfYr7jrvywacsq{j?L@PFd%pw!L+FMDHMBq`y~BmZ;|=+4-V4cF#cJd>l>?SbqHae5jN`HcxoLd z=OB8}|pJXb|_Bmcb z`Yt%lBrgDbv-#jNujQ+66Qr2{0V%t=l{9FJN+ziNcX2)fWITw*u-iy{lVVcL2?*mw zL`1gTdLs$K0|J$A5YQ}Qm;_)OeAlF&#JXw4Y5B{EoUYH3WgmmR$O4!8OrFN~MRv#A zH?ypv&(N^7D6l>TS4kTi;q&Lu>)`Q2LKwq%4sFuU&^U}gL~qX@9W$4N>lR7=>;HWY zA#ec&xIXr3bGZP=k(m$=_x+>d9q3kA-0*%g?2IQGKvw|%;8mD}(O~P^LOpqW#G`#xk-4}!<|33lsFGFf|I{gr_!I0un934 z-gB#lx6Hni&_ucV=#|Ul`YZ(rUni#G`3OF@zjNMy=jwhjvr9WP^>mg^Hb2}Yoo;7? zqEfqDOGjV-2BsUBqsG?}?l5X*_BM7JW$W@v`AQxFyG!PFlCJQ&cRMGIj4pdS>vW@+ z?}r_KaK@ay^kf8)*uwng{8X|=uQx_`bG)WLkJjN;eoIMn+w9*1D+uUB z`W3EKjAkd2{@b^2o5P`z*n^`})Yd6qo8@aSU&XsuG_7p0eGm2`rNw+J=__NY@28|t z)~{cmm6b)IkZbVaLuLxLCCR3>4;@q-Q2x8e0Is~OBr`!6=!(|jXk27`K7mR$#>5U^eq43EJ@BZM!4{nped z#);`1pbT zY-n_Jr#DZ%r6k%YB*wr$j{>O5_UkPli#9B@xcFq}invkt-^{{7(zgLWiw_`cR8*7? zm}v(+=h-MJnVFfpO0bxq3HjW)vk^I4T$+QGl@%hYg0}V=g`SvKi_6xik0Q53nVmct z27G5YsB)zlf@eHSKhphqka?ox;?hrc+MGR02Xr0x5E5$|WH3>$&b$uILctEsO$`{T zpDjLynFH&xD~K3l*zv&8ho(>0&1I_hkg}TEASx-sw80ll(GfPcwOvit6U=zfTFCSO zn6%NYT^qY$RtRqe26unv(!9On5oDk#=7)O&?k@GCM-`DWpl*41^6ABM=T?E_0iD`f zXpVdzJ-QfLu$%doa$85kYAIMbfELcH$%ayii!1~^|A&CPxAgHM$F z=roWPpoS&Q0GQf`qs_K&-#$3qGZBHXyw{7A;t9ic{3b^@(h!?gINQ|$6~pUZ^PklS zvAy?HJHz#b(-qBXgd`qtKEB8@&fNC%qlFji8sfeNl$cDbAk`Y;;HEvh9d_`M)qsL1 z5XjDY#rjl<4F!Ucyt8Za;Eki8N?EPR*(NtX7*O+|Sh zDAS<85xp4Fb>c@$XjhU@_E!u_PK5P4SiT>+JZXOfUD;<2eXPJYhGD|cdiAH1M&ut5jVIV%-}Y!zc(?uVgL)NdJqdG z;p9ntUVuB+;iUBUw|L#UvNtd?at$$rhLD_G9{}u(%Px@)%zQ*cp#_Px8m#*j*?hZ7 z-np)eLsYOr6e9$7^mK@c2G}wkpFjWL)=YCpc7|7shTqnFD6Nn=^#d3hk==ls9^3tA z;`1%eNS%V!fqKUFl)BI(O?rtn_rbs7L4I}^ww3b{?dK4<&>HJUM};KpAYla|5SQb@bN6kmn z7xF$VaZ!)m%Zk3xRw5e(FOPTc-VM<656S91pZ;j-zzcyqdS+Z`!vG9IeN@PfXScuJ z61oHm(KPxmRxqIHcxJ!3tKeDO>{u%eB9z`m+bA$?Tm>>%*gTyanD||JWnD&?;l?%% z`Ybbr3=54}S2^$s*HB?nsow_z_2q7^Q z#9;Pua$9>Mig!RI&c@Du9VJd;nsSZBg$rS5%oFEmQa|@Oak0D%d4B77&z-wlILDnnUvuKm9;_C2V4d>kWCXR<8#k3r2LzG8elQa4W?oe}Zd@CrXDc zf_4?K8gA+YZn2Kwo!F9T1qCrry>36#{I6K8XH#0W@oJh(pKj7Lj7X1_a=P1rd$N{X z*Ir3%;sA6!IANN&D)ZW(rM|zaa4N?+YO&CZzay^g1n+Fw?GRVAwNIg2mHG( z#wgr*BpX16hkXQ(Ej3_pPcThuc;zb**m+Eq2ZF;}Q&&fem!bkX0P2zCg^rmEUC&rB zxJyDxst$~ZY4p%?hF#9T#lrpo(#0nvL~pjQLaeUvuz(|l;CkT9=|gy`08+u1hs#Jq z;OoqWCYBC5a_sCq71!<}Lzf;^WBo_<2nxd4YyABA4kU)@o(IwV0O9jhZ*LM1(cmER z7vy?c*1o$p92+EQ>2{8L$<4cWk5Le7tPR{ZBq%B$Q_NLJ6VM%fAa`&;LN6pR6t4=Jzz>7lh@K{pykP3iXL=z%HbPmQ|J6iOrE{n!Ug{!PX|gZ1t0ikxf^_i5MW|1A6F3l zDHL?}&O1C`p&R&N`s>^10~Qt*DheriP`60H5{!(a1Azy}vdd-b+8C@YbQ?9GAWjK~ zt+K4YXT{5Kcy|madp;nxWiH)9OrlU=hL`pG**CSZ$YL}3%U?i&O}UtznU=;+RK6s> zi`^nkb@xdt&F#0|m@Z}?JdOj3aF?lXqp`VR?!n&~pKnY#6!yT08wwsrj5+<#-f-mz zt-pEa&Z{F!04@bVMFg>L-NubahKGlVRmJ;HpM)rvki`oY8yXp@QW~3@v=$t2OB6hN zmH|H0@JrHiBrMaMM~?*g$iNcP5@lyMJbpYD_#g_SvC>Fse|*cYQiW8TOX^b5|MxFyLND?CI?Vdk;$cs``2|BZ5q65j$DpgMljlSSc=}tm_!- zUAjO2;$)d!+_xO(e#BRK0ymkc5_xA6HQf3=GWSV28AZ z#391)M;JXg_atL_ZjL~ixD-0V9&3F7Wr`DtKl@PqDb;ypWr4Th+AGHdILLSzVuE^{ zo8mD?y`@Opm%-x+=Z7lYm2)PjEl~hRgQSh;|G^#T{uU+ZKeGbtZ8!8n79whGwgKSD zBMwf{pSZg=DkpGx08BnYM;jDsu0MVN0le&*#U@M#DHb9?)6 z!bQQ`C)ez!)83-DnZoQ1y}abejf!{LPp$_DJDa~c1!^qSc*SsS2UiAM_W?LFDPeFP zxOv32qr$4_2n6An1h@uyMF2jO)G?&+=Q8TPvkE{ldf9+F!vFm7bOVxtgKp4uc=LoA zY;YhWn(nlFOoA|}#4y|Jwd<}77Mx7mHo$4XP67##0EI?=aWPTPqlX7VWi)0Yu>+mJ zX62KY-%7l8(8{_gq$U|w_GF+J@5n*qiE$VJKukVhN3f}bAZ!&Ho}Td0zYzID@Ti7H z9a3rHb%zGi7trAA(Apv1aMRr3wu8n_4uMOhxv#-~AzU-y7&oB#9>n$NkDe~ZUcFfu zIgtSV3?pHeL+DMKZ@@Dwz^_AoU4xJ2jaHwJXe^{bVxbYVR4Q`|@B{c%j>zN5mmpOJ z%r^Z(H#AJ$S@5;}kP@t5Y9?5lY)wnCCMV=1NqUNM>)ZD&`pAvKGwZ_Il~?60$vM&h{3=yx3dd{bIbrb z+Jv5owS0mro`B$@4rkn|12{B72{MSCj92x?2OAw(05!mT!UO`nnpnahwvq`)gWBsr zmB-!q5?Lsz3h?%Upi;x+j}YzAxauDtf9=+?0)ipE>;1lNItpjNf3njm>&<}GwP&N6J znz5;g6@+p0fxdX&!TAtHgPJtCkhvgjrN@=7j;WT|wFHdW5K1iJ$hRduDm1;Q5v=#N z*D&zxoMawFOH2a3&%icJ7AjEDqJhhc8KP zfzS+Sr>e1$L7^x7_^kKi?Ciscx;HA}!mpHcCphBHouD{%9UUDo#kUYLApxGoPeon0 zaG?d?g)l}*zXpPBY{ykJG&epj>Pd!m<`3|2vy?oaxp%+kNl_7jm617r>R_U!BE zOUtIXMz95bAOfU9tRgHbs)8B?g_43YI4mk)no<;w9b>|sK_R}9ipP(0f=twqxv(&e z!5IagPOE_cV}9{s1Uds`1xW|7pujTs#O=NQ%TgZhKkoUsE8}8=m}s%i{}nN5{#Ojg z#89LC*1q}KQ<`;*o-&Tx*w#JBC}ie8d0`;RO0lomOdnS@aj|@Nmk+D2uKjpvsqC_X zecsht$uP>3fhI{!1lP3(-KHrcBO@Rq=g99DVa%%BDJ9Ah6lUPCFMu7u>iY<7%keq{5kpCuO+iiK7N@V+EuWR5QfGtxQ!vgt{tY`aAxDXAb^awFch z&)CUYig5E1WSjvly3OZ*St=}o@JwG@B7mKMdf_eKqdhKNGL!T+&}7NKyi-RfCCQPQ zU*TcN)8%eyMXW5k-iF=-sY;jiB_q*3Vmw$|sQcgF)*$T0$p}fGII^0JlV0jdOYzB5 zt`f8TPC z?{S<9NcTXVHP`70XM|0G(rHVaoQ`5xK!MH(iXrZI&>y-uR$Wr%E7v)Odp zUQw4n`y{MX-_S^gtwLdJbwPeEK39Cats`}Fj*c*8Z&7XGT14KkiP2u?&c8je<%=h} z=JwLqw3`ggA)I~PhQC!^u2fP2PiZ1d;Z%Ubokkxmzk5{7M)bs3k|26C_XrZipZBylL`w96B!*;w69$B0@wxFi{_iyE~3{#tUz#e4$3lqI7)11Krzd$ zUc!Zp-UR~KVdbps>}ufcPG4&1lJs_mBeMgFd3Q>7h)!F6SvnA=hMlJ{Nik>-f@w?%UCN@Qe|vOD;mpORh|hMlK+uWyj>v3BE-Z%s+uHdM z9u0t>1@5^5_zq->C@hL_`A|_lJ~9hMb0VjE@(mw*gX_4Tgsbbm0|#obL<`oi$UGt0 zVh;GUhB&^0iVPn!;;)74ya9I~YM}OhUr!Ib#jfUi$H9~#eNIvbfckI}B5{BRASMw8 z1qzTY+}s$LXanL$iud1@%lu9*E-2!J=dQ}EOkFyL9fSLmLP4pGj^JSk7Ez`mNQWMo zo8re%D8fkrr15aekFL&60yhznH?m+N)!o^3&M&i|fP+}E5aIN+%de z6Vo2>K9vBS5jcyCS0eYh?3G+g(-miAO>Q&}RY;iC*lCf0fTKQ$G?Un(p#jFr5!{c7 z*r@46m@c3SNli!yP6g4bM&E#rsG9~D9AwF_p4sp);thIr%3 zoI!^|l91fdw1dbrjG}{YvO`!n2r!`n{@hdT!nW<({UHZY1~z*DLaViuairc5zgk~{ zm1V_Xku2WEIvyS^Ag8oG00-c{Zg?{(srSL?aYMsaJYFL-hbT&*ON#+B9|j;!iHWBl zmh4F^_5Lddtx7?0u@GfweEcYs%lA?KRHRAYf$Rfybt_m47(fz2X@M+-@R$GP&oh+RwMDc~-bSI-qZe5eur%Ktio$OP=#zJk;y0HkQQ zv~)Tujniq2iyzQ~A;L*mhz67IXl&eqj7bZueyGd#0D0Jf-wNOHV-%QTpMw5T7}s~+ zGJYx~B0>-%CL-2EVgbhB1LAmJv{ep=IZG2$(KM6g?x6{QE58-6Y4-j5zEj4Duu4lo z^%q2l#dURls2er0e4a1g8_%X)!s$n0p1E<)Ih{+FF6I8ZhDJXl3k%q3#jDcXHUD}Q z3^WxQgBm4zq5#x@!p7R#8_1Q_Q*}V!Geop*0Xwd-qfg9%_y2GKI&Q=M@hB*`Hm!w+ zJ!0bGRG@#viP^lao$zbpzqWF21dKLp5G80?-}5GPY~E?I(_s*VMI|gUI@NpN+>_j? zsq1|bN9SF#d@>%1Wvg5wdMDMc0*9}99;j<<48qI!ioOfhEaiU`Sa4MSQDFDo;Tgc) zNnu}lr*L-7R=EeLY=}?_qh5%brZ1ZI3nZ3p7^+1`Zvd2ka;r&TTm`7Ju+6>KVAl9> z>$`8?zU5BTT(m=69vl$k-?&|u!+K(d7vfD!N<9p={V$}GKe*K#b8>QsymT9AnDDPl z0nfRD|Hu8DoDeKiBXkgW#h3q6^bwqE%I2M(uI^z}ldremLFvj5Iv+A%JjJ@5lUq1l z*=)p%?)5PPjy{IpAq6BKnk1e+Ct6YH-oO*H{WaF=Yx<@+9h>$)elL-g;$9Kv`KBs*% zV_8N59MQmupaDCosW~jWDHDNuvuE#Yb!X=$f&~D!`+=>_2P0b4S|~(3)rJhvaQQMd z6$yze>;rgVRfd8yfZUbkUMy|L&+R$qSV-f2sQZZ=r0QU|NtKp`G zps;@YcnmP{WQq>N^G5_@TmM1df(((57@+0Rd&E$Np>??jwno#U&-9VeG^`TWZxHu7f>{tIzIO}BO?G8hib&#Qu>sKmdqtTk3hTb#|Zp%i3C{($pjk!8BYBMZaqS!#og3 z&uuGnu^%r$OP#1V2}aqe?e4xGLbceOTRv!>OFhB_d;8A9V2HBmisj0@Ue+y|9sHKb zhU#?xSMBzx(=Ogx3Gb)SE`;({%YDx&oJYX9sJ-iS`+^1sT;#r@=ac z#@Y&Nn`kW|AwleE32K2B9Y!_K5gP~Sj=xMygk!)FaMs*5L0P3ZTZ1!a1h7?z_fJu= z+5Zo#Px9#0!v$gd^c)M~3C=ljTfk1U!W5v_E=HJ#7DUW(A2D;1G9lqGbacgAp_iV%-l7X<&KzwcImqNq1!&2GSzse39k= z`6H+V5mWK-uta9RfQSy+QvkYxwZyT?^9LPfSg}yB?W%~CI{8t&TEx;CAQb>NdTq!! zfK@(1O~w;&6QPhk@%V~V@+%C^Lnnkba~}>Y!!HJ4cLA?~LN^nrXO!J(P0IllIA6IU2<&V4{1%+R-IyZzTOM>c zjJ*VcJ&wdci!uiM<-Eg^6I!-JJ_ON|WAW?3C$<}LT$~*pxAEN{A3wiaN=g72$ME3n zhuz5JA(d2-UzsOxGpquP9`UaMmHNShzZ0WF6BC*nHf&&t|0&n$0B3uKs<+@=oh}N}O&06oeip{gvqfG&7~fVaAq*Oew3p2jW{c%29Q7n`tj=>(LFIbL4ua z0PZom5xIKJ8X-y{n6-R-!}}j0Pyo;!19|E7-}&aN8uxJ95K20tZhP-u-JXzJXg+S* z=a}--?iZ*xjz1OKIU+gdkT)fOYNDciIr#=y+PGkLY<(;~8R?}Be}mN&45o^OY3pi* zG>6EG&T}@E=o4H_ zU~UNAkO%gV0cfze*^Lq9%k3$c!zv5pjF9{AhonBi!K=ed(uG^Zk>qhlRGTHwpVxvO zi-6^a#MC8O|L-!AMHSq%=<{`rNpJC2Y#p2F>F%Zw>tU8&Ec!J{6^K5Di(5+cKpaBj zh*+ECKYdz@-VSC|1R*=c{WumJi6Z;|v~^Mo@Lmr7sC~o=OM8kBZoHiG2|%BFJFlQ) zLm~763h6d7LEu6BFtID>K*d2PaHupgTYve}N8F4P_Mi6b-K&a7Mc{3SNi0y5%)?LH z0uVZkNWY+UzKySG@&Bz7WZdLqJ@WAH3UD)mvEyE1SiRbIS`sK?2PP$^HIpk1G0h32 zh*eXO!NJ$z_W@aO_&IO6@u;{seuEs1IE*}PVDtfXciuj}8wHoN^roLEh z2LWIqzQZC`?iFTn?$_=!uEZ^a0wpfc5_&|~F|5ryRbJL!!4rA4bD2Kpf}G&|fEZLL zNLc>@;!dcRP54Q1K z`<>(1y7lQnLHA=ighSM$SxuX!)_8rlyM+N{<^SArir*&E9h8Mx-&@)Qd2hM!E&F3I zE7@lHzRlcJpHRBq5zvdv3?<3g{<6u?J%8w;&(MnOa?_EelY~h}#5z+FIroraBe`VP zxZIdWVP4q5jg#qQF?yEPjm;*P2isc3t)ejuvo1y;42 zXwl-{|K~r$sv-`3s{(!)6USM+JQG{u!r#Ap`!)-Nfim5))YWpOn>TK3#j-50rZv2I z^Ck_jeheHG?z-lspfMSxAvkFNPU=&V);ZkbQceTPr(!lHzwZ4GYn1u7cg;SRxyv!& zGd6D~<~j4MH+b&@5gEsj?EU-pEFhW7jAO$Zy>-Yf1g-z>@O=_fb?|1e?0iNx(ya*9+jqAP_aPdsX5hkx6 zmPVl%m9oHkL_=lp84id6mu$@r}JpRh&20?tCPgRM-`Y9 z79k66N%zf0j<4So9spXKCIaLcD=Q`R<*n~loJvD!4q~bnn1%Sapl+NH@0yq$hJ^(F z9m#C)jywa6czv}e;l*_ywYc*qsz6x-yBK7o`dAIJVz?CPTy%qvoiF;A)S$Q`RWpju zFX?6XZ`%}^n3P|YhT(z^YB^z)$?z>u2iW!4IrU*q2o!Vvim5vvjEs(|VertZ9S<7l z;=A{2Ox7D{9MyYYDGlpi5&}Nl zf{$HbJvI$AOc}a<0eBsOj??_-)L2@b{n>-=KZnw{NBLjbVsRGR=kA?5t&ls`n9*+$ zoD=%xr;w6%>1OP2|D(C>dT`pGw6cfOwk^h7q7^Dyp_D6CWTJ^Ul7i?7v}ik8ItiG6 znj)UH`W2C6VhPbMQbl9!+Ju$9o}PNPK@4WD?a9?eJ^X8f_UH3FxTx@xc}BVJuk(kR zES#G&E>K^OXqD9JEqa5#1*W+WScSkJiKY|44OpbsGjBP%Ds^ zldhL7JD*m@?&*wVIumX%cObvim^^V)Wm}8W0Yq8L`$oB6p}3U7Y$xnu%q)f47MNxl z<818&;Cfel{**tv*F;>3@O80>A{k5uPo`1_q>sKnYHnaLL9=cn}d_lp$G=G6Vk zh^9R>L*9o6V9lj4h-uyCP70iFPi5={!f z*1VS+Lf{Ob3lmau=}&!d8or~q<8J2;S?9>mZ0^)pw$)1l%y^rGX$7=-B!ecHw&N*1 z*CCTBO>17JXZb2zH7)ele)W1?)&;W`86#ougXaG~&{o&LfR>VF_IxiS5MRc|7@(H# z=;}%XLI7Ed5`HZe1u74aQGhQf{}VByqK2W48tfvFLIjdAZ=<7ws1|i{+bI-TS=mDS zt_}D(B`}RzDz14*UFe7^p7QU2zm7Sk7R|$kQW670cb%@-0+vSNui2qjYf06M@~#22 zHbCY6WP$);h=H79ZfzZiElcJQx=MIx@{P*+$b9)DkdZHua`qU=eM?6)0>I$+MI)e*Rby#s9+!u{qyv?(g;gYY`G|);wz{RaJ~^;9CpU?Kh|W zg!)p0`qI=C?j4A`I*ygz7W;}qK?d^@Oyk*&C&G)sX&}?GAfPRA{6dXlfhO2b^Dv`- zf2?dRMVPx(cAvp`Ht@9%+qseRgD^I}r(jcNem)y`!$jOSjnl;sp((E8FS5Hv_J8*k z;b)E0nA{Y;3J1LLXH0zrBvJq7Rww?Dhs z6M}*Vjwv(T8}tw3!n6EMt1Yo+#&PT10&BV|w!Mc%#j>ql_Bu?dk}y}UDgMv@MM>-y zKjHxzK|`_v88OJc>JK`L=Ze-&4}#f>29x{nGF}+?2eo*^%v^C2hunYCk`58-xeSdv zzNod06f}oA4!F{o``0o9wDKrH9SpxXKuK-qXPGM1&eqo>q4GgrZD!pL3{K%A12IX^ zK;Z`or9AfcRYX&t&H?V#zxIgy!ZMDf=r86%9tjyQg@&fB+dMf)SRZJ4C*a;{qla z6qo7{hfqUjdG4}B&c^EIQWvpZ%ww%LVlzG`K2NH930-CSc7_}tbng|mX^kWZFyzqCr#>$C$x za|OPq31=@N;#}}Ob}whhh?~YsnUag7TPhCe)Q9ig6rRz^E%gI;1sL}6>;G7us>(RK z?-y6(6FO4~I(_ps9u>9OKXSSn$smWam=_?AE3S#SVzOsIV7~8?q|n^D2j)B@yfhn; zQl~5mxE7g^Bb%z7EQh%_L;`y3`&%Y-0hG?)OzPRexl&xJn#KBIH&GCP(@3nk!RRwb z3GNwK$8fX%X~gZ==d044o(_pITW&3#g0=yWp5v7(4d{xKAx&5}T8-(-ua>>3CVQQ4 z9qvx6A6JBF@$S;mkr7KsoVX5Ml|t%Q&mpTX=DSrd#C@nKAp*4MSeghib?! z04tm9f`x@2+#LKIzFQj{le+gm9Bqnup445z;^qNmzG3zq5Cd=Dz58v4@Wr_Ge*nl{ z2PX>%+zCr-5#{LypYC@(v_pjYe%ke+GYaX{1qDa#UVO@9GDzVmHIDIs zdI&u)!eX+h_N6110?PaE)lDCpKB4yZ!|scYH?UU~kw~)Nn*9bleNMMOB~roG*0*|l z^SYi!+I?76;#u1LE`+yqE2x=B-m_4xVYzM~tD44l*Yn-V8nnMuM%79<)o-=NJgdq{ zOG_X9Obo<+-+J{_Z)l|C?wsjM2~n{GpTK2v0|m}~WKrlFTLs_C=Fk;zeex*YLd5p> zg?>%?!Yl)YO0V_jjj~=HLdi=ARR}M*#aPLJMW7fXvb$7=5Y3P++CXliC^L`0Cqpz6v zS?4KBK_UsOFV!{G<>Ugwsbc5g@Wr${d6&~Csvm)JfN8C}EB~noU*Gnx*Swqq+ls1;W_V!Xq0+-yV$Bd8mL2O9K8<5)n1*f7e2S1H#rt_d+~+{ z#~Ifxq0|yZBD81}Odu+{eB=`pdjR$UTLq#I4Bn;V$B#xbo5Xi+hcFf^PnB|M!Q#q^ zU~>v~69Os~=@G#}Vrh@4C<}QS=i&CJYVrSv3&04aK@gfPJyUm3Gr@#90M6T~n8rnf zFXRz6&Y}S`2qAHxG9T5lcl4e+8msuI*;Ao+A_^;51rf3Ft2ZCN?O_g7j_$5MAUa61 zwT-Iy?m0g(aB1+Bhy;(2R#2rKPHQEjR8U4L!w*Ojy-m2EZ2?QfFlp^Ab%EyZilS2y z26;xI=w0$7DiN_Dh(s`ZBwQQF>jChSToo%$B9 z_6r08o4$%eeP1M{!-V%rW*dlHY2#@&5Lpi$R0P0LEBJha zNKAy(Wb7D#*M|*4S5kgiK7xXexS@ljU=Au!iRZ#@K;Ny9f8c9RbMxun2;wWH>A$#N zAa``dMA;8LMAFuBn<}5{|Mo2u*a+TLH8?|}KRoDAs^ihVz!sS_On%PHz!Qm+PzE8L z^$dL3M!~b56>4*g?>=#8?syscMHs5FwZmW#N}E)vh-8catfE*jChL<;TQuyuByO+1uOOVk1Qa5>O$4?mW+` z*#=5bC=eNQwD3DhU4)3EK1$c0s3tU@3^k=18v9%AYjEXS?~II&Bz|a?Rl!Fz2(0~7 z02H2qhW2o+(YSYDJ>-wdP#Q|WcgqvL6ZiitpYsdf?#ddR_ooVbrxn=B&6_v3Ve&8* zi07{UXb>pXq=3P?ky#R?{{;xF&ed2$nhzk$m8!!aQ8z~$oMaDCj_t-43dP7sa}4W+ zYF!~#P;;I&Ta^$2@291anRIyNgMhY?s2Qxq8URqDrkJ@Om)xN$sbTTjmCK2Y1tib|f%x~w$0`?*5UNg!T}bn^A+BSi+`d8=QUG zwq27mizp@|HDNjH>rk1uLLc7M(b13UvJSjv(B2wd_k}g$;&qV|{gB!GH^TA5p47;0 z`Nc7Q6nQ{8xQ7o2J}N+-h|y<#_t9BFD{trA41~?oKv=9GAVQ)*Z~en?6}tsuFKEnT z?YW$w`$Pa}F)B4fyeFn3C_Nt^V(_!*4WiXbYmI`xH_Awqn-X+8#u&}M$cEj5fxQsq zR%#JjB-OpamL>S>`sKKtyLJgs+8!KP8^M>G2v3Gsz$Z9TpeEjjo}d-pGqWF89vq2n z?CH6On{M`_p#D>a3P&!T1JX||YFDAm0Fa*(j&iXUgng7I(#FYX4%CC@r`vjZf3UJI z?opyq-J)XTgukF;|9ZA?WjA`NWHvHT8~>D)ZCZ=wIWpDSZz$=n%))Zd`i%p-Ae9T! zEoP*udEgUjiOT_#BBPxN^0fckjqF}p@3%ZU2d2at+IK6?;$NqsZAt1->uX?~^`o_* z0A?62yOdU&U(cw4BnlD11Mm@IR|MI1p-n3_m=@`eysm;*L1G+yQ);0^z*|tkL4@S; zltL-l8o=nCI}VMNvFV5Zklkq@hZa}{ z<|IyYs}_+7isR6fSx@8|h>Sz#tO=k7NHRM+nPvdqp2)#bE#>C7rK3YbI%td804gq9 zf@>b4uqsTawGh*ll`*h^zu?J})>(NFN@%bzp0DXcfO3`>ddVnu42#13gw4ac)JFXf5|} zz0gxO8$P!>_-MK!kW5%x&a?Wn^PO-X=)7LXHc!QgBqBr9HZ0+)GYwZrkpqK2Dhk1b5f0KyO1L=1PFA6aC2S|6 zf4wd1Dh7a+7>4yc0a4taNG8z*MA=DR$REIk4lkxO2^_dEGfE?TZ<7gH>yJ*B>fLep zuzBM1Q+nJ@YyVR_K2e$lE3yiO9`O)bmJy4lc>IQ>@LSR6IOE`ULvPqqbk6{mgcBI-z@o5BQV#5K>r|74Iv zU`YxO1cMRTEvj|MOVJv|6pfdfgIj={EEY0TU>ZZbmXX2}4>X}~@9+GS1(ZrqEyS6p zN24_2g852Aoo@9iFxPQ~A&$!+3{-=<0b5#D`26;)5@^UL$Ql_UhbJSOq3?@CyN__p zbFk6SSVgzst*h|xUaGaztiAXEXWNc^UbHW$D6mo@t^m#K?OT-nkZqG$Z2!i;+In?c8jW=S~uOBh$T3bA9DRPU(j ztodDQqtyj)*)gnHqXvqKNzr+K+^HuW=AId0wGPX?A;kfC*%VF`RNJdi_gV65TCA|SGvq;4)@h30qmoT z&>i+K9R0*uCh9%ZQtWJOJ_zVla0COh#5(!@>#rd4FfcPKBV;?m%=Hh56R_J-#K~P? z4bL9I7fIIxMSEAt#?nt!S0~pG_xJzvZ-9p{q0o>J$g$zm0s6i8c_v-8t!Tp&cUDtV@@AqrHr`ae_R$ju{Pi(mh=i-BJyw(j@qIEClZc|((*4>rb7{cHH#2LmBF-zFl$2q=Z*H(?VZ z=?bz)+)=PUcL%brivQwYoZZ}lAy-1J`u>l&BKlJZ<$F+g5vMH~?}fPyCtmw=ljQ*% z0=h!9&g3p#<2key$eJ3&rr!+U3rkf;@{$oj6gsVLYSM_GTqE>Br_NaQ@BHiXzUsis zv`nn4ynV`Mt*|Yt!PH1hl@tY(NTli^33T4megB3iBb^h}*jU2HoA}8vfRzY~hpBcw zBoXcR00S2-F|tIc%zMRYq-Sk2{WXu)85r1hA))oe<5OF!0&oR^XUbJc+d_&K2pH(Hn z(+cio8$J(iI&iIR+aVKZzj+tG5vppRaT@9|5{uf_#`V8gR4B9X2{FNnn_w(ByAhHT z3dq8&uEIbl_0PEh$4{llcuEf>T%!M~6z^8;!+V|L=~>=UTK?gB?!`fDBvW9jh&vit z{h!-&jI!|PUP7xbKjRHrUh?dhYv%E%;ssd@UD;}qd@W~H^(Fgd=+9qVniHCUa=HRw zY955uWV-~hSTqzZ4uNc^-^_;5(ab4+uCVUwcK`N$$aqs)9M%MgWgS|W;ieWrJ3-JU zGQc%K$)=v)Gs9pVh%#K~_6A;Ytw`R)zVkLVtME+0@bpIbURE{8*sig$4FFp5O_#pE zcgQy-XBloP&6OxChpkqOm+oOK4>zS8pzDRG2Q-ClkJ=c)jPJupEJ}pR^mnXTjyJ8X zZ0J679Ng(EOTq?kEMi@H;pSh;@U=yx?Gs-euu)&0SXJxNnbD+Ev%){_fx{?=Vix4v?`n^ci3Sq&-U^+dr@H+7yKtNddG ziY56@7+=ARM9~`uD(PYdZ}^@$Ga%8jCUn;Zd*>k5POFl^VM7JR&{jzB!a*8uK{D=p zh#I>FkC%3SU{`yha=R^p4y0*=Wtx?hLiARjvqxi^#EZRF-IZA-WMojZ7S$c5HITrm zc&PdOm*$L{GYvJP6~*b(j-@8Ld3k!$=oIO`E2Wm0@orlQ{-qriqf;;$$%mb%qZXB)l3Po*a@b< zR|iiH>L_H>9K9#Jcel=3!p}m;MDiHI!|AZeSiW`Y1 zvR@fg8^_icB%-)Y4YZ*ej1VypTj2BJ0Z{A1joKn<7YH~PNsl9s6piY+iB|{M8^NMT z&w=U&ppK~4rU>!Ev-zg5Wez=3eEP-hwQKrEZOA!Dh!=L1_as;d$mE-UoQW8H0RrL+6&slMIg}+#JC`vaBF75=ly`620afF&>nO zm{e3_s}D>hhwgE#*mbSy$hfq+_L?w8+8j&TU!%LtIrP^8x?{)KGsrHS2WmOs`&1-1 zzVbk0WE+*0g@LxpcFKKTPS++yEv+FtISJNR-pt;Yy^_52GTn3`3!9;w+QXna->#At*0M9+@}REuoT( z8n0Vris|l}CqNe9Oux%h*Hm^?aF54~_&ywY&vVJEbMM~2n>g1UH!*w_0pOVo#3aF$ zzyP6s^f=rtMz1dyasCrGB~j8o2;B!FDl@tg?wwFq+0L1AeR<1P@zr@xsRT_TkW|Br z3rw&MqJ01$HX@}EW)*VeCf__zn{99u9$Hg1YfyjpdEa=C-cXf)S2-22!=g+^v1^TI z&4CG@6b}@2p#Q@&3$s+SqVs4@!T1n8MBp5c{7H_m3=WlqHf}C1 z6beG|7rdyg_&u$eXr{Q=fi5Ca6Eg1eJv5?usLK(`ILW=aQj})bM=7ta05-#!nHiF< zffDxwMsKd)yhkBTEsR@Hb6bXwwaA{;ncj}(T3YnT+M&XB@OovLVDy=$TJ)9Sx6n;b zZ2PF0ZNvw(9sNQDfb1C1DIk1s>cg1>Ne6*?h@ceUyQTVCnBa%bH}M?~)Ueprfoy14 zP@-^+h^7U835BQy8}=}^RQ5u)O`C`mo}icjIN|X`=>@8OTnzg&W!CFuW?5e!CI9TI z&$nv>PoJUYJA5n0yfZ&bFQWAEV>%v`Fa8)LOYq#zi}TzML`JS!#dE4|{C9rglqx!v z(`W|>%fdTJWqV^wGIK~0*L|zX&mS=fO#V*E${*Uvd0JOD%`Cl_XN|iDR{s3tA&3-V zyisx!tB@Q`z38?02TUkG zF8oTSf%PHeU&Fylhq)^8@$6XGVHd&#Zpctjdt1X?nA>hL)ZMK+6;_Bo_~z;-rfcjZ z1C7iQ0nE_)FLMA9a6h+H9UB;{ADY-=CwUw?ZxRgxg$J4l<8coOdj0H(UFg+O%p$Pu z@8?BaGd!+(Q)+m{<)KIE>Pz-+12@knCLUm-$1G|x19U}xdIirsco&2)DkyN4`4 z3l<62cw~=Z)92d-y?9VP5(xVD7_+;l(>o!9==r>@Uib+wJSV_-WDU}qwb5A3Eorcr#=QQ4v zU2oTCoc`ExO;+wwOV*|OLjgmTgjfOpUyeQ#y${)AX&e82O>5Ss8UmKXzx2B+nDKs# zLw8@_?7LOTfZC_J-mE%hYnuik89Zs;TkxfC$Jm@*kBSoX=s{TS8ylx0$L>~2{Vt~! z{}4FqjsbI;hx_Y=isic|EKMEmdrjPq8h3MbC86a&^l3@X7Rg~^XlV$><)$d0;PH+M zv7$e0kbgS)5#AhrIke|#=yi!8Eu4-_@y31tJsNSs0WjYVmJ8-_kr$1$<%X1ck+7T9 zr<~p0@letdL=5(b`-FA`5r@i&igMmrnJFlbvR)(h*R8!PX1sj2Z+|M5k){R&EMZHX zn=#LV&4{QR!PR`l4zVylk$cZcZ$n&cY%ml{oA8cm1_m1`ge?TO{}$*X>1W(R=qvZ? z@_XHPZ$`YMEty44j5kQmL?|{rx(+;mHlTqAYTrA7Zkm? z;)E(e8vBEX4~b)zWL6Tq0$;bbRtx**eUvhTzda8>S#;!%Iug~Dy~P7xQ{$0M-}L80 zOMf+3eLBlTa)oAA!s$SbzJ*PdNZ1G{g+S7MU0!)UQeaLA@d-i!8zt$k@m1DPNWuW5 z*TI;fho_9NsO~T@{|x5{GX|0^v$S>)kX;v%yYO;hb51Dzv`W(MWavZi8Hu7z2QZMgs)13AD>% zi&Rs_V}@~V+lZe5_^Td<^4*p5ec5&nK~u3xDcdj!2qwdafbn5m6@UtX0qsF0leQ}( zMz&yaGEp2rV`4C}KA440t09yQa9=RtYQ;jnPrfkRW0u}YMp@~EmqCZ~XL0ehyL)ws zse^+cbRW~Rvwj0!DY8zFb1{kR2z2YskB;$Rc&P!vZasmG?!da_)IN!Z?fl)hm6=%) zbRqF0;OTZpl4_Z%MU^))z$uz~xBn>+lX}PO2|m zh~_!;5K4{^ywqbnl^eOZw1`}Nc-Us#BK-f;0%-i36C$fi#C`aVt{xs_@)e*;n#C$I zz!l#TlEcHqFqM>qfB_v0>ic<7;+j02cp+HsSfCJCjQ_bh9%%rC)&LYh#^HeOe*lW3 zOqt~clDTXj9vS|UtqC6k1QOtwq9#Z|IeQ@n^`MMBD!uYLIf_=x!SWsa72i3WxwihpTM>shaIW4?1uNgu2AJ*UiJ=gsPG{IEvpm@$IdbR>(?&m zrG0)+@^*H!t9%vYUvYAA%6$0Bx-m=`cC39`&8 zTlSJa*yrKwOoZX_VwrMd7WlUYT(Asi{pxEEgDRWkPJ9Cko2;983KrN4=WeAUJTRRg zX}FxKBq4(Uu3f3oUBfcZs^#@6y zfSIcSLRkHjpQTTtqInmxGXb?XB9oMOa*F1b;NxL6Xy2ih$4TgR}&zWuVJPa_sCNa&!{MCrYV0QBg&eiJn zcJqe0Gtf#@+qcKT5z~aC$RpjmQw9DaJpR?tGM^Gp=vZuV$cK=8mjgf^Y*PBLhu8th zN;PoqS4nMt##lfgCGD}6Bz=E55|a$&1#lt3xw&1KoQ}JvSx@inkSQc065ZsV zF#NWwdI{tbcE*EddnYri;APi9o8LDyWRekGg7}pbG-4xsRmSMsS+x_yL!0D3W~h9>FPQO zO(-v=8Mawi%V4fL({L~(rVXx8O)|-Gz#j)3rV(QJt8Rappdc0jISor7Q^*qxsNVJI^mF!QN3pj@zr+@=|J9NU4^nj4Gw7c!+8)DLVfcaA2@X83C1c*9Pf zD0@MiJeSC}&_<)3J2rm-n;}?WdoXT3e)*gDU)*Ek)1o;xzIE$X+$CHO?G?qlNl712 z1%q8x#zs#i`r*4XZ}jSWD;5V}M*#~F{&+Sq63cm?Q^yIf029}H;2*dT`6m&b4?F^} zfMxo!#$$Leo}t0yCTM+#J(mAia>=vKpP@J}kSvMFlaaA}qfi}rd$EdBlYQq;pMEB_Bmz%Xn4Yf%#ZDRAXQ2SavAODE!$6oJea z&73@|k-cl6zfUv|kU3zinud023XUjDL=D4HJNbF#4urz1D8SyNMwI_cj3;w3NlYcA zIZ$GRKVP2hhPq1)5(xj_-L$~nQ$gjzR}%Uj#mexqJQ_nkM39rQ%fS8`A=Nrj?3w*p zY>!0=I}V_Cs8xm9X>NnNzf6!y#N|RZFOP7htAKV;FwbyvR}iCaZt zFC1EPmfrnDdq7PAj|58O5|(+%thRnyW69HOIOxj($0Ll?@NMvcKuEIon7*JRa4a32 z0y>_&b3gWj$C<)s;|u2ZLXC&O|1CvkfTC>Rg|=&YxZw<@#Z4~Pq>V0GJ2@dK1&}tu z>QKFqS!skIhBeae+q0d}hCM~d05&w|F9D}Pnkl5~S>d)(DA`8qDce$w<@tB-UQ6lv z`SUViMbYPAH}oA4BjXxy7+e0BM^#D$o2caF+!nFSh5kyOH1h^Zav{BxEog%O#_3iujpzu707D1Bm2|2icuy7cmQ=xf;u$827 zfR!h$9R}-^;em$kTrtCdWP%kPpxRDoDJw)`LN@5Svf%=8FQbeILn%f`7qp)h$#T9V zsP{uZ&B{bAT|x@~A7fNW*u{J-UwZr+HNmd&Y+T6%p3TPyZXzQCfkQsRiEZrg$NI#H zr1QHUK$D)uk2~BcP1%dFeBoi zSzr?7P{m*v$#8FhA7h4)Oyw!ogg3k`-tkF)_yAICi z;#`Zd-{sVq%;5cBeHUZad`~LLOWoKuTSU03a)q2@!?W;5+1?en+z}QEReoFj! zcQz67j~HkfZ08y`4E^ct{jY9UifLu0-~0dj&!6_vaRhhm#n@{9Js#NMQ53FSSiL<5 zj>M}lFyV-XORpntrv&tJp!NHDdzIlv-8cOE6{4uttzF9tXsX>MWV9|yY@XK&e*rBDH6FLXyGVSSdeyyW>gCgpyw}}S9J%WN)u#0I!T2!Q0jNOVxDk{dx;;<-Q z&pi_>IS4{3qp@4fXvZ25_^bxEvi#C)6X!nbfV;oM(|7eI0rV6@<%@=%-2UsFQN$;|k%J6+7j5EuOuk zZ{iM7(qXG6Lm@~)As}Berw>}v-2huiwzBC<$B5L_?Pzgv0K`n>t%luxJ)}d}PTt+v zsfkh~UC6%J@tZi(y*Ok^(7PiP%~(!Bn79t5*A&16g599|Bl+4t5^CsJJ12y5-2j-A z(WJneNF%GNZwc>36P6Gej*V7l5X(OI>C8w!z>K{RQ*8pv+X!)?M}cLrXC&%&{5M?N zP1&-#83svjr~j_wLC6qr0hTtG5c(PEprhj|>259X+HIqyr6uQb8Q2I#?zCFl0pvgtn+a)riV)zW$uskpW;fUYw28TXey3rKjMdB7K~!@_ zGYB{s%4Q4`+xUimUL1*%B!+|7ouM7Wmhuq)tg}|}n;SM@gyOw3eKXr7ujg9|CD7;TZeQeZNVG0ZO(P=4z-Q?PA!ggpqOwNRq@U zAQ0`AM!s%bl^+}nrKO6Z*O;dCrj0F0U^psa8>mx(jt3eDsuD9XL}Kp4&1=#80=~*kSQsFL;@gXR2gNuuO<7`f{ z^D=w524GMCCwc88%wRwml^RYeNzM&GD!PV-5Vph!nmGj$FA|guJskikJX|{PVWU1n zZPANk_K2pYGH(C4`b76|P?ip(!K!GDdM*-Z1>rZ*!vI{ITzaM&8cuWwen>BzoSCU4 zy5-ZS(_7^s3B?}$?`V{i+=#_S0>UXLfi4r1GT<|@(;qjI+`3X;i58=L_3?TkK|utm zQ^)*#=ZyQe4UavD$1Y+h5m zpO^Tu%@hxrOSAM$MF_f4)w4Y zCZ?f7z4t}9GM8`{bsKAeQ#mktajq zj%O5n&CEK4c`|swLQV)Dh73k?l)f_7;a=O>mQ2_HuC9|4+4AjGkWhWw!=QC5Zv(D} z?6??q{2b^cLS#bgSP6WTWU{{WOtSJe7>o5itv1H*OdZ0;#;nf!Hu|?NPl}-sTGK&v z?j)40?#@bXsW0P|Y1zet%jQN$KkCKBUVfl*eTUK6jgz9B$HE^Ze|{l&LsF2Y&dMS> zPB2pKtg>U0LvKa|XTt-V7YnI@`dX}4>GzAKUfTXuO{Lj@xBI?OjpFgI7dsAd%}3f^ zvNiE6l$W?xDepWH8u0g3;k}WdcSJLkmK&|#?cAQopEZM6TaC| z(!ZYO=BIy?Q`P?TPBJf zn{?UfX-Gc=j0PJPIO2>l{6eY+*MjZ`#{t67=_8gq^mWH9}zOtB^J z4}0!`5eUSXCC;31P7yMnDi4+(rsEBvi!HgQx9kyk=<($7fq_v*rX&a zpwVazF=LLkLQ%^7RhEl#OZav3(PxXjb9k-rsJAJV?eJTv+P9aDoT)8%_pE6}5bv34 z<2v~0G;lgGPO}33+A-%=>wp?ITo0A^_*&yVJ)ei2UthGlTw2$bPF|+2j3q0fna<1C z>=yZEt3T8(T_3eKQNz_`Do}rQNQqTjVUf`budMVp=HiomFJd^mbClnEzHEy27c@+l zCx6ja*Iii(4$IJHuwm-ZUq-pX6g|UX@^V$d2Wy{c;SNJ@kC{-e5Yv&M8w6JZ^20ug z$w;4|audB%2gP9!=!+jkeMZ-xxBwaF=oR{RpA?8$hr5RP4B3fOjHPPzT?59==KkWM0nFsJ8 zrY@!EKU(bM|1#tb=CZ)WkP*>X90*>T@Fh<$)82laZiYNK^cO#M08i}h?yl>2EgP|8 z|1;g}>2b_7tb@hk&$oL2cbGRa{_2b9QDSSoDaMFoi|bc!}Me7j;JAdZSc5^X4| zcLVZGvIj)#3s-cIz%A!9dp{@V)AV=!V{&AeMxxvoJ2i>;H;y)U9Om{(N|Iy{bS8f9 zSzx|o#ru&UZAb~#qRLs7Uc^_{wRL}qY{IHC*?QF-HwBH&Ud|d$iFq`hovfqIBgYk< zf2+m*zw;~JwkHM`5UOxZ@BRDsiGj-99GT|%3R338LrX))82wHGNA{3MgY4=Y@IlqDCqu_)^ZWy5_pGR{%&*HH3 zGt96BatfZV{prq%rTDrtpxcPU4OzsQB?U~WHZJj7GBK&)73Kb1r{bI_`8>Y&YT2t0wHt9{PmhebTr!#QF!UO3j~L$0;sJJ zlBkB06@d}r5Z~72n$b0Oe>?qZOQaeg#A+z9K2%rpV#F7o3<{|qAzI$UgKQ-r{HvgX z2UIgPJ6i<^3GU&d;}!b3t!D4`v(w{%(lIenKYe;9Qr7(z#|p?mAT)f$n~*91zXuD@ zYH)5K0Z$=L4fdZ-{oLwP)}Y8x2>yy%>Yreky>02Aa1VCs&rf!Q*zGG`JCD#*JPqoa zntXs*i;ARyv~xhckA4#Ch82}3=9ILY&B)DtbpL+&^wpjC+z<(GRJ8)o84?yIjzW%P zM;c}IM+f}nLvKMjzozJ$<2x`3Y!=>^P;!%c25FZqBb$}nxyGd1U^bLORfZ5mpi%H5 zYT}7NO^X+MnKgDkD>D;bnJ$3de|vdWx;j^u4}dR2oq9b!{s>k8t|)SMLLtz?>pJ{d zuSCb&{AHXxKWJnsPOltXsD(HyoxNQ^YVhRQ$wZmr_)eh)+N` zFmwaw%oCX^6u-%~ZJ=2NlKc#WzIfD7Ay*;qMipqOqscA9=GIoX$u32xJ<5?nh6ds! zBwZ}iN^~vX_OxwMB?GdaoG84pu;2qYsse2!s>`gsAMN$eJWl{Ji00>@*Eo?zoq`sA z`sOtzrG5MV-90pvQWvIo5VlHKI7d6#)!wQ0j09}Hh;+FEI~B16*qgE_Jx;A-dp?iE zaY(B$QH^Xr&}BYWrjcp;zGfQ65TxY9;43p@ei#uzUoh~XA4NNOL%5%sz7o1Z;*Str&yU9% zu)!Bjr{0EZYJ4P(>d?v`Mlf{w%JPdMA$bg0a(iQeL3lbzv;O6$0@c81ezYCjYY2Q^Xy$aSM7{HPo0Qq@*{?v3-&w9K z7!zwZ1b5jr%t|lZGV9xNPZU<*JG{fS`{B81l!7kQ7H7^&;WSkwYGh<)TB93zDXdCo zMQgw~k7KiEWTYn7e@-a>vQg-rJ9n}*?U(@xfhmTrqc~fWS?McPOXU)F@{-xlnx8K^ ztby{j56Ly%a2t`=Y5bJoGp%Z3!hsgP1Qd${!}>O#7jW5*5fMz&5OI}}qkUvALaJ3U zK>?nOw8jrO(mBw3Oq9$ppxR#5g&EwfSZX8bvJT`vBiA^5WPv^&s=Um`{+|j ze}ZkYE`EldCYS}&1fQ4}e{n6vB!ac{^ajw)rVY;dsOoOqDJrTn^d_GavW0Z^c&}d~my2)f0Nh@s0C1c_ zxKWTuWUGRQ6$7<`0VVR~W@tZZvJz!~BL0%8!sRjNI6gbJ7*Mlm19=p3)!~DV2L=J@ zJ85b0@p_RDelhD^=|%Yw^sy7(?a*tNyGnmXh&N_e0H{bptTZ&Tb=VQ7Kb&;{r&!Ru z3;jTdk)yI6JmF0Uy+CGG+wn;c{~zhNgN%2WU#MZOgUwu1#L+h2`gLIDN)HB}xW z+i|oJUnUX7qjq~@QM!OjZ)9pJ*ex95ExC_v(y5~)!+pHXBqBQ6*zGuO*l9dD8sHP+ zV29~Pqizi1D-MTLItpErUW`H!4D6YhaQfp1k)j4R%{Y9nnpVtFJkBBI`oLO}6SsLI z<|p^#VZk$VrP0m+i70WnbaK;)iLf1PL|7o{z~ph^o<1&HD!;i~9~n&iVq&^=R#Kj$ z?05zRQ4@HTjE{}gq2`DONyYb`m}JQ24%(VR2na|VZ)10z0#J4XJKeE>1$`99(s!fa z)H`0}xQ}xK2A=Uk8UauR3D#tWECy6x7~HpyK#{1MF=in9(yu{8T~i!QDnvVt-E)%eSD;GWaoM8 z#@>D3=T&i0(K8@+@_#3lQ41Mz%w*-3QJ)0fjwZs?qQ0(fC-R)Yh+!@A9UAqpyzu;+ z7J6C$UnGF;rSM_wKu@0aJL&7Qk~zFBEpf1kLe^!ruLJGI@Ae7*!|1P2z94O&vDnKA zJXr#W9Q+Y9?2w0zEdp}XJMdabz>@xN|6FAxb4KLQp?iSzS+;Dc0T%pZYBRSIv{X1W zY{7oul>7LlpN@Al#E|jB(IZEo{#*l`d?aelb;=t~bm9-ic>{JUqosa!J+CxF)jtlDKdGPAZ33G&X>gv5G~XO!2z%m;Ev%6ESMq1z4*s$U zkqlYr1W=05pV{qc$NKXEQvdIhP1A()nO5sNK zP0qC5JrM-dvjIH^IUG3=ZxJkW|D7L28}A4S2%N+@i-Z($>_^~X(O(sKm?+WFHVkw# z6ue|0l2;&ZgeXM@lQ_^!2G;47h1SW#q1B#q)c?2i(`Ef!o(e`U9q$uwJfYU zLBn&B1&1Pge zgoz>3Jaq9KCjI(hk`4)e=o=Uyha(2{RN-_28FQ_L;hs@&ssL(H2>)yGoDql;tUs10 zncu6R5SXpGQOTV$Cf*h>h_~aCb`M8E1}&Y7mzOQ@2OzMQ0MAN*Y`YUyidWjSX{tV8 zITkv`D4s*GymO%X?z{c^?@oy`wI@;JdoJF-%v2$tzv#S@va&q?`W8i7Q?nO2B9$nb z@BQuLN98SnF(}FY3-;JRLFgJUxl{m^Ak)I|NMke@)a%ev9v(*A>3z&}M&>wb9b&L0;|bxdAT!XhwLxx7Zr;yi-%s5ITQ@6x zWh`cxUu+ziBfSZVRiMgRxBxi6Zv{Xoi&CY+aladI_2H3`Mu+@c66zk(+(>Fcv(gK_ zE6FtR_m`b()(1n~0C!}vchA~F4#5z+RxaVTP$s3Xzooo*Lzuqs;{0G@I8tFw$IGR| k-xYAqJcLP!&SQzErPP`I*?D~?3jV089#zRvIvMnT0I@OVP5=M^ literal 0 HcmV?d00001 From c4721f5ab15a841e4d5f5e4791e49de34d333228 Mon Sep 17 00:00:00 2001 From: zchen0211 Date: Tue, 10 Oct 2017 13:46:09 -0700 Subject: [PATCH 320/342] gan design with graph --- doc/design/gan_api.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/design/gan_api.md b/doc/design/gan_api.md index a1626e50d7..f99d1e6540 100644 --- a/doc/design/gan_api.md +++ b/doc/design/gan_api.md @@ -28,8 +28,8 @@ In our GAN design, we wrap it as a user-friendly easily customized python API to | Repmat op (done) | ? | N (Cond) |

-
-The overall running logic of GAN. The black solid arrows indicate the forward pass; the green dashed arrows indicate the backward pass of generator training; the red dashed arrows indicate the backward pass of the discriminator training. The BP pass of the green (red) arrow should only update the parameters in the green (red) boxes. +
+The overall running logic of GAN. The black solid arrows indicate the forward pass; the green dashed arrows indicate the backward pass of generator training; the red dashed arrows indicate the backward pass of the discriminator training. The BP pass of the green (red) arrow should only update the parameters in the green (red) boxes. The diamonds indicate the data providers.

From ea56018907d89ac541bb1fa0e184159142a160d2 Mon Sep 17 00:00:00 2001 From: zchen0211 Date: Tue, 10 Oct 2017 13:50:06 -0700 Subject: [PATCH 321/342] gan design with graph --- doc/design/gan_api.md | 2 +- doc/design/test.dot | 35 +++++++++++++++++++++++++++++++++++ doc/design/test.dot.png | Bin 59401 -> 58935 bytes 3 files changed, 36 insertions(+), 1 deletion(-) create mode 100644 doc/design/test.dot diff --git a/doc/design/gan_api.md b/doc/design/gan_api.md index f99d1e6540..2fb30432cb 100644 --- a/doc/design/gan_api.md +++ b/doc/design/gan_api.md @@ -29,7 +29,7 @@ In our GAN design, we wrap it as a user-friendly easily customized python API to


-The overall running logic of GAN. The black solid arrows indicate the forward pass; the green dashed arrows indicate the backward pass of generator training; the red dashed arrows indicate the backward pass of the discriminator training. The BP pass of the green (red) arrow should only update the parameters in the green (red) boxes. The diamonds indicate the data providers. +The overall running logic of GAN. The black solid arrows indicate the forward pass; the green dashed arrows indicate the backward pass of generator training; the red dashed arrows indicate the backward pass of the discriminator training. The BP pass of the green (red) arrow should only update the parameters in the green (red) boxes. The diamonds indicate the data providers. d\_loss and g\_loss mared in red and green are the two targets we would like to run.

diff --git a/doc/design/test.dot b/doc/design/test.dot new file mode 100644 index 0000000000..62c69b8fc8 --- /dev/null +++ b/doc/design/test.dot @@ -0,0 +1,35 @@ + +digraph Test { + z -> generator -> G_img; + G_img -> discriminator -> D_f -> d_loss_f; + label0 -> d_loss_f -> d_loss; + + img -> discriminator -> D_t -> d_loss_t; + label1 -> d_loss_t -> d_loss; + + d_loss -> d_loss_t[color=red, style=dashed]; + d_loss -> d_loss_f[color=red, style=dashed]; + d_loss_t -> D_t[color=red, style=dashed]; + d_loss_f -> D_f[color=red, style=dashed]; + D_t -> discriminator[color=red, style=dashed]; + D_f -> discriminator[color=red, style=dashed]; + + D_f -> g_loss; + label2 -> g_loss; + + g_loss -> D_f[color=green, style=dashed]; + D_f -> discriminator[color=green, style=dashed]; + discriminator -> G_img[color=green, style=dashed]; + G_img -> generator[color=green, style=dashed]; + + discriminator [color=red, shape=box]; + generator [color=green, shape=box]; + z [shape=diamond]; + img [shape=diamond]; + label0 [shape=diamond]; + label1 [shape=diamond]; + label2 [shape=diamond]; + + d_loss [color=red]; + g_loss [color=green]; +} diff --git a/doc/design/test.dot.png b/doc/design/test.dot.png index 768e5cac4b14201f378896bb3bcde9e6ee540414..4e121a40b9f7b2232d7cdda315bad15926446f55 100644 GIT binary patch delta 41475 zcmag`cRZH=8$S+TA|sMf$jB^P_Ff@m%Z!jw85xntI*qLCEhHm5Nl5mJqL3|H$jaXP zcbxCf_r4$B-~G@1*Sj~a>pIWZ`Fb74a~)UcFYL;n*dJ|fBlmTNj_}qs$+Q%9$*^uE zGKG+jW$SB|PFdfsq1F4^!(m&L*-|27d?fTNbby$)*zM9xVq#xq@jF+EI8BsL-sGK5-+K9#m32P-DDAD!Aptu(I~BhbzVMS_-_w&L%e~d{ z{e$_p8?BUP5B8DuX?%SAM!S+aTsk_s`WA;5%JQ|F9h`#RhtKL;@{_1uWgZB=UE^J1 zN&3Og!STw}dq*!;ndMz^u{vhyxVN`AD^`YAluU@_CzlNBbGt!AaLw{y7)#%3(qnvM2^BfU0Z^jv^a$!wKN-C<;{M%}4 zYH)gb`YtEuc3WH9WhyFVgA!BkCATXV0+-w>Dk|E4{fa(^jlHw^OlokKg-^0WCu?I8lCdS68ZBg`R{ZA_$#UYo|M$)OK z3fpJlT|%x29$C7tPhr`Ql!u}}9vK<2dh|$<-fJCeyxzO!x1~)4+@2$opGf@x$~S# zeT#L>fyuNcMIa-T~hm3b5xVqczL5o zDjhY&#l>gW)-<)XwRKHQigUjkQ%2lb)z{a*OikV4M#XKa?q6P3rgPox@#DuSVlK#y zJ1k^NgT*u}V^wi*k&~Iyarb%hRz@pzhCZ0wVT$?n?|0eabW5;TaLxu98JRLiLhS3; zSK&qILaeVl6&4oWwUar2{`_SM3dK6ly&IU>KY!%l@{=@!Qq(MCYyABFTiZ2Wqll#? zTXBzVwig~Au!WdZRaHky5zF9=xWl+vXEFQH&k^t6zyIFfA1`b_@>S9AqOOrqA$1Nm z11l@5QK`lGoZMVt)w3iXldM2O%Gl6Qf(rZ5#E8E%vdpobuC79GNh4*CFHL5aQ;Ug- z%`Pvi8W|atyKh)%XhaGpH8;x{m0Od-Q;T`~mY$|{NWSxOXnlP>!VqhF@7}%Q0tOly znpl;b_}JKHRml|eJ)b`1!^#N2yHwcl^yba1nQsG=A8;jH{u13YHjaDqhVG5DcgmVm z|KI)s9Vz~WU^RV}eN zxVww$>FaOOr63vB3eK>Nm@v=&d?nZsAtqTFFFJiL)bH4EqPQ|r!7L-wKv8AEC?b+% z`eN=%@4!Hg%;|o>niEduqxcU+MXK;OIuX5rtL@}9bP~^53UoX4RZirYV~^kw5@1@} z+TijdV~?(l$n#y!-hE!Rz11xfr@)l?V(V|reV%zlg0}1Xobv@DB9>4EIoPms611lM zX`*h<&Ud+Q5fKvh4KG%1Z$BH5!;~7T!D{4w{P@x1X8V&+L8dCRdau_Q$vyuR#TmA$ zy?5!`eBSTjBQ{4J8)D!iqGSfsl2bWEMU!ERFTgSN3hw1fuXX?a{d)*{hBxPacp<^L z!rdO>1|ROj8a>J*Z9jqa#PQ`95Ez&gRciO9x|qGUAXQQMBQQCcs<)>Hz3vS=OqHT)ld=aOW&4s#jG-WuG`g=k@E?EOG7MdwNO-XMDC7NGCIe2qTnN zNl8g@d{d*sX|KDa8Fvg~m{r#6(4-qoSe|xm|QAtL|%LD@HKEu8VjRo3DEN z_IWx{Cto;*a2ld>a<1RWxm)eJOaX5x5h!;nPvgzqx|rqVgc*&Htk%$9--0-FZTajPml!#-E?O@HyF~P+^LBusm4I#=+5yGzStM5a+c!jD7K& zntJeTsh@!ShMKI8kM#4kiHP!Y!S7c5qGDnjbQWdgVXbHxZ*Fdme*HSMr2g;%oX~*E zN|CX#F%mMetdX{|+tWwmInq9di>$Vj^->}cE`rW)HMbV8d)!%BDeS9qnO9~Z>w(hQ z<;GS!+JMM}Dj-ba$f@LHrXZCu_JpWV(aY`aZJg3XvvGL5$k9rrm5oii`i!==!o7R8 zj=S=Q(H=UE6_rIA=uSv7CEKOZa-kM~#&(fLI2@V;HxExF)Unse$y!iezYh&1HMg`3 zeClQAaf-bi^SV)9Qc(!YOKrWSYv~DEc9iZj{rZv_151`w18sc5?Mc^zez?`qL%zy3O0*LfVA=$aY{R{O_RRwS6Ev8u8qVb~YQ@dh5ctK#CwTU%R&3hh4>Y3(lb z1pMuK^TUQdqG#+1`Hzl{CFIRmXuObhctk`&fk0jNvWSpS#VtOe)-n#Wi7J=it@?r6 zaF+eg6Iw6)%7hkY0f&{JpFhj7{vfDKI(_FYuYf=t7dLl1Z1pC}rKP0|`oz$Zv<(fZs%mP;G0v{8*I%nCLK*7pMdZm58`wmtX=7&njxN{tzKz$u zh=T{{3LmCl6##wvQABp8pv4OXD2A{z_KCAwzxbSpU$}5#YGq~opSRn;VbZtWyH}wL zZR`x04IO<64=36`JcQE|+_K;s!>q(4BSXKjv5~gB#U*qUsgW*uH8C;Ku=*K0E-`J$ z@GxTd_1iajjs)AEiW}U4Wn}`;zE{U#|rv?2Ry!NM1w{ zx~~d!@+@FEzCg9nX>eN}iPfr(p{x5l{9-z<5moFa-zQ4D=zV4fpl=L8O zCXR)SptiP_l`XyiG_56tWvcOWODLc0Z^{XzhAB)0U~8Dx9`6stYGd z{#zs+7MoIOZf@=u_>E4(-eTU{=Eg<@gV8cIjl3O3F}+;U&<5wS)TZyolacbp0!!E| z1ZHMt4)hp2c(kybQ*(2Hc$coBA2eM1LI40Q=F+80KhvdXu&}Ud?=$3+w!i7Kx$5XR z6{qPt-BM*^=BuKr+VjCgV&bsm{d|HJm!>ufwcZElm^!L_xLy1KfuJV`P;oX)zzHr5ijEq>6 zH%(s_6>&Pw3w(KGZte#-ta)^l#?R01i~9!C2a}p0?QF$Nm-(*FYf*|nt0*ZcT?{;? zzLPRYq^5p8-vRrd>fqp@c88vhE~2oIBb-(&wvs9x5METDrZ7Cjkg%`-U5?gBI*A9C zme?{fGXF*^FI@=qFY`%Vg6nm1a|;d#z)rkrW?F(}5KK&m!F>Mw`Fm$)>v)aZa#|9_ z=FEqvtSqJ{qn|HMOiV0{*NDK;Wr%B!PE2f5wZIc(L8_ACGT>^c@@=3a?N}JpU*A;)PhQ+Us)? z5)wFgc+A(Y2lk!PSi)Y)FD&$rin@fE>$rwt$a;J&E#zQkCtQDjzQ(q*v%|r~ZHA)-*cCPj?z_7MOPvvvUYdtP!7(wUfq{Wf`$Uy0 z9`>@ok%^h+7{(XF3TRI=WnVqR3QEd@4N&^D}y88NxySuwMzK^=n?!anfM9G{c z!|Bpv)qT4&^5R&!zC^#fyF2gWN8Y91Wsi%OFTMOMQliM>22^Y0(?ddN&`A843wx4m z1{;~_y`B_K1%8hiXe19>H-;59931w3&Q`S82( zn{+B~ay8#ZzL9Xt%F6Oz(_$bCHQi5$kN^6D7JAxng&ieYIJ=GtZ^Gt*D$Byk%FMxW zftZ-seXVZq!NZ4vVlIEDIuba&f`|V|`Xqb=+zt&!F_Jcv)8NBYZ;mR0Pejxo;^OPC zxhmA22OLCFsh5{2BmAH5H7RO9 zr-ltabxR!6>4=o)CY?pg`tIw@^lQ>y%9w?Pp6sz_joMOfOiVibIcp0N8YB9_P=%6! z9OZxclDwwnzPHfB{HeVB9n>Je!U6rdVf_OG#d_Sa1#el4WMJ>cKpz42n!5HYMQy#k z?(gj6Vqe|?aZZ!CHtXT5`acrkk2|`$ZeU|$Bd}P1B`!~J8-ETbx_FTt`Zhd(oQw=# zz?u2bc5r-Q?*sj{`8P9dWMRPs=$BtaWZwHBnE}q*vADQ60*pzmdknnj$@SuQ*IV37Ja`e~z;sbb#O{D)e>Ocj zS{8$&qgHU;W2IsUWy8IFeQ>?Mf%Dyk&SA4T+wK*d^&ALtp%t#Lxa(4XR^0Eh`_<1v zDR}NHgHlku)Ty!Fegc#N$Jb$^w%v`5TOUBQ>)OPd`z9u-_RRQ<(3=>c058EKiQ7B9 zT%BSQlK2tvzVF?yrn3W>V^C19Gv#nlcD-Hd#*H9Md3BGX%a8Bh8KAapZf`%ia)$M( ztgOIsvfj$kv3vQvYUj#5RuWi%17daVbXWhXoT%Pqru|tT;Bn_NHY8%~{DDrzKzSCE zkf=;P`{!B(tU*j%JT^PqGR*Kwk0BH;76e$z;Gn+i-KWshE>4_AJMOJb=KW7os9BQB zhJV4MEl!wGHGI(zCzW}d=k*jpJ@ z!q{G+zsnhE`=cCZ^s_y)x3{kOp5AS#zu-~S+=iSjPFb-Z66%Pfq(%h> zU5=a@x3$8YlR*X8)<$JW5tS@}fm5juK$w*e%J$KC%v{Vf=u#s2rIb##Ka>hsE1c$yB%SAbO4gb zj*b9OT*G33f|?@jEp9*j(GNOxety0(_eq+BTO<@O>$&05SqD2+jIyR?)D6vaHP5}( zD?eWmtE1hITGWZ*bxKO_Z=~!o&48kJo~~`~bGVJkrJI6(Vkx5fPIm5UYiB;K7_+mM zr_sy*BSECGxV$_A9OoOHw@6Up;A4ncHPgR_lC!S=xY>~L?i~(O3_i4~AP@?lJbCiK z#)iPf#f9qX)mtoNP2a!cW3;riFidi{Zeck)JHr7E1gi87$<1XF61t**BPAtem6&At zDw}zgULpt!8`pYFkw?yDZ`B0&0j{2&o~Y+8SM{^S-u&m!o;{mdTf2lH2+Q^B=CBF> zlXHRYnQt%jEWn1}{r5Wn&cycWIE|Ut#`y`4Wo{~J>cRt3J`4ksN7vxox%D0;dPXLu zb4lVi&Hm;FFe_o)+A8 z)GL^Y4Z6_n64ScqPtlJsNlS1;M|KkYZT4=|4`ZcLivM-IreiwX#oP2q~%9G{2l^!H&KXgwK)B_M=~B)8n?*y7lyAtq_*= zt?nhl4_6Fpo^zv61%Q?XRPtYggMTGhg0SJ}TnLo2R9aOVS70IqCQFkvMlX${kcFJ7 z<8T>b-sKeBY@~PQRn7|MOUvgOiibvW@5%I>9g~=@afV7whFML~ra@+6y~7z6RVt zEyKMvYh6}&LDYwK_=`4oX@wvhKq?Uj+SB7LO_q8|_ z2}Wk->}fn{cg*)W*U3CBHiK?_PglpXI~p2oP-nGubzeed&2(Fv@CyDa$lS#0)}^$n z6v;NEA<)^;)P%LTxS0R`{gYw8y)_w$7Y2G*d79Xh$mBLgXbj2?pVO2q6%EY{2pvw& z&VLXImfY6_(p7ewX-C{+a`UhC2)Q*~ zNS4b)X)G#a0Qk2&JwAXn#`m3ytGN+1W*6%zv}9QQt+Bnmz?|95-hQeldxVlh%H!Sr zJ)dcR{`N}#D_5>03)@ql?$2EMH&Q_u$EpdtSdjroh8sSYJ1H(sr)7TZZS6TdGkT|| z+Bf#}k=%?745+1YR#q&az1*{SY)g6^oxBy}sT=<(T>{V08IcB!;$3cT)~8Qo_O==! z!fmxLdye8@HJ9FJnT^WO6z}!)|rTZ-^`*C+Vdgb9$aT4>)ZZ~gs zr95gsIe_waE>O;Iw9>Ia5Z`7#-uOoEelVBB=f3_xK}o6Q=T9UYwhk7#m-EnUjKr43 z>gwv3=f$q8e4`{XC)?zpAiWfsaK~;q-Xw&F`wcFu@xRab{^kXro12@kGvWuB<7bKx zA&9cl!j2O^aV6C1@K5!!wAc)t9h*G17yRPmF9QK7^_F~_p~beoTC--+m#dCXNGK1i zYZ77PqgG&6Tz&Gc*BRu!fDq8V8r|6Z?}Dbnu9N!!Xv;n{VR$aoKm^`w#Yhy*cX2}3 zTv%Q%dTjgmWG_L2-{uPl9H52G43}DlgonQ@DY-cb@2d>Vp}IO{RNd>Il$$94zc1C* zg{)O)Ye>huP5>Zti}fi48r%2A*8-1uVM(Cq2*#s(jZ+p&j>MnKYJ=`o#?C?yHmLjB zO7gzAZ_^^e!djvkrwYCPr`dx(juc1IV#_VMG|ViM43rnAq2jOZ}P+J||j6MkCvC zg{!Nwwo45{lR{s?NWrD2rw3_;4CH@aK`Y8SKJ{<062Z2yHMHuqF`u2_sabv(gd25bnu-G{GPTUQ1Xr{xJj`YxUp{dv~0?rkBjo%@1y;(r`*&~pz$UIf7FchWYmKAA}3P{Ni$d zf3G_|+*!t}x_{aq`yKdnkdUDJN9V^kusS+B13|RwdMgu^pU;*!d{u+MXZAz;NVe3w z=w{p}_eR4+MMWE%o6iBVZ32Kp0%8^D63oyN5D*j>Mk+Wb8HL*ch4MZ6Nu{f&Hw~WQ zLT?Ts>>}<7x>53$J#&SM`g>4hKdS%)^AAMMX%FHkh1;CIGZd zEp&sSoc{qF6bS3IyEfU)VaDGX)LA7^`)gp}_MJNf(1Jsh6NcaR=c?1d0T6L{wnKU4 zO5no6g815KsU@D5mlrQUb_E56k6*h4IiWS>KI|PFY)hB&G~I8r=a@>k32I$3NX_t< zPT(vUA$!Hpulw>eztz6j`Uq7%;;oT|U=)-snySkC-iO;wc;F+5bEgY zU`YDG2gny(2;cvA+fOt}`?@Z-ULxKVhn~`+1WtqI_V&fsG@qXR8!nxWa2UU!mNxwL zd-~1m*Ux8WW`e{fhkzgj@5yWN6(0pa@KaP2N#rrh<}o zyqnv=#UoT#b8^xe%#r=+;EGf3q#qzIMWN4AQj)f$;Z!{DMBrx_+6_NB$1~$FD&M*= znTc=oR@(a`pwGPA$6)3?FgNd6$1N`QLyo;3#?CjwrGA8Fo2PNkKu_;hrq+XjLOr&* ze8-@Io3M!L=`J_rulkdQ8-Hnq+O&tB4o&rL678A4V?b>u!RxzhVjo7q?eX`8KL|nh ztDMinb`FwjQjrokHgA6;|NdC^1l(Qpq_VZxsFTKu-^WjiAz10FCY3L@` zRTFESTAth6+k-gs8C0(u1fKYzZTjtFV>&B+;wJJOUn zc?rRXK}2kvi6V@VEcAheg+II#gP7P=fPXr6XGDD@?~gIT!{ur%1Mkx}z*X}#ay10a z9bIjxxA6%HneX2dVYKeu3yqB>!}uz*_=CtPxwEZBMIMkgYzVuu5wy@7H*Q2spooa* zN77AXX%`T52DDPBS;;bpG?J^H+V)(>0>|&V8}V{gna%4%Rdw}NC?BBk27zz;rK)Ng zPCb>7?PX^d7x?VS4D$Fc%!Sz4=&j|F3J1rb4|HHYnL9WT8yg!BkBnqNU0&!fApf_r z(gQ*r&=;wJ3D8SJ$HwjtETadOi^3A*nEk1MOTWqx5C=~Qs1%D0o%c5Xd{{v~&YqTl zu>Q5J?GGpiAyH9EUS5*1v9aqQSz&wupmR)%OGv;uy#~fr0N7Ku+xAbM5P=}Nu(*h& z#q;`ofsDKYoBNZ1e{kO86B7#+kA)`7%F2L1Miv>A{DB=R_vlfstxa}9!j=1oiOKZR z(yMDy9)M~H_+P8Jk_`Zc1mzsXF{3|INV(7rI7ng77>MywW+X~IRJBk*M3A0MHSW6)#|Sj%)hW?Rr6fDOd>BY?{%^>1W_Bf zgxqU@*q{Cz=A)JNLbYyGupouCBKMr@$`NU$bEh(iTY^dyNZy4(+Hu`_p)7;;t9K{7nW?8MP5$3)Z*HTp>jp z$S}l-(5eMT6p+l+xZC7y&voHe*OA&osOyR)yLVcr79Up zQge7c+P5Bcr$?B#1Wf}32zm9&A2cP;8E;(BvcRAC13g{z*#e1*iVCmO6e5eVbFffn zBqVdMJZOJ2;DY4BHnkZ3Xr3(QazinKIuPoF1<*<-C+j*@37QrlnSgF@17HSasi)G> zjLWo^3VaE#;I{GyD0@&N76bG&`lZv2?c!ai4iA4MHnp^11D^r2NdbJ^fB*jN6RT&x z0K|p>4}vmI{}JT0FJHb)!RY|CCpVA`MWkYGVzc;)yksaM+<89VMMrd%9|sj^>d#I+ z)5KhG0k`~>c#ocFW(Ed~h{IT8zIHa+0fBJEo>|_mD=*zo=?%T!Q z>)^N0P8mV#if=V}bvEz7{|;Y#@?Ffm|AKVr(Z4@PJmvhZgiuDjA>J zLTRZMK?>DXZSBVO=`=y^ZJXCn8f$k)?7+j~(l0R`=kk^|jjWUgftM64+isb&wCs?V zr~!@I_@6#~;sst--(oIEtGa4x+94$(#?n7q4}=|o`iOc1BwG8B}-#o718ro*muIk2eTHK^uZdL@9FSbA0$!kr}$ge~+Aq?_G2jVf8v)hfY_T zSis5A9t=h@ZEe)#Sj`Rl*mG^<^i-o`6=kV+nEV&3`BUW*PqHsR)HjDQhV?>HEnle z^6ZonbmQUA_B44Xyxf$ktHoOS`V<;zqJIBHZeF#ufB)_a)BwaQ(Uot2&)|`MLqInd zTM4$Q!fEKdrnCUP@bIv^ZtXUTieJ9O0UgU?b*yS1 znApH(@)E-Tu;ViNAisBYDdI(T6Y_DB5YS>qz|@^IFJ}YeH3u>X&VxRBdRKtNMV&i$ zE+05eNQlE^y*ea5sJCLVWno3GGXWrGa(@0i_X?+e{%SXvBan~#R9TsKeC)-9{^ABS z0lhPTCAo0?C?|$~xqpd|i$mlw3xMO(K%$iNf17CDe-m^vh=Qe6oXteZ`~fa*SnFQ8 zq>}``o>AeQ4lDk#YF9#+zdyCdQM*`E`Tz%TQ!8?IWQIXQKTr}-!jM&6O%3hdpmS1Y zO`4GpI^~BCJbA2+uM@+r@96^@$j`?I5k0K}p}%^S0Q0f3vc$DUJkdBE4mBn{MeeHb z6DrIU6j6*Xlvykf4-Z4o?g0k`LvJ3i^qKIMvwZO2*OVPL74C!fI=w-cwc%1`(8{~- z9f8V+jhX%Xx3F#{MI?mvW>3lMIAK9S9E@6uAOSGBCa4i4G&EtLYgqh161i{IYAQnr z5P~kKgIh0eE=Z!fN=tYPKQR$VM6;!!-m z-Pn&B_-DxOs3QwS7&|j_K-}(4vwAeeqo$_D2BA}M{ujYZdskATiCKV<$BskM|FMYQ zEKPzuZe?$;4n|zzr12LgMl`Gn#Vs@h&7{IPR#H#^#nKAwVDLmer@eQk7Z#MEr^lzH z7_C`STY?jS{_-L6@L}3+OS#&U*@+7W`}?wHW^c=WjCz8y1d=2aXFNOj1Ulh*g=Qo0Sxc)cQ(E_N2``O7Jie!^tzh)E{ zr-oQ6#ut(%D2?TRO(gK>XCsJdrGUTEDJKlT^El<@EC8?%p`N#uBiu|e$U>mmojaYLALQ8h*e_q;GAhUS z@$um*6^OX8OW!O+lfDmzNL87x?o^nj<7ckhVu51wM?*KMebOzi9gP(5cPE-MC< z#z+M3j0LU+T#{}KA!VMLpAY`|Qw@qP=J@nzZU0|c_eY0u@n?%nwYFK|e__{NU8*8P z5)_!4KxfVxItZ#8gPcl$E zh?U}jqN!uz`&F=H5;>!Tmjyzr2ek9UO;nArF;?Q6Rdgnv2ZX#_AW=FE7mx7;s5Aqo zT2iUsgM@VOJ#r5lWcb>xE(uUx^K8XfTLjOmt`cDyM@Fc@9C!&h!Fos*1no$7LQD}npZovDHit>k&T`%9K42fgVFXT|wJvB89_^l=M z;=(uCREp!EO9k$XEvo_z7%4|=l$Dhm!QEZ|{rGLu#KbkgebE}%n5HEZkpDoJr5wDOA-owr0YMY&{q?yd zGn9TT43*HsQ3|-(a2(r5y&-2{ViE!L#=Ob@JW6o@mEyvhAFm;2@&K*T|K;Y^UkR|0 z2FZnsstIfmui#95D6}ZmLc=_;5ZRhDA*+KjHb{!)Zb)2cpL2%#ISs`O&}ege9ILjzJ}xA^aBy&t2SCLDWY0;c1!2LC z&AtJJf=b{K5x{(GK+X+kr!+A!G3e_6sA}oyorfB|y*5b?=|)*l%cfRWiOI>y>*Y(x zNf|*_KsU+3);uXWc>nR5jZ00gG7x0eir*98be!<*oOn?C&ecT-zi(AGnLKs4Vw zI^=9^Il%9K3cCPQ5e7(A-MMpzjh)@kkV_GQ`@dkL*6s`%Q}I4Pnc1)|A+tMtd|F>$ zUkd`20u?_7?7?GHR#QU(1u=eeE>*#yv$VL_G&o2Bx53ND=Ldvy350z62Ub=% zvM;9fI>E)!gA4z#<6>jOK6~}5b!vx$jZ_(z+t?4Iw#4r z8R`nS#pn~DLa3+&hu*$2?+?1Zm(SKX{$`EMi0*$x+5*hD3WuBQ?Aj6U?CK#c9~~R( z54t`%C}@fhr-YvQKSK~C9FPUDFyNqqiGYVG2cZL64Jt`WgEFW2TsX0I0~cL8P`S{6 z@bl--VS@z%|20NBo6Fh$4g8yHb;J2-2?pjR1B0PF4YgC)(=BGmN#N|P6+kB8T5|~r z*~~__Yu9A8qEoUSQcYWs2h);!NV%!U#Ki@GsU__2YPO4xfm{7xD@91wwZbF7k_yWL z%4UjIw+XqV`#J{lP$Z!W7iC2G$?~f>1ck1Edx1DQ5ix^8DL~B#DWTaSQ=z zIwCqL2~S6-FNofKA=;Va@0|;Q{7nLCgz(Cu`46O^4ZN!sEtq)q%6Hf><3mLKobz1h zKuVQ+5L!X!4OI*0yYc`Ql|!<_9FI=w@K|VNZw(tc{axNjrI_*GpFnvT3&DT*@Zqn% zKD%K**Um~A4X#H&m|i0kVb|Rh*Fk~~=BSMS}(iRJ1~dR(~S5q>|_c5pg)&oy5N=JOxQp9Libpt@|Tuy1JS z=JMX8-;bn(IXpEr-?qyPmQ)v;VkMZ}BUDybMzJ{!29bJhK5en{mX0ND3sj29P}g_H z&X+veioorzk||KS#2{mKk(4id9UcO7OH)YeVPut*@@y{F9TskenHym}mrJmGpYGYm zep>ZQ`L53AB=clS3ud>-c7_=VoCt%H?m%UDx@srTotfQ*nB7W~>+WC8tsg$4P+TAW z{su9p29EgOKjB{|j^;Q~5o1?#r~h{$n)XEx@9OxOA$sx(M9g0QsgeG9pKV^xeDf>N zNxeqWVvPamzHO;W#6Ae$G z(L9g4t?H^tHHMtEkdY~fR621Cc2ARzvGd-Bwhx~gC))AhGut3Gu3&0<4f-i5B_$pP z@I4-6pJA7wK~MNh)>lHnU_QH_Wa!bH^yZL=o42A*E))xXK9+!kXtRg|4Zf z=_MvoKzKC-AL{Z4wPld3tW{Ja=&(gxOSng`tXKE3PesR2n9@ILj#RGL_0sikd5xcSSd>u!;y9;*=|9^2N4sJOUv zg>xJ9EDa5fi*{<6n9VIL7Z)ML@4U@$0RLN0@=yg}K~Ib6k|BRIy}UfS-sbEaU-6KO ztDlOo1s|wpct#!E2&nXWC&oNUA`S#r8j+hIp$in_pudC&vjx~Ng$aBBVxgHkx6pX3 z%N~GRjl9vGO zczE?jQLhcXw$S9phBQOn zi#uovkbNfTbW-iCJD!az+~+Ua@0rlIX!qR>9tP+kDea} zhxp8!U+QaD!-Ee(JmFdNWH;7(H##=3liTjCc6=3ce&_t|iS>84__JykrGqwdy5d@Y zF;={V-{i@H1j+p8ou2=dXx5(t)U^*Z;iLD_b6PQH9FQU1w>m{o@Ci+JG4Pv#zW$%} z=@wME0W;|mG+(v1(sU4_y4XOa5f|Nvl~L!3>}}HF_EGJvk+)kZ?CvUHVv0C>UR_bR zbH__bAGvV8r$9%6kdo})J$8Df$$ABIbA_B7FQuU@6G$$?o!o(F5+q7gfxdyqdmG?T zgo6NZS;(lmu8zINXo2F14zYmP*iTi(3z}NKncTS^V@FaOJMz~lWOc|{{iEyKkG0uC zi0PQ9WzvG^E0@bICRQftKav#cm-CVpN9em+K{ zs@VK|BxEgLP`{JS#K0hFduPW60(@{I16p$gkZwYPk+>1tW!24p`t!5V%3^@y=J+?zC zldmChJv}|z>gR2k636OlLn7(tKb#~wbR*ZYi0MLp{tOX*Vl8#&q{=yP?UyY(8(Rh%&IW@BivhBs8o9w=zQ#(( zewDr|IF_xXAn*vvvx3V!htfx)o*Y#L??aNsMbeXFfyo|xMGp`0cEpMwG{e3xWT+#Na$@{KJ!Snd{+Rbw7dCYK4y{{yTk`6bRy?7zLWg+E(dE9+2FtBldL-yX) zNzwv67!L6F_kUn-9|5sO8c6yAm6QP}eeSgya0x`BUVc^=nZ4_sCmu zwf{8RB}q@pYe$h$a_RVxvnYPpaJ-uR%;#v2bCN+w>;IZ|IDfvWr-u|AAYd*pbvYgi z^`9|Pgh6i({9l%wQB;%)Joj6`WWcfTLoW^j;779i>DvgYlee4@2l9%IGpy{d(CJHC z!{p7oM=tZCYaB2#daD73a&Cj(lcQIp|MW5`p7g%#i&!Do8q{hP&anh1bJD zd5TX>9i9D5b98(~aAQ`Gz9cne=LBeS4&aU$m>)Qgi`xrK;RAYnv9Oi7C}8>v6xtNv%2*k|mYpfyjwB?Iiiwe)$62Rq7mzAiReAE{ z{m7y9!WP)Xx4|aK1qO^pe?1$}L@acj6!6x)w|kTXp$fxP_~Y!O7wIX7Vp1O444B!a zB^8(}h5$kSpbxoRXfUHJw@*gOwRytAL#DV5pI5Qnm9ozQXZF2O1;m$e5GW;JU&@1MTREw#Z3w9cxK3s z7ahH+B*tCLq3!iCk z+l0hiF7Kzi?+-_Pz4d7DjFD?V+@^v>_q+pFYLHFa|VFbyuXbTun=h zFH9Gq8Z_wLi%WWQRmHx(7(_+wxH3Hs(mVGI4Yj5%Qwt?09HCqgU=pND%k&}h zf|UVT;`}o|kzWuBfa(3ake668>4m&Nu0u(KhTzkzkyI0yf5ggw*o{#o$TK2hRR|ox z|Aw&bPP$;F2{7Cy)$t+FXMf^j0)v8L@4t`FoDkEA$78g9lhgcw2ml#o3A{(vs5(B_G9Zh13j*^X3v2Dqlt=K4K0moS zUSiqvA8qj*_wr7-a&h2icXvsbcC5HYJ~6#?8b-3>cMBX40?fs0lF1<<;7==oB~o~W zasHZd%USJFzlKt<^|HQ3O1Ho{_xj2-Gz5X@Kgbs#=Fc|QH zbv;KxWG^aP0Vf9o!?Bc@DVei_OSE)!Pxsf&(U7Tg742Ja3DH|hX(-OgIUGF=E-mGQ zQu-9KU@+oC0M)7W8$Ch<(xn>A4?{=9B7HiDSgNH8BNtI#y|ICnp=OOqr+M7D1(W!M zfaCx~PkC@Y5) zQr(l2I|s!dO+&z9N*#6P{g%-bBoX|QUgLim=iG^0gHx|`p3lMUd(UM%?YwRx|Nktf zzb}*Cy!rncP9}B}-qX#2g4Vwn2HNwWzZUT57rcMZ0#tr!sALjdgO9vfyDO>Q%uy*T zGoW2(r=&!;TWv$Y_)$c_!s2{X$oIxZUyK&S2Udnl1CXDco$xIWU%x`^k-F;37ZQx# zK>$3j6vz>x-^Buf6?lI&SJFY?aW*hXwGGB8l#5E+^!g7z5BaacY*LYMD-RbJjLxwd zRXIN_JCVA!_kQ`3tp@c_Fl6_XS+b-&cb(9|4-*aNPW9FE=R1nn+}89x`&c#7kc?Xa zxae#LjJp4VF;R5xQV@`kxbrN2``jkw=a{rKH9`t5txmBI_sW8xy@JgZ0#3CGNZjS% zYa78NxcA{=p6;n3*?C#mxw&Bb_JMYMr=)iC)sp($i@kz@mjoSK)RK_?CF8#TB^vy_ zd>AZ7XY}z9V%q$n$%caTo#SG&hIB~&fA8yy1E)Xw(F9*5o0M7c7r=(16Sg`ycyV_N zSRfpHQkV?Uidv_LLPVU=es%9XY)$4vnxD89x0>>Z_p8tCP+zZ zFvVzr0HU0!^$YImfVfx@zF>l5>*S_Xq%B0l3^7@L==auT_*Y3cl)&g@6xU><YGpv)_h~=7sntrV*f@)7hyvTY`%Fix+wY-k|WY9L`SEi2N!b=XK** zpCmLM@O$=_&Fo0e!(9JaZ0s`$9hoB{UV#~j6iOJGqEg8rLmd+sWq{p{MoLdM&@b@$ zBcY(p37QeT5+ts4AQFik$BJn3xX@zVv&=K6 zUZ0APC{})<>qe#iYsz0Wvl*8LBl2DCm`Ld;-oWlu$%%!)L5FS_G>v%)!5+5aQ)^Iq z*O#J7V7P%N>4#WR(a(zB*;Bj|5IpoiB2cKXB{lid=*IGA7ip!e7U|$L*oUvZNH5uP zfG>i9sc`>shStj7ggfv(LGX1rGc)q=8887_n1jGtykok;u6%1~VI-BF%ZlSlkHi|u zK!H5Ww7LoYxlCQiDf00t^JEOwR{(~}1-h}7m2R(lj<;$8?6P{bt06`K;R#h4l@bTY zjoi3_+$ab6qQ2$9=4OcaGltD=mcx1a`fSo9`_X5fxb8**ofVJcUIVmJPv-SYvTDog z6{!Jvvgl{3$zt^jX-Ijd!W0z9|NF!Nj7%Nh8Ehv_-L|i#X0g-f+!e8SE0MIOENbB4 zMQheaGO(iejr799lve;9;A@aVaEX4cARlJ)7ZS9;|6ul5lVU1}x8AX>&n|d={~G& z=+Y4D1;QB#>eIQus0?6C+E)?m+dkL3T$8Geo5(WxYjBZFirj)xC_JR%Eh?@XClZIz&oRH1Fv9xM!4?^+kq@FEnT2~Ry?$gcbN|vsp#wE zyF{bN5*?f^10`k+ z9f6+;IT#uJAV^amZ3H>Rz(6EWPnA)K2ZSMW=bO0}cVlB6Vt5r<()w&&zqM7^WYe@? zqRL2hp;$yB4Lmj#DlIF`<;TgsZOnvzFq@a)X!3;b#zDWp1itbKzN04oPD&8lQ{$^( zaeCVjSY^^F8D52Z09$7pCfxq(0VnKI*GU$5>Hfa+ZtAslMedAGnkCIH4-FM>pktTJ zMUoXM3&VU_84Tka?)v%)eYvITCUe_EW=IS`Z2*S9AW3L?Gz{O0ahD?j@?Uz7gj7sT z)0_UVn{-;j$S1f=iWMn&mjP7V*n84(gJc1pZu*_Kwz3M-;Xi@#5qIDGhpHxG0h|WU z9M8!i`dYHj_IZ1b)|Q{opHrx>uO82R!uqHxUzY=2HGtRDd=@WH8R9qDOq8VItHuiT ziYNfY=_o7X!*?U;b0mV+bm8K~913nD5{&IgIXioT%1IM_o8o%=cyDhksyzNTb@^DE z!cSIdk&SgF?3Ix^jLG(dUsd4YL8{kk$^?3bORKB@B4?e*r<+{;CKBLXjklV+xR|7^ zjF57h{#0DN@(iqeG=~lIQQFql)(ImXdKNy%L2=FL^K!$nCtgLL+U-hwy1 z%zIywM@Yp9Vm^lD7~i{R4)q01xQoy!vp^|_L?EuCIN`mI!BR{lSjaw2t#`KeeWLsI zME7g)zqTTar?ozG|4&WV0hVLm_U|+(X&?=yDG5nR(a@H(Bt=R~Noa|tuF%#%QfX02 zi!!28Q7IbQNtC7%8ibb|e*KhE>FP6^4@9;;_N`bM)AAAhLb z>)EW8yel9mXk3L>@Pz4@%&h{WX(bUM#kH@SmdeOXPJUGG*3}QP@>g-4Sa-|xYsyF; z?ePiv$ER~=ojTdPzHad^XV)g`3?i$Cdm|Vt#&)zdLt0Mm)1CaghN%NsP1V@LkRnp+ z(Y5^3hQe|m#lLe2jJphsj5eTR2JS6dF*4<_Gzm1ae+><#nLqBkbD~A??S9LKh5$AJ z537vOfa|OKQYjBBH#b!;1);dF*QN$vm1w3U=KA^>u-$3_>O#4^XpBnjJQbLho=yZVLn9-D5Lpna`tCep=&ZrR6CK3B zz(4`o@Vvv1m>?ah!uhZbHDKe%Lz)*x4B1>rmfKn_bMar>l(FD>1R#j{uIQm7Z*N>{ zycAMzh5&S^NX_FZG}p&nLMkAjRHzCmdYEoPlW*5;JQJ@$$EehYD{3I_>g~1tM~;}y zWJ6+9ffJOD24pwn312j;>^Y=Fcp=y6fos3|X3LV*sEvYw)b-kIvp$~XUbpgwkvg-B z*OWl`3~1BDNsed_(Wj>|?%{5YB$YWamBJ21@J1pqR1_PgxIjX3hUgm9 zTNJs{6=!U6TBvVtUk#D#rQ0I8oT1B*ICT-CMS!Q5fKAWx^KOxIEH#gN39NLftv9}d@M#wpFgLs2o=TVwqvPyp02|Vq0|gVfT?}R8{3r!OA>cFD^a+qu%7t zpF&Fl?4u-=OTSeFZ=acQV@~mVJdH1?3TxR=)Ga~_#H@?Wp z$=z$IEqmP82`vi;=wAG~$EUikO%(7%6`bH@J6y%bB4NaR76!y_U3tE}xjWWm*VZaa zNwMJ>Q(6uV;*f{^)4A!ezQS(MI9G6KrEdG4^W_?xPg{qMR-c}b+g2R6X6_4=?P$?L z0RP>1cM=X4PlN>;G-i+LQVR$5P)v#&Wt%W9)8U6=A`sY;AKJ1REa$@&`Fz@wZ8E@K zI~KD>@!=~)L`0V&J4lHJkRmZC#|WUHEJLesRCV_7rk zO+FvB{dn7amwTLAce<9{?G{JPgl;g7+W=Y+uLn-yV`0e~xhvQ=R>KnBCR3Sanrr2o zN#$^$jIl*CLs*>uq~kpEIPMeW|o%kd9sAmg<)F0mapBho7yPX`%vN zMW5ZPq!@*YBA15yZlDB+gGIfvB?;Ip_#eg{hIg5F7IRaG=7BPR+_yyNhftq_myIT264#y` zGOL%4l|8v;GLzoiL=vP9i`=I6p_KUeP=n-MBs762-Jdx3gk!%?39J!y$H``#ZSq*u zGxM7HQ>I;Kea>^~6!RnRL;VfQzx@LYx{5f^;g7&OiwH#95KrB;kC&X888j0N-Ex%% zZ3!2D&+y2|atBR_EFy8c5NAw0j36*a189Hac_@ODh&M8As%XbY;(9m4(bF$AEvmhE zaS5cdCff?+pPirG0ij?du-3Sb6tZMwT|V-|{DYeP)ol|=t)~kqoGRG6#TIOZ=5Hv= zr4VaK*M54@kWjKa)hz7KIU2yMKwwUE7k%Y$F2hkS1bzTGqU0H?6BYRgxVwdegE+IM zSq!#p)ANRRC^0_XHIb-c`dOI0y;pc1Idbb@E{lBJ)vFhfwZkJ+V0J(KA=VJ$LA%NLrE$NIcp*DQA1iAIXu_z$uuW@ zD&E|V;?!z*Vkh7u?RIE-t6z1UYIWKqw|;7<7GV{~MW>ZG%g-lv-i$317kG=h|*qky`GOTRH;B!ax`9j z^ZZjykqZgD9~x>*1ow-1K8V6yA%Ep~oYoHKyxffk)S?ATI>%sRxI5$mE;*5NIl8a%sxQuQMA^qV>~wY5pGS;bac74IJmIPSKZ+2q5& zH~=b)X)M*|sMcha?zX1llegf}*SJ!Dc0DuF^L*-W=#+Vp2sDY!n;B@hb%kJ!2ymwU zRv|rT(FCwP^mU@35+J8%+U?+if(Oblj0_oS}0btvyPAB^c(J1D&C_> z2jJ8OkhhsRLMaxAHdou~=6GGH2DfT*6HWs?6xFfk4r- zXH`8tlmJ{6dr+YCLkB7-m@4DKmj4KpE0JvHmp=E14o3bZWURrsPk!EeS#2Gh&02AM zUE|g)ceogMs`EsK&HxnT;N}j&wMPWbD0HfbeK9d{ISpd`Cn}>p-rl#tRl0h3U|9>` z*SpqyJi422>z4IGaa6sHy*;Dq(>dv9k2R$O&>W;8kN`~sivQL)!1AEN3C=G zHUFc2wHn!EOMeIdL0&b37}!xxX+amh$yvIbfbU|~#= z+VlXVA3L@zf$F0IJa}{7;q7Q!OuP*&M6O=%3NrsFkZpc%4SD*IA){hM(n%=D>{H!E zoXAQoc=(W%u&D1s@n-B=(zHMqbVN69*}S=#!tDy-jx7SUkyDV<`qer<=Nd;@_7lnH z1pX%>g~?+gTtO&r|7Z47L%Iom1wg$h@F3|p3jXE;dz=z*D$eR~BKlhds6lHH8Wp9D zPec1`Wd{0*Q1DJpb!Q&4PVL4)3G6gP{kE#9DTNqZ>HJ$Qj-(GDTJ7iazt^HKKvE-g z02Hi&>D1!F-xI6shynCpg@y!x3-n8`+`P#G-7xv=L$9xX{yP`8pS6k}u3KUpglFZp zzZ`=oKUoaeTEyK3o|7apk>C!gwtot5?7D zzAY2%%|(75HO~c8QHtY8Ph=#DK|I0-@DULE8~F5>#59Iu8cR8OV$w@XllUe9hXtc< zR!wiQhKBc_(e=KiVtJLF6VLurwzwp|Whtj*n@FDODWSNq->YW6586051cS8i#x6o0 z>fuPkZvo@B_hBa>VZ7K$NO|HS3V6Ts=UeU?3L*~lyMV}nOQ9mg^nK1d#d1d$%rygX z@T4K^k61ri%6vVM?n68oXhcqh^=>+ZWEC+n2GmiNI4X#X7+oYq9&eHRCzjs+ZF0!@ z@Ev8AOCl=2`B>r|CF=zj5OQ|5l2j~0wL${o7a}f+g-qNgu0KETNApQcVYtPjp#`q; zbd2Ccpv6{!;>&K0=hr^z7c&6G9)#3AYgp4rgrj!48mxSvN&3NmRd_n4y~Q_5+D)wk z6;_2s)pBR=s&Bo|Q&GW+5)ZbiHDgrYvR)gL*T|%9DLiSGcjPG>i6(7KQN9d6GeUh@ z;9-cDv1cK)EppUtn{TPjbUt5LU)SrR{SI9Jm3MXQL~wyT4r1iDwX*}=971B z@YIp3#0!MfllVI1>Ki54tT zo^lB;E}pd8As)#mR|%YgY%A>f0X%qQH}KHmqDIyhO% zOP19TTp8&R5wc)C*;jFD&yEB3o5|^{?Bm+%!5Pk=>~4tc9JI|O{045(BJ;m~PxxKD zkv&Ec?!Wi^@wuguAG*5w0nWdKTY{6HzX7h7*SOiwsB9UWSbQ$p&8qiVbQ%mY4|qS6 zK1Yh9!gBL)U;0h=cz*6K-j1%QZoU-GW>8}CL|}mLS##JgR!x&I_(+B)aXIAS4!=n} z=-i32(ZE_Rk)(F3ksU8 zPs_+%?pq#0SCXB8&dLVvCch9{S&9fVVPL@8D0=_*RO=mPvV!pA5nV^91audD5)Z>q z;(rKzEtLK%ZtuQ*8XzPeAbky?y{@fG?Yfa}L;v~OGaj@0WgU2UZeQ9r;^ih!7-??W zxig{dauaY~9^xUM9g}bTYaNl;7u`kgF*R;$u|X5{VveZ};FmlQ3D} znicS)^F`KJZ&HQqAe^+Hux%kQ5`Gy~EZ6U_sC@Z)_tndneF%&skt;CZkO(O3#tP&@ z7fzSeVpsz)ibBfU2Qd;cwA1=i+U=+7Z2nZ~r*ap2fKZ z61xfMh#Kf=vT(B}73LdfXAyD>R>@YULQTz|SSDBW#B(Q;6gguP&NS85-o_Q(iUiP~ z(oMT6}XQUv&*arEAupxU$cUr$L{sF{^qwaN@f{4!dyf*Wws(<< zhVLwlFcDxq!KS&@2ph=mP5M}$8rytY|;3er%X#C9ob9S0Jvas-M z&@_`Yt5DidV^@;zioB7dPn|6ZSO4fYx=T8mVT#f=uMZh698E4QTa#m+Ja|AyBidB7 zO5`vFoYxHD+o_LK>7U4H<}lq1b(D25ryz@-Qy9hKwxdS#OBby--cVoOFTj2%|ERLT z*Erb|tqe3GdAan|a+f+)KGvGO9G4{Rg_+fPM%d8_$&M7+0NhgC5Kq+128tn0f-f?- zZKG^W=y&B&t95wonOF;F+7whDpNy+Wq~;_2MR{vkKi(IHX<$(pVYaAak{(lVX+uzi zq)J2RJ<_o4P(JXWF5;w>fG{flP}kJ70s4jh+l!vSHBeBHDHlJZ6Cq|hJK7oq!71Lo z={@u72?={OHL-4qT~c6|hu7fwPv22)FHuF7!zX{rz#dAVaL9ERG z3{|qY95sIx0_h2oT*4oXjp1smK`}|L24Z0ZCGyV`S#tKIUh*z37`}tG)4lB!@m~mN zKu3e1s0ydTehZ5Tn2GGr;6f8D0(0mp;-Ms-QUAH3g+o*gv`8f94xC{{MFlY#og44| zQ#1Q<2v7$})1mdWHv=z@BzD^24?ndb%&)`&!|44Wj#Fj-H=}u^84#fX%A?B3$bJ8h zd7nPF^M>7e(*c#h!>PV#Es5nHcJ5!le0lmwT+~PZ+0*zd)Dn>6LyG*`ftuUhxj!U5 zyRv5-F~^-&Bpe4gz+eUyrum~L|BIBo(Dis(z7-@-8AZ=Sbg4HT&t-+5HWz32zGyIx zJwZNg`NH3mto#ZbxaaP@T?m@FuwP=^c^t?j4jG}^hy;iT4YM6;9ChN2`cgb1LU9+` zyc_!*{2OMUh%R>j&t5hu>5-%chpP|#I&}+` z`QH^fdcXccmWeio6GnBXm1+hpOY1#(KK(!*r?8C7 zN*b|wBIR?EfBzfYGzeQE*}T&qec7Kc{LS@;pVA~q;+pU14fh9;AFE$Npn?)#8_jGp z$_O-yl%6y{FPen=tjSEKCs&ZqzXsq3gki1<0^Xq z$wSL&@m1ltH;JWzK*Zz+10zM|`CiI>brsJ|bQEB05#cK@I?abRsTBVe1-L6%Ou}v8 z^8=uKiSXjCyHu8Y3DOn;qO=s)Xz(j}{mUIgc?7jGce#dLL=t|Oi6jx%{qKjRB_$f@F8!ZC|l&YV9cezthbng5vm}8Keu7d~~d_3X=gp%nN7BdO_zbiKoWRTl2 zfVV^t5eQzQZ`~3?)zgQI3<_9H^ch3$`%Px8BLW?-J&TNtbR6y$91tG1Q%QND;OS?6 z_O6-n}8haWlg zqbz%eXaMv(uZR(oL#x(`23sW&k%b|_p67`To9;r5$eOX1b)K$ES2Vw{9%D(+&8Ymf zwOqAg;<=sCxgG0vdoRJn0j!jz&?Qk?IHo69xnsw>VWXS-;(^Wbu%JJKv%L^(&&v z$UG1*0LV7EjV_HCRuRV?0HPB5$drlCR^g$p)6}>n3Fm}CoaD$vMrr}yLnViJ&}e)g zB4y69dalyjFVnDGGp4&*c4UFUP$+2ixh%T+ZU`?@eEyfdz;iwK=vUd zB5R9`iMVg|JZi1HGl43Mvsa_dCNB!aT?in!+jt+Ra)LOOTEsj7)q@X+o{+Bl#GLYi zX~&g{*JfI{&tBZw8xY6s`dy@NY`!(5xrx%@HKi=F#IdFF zmcE0?DmGVX$Ha^341P1hlY`L!RL09TO5g{wm{mZht!OGw1A(0u-SGX6$v?EUtKh#11lbd7{f`1-yFjh57l(U0xr=P z_{zgHds5H`C(0Z+F2agn)BJpz+u?*ZllY_U*SVZH5r%#LaCr>t0j)IMaodD56PHvR zFN*txRNQkZ_x@xFiiRRS0D?4ttF1!wuzP;Lu6Rs8;$4C+UE-uFZFts&i5Gu(|6W91 zp4RX{lxTIU`FvF#?8`95O1Ht4(;*;I)y|W)cO37n*{Kk~ z_L^@$if?{{p__D{dg7PN@A;3}0pk1TzlZbFDLl%?X`K!Gh1C5(rf4sKs-fd!f+N41 z{d7kT7mV8MxQ5@qN{Ejq7Z#7KqtU^1JDy=VO~kP(`@B3OCKsT~=r^~m`Oe4(fBy+IK~qGoQF$v_0xJ zEG#H^f#$q_VBklqeGnKG!lc9CbMkd4Z?HErmO6TsLr0FRRUyG_i)VgsZoZ!^RgtQ= zG(zA=zc@-TY$V~hdMk<3o8sypIJSsrXw^$MoD3B&rZ2hnh{=#x^BVvDdhx-9{`7|> z+)|W@lU*m_#{d@~!I5->z$bPu-5OI%Oz>%F*n*>yY~tfM4!Y+bxEs}X*WS60sr+S} zgnA3~N2@Qdq1*02L-`pPLciEkXT=G=lq6u0+#)TQW|)`LtVCE;oLH{oMj2yI)>q<&M}pHv_kVp-t7+m8r zn}Uq988~ga9@SLKg!5zQUi2%N;uxQ()!iS?QdkH*T5q-RjY(0a6a zn^?Xj`E8-^JVS+sxL%A=`;aFa%QExxfb|FL|{g&v&?ot>P;lHyJ+YJ zasFU8$+_NQt#VZ}skM}fOUJ2vu{>)R{q`9)x*~T*9vKFs+zT-q&aP*aB!OH!2$iw0 zwB$r$A}-LiI}Czt6e!W0oum-3Yu@Q;7ju&Z0j;u*w-g_jTnjr-9vPGd#37V~*-h)u z1#|MYtoKB*ap%sRNr7P3;$k{Q&n0`JmqbNz`TGFYL6&>YbJr4JG$2DF<4zxjX=d zBw@tW2!1Q#b(mDKykj#Bb__WFcj#TpOF_5wjt-nU>CCvLEL3s8gFUl}mt1M@-sx0` z#uIf?d+|#{Y35M+dzUI0A+E__=p% zl3XHg&_1ScuhM4`$P!6i!0xI<4O!sSk;83ZV9TRjs_CiLfi4#zv=;k<#PcY?@wzinQvNY>vpcw7N z5wFXMY*I(tBj0Y{e&MrY-}=I~dOPUlNn1qT+}gGE*bF=y_ltW1dqon_Wn8&B>MQkZ zSUii?mVe)J#A9uY`4j9qnqVr6JPLAHEYKBPBqi>vXu;!L0=zY$VKT+?PVLL&T|N#e zQV}xOt}qySsXKp8xGzXCeDpOmo7tqqU+^H?brn<%WX(Wq8-QFcDE+@gSK9H^D163P8`T6MhGu0z$VjS zZk~?5L3!~KRkU|J7KrsEnSTkqjYq{#0Y5z>Kd4Ds?~PF1(D=|01LOo14bN6B`|v=3 ze%qA^qe-;9B;|$F9OO1YvCa%hBgWTNOY6z4`jn(N`TTMY_*7zs0XTa9XjwRnwCOfg zUj)?l31$1(Zr#j~<8`Ws5Qu&=t~eElfF4yrp1B@tRp^bt(9j8?x_$RdMafVhDGP#K z03#1a-ivB(hq_dp2Dqigg?VxaAslc{S2tj^?d&s3pIu;yr1u3cK(7QK0$Udd8nv=^ zuh5?PZLC$x_eXSJ4i9ezO+Y446&3NI-9s&g5@-=(q7}4Y#UmX??(PBa3+YDpIlO=> zfm77uWG6LWXdFgTrU1nY6A>I~tri!ip9`NqY{gpDE`gR1*P9NtZ(rMnGhjF(wWlfv zQC*e)ju09eqPdi1*Z`!SDEp#Ojf$?2WP6s6}BKJlbPXGlUJFentqye+? zC{;<@iiZoQ;Nr!*{hrAyh62YXneVaJ180x&GaIC-g+=G+2yCXp2XwY}n_}Mqqm4#y zK|!0;kHP@^oAp4j#5F}4fq&5M`iwQ|Zrz$6F5hrad3xxWbv27>I^AP)%QP!c(tnQM z@^tBW|99{e9d{gyTyBeF0pw5w7luRx3CMZfUmbXAvQW9uzW@kaD~aU~i@A&Isp*j4lGOLQXtev(vi{V^Hr$?^ho?erECU-wScOA?y5HN zu$((!QR1`EnmBpNnNLMjuDUaG`A=lDR{W zYaV&tD6Ll$%i&_cst+GUO&>1V%QUq)72?*dM$8ZC zD+nd083;0M5KEW$TsR`in<|MdQ8>_arA@IMEisyFH+@-8T%J+p~%C%lMe!thjJ7 zZ*P##4#r^83ZkzBkRbxu3a#(!S3*r#BZ{fu$&=xo>cRatvSJi(tv!puEX?HL!cz+l z=n}31WS_I5^PotNi!RCNx&L|0b>aq8jxf3q$f==UKuR!OtZooOGtkCB&n$v$JQCBh zZ(kL@gBzL;H0(m#x39uUO7PiTrEPR4RgPTJxTfJXt8ID9`%I5VgvI^Y?Lw+?&9gtf zy)6?KMMUzBmhmA#PYvAQgY(qvIsDqEe9te{d;)Ix4nbD1w%+P++C~zQ6J!`^1#W}d zq)5P_hhw3V(64|-QbZkiqORy<+_}??yKN6TGf^5DID-J#G{_YXz@!U;@6xn4WDZ1) zt`BuK(p{Gg3(2#~mwX=6XW-bWxa#~oCAVK!xA=X1V`C;v9RHJ1U(rtA=G7oat}dA6 zjB)8Nqsg6qfBm}ddS(B)nV?q=$B!Stq+l|?mSzM~#ola_Rv^1xp-DDHqB%qNJDf&e zUlo-_&g-@MPf)GvKr!S2z)ccCMZdk^S5*hADfh;yIe=oKYtOXb_ZMyOMtS+^$fn4s zC~@5FIcQ#!RnC|2rO8PFK*mecz|Sy&(B|QjCz;5c-*skKulNJjoDgPJVul$s$hr_2 z-2sPboZG>#-6-9mAxmoHi)6S9VdBqWfUquFW()%6Ub~jq9Pq%Xy|D9n*9x3J> z#m7N9LB@vhyv6_*7*g9)r)R#~{T+%ieEsreAQ^%NHxaQP15WyH$nUD4|BN~maq2)c z3Ei*R>C|_i8cEy&L|9~CG<3Dd!~>K*Q0fxAjX_O(ddi>g_5X_r0QtFldTxL;;5`y1 zdizak@hq}QMnkM_Hi&yT*s48sE}9}Az#;q&8aZMtz){z~B@ z`!XjVol~ij2{+ZGiQ0`w<1w`uc2>p{h34fNGDVOgko6?z@-YdsU;eO#cs3n<&7g|v zZ3BhX0|abP@v*!QhG;`P@ALKEQ`U1faW<@bIchC4j0u}ZrcIJTBjr{7?YK0u0sWZZ zz&LuBZ)P=?5~^+y-1h)s(h=4PDQci&Z}UDgU07N%Cg-1Z1(FJAHnaP^49Bs?#0`>k zFdP#TE~#2ig$@ynKn1UkHI@i-IP(5F(mbZf09tIj6Rj^l?_iI0&U6CON+$&13QtaZ zd)e#ICKxrztTPI%>I@DRXV*o z)qv-Fzg+PmFn+u!>I9k(zu<~Qyf-o+{5+WlzZ5o5=t8dtYVYZV3=7pOi zBqYvQ-nOXeAlZ+q9#e4yAx(!O1BLlV`a)MMe#PtV z8>OT&aVqd3R>gmM7z*~!U{($~lW}B^Pg0#ye-XjC>1SzN$M@_g|J*N#G|YsR#0Zzn zudlODPj!i8mF$gB_8n++S_x*el>NB*er5p_Vu`Sss7$ z_WKxLbfO*E2SR|f9-DlE=Z9tF*sMw=YNy}cy*L+xd*JiErytY6AL~+uWf<^|AC0x3 z1$s>uJz98F)DDx+*W=?c%nxQ$rju;rlFG)Ew-2fuvPH$HFx_rsS)RxgK~O=K{rKkuONSnWLamDp8E zN8!kp$()x+d)Jndk{TKtv&@bz#f6@U2ANmnrP&uCuH^D+Q>n{+{I(dWg$bDM_0Ky5 z>(ITaa4}Djo~9a(kw&Et6IgL4z_i(q??xaiosf&mgyi(Wbs5~*8}#ChG9Auis#W37 zrqCmW8>W#Lx*s$QrEqfbY#3Jb3Wi(8seC{uC9k_mlEE`c)8BW&N0foi4+y6y4fL@n zwjL_GoFO z(zBM({1x&|i>xjmOZTGQyo->5W@tXLv7_yW)Aq&8BL7n#wLiHvFcjVcxsCI7$T!E% zfqW*0+R_J)dHlyB=O-%n-Xp(OI3YW*!@F$d@2R||ud;BTFODmO1LF-ooGrnrw6beeOi|GD$uCjgRe z;CZKfbhjgr@+@2$OfH=q*xSrB0cD@-k5oXMoPgen9D4ZOWVmI8Sgvssl^?g|h&Ip= zA(0-X$8c#Qa*}+g`7TYezL!A1qK1kz!%FMdTqpE7N4q8-L9B6hg8FDF7}5-mi;N4b zt*Ozb)}7MN?s!?6#NUwzB_8Z=IpfZBG$MR3Gz`JjyLJ9~7)F=UKnkQjQo+>^LCkV| zY?3NZGB4q6QTW=#vG6OO65hHM6SpVn@%h06vjPgQ90b$+ixNbn4Tq;U;>El z3#uE_Q69pd5;_<}QMSig3*xu}+QtZo#}6m-JB*G4#>&CVTkGk_YIvuk=Qb3D&^hEh zJ3>R~)~nmb+0A&*NB>eJgNR13!Q(=R+y$)(5?UdNU+ zMjQ(N09SarUHqa@C(~X)A_Ex#8JKePZ)SFjI~k5+TVT9S*0?jF!Lj3Rqpd{TjA`Mt)w+yc?mc_

p|K ziy{&GD5;1hmuMZ9v9s^NG!0VD_+6Ia&au znFRkrO$4}SZ^A1K+7`gZIDfhq3JWb{b3#dbWcW9jm4h>9d5jUBLdH829kJUWRYQnP zB-UNVcfltYl8^{}YWO-r+GO^z9`1El=nxFvOX=%^E3kKdj+w~hy72B>a@MDh+jSVE>dz>|U^4QkaeqK$-J zmK-i1_TBm`8PO(;#Lh3^W;c?I1#%U?pdS!Qj0u@f`1wh<|Q^b;Y=vCk?^52%k2> zq5;Qr6>GI@abfoFCH13kCm=b>#)io+>WLx9X4(3uHT^(ZS4^ zd@cYY=<{rm^n@bn1yTnvPB$$&mC8=Se8N1d=j(`u0q_yvB*;I--q}BkST^}R%J|SB zoptN}eS4J>?j*?m!ku%z{BgR9%T@IMYggG80QjVK{qg&^)Peb{ms{89y{Y=JR}a5U zG(pdy`C+lGiII^izETF*9Ta65Fbe&vqN%fAT#ZM1;*}`=+bI8n0t3-q#?7!rZj&ip z?}?jjqI8N4gBs$xb{uUZs!SoBNSr7%QP#ENhK*LYB*^B`FVa$jbO5Mqk5H68nT z-YxLko2YE5QgKg_Gz<_ys0Z(xq@6JnMmZfQv0t~hUssv`d7xaQzdbioxaj>RAqFVr ztlLajCs1K)YWn{ECY+$BjW#=*^Q4!kh)5RBHCbRkeBXWcA-simjPa$JrFFS4H7$iA zQtrCYHN~srOLD`)m|--p$l9ZT*Fh$h@&Vx7|GcrY#ZlG+I{`DJTLcB=;lm9N4ee8W z%yKz0GJ_O!Wo2dM?asjY3;FlFmd+QAA2PU;m;>c=8qRgTo@*r#F#kmaxDJ$rcT@e% z)3v|lA1FC-mqPl~MMc@6gSzr(QMQ{}(WjjLVhiU;ZE`57Q`?agKvMjn(wP*9QiGy) zKMaOMYv}*y6T-kb^N!aGEOUFNtoSjCGsbD_pILDNuK>XfW*3Y?OHUBuBBa9f+8or~ zzn_!bFGR_WtYDb^BH3OO zrs3{Em@+P}SuUfE26snGnMU`G8XcOoRE0A-*m(+&Dy%OlLG@4oe*g@qnqxU{2XKIox@ zf#ctVZj1~b?s#$(1oQ#CTD;+CoXP66*Y)+6*Pvx)7gWi+!ShlIlSQEHC$cx(FqyiA z`(t1FO%3Z`DpQk=;P`I*qtgiG`z4%Aws62AQ~RO`C8>o&f>2z4PymO#jAOpuBZGcWxLkO@CsG__Q(Z zP@-7+NG;B@?DiC58sK27;EdAIlGL-QQwyw_02xWxX-c)_msub-cCp z#F`Bo?y~HD%CKG`EQS`Kcg$A)+**Z_n}iUUoSM2660(#K&bG(noow4{=OOl?8D$Tx z3zAr!X0$l5ShdKz`TosY;9d@7>X88v4sUJ-K~%Iupi;Uc%c)+6;l5<8@)AzzppCjl z)RHZai!N?bh@Sc!(HP9u_8BLf&C5_uqQHX7LK@?l;8ly~3yJKu>Zh^YVp#u|N_@dk z|DfCY0dFq4*;RxBw~!(y-o9_FRjbnfMOIvljXfBY5*K;OqUMv#)9hQ7)TE%8Xx9~C zz7=5i$&6dzwWM07usUD8joQu{9idWe=FslX`byQyjq~bia6ql}(pb8HTnN^WVP6vb%5TkZ3PQKkvT*k)54r zb$H=LsX-e?s97E!Z}+D9dJVD)=St}*1sa#h*0OvW ziJ*ZZ|GqfZ<7?7>cH!0ckp@@Jvl0wy88TXRJ2q|2O4gma81GeU$#qY^y4v~K4f8yo zhR4fqrex_UXAIkq6+I~y`+T@t&TNJJPLVHZMTwzV_te}TT-Ue3Q!ALKEx(nF=5^DvmwvAaxbQ2lGfnfUNKdAE5xZqmcYR$Q#QkT= z%12m_LwUS>>-dnt`_>!ob1|F4y)cckwHCA_nmN!OPT8tR0V2$$&66ifgrI*B82{+i z$)srXY}HU~PtKMgk3^RKatRIT?ATTO z-k;Q-&h4HdRl-Y!IxCzCWiWgVjDE|p7vmX3o;uZ3b z`kl2St_sJ7{a5V#U=n&mt+W4r^+P?bgQc%8^N$p1A1pS2`4DKx2DI(~Tai~KxqB|K zd81{>vo(TQ?+YH>D0HSuQ#=wWmX?Q~cXdZ12l#0dIZjl-Ndx95G z{HZ53W6WoEcIy}Q{vgkz#eH-aB5X_f1jggo$6TK#JR|?|VX>KQN6ec3ap!uO*3*)K zw1nc$+TNk;VjLAV?Xr+ z6Z!7K&T-C_Jt%}Q*QwKtT?a{%Q9Hb7~JpJ$o?2k9Pr!EX4B+rvi zU%si>DjOdd47m}m`q}rW=IZgyzAC?^F*V1ea!_;t|K6s!|FPH@kTmDgGvUpmUl zPz!!YE0oyN$1KdJ=egap=x4kX#l(y+}~VB4^gUEF#&Y+ zV()@DBGc+!mqOX=aN5h%%xpV!UrdqdXN!QlFCAPd7r&eHc5JXS!*XxL(nx?X&93 z+%}bTDSXq$jJ`MHJ=8PiuWPk~To^}o8g9(EZnQ*oV}^;_iB6r}b2LY-O*rakOI+J6 zE-*M5H(gu54_d-#BM6O(R!eY{Km zt;XSZcHgqPR?Hsv8Pwmft0ne+Acwkq38(rp<@n1yIoekb-Fp}lWr+A0-vmvP_NJJY&yOBB^D9$+VMej+28-ltu}8m`)^kR=CJjogW}lMQ;HlKn{vl>) zTS@GZO|VJ^?7J}TssPpbF--Ts9I!A*ni1sx^(R$=_qgW~8-B&iwXJ)FF6hZHi(PT8 z&sMDc(A8-E_F;(f%wQ8H7+zqGux422{DXILXXAt9u~Q5F+e3PH>OJcp zqe9p5zRBD1s3`H7rDY0-`p8ZFq9YF;UW@f>cy*{i`esY%i{+Xe7h2w=zH1PSS)X zU5nA=tX5y6`+B*s?8rk`Gl?CK5{er#F0wehwV*uR?{2C)Va&!ac14xnEon$>dwhk+ z8n>lCu19dFuazRtK2$jP6915Q%7&;Saqqf}Vz)kPfv;Ydt7R$kbU_-sGr!BiGeXc1_nd zw0}8OE_CD2KxkD=qM$?1%G1Xc+Y+L7FMP9JI_;L4yKkj>QMNeq^ks{o&bgbcGI&E_ zFXq{@oh6HhWTM=Lu2l8Tteqd7_;M|f!};}SzyF2htPav^Hh(*|fn9j1QfEezyZAP& z8=IwyJatB9abkh4+YMGsTg?QyR@RC}&wq;>rjmWKGH-6w6HYj0taE#VvZduUj><@d zE!|w<%zD;>vkEWhO4@pOUD8kPIHkF=min_bWM^Le@x>sH#{S25?cY915xe%TW(V_c zrXQ7;PoJuaNEp@I)3W(ySP+Nvi;evY%Dq!QZ+?jx?i$>}w;6~rs`iI4$(hBuAMyKc zFS?fcb2I;NlaE=PXrSvVgTpue2s|I8EmdfAEB*e~XNGAOdQ2>U#b)Lj(}h|cMC3*LTTN{jq`pC4K(e~w+LcvhPGsemStK|UZZtN1q#JK$e^ zqA8Xlh4a2}Fzy!&o!rh8%g!1}=dwxmjlSmF?P|h1HrnCH(X6+kKbMot5+u2Qy*XR%b>q5z*Q_R$ zSv~0avLUs#gq~Tu45`o<*_89q&aL`k0|KA8ZUK~~^8en%C=JFe;Pmf1!P4AA9kTis z0wvqg>$-eRxe=V(FR5LvV1Eili{q{{>>|qolzu$sUqi$7kJtwVuSp>YvrhGvCqsKQ z(BD$j6u7midF!IKYZnBBbZDHHTK881SPUecCc<~sF)s>4hRU0h8zIqh=H0Q%qYn1 zk}(kc$j3HgPA!jH^vq(?Hy+%K+7j5z{YKGbkIYIzKOG^pdYdj#FHOqR{HQa{%GPmk zyZ{2k3prh~9j=#PNNyeh#i0>$03zUUw8)71mO|^N^s`wbUQfKiVLQOciVw4`znPgX zT>!cR88WhuC~@ev@&J%SPboGOMIVnOg&!m%gbwkFz$&OreT*SL^|dFauoJ#>0Ac!R>C@vHP*we6#Of@edc3tQHPj z+pN@G_I>#5(UDau!Xf4=>g+pjPm#)Io1bx1VVb>p)OqaS;er(-?)aG+xs zGi`fKPNwdF15f7eW!~K5z1pR;tI1z4o=@&07K#^C7EmXPQy_#tmiZ#nN)NdXAV90@ z>-VMCCEjJ93HZ~nA=zWyu+6{2k={VjVM5{6=sv6AZP8D?Kk&M=h}^kaW6dqFkojhFck=Z%FVnDL%*pxzaTk3+?wa}0!v!%Ey8?sMuYOwB}q~AIv zqcS7=L7DoPn11SEn9#-h9-sD=U$^E~Z@=8O%8v6g3XNJl@o1%m*Mi9y)G@&k!c#3# z(j0W`+6n!W|GP?d3eB3=AF6S`u|3*b@AxkJ>vGi}N;33M*)G%gyl40ChuqZMzx!l` z+}wG)jHsup_ICEQMoL89GHAjuLW^wBST@UE58K>Ft zXfW|IaB5P2@uOufFMaYbcHK{0M>9(og;Kl@+i|PQ>c#{szOd}BUiTY^`^iHZo+VR^ zR(&s8+B1qQ#1mNUC1T=VN(Z|o4Qg`qe|Z?lv8#EgpUt}R<)%{gHHS4hD$Xdqs#5Bv zY}FTf=Mvo_(l;n?%#1fN?s~y=H_3W^pkso?%JDeHm9Kr?p1HV6sZvzWL56*jCOEZi z&F1@?_0OF9hD%zMM@{grYt$LPYO^~rjp<5XzWl&18f+KO{JX1qqe>tBlLAJ?bK){r zH|p64(XTBFx{#YRu-a;p@#|qjsp7y@jk#2*uGm!`>7o)P;XnhqT-WD$fh!jpQvXCW zDb(#>!hCGA9q%gzCYdXy3L}#zwn(f_ICf2QrPX(?S33xWZ38{KLIy%kKNG{^*a|yP^Vr zRy?O{lm>ZU(4Nk}(OA}>a`SwV)hIQWze9DpAVlyFr=ijxuIFj&=Ra$09UoLt$90q} zMe@D~Z;e^@IXqZjqo!Zh!3rX1`^8oYHJ!Oe*uc`RqI26Ln_p23$f-O<{#A^iA0p=s}* z*Gr_w+wv>S1X*R>TCyth{r0_Y;=fmf1l-J39%B{>^(~I$Q2(qgKRzJ!M;Z-d-smo= z?jR2JqM4nZ!hsx(!u?Wse`5ds{}A0AW^ye1Z zu$;`i<+PmX^jQ<1Xp3^jlyPw=`R! z(|E;?S4NZNtQSp*rc@m|I=YV;Z;I);xVVbVb5A7gG4WWu5`FagjxNz?zDJjSbGiDA zvjv5J9bn<&(s=V>ie8m(Yr~Hp{FI<#D=Vw|?@uM-TlEzcX+~SJ?(=KLs4T@EufLoX zW#ZgJL(jzYqN++||Gs@XZmYe$myX{a-a?IJ4KU-wVdj&p7|Yo4|Eo}@kLZuRa7XQ|F%nLthKdO%&H+tAkOK}4oR7F*17k?c2BE`qreju#v91!W!%qSWoW?aTWkvm4S=o2&*pqPsYt1WH_9`kUgg$=E>Zic-_U&7q zG`-XN_U#k8Hs6q}#=^#?5-a7zk8PZqpHCScwoHHZ>g<`RIYo2-!pO+&Uf$kQT#4T^ zghL%29l!LB{r#(MKS+Gr$d{OLoF8-ux|QY~uZ_`*~yJ3qKQ zeEhi5>0r>~s3;ZPw1-$76`nBiE^ss28X7@{%lr24f1a5sGV=4yu64UvSy{uYFMs%V zHMJVM!@Jyc#8yh1Oc$m){8aC7ru~Ia~O}hjhf3;ek8@+tx zisFkGFUXw{r`OYQamlxP$mAIl6Jyz&e)i#`M?dzjTr9Zlw?o)Oou8k-Da%}0Jo?;S zC6@WtR>N==zHEzH?&8F3j{N-m{y%@z3=IvlZ~xZSJQV!?5n|8q?d6S#*Rc5bT^!{S zCs$bfii?Yh-Tv1U6&3HN?dIU%2tS?tFgp5+8n;FQb9qsA_CreDr~9nHn0 zY|h<}J!bFy^3|*AnwntEc$x1`oH@rFMp($e2nu4v;s=jQ-a8}9)sSm*alIT?mFJ1N z67I%b`TFjC`~0xOPo6$KAl$2jb#rhy+Eq_YDDwKdIXa53OV}o$nOb7&X*q5(o15{> zV{OH0q$zFN;^JaX^>zmQF*Eg5IO6r|Lo16z@$Qw(50-Qa@HdB%Ce>r7SqmzK|2mGg zM&cWc_RU^o*J-!uD%PL~9y+AC5@fH}K3~V}pKOqm`!)HMQEs>b&oeIu*6<65hNK84 zi+;OfG#fUMKb)AjRopl6e6=(`A|k?W<0S)ws0z)JE0-?|vaa_nzr-$ngU!p!>!^$B zRih;4@|CH9uhiX7B$~uHl*@}Y&pr$JZH{k<>>}1;`L=Gn^Dk@g^3AmFZqtT_hUZkY zOcPwmXQe%+L&L(D@qG;gUmq_9zaYf5mO0;C$S-xu)=$ATo6^oudm^%%x+Im$@9+gB z%@3czMZH>D3>xa|$=jadN^ZvIbyGBBhfBQ}+?9LOuU>s&x3ZR)IP>=H;o?Nw{Ra*N z;J6+;eq4Ov!!8;3DqQ;+bNPwGhr=nirVsEcmv@Bxv)u4q+pTbf(7$j&or#I5zppP* z=PPHkB4WQK%e=a}TGc#-kNwQqvx>A#oVy8vKzv=cvQkC--|Q$TCvR6YiM)7bs3tbwDkd>( zS!#x_Tv9;mY!SgbLsDX=12$noIG@4MykbrZ-y~8XLEj z8G1N0`u47l=5~cAC-0RAQdZt}#Ui8V*XG~yBg!jHov)8Ga)<|-xa+hC%e}HH^JNyZ z>sKi@7d!9nF0GoZdstNT857s>&^K>{ksea7B%eWek9HJ3{_*mBV#Bn*zrXYE+`Xqy zpB9vszTiTzzJB-aNcH8zUj`EnW%CCKj-g;3!+(=p@_VmaG9HUK(j^)<^7q#}YPnH~ zX$3{a`i059$mNC)3%vm|y#c}vp6jTpYHLHpty}q8tX=*Hw=*pEUbD3oMWpJit*&gO z%>Mlqfp8e;?@xUdQdXwmIuz^la8y=HLxb^^F;V!)n~_6ltKWkNk0VEpSbcpY)IUD1 zCGWk-%5hh1L}+N;{^gc589r#(D zo)ZNpECYjr(vpu9)s9FA327v&3r%Ln3Xiwve|G5_dCpvZr~bL3*rhVMV)N{scKI=Z zfq}zMZHjwdhNP?reEaf+M@>!5?)GiH#SIZsPGfb5k>>4LIv4NEZr{nl!Ek9n`{c=W zScwzL!c( zPrug8ncm<#z?__%T-VsxC#Ce%c4f(dW&8F|ZEc0k%VVuM2V|E29!W|{>i+YlKAN{$X$jns0*id(ZE6cWmEU$ooAb)3c4)>cJD zh49qd-yC(nw~gMy!lLT(;r2qO%|4rVd1qybb~^Vl8WSE8?q}i+rE7HvJku|>%KNp$I?eshNR?nc|09w zv-Z~8pS+H0!>v}@xamLdaP*!;MWyCwIQ(wQ6~433;}bJKoo%oD#<;M;rAuRS;b$A7 z3qj`Vn_5s1y@`>r`ulf{Mdy`lb#3h}C~5T}va2cu1qFON0wOKaHc(S*>*|J29qa7u zyliH6vb?-pYjpnn6JlX**pU{~Ldv8is*~dRjGl|=D1_Fo4 zPR2wCXqEo_`M6EXlj>3y8&(!eT6&*$^A>LIU?HRT-MB>Q$GcRbHl5Pc)YKDp_Y;&@ zoHSFed?>oBC9_T`sQ1O4b^C>dl~E>~|CAIIup_7761_s7-`yT---(Bd$tiIfCrsRd zgDIZn0jzxyS~7R@^N(4qqf?PRYaf`-w&GFTCr+~`Dz(zpD_j~?0O z&VCHzmD6OBux5CO5T|D#7+n6y^7C&vdh{sUk*k|*cE%+qBzQJ<-MDw}-j-dvHmB+N z16Gt)RBZ6@@My`k*#(GZvGTqNg|K^UjER<(_T`2Ax2jaH4;dFwC^xLFu~#7g_~OTf zh5Bm?_aV<_-dT1;v?pFH@etg<|DHO(ysPVRVK1(_&|VXLLjQ#=tDc(ny`UD~8<(fK z1~y*3dNtmnSHkDzhI4 zltbcXzu?haqPMqKgjjVqFf!s55!q5vQQ;dLj3AM~xrr3M`Jvmbp=2L&p5VcQfln)r zwHoo?KTJn`T;8Io(@~AY!jB&9urP{$6dB1(dHe3&^@-KIJV|!ZOVkeIow3M0dN+U8 z*N5G9a5#1%CO;|Z0f5QM(w{dmvy)<|@qCfQ>FUzbd-HvL?91HjY;0R7$ji^M;^7E{ zbqPQnr?s>M0BF}IT=2PQWTeKxdML?nn}Dvhwa{5Tz0jhf6YCO=*$>tsmHb9&<;MXS zplME7`76D|*5#DJx?_i~fdSj>@5Yo*E3~Jio}T0NFANT5LY4}`MroCT*cA~&du3&1T?Zo1S@u-~efQAW$i&2ttgp#n zbNjZKlLf69b64B!i=?DF0F8%TpB?`E&_U9-ZqEzbv}scnz&3798N}rB<;w~(+*JpFrti>&&}%?{?=h{_yQRv?IU@(cUgrqhl+aVz>S-Q>cj6;_qMY1cXdJ zM2)lEn;t71*99k92!7UNvbl8&y@m`iAwOY1hUiay^~wti)1{;LoQlhLkJ7O(i@^=4 z=igoJd3@31^yL5JM^^0466ch8!CHTvi(|=oY7;H*b#E0%2jm*h83ginuwFRjb zXX3+rr(5RRx269Ur)>wmY%k`0e=MYP?+ptJ+9&GXqUIIdUm_3W-Tu9GJrzN9w3j`h zJHYMErT?_U-+M|f0mR)E#>&PTZ%q)Orpx7I6p7%R-Ad;duzc1 z2R@@dNRF$Wo&A%>%fk~I7Z@z>mNjeJG3D<{DnIv0lV|}Y0s{fW+3XXW++|%LI zwfDe*1N()92Bhf6xn!zSnfcc_~RrTPOw2lRUWg z`)K_}o!(&yrlT7f77wthEX~qr$^Woy2mSTWJR0%bd0e><(B)W}x$| zh*x;M5gb90q!oYvwqIR+Cou@UrGI!>B{(?P`qEhidl3Byb$(tWBf@Avu^hZBGDNn& z4WN9VnAk(t`LS0Y>IKEb^jw^-yY0oJElilLmjjA(SGHnBmb3ckx$g4St4hS$ibR)f z(-`XDg7US8C5J?_js3L)I#&tARK@b>m5HG;K&)0@FO zP+mv5_|<=o$2 zH>}srGi*WaLw3$UpwL?$kma09TIdBV`HQ#)bwG(i(nn7Bw#MC~AIZ zxizT8L(a&E0~mc=aKDs);sRsIBv<0EtKXvTA}C2%C0z3FFre8)t?b$aS4K5rBk9`- z=Lu83y?cAUOLX;1JyO~dP_Ck>S%KV2qBXMU`vwJ-`x4B&-T%X&G(&i(N0|C?lmAa1 zh5!T)t6ZA>Q(x$0qq+Rk*}bGdy7rNf5q=y}$Rx>`U;E-$zr0LLc1LS4=dn}R1ahT% z%lz&Yn=kZNN03Sc#d4ziQ_KzP_D9W-K+?}oqNK16N%j@gww>x|^vn3vPCWncI zN4#Hj1i+!Eqf0<#i9iK*adD|f>XZBZ`}eCydxV67iq}@%a6>#oLPFq_i7wNF^~q|F z4Ta>5bPWwpZ{t@dKbbL88bySLCLH{h-o86|jFs-i9z1c*^`GJQ?^8K(-7;u@+@Q;T zN?y{+NDw&2619coX0EQT)SiDwN3B38XBxF+b#B}^82RvFYI3~&P>l|%?W;JEyY1H2 z8!5P*`fRI4%kj>ld*|!_%dwWjb~^%jUgl=W|=ye7f%)g-)U#|D3UV-5|fikj>t;*4NkHEh&+OAQ%=C z69^P+`sc^11!PT;o9&XMvI|0YQdf5?qSZ&ZcwxOw@ghHnLG!nG}! zckMZRcZ}AAUZ#M%&!nEP1P&LXe0bkZoak*?g=$pH0Nn({R}= zjt@BnJds@Hq1yN%DgQ4r7Cm@&3(~2mTDjzj^GQi@@G1wF zHD{c&o$6N&~1^c-aj_V7$f<~cv(I1<;$0$PoAVLZQCVH>7T0Dbw+SR zzn0F;!6rjhRrQlEGrg(6~ zqki+_vPI56n0mNvcMER#&jd(b7X647pqGZ4pf9 zmh$h$znpxg-96S(n0{iiJ~=cf?G=s80;Q_TlJJmZWaZ)t0w_!X;*oTn5yaWWa!trh zlp9A3wMT>nno{R3u3U-!{Oi|F%B|%&>*lzq$3GXArEaG#pHX8N?kc(I|AMQv_>L6W zH~BF2<$$E~#O|5dS@J3hygl)cN$<{`kLAg)^Ky?M%dts09)uXPW&8GW zdZ|XwZlAK4koLsHKXdj&lmlb6gZ=#vkoxq_olBHoA!zCc4f3{#96ftcgz?te>Yas! z1*8aCNsoV$OEbSeJ4rJM9HLNmQ&bgB>hUR=;?|Gt%6D_I{UL@+{YX^aEXYFNUVMk+ z_3PK)ZH9~Hn^;d%@$vJQR#wttu~Fl_UcC~+>0h_?Uv?mWZN8JdgvuVTL$?=$c~tNj zqOPIg6oktz9Wf=;5fCmKK{q5pvA!EWkbRfAIg2`5V#q^s)=csDsoOp={Rbii(nH0^^tFu~_`2#%jqy*IBjuKv$i_EwuOb|LF9pRy?|%~jy;sd zF*=WR`TQY7`=0=*Y4`gZ7)!A`hCh5&sAv9eZ2yoJr_CAmb-*C+)}Hs)ZQdGB#mx{E zh?v&h`t8TsYm2x!@@3;wQUr=u=8PUjMm_`fLHz%DLsOLNcK-a%7aC8!!S%bbXeeSm zQd=99Hm=Ug)dUniX01zR5YTuR=GMr_E=JJV<^S{B8-{K~`cbhP1|8Mv>!A;ChK>;< zOK<~mAXs9y{;M*Ma8Rox|qsEO_ILf;G`EaU_^1>#HTY987Zau+X_L^d8ZhN5!m z(k1pgH~v4>U#c-3JN@LnDu@VBmdhAJZkp%0VzO0(Q%&LoNIV?U?>$yrzeOKqfJ~&| z?3`DjB_tZH{6A&%HRR{ziBl7@4l%4w@8hN3=)W;x6c9*B;jU`PxqhyhgK;zM3w0~Aw-#y|8j6;d zg4eEH)5Ze$`_l(+e)ZT z`@6Xu8xn3of5YrP@pZrs&(w=Hg0HXd$Lnv3Dy-HvO-qv*?d8@;x|4LeVPB9?{&bbd zJ`%pTS~e31_(cM%P6 z+1J(fW7|{ZMDE!#%qHfeHZ>is1!J`@4<1dvZVASzRivqX=FI&A z=U$sa1JD+1(XZV^7}ySNY4b?`O_9wG~5OKEg%|+Rl23e<$?bpW#iKmZ#H_+5ItRL$*b6p1I^2*mo zC+0soBK*M7P@^((2yY!P2t-C7{oNX08RpQ})YN=ge+@lHSKdpardDZly{ky>eaON= znz?9`f(%4OMVbD3dqo8h2i>KnR@u*~_pD_BDr5RMatheklzmEdVjr7@e5qs|26a}X^yiEm26Iy-w zOF%04JRLoK>9;3GewqCUG=(B|{cZ3t-M*w-o>W~r&V>DV=LrR6N3 z3J?N>=H-QVH?LmZ0Hesam}A=#m4fcQetk3G?cq(26`ERJ-Ccjl($bggc%XYoxGS4Q zC!jq68u1yuE%b_lK)kR3BzZyn>K8-L4H{3?;CcMj77`br$@+~OH7><^=1=($XslAb`m zri)Vpqs5t24y^V*j2zy(h=0uBw4Vzc{t&mYrlf>~z8JvbfD}(-aAt4KoISzMQ;J&$ z@7_$_4oP)jognpPtjS#IGRxA^65XavWyr3!!*zR5g3TZ;kedLUT;U}Bg3Gi#c64Y} z@7x=c@~SXiQHvVRKrX2r5Gc!VUb?&MJRUoW`w$@^n``1u1f-@4lC=cForY#1e_@V2*soCV6W&`aozu`NDM5^6S?mmqwxs02{U$jlx}- z_CuP$Qr!@b2BmhO@+0V{VhTC!0H*v@ppQm$L29b40(CZsEia zBbz;k4w0=kHneV_7=b!vGvn#4@74t@!;3V5o{f1=saMN;YmT#2~cbZ=UP z!m#7S_kwYYyR8iauhSkW`@v1?*RP*gT^{F!f@)s!Bf16xMPl!X&-!vSnke^e=pTuH(s5&aKyMVN4<$RhhupzUA#Jf_nP;y{HS7u)3LD zyVlx21c=oGxvKnsfq2c-M!#3D2q7LGo=jrRosi!)z5IptZ zh((PlYz%aajAyxBKW&G(>!8v5O%gZTpnMJ<)$$wNLg7q=P&=hi0WGee2L%UWiKwuFfcHhWTPJ{CE3s{a{iia{4y{1^Gdtg>C>jBrXSBQ>*(lEC~j`c8^+I7+I>ar zP(E%uBZ%ZhO^G?-wu6FPElXBbG_XG4XNEoL7i;16&s$*kgOLe!Qe5T-vUsu}U zc<_gGX8&Uqm4HG)NAQ|W;VMnd6{;}1d_S_&||VE?=ohtvG?&;qop+Ib2z6+d?{xv)j2BWlh&p^|1hLe zn0-d?%ekol=Xs8jM0Qh|na`d(7Xcv=oM|{e zJ^Y_RI)NI3u_(o2MbOJCm0GD-V?|NZBs!g`(2*$x z&y>nZ=}>v_v6O~ik_aH>9;6R039+RDFw3KMMjmL&w4R2KTnFDC=e}yZ!{a> zzG4L-VD2wBQCd(?;QJP&wW`Lq)ePr?h=MEU92doHem!T7@I5-WEY`-4I;&R=K|H~a z!yZ0-fEAeJ@^SeFemc>hCftJvcSieUG5E9PZiqlBASOXTlAUumnf(-M@2F|s?6|)D zRCSWJU?zH$3yX_cr8QztyT`S!ztI~V#Lj=sNr zUcaT*I2DK3ajbo#v9U3H8c!k|eycZfNjYw^DVp7ZJLJ+LF$BW^@0s#{j zX*viDR6r(E3g%4vu+GoQnD{qI%dZ|qL9HjL*tn!R$47su%FCgjsJPuLo%vxPijld) zP7%4$B61<`*4-h6QCeomW)4{SN>KbDxM>yhVeYw;loNX^GHwsEhz3*yeg~if+3e3U2!v3`17`=aPMJmgoegM}x-Skqz7aCX ze_CC=3;?_5@83XlrFtrN5oKrx0bL6d6I?-7eFnBRKFj$TUtppkooi4!?*Dpc(T8g3 zg?{j0P3+gj!Sm6u;n0)b^;DfivS$ay!whyk!0u99drz3kLU2_!6_AFy?{dDxW9SpG zX59k3C+~u!9~&6iACM#*X7m2rAU8`X+`1dd zzdqX0HhSOc1TK1z?Gw5q?v>gHaL!4tMfxwvb;cEW(cK5nV3T$hDP9^mP*zzv%@tt} zcMK^f!^mTL{dzzcuW~Q?;jny)Vl}f+@kz;Ch_Jcr{KShunFqDZrWoX2$gv{>N8N2RO;G$q*rA^>9R`kOazl0JU4yShh4zBs9SuS38l z^1}d6Vp4QpHiO(kucSqZAT2gnGMzJLsEF&=uali4Gz`gtKzfR{ZQE9yDBDR>J2w&-a!HPQxE zR#DN5D=Z8?wF!-lWu#E7Me<3ariKZ2bTp58+@Ii>2%)`A_Qrr^0A^`X z;#Jhu6%f0mV6!8j9&m@eU9z)Uq&hWX5_;ptjXK;6X+tG@Jnil6h{+99RQG=@ z>52xnX7+M(G3FIc$3E}K%E=)$XyECe-78nFT=^1qd&LMP1z|AxlekDcbDJL%6cSPd zdjW@%Q-Bjj5&l(*oSGT~3JAp$`1jPgbF6SBRirE~PE=Qadi(RDGJG_qH*W@`mGB8` zNVXo|zP*%jK1$qcMa<1*GS<^Y0cEadWMrw#t9tF<5QrS8a(MW9bnTis-oK+_p?8FE1Y9&CV$)|=(!0tY$kSFWE$11VX(HKH2G zv)iPdCn$r1gRnWL%O2ZLpIUN2Oo-7pP|rrvn;8n_&Y)1Z0T~zq22U(b^sKEIt1&DL za?C!s#I_D3qMi;OG7$-hb1QxXJn-mGct!B z`|${x_!%p!eXvZwY>f!!zB3m+w>*EhWcAwl|M~u7KXxGHSt~q%*am!?Hdj_L*tTo$ zj(1nW)Ss67OYu5G&e0ZRNe^k?%uSu15x<&uYF*56nhPbJZ$1CJ%_H~D;Ve#XSPq*^=&02Xjf;26Xa<>mL-jihRAw; zef;PBE)mgn(eKvn{A@{w1tEWJynUxZQ9;jgc=*T*)4!w5QNd;p{7B8Ou)BFz@w2Iz z8P*4mn2>k&zS%7gy|~qcdL)IM{4zWXfbn=QLl=&yDFVkziLH z$sN7IHkjX16>EODY9Tmb^4gjk?^YbQ#@jdgYv@c?s*Dd_m3_UbQSUFo!0U{kiKOG& z%+lg^7HrK~YeIqDSz%J2Q&Dlr8%eXyQ8#3tXtTMdQYKPb=`TgJ)f!JUi3{2PlN2#m z`bs4lxBDq!6@tn`u+mM6++)akAm8tINsmxq%_fV+|B|;P5_Zb9TL>OQ_0~* zp9pzFMB1yo`QdfyPo8@#Y~pqQFGOhY;f~6bu^_Y7b%=+GIeVKpNOdeMEW1(tp!85Y zaTZSjMuV^l3JQ9n{h+(>)xRnZ=ME*u9BOgu)Y+`e}KD!4(KFJtG6X>%4FPoU9_JJ3d z*uIl%q;m4)$@G=1b;fEor3I+6Y;Cngi%4>`%F2}j z`Zb`0pD;v`Q^7)?khw)f;+**@Z{|b)yPPs%Di2Uk#3k<%q@a#DoOFptPmOYO1HHW1 zw;c|=g*-#oiYH+NkQ*o&uydJyBsVztW2IUNJN^kzV~NL$lCX)hyAr6vSE?pll;kSE znY-!#{sm`vcVAD1{sm?MdsLWJva|p5e)*`lzWs6R(tBGrUT2i-C_=<+{p#wn4T>=n z7;V+puktwgaCxkwz;8tv8SI~(nV~dwY=UEp76SrNL;Z4>Z`K6=y8ao0`>nq%qBDPf zZXUd`6!c$!XJW*u7hW$o+vVVxxY<=4^zb1ID$zR1kJrWl=w_t1kA3A~u5cQ8AmZdC zd7L<2id9;)=qkQ*UbM~>N^2>g6=`z*`}fA};ktDw?-i(oU`Nf}_uSpElWn!J=U})Y zJESB(fO+CVvHMP_3$QqmCJZnWPA)D{ltP$C5{R``2VPryd+ijtG-n}%Wvjw8ZOU}u*)>cxJf-QprZ0v`|?18qc4^rN~ z-CD9T$3c2Gpa^_~I~gWWvbAZu`p@M?=ZBr}lFCD2BSo(dD|2mRd6#m`xMZ|y2OQ;0 zW%Cg*TA7*J>lAUfuQH?$*wfd~?-TlFd$#&@_w?Ka=ikq81N`J!V&Z3)I5dg=0U!GY z1jt{wz)9*}sPDrgBbOjdlASz$tvHl@*IRL3F2%5fJ@D}{N36m#Tn6I|GaH*P%plUX z64S7j|JR6xZ3a928DT>_03`rb$iH*L_~d$8Pc$yRI&FGE1C~j8=+W$}`t~i5qK#YG zCn)F(Jy8xncUQi#d+%N^%$z7d$32dru5FYNAo4qNzbtM~4{8+mYo0}^@ z=^q(6ouU8gIfzkl;?XCVEdlMWhwq{K%NKu29fn^>XY_wC{pSl~%*Mfet_nXH+9N6; zo8-hM#DniQ5xV3M3|!GgpXT4be5oE(?R6K)gc#krZQE~nyXW!|KlZSi7Id$JuO8z{rSX`1A3wSm&!Z71`IFQj%1(*}koP7L@H|J4ZU+qm zLvdH`*F%q{XM8*aleJ07$$JC^>A*(4$;w$&lm*dywRng$hi(Q~azZU>LYo#I$mbZO zDDJaNLJ=ZI<50LU!B{?WE*uJaJcZD%jXR;rA5s6GQqE5o@|q4qL%Of&3S4KeWej3h zx6fN(jyDG7uAF{rzh>t$VSJDns315sUK+~#{e@FqVn_cB=e34 zAk7D4wgB0vJ--WUn|<{=JNx0wbbjT%{IbHLqG6pbLtF#do-fev+$}%|2*5|$E%oKh z3G1#%>_N`#vyu5XFJ5?~UaEm*^7<(}pYs4qwStXY%wa@tX*e;&5()u1GcHybZ;{p| zTlx93eEzdIb^b&Yik!L6P->F+k~5kL?5#)Mq9adEj*?<#tCow)F^t7Uq(AmV?_KP5 z-Zp~Sx;5Jm6X7(uG6f!$WlP4=e^R!9yB_ zdVR5^6T^2)E7!+rtKTgV8X6jV;gVgNFIns8CLM$FKsMKQ=}5fD%Zq{!-x33A)Sl#7 zMtXW{By#23qf>{23ZN1r&pgW#Fx_|_Jzx>520my-Q;&MkjKL_WH;xOim=AP0a!vxX zPjn0n@<{$JUw^cEEDo@u>4xEj7oUjbi|{XDzS|tj8)G3NA|i(l9jR-ewWt2%Fb#yP zy_k6J866F}vpfe$HlvAON7xw?AeMfXV>r4VjL3S#dg#_j^168J2(7iqN=S_b5gPRn@cTN^wDY79ny zyyBpcP3DnRvH0rdW-=2X73q7FywHkg!hMikMiig4NeKo`PmQk=JU|Gfa-5TEbmoja zJU!1nJt+epdPm!G@wS6(XG09>D7a7?6+ud{b9Cf~LPiW+nv`K^G}mmFZPD(RFqv^Ahl_{t3dxX?`F8Plakg(hx$iHHFoUS!T4xOrqw21<__J*Z35Rx{>Mzg z@$_AiwcXy4XL}#&A?ALPtBsDM4JtO0^V1{|;3>7J14F-CD5q6aEGIlHEQWTl&Xd)e z0!c{!e#pknn*}J@FzoX13>ai$2q~RS2xVckYr^Ej-x|Zyc zw-TnV&k(Ti+?qPoyfuV%9#AS8h80Bxhxq$&fTP;%+x13xEW|?#uUObf`5tpD;*z=f zM!7_rkehb%mL+H~?xtvQ9z0`WqDe^w(2a2)`>unDZHylzB436X9DIRMfI9!}2iT2g zAcujW=zwd8nOAJe$^A1l^mAB{P~~FPQ^uGTB`)t|KMbPOp5#2Rf =d*Z~20sm0t zpA)iwL>(HDL@=nK*;uOOot8~qIjbI zr5BAtB^7UVM3;v`8WbpGpOOJnB$Dn6B8UtcSbESx?9UHLJG1ih2kPXg_0J>X;z~$a zq!^Bg6*I7v1vCiEptxH#zUV>ceK$|d(*Jubk)GYtV({hKD%3TC)Z|g6=r(S&nh=HD z*pg>!o>)s3IAk|l_$oGxhxqy3#fwKUj~sNBkQoV$jPxQW?yp|PucJ{@jt&{dhv#Nk zoKmdoF1o^!w0q0&xuwX6x%?TpJ@Ik{on`he0O1G)o;?s){R0D&U}ib)O3VoU z{_|%Wr2sAah_>{S?kg4+DgSceI5}@*BoXCw5qfbEgz_%|TCv9+bUR<_8XIeD*swv& zvMymr=7lEedJ`H)`m$B>Do;I5tgi6#92ODTO(F9Dta*Ge@~oG`OcR+^pkCJwW46H8dU>b5qcV_IDXB`7(dE|ET$Fae2ILTr0w zQdH1f%pKy|x4w2aSULU6%p(VjQ@r7dIPqTm!wH`TTb5|9{j7`~loW4*cgf43PYH zR*Xihx=Juq8jhxzi>qq`Rt)1+57DkC$VnZPKV`_`9Qy37H_yW8vaA+bt)aTh5Pn2_);P!nj;;R+K#BJ?v*) zGW&0O_OD8<)Tl0BAdPwCP|&zYdMuDP5GS*`LshhEowxQahi;$lq4vFC*n>1*1GL$5 zskg=vylFADGRvDt%8!}iO|5SfcwG8^266Nj2J2i!X97BQ;9rX?iAIm32F!M;R9vg49AAz2~gCHl&`%%685fCL)s;m@t$CH z{>E2s1Jpje^IP}t*)m*0%v{uLm3ROE8v&cKl^A=meA*6jcn+i@hBYZYeQ0y8qn+K> z*`6P#kfcmA<=>yT8_JAx6WtyllV4d;5spDsxbpnRwH34y>$M>|q@jiN{P@A+c;U*! z(|bWPPn;17k4-f4xYE2!=-|N_laCnB8hzcN9}6~vI|Vfr;AsYu7fA; zAZau%HyR*Zyc=dT|A2r#sfVXsifi0{P1>I7*j=9cHc}O^aSx?`U_kM7ayaN2V}q>P z7^iNSKTA@=_1mN0H}4RMJgrF1r#7dZ@d3wPWKw>J?k;e^e{aKKQWVBit=wrMIb8b0 z2}Te1n7YWIhVS3LrNVJgpi}T{qf9=>&YksGm_??HLegv@CpkYW%CfMTQR7;NT$N<+H$af6sGhCL?vv zsE$cWX6}o&)7>8lMnI4e?e0^W|F*AhA=+Cc`y7|D;6tnZkJ_KmSK~_|RFI?47K(4u z?*m-B!gr;*KeC}U=lUX3Wp|#mOt8(BN@j7(I=MR5D4O_o4Rv)(lumb_PtuCXy7lPv zsbg*r?WbB*)eE8l!ND(J!9gR4>%X@VF})$6JXs=;3zCT|vvcEJCAChL+ja3y7Jvw` zYfY&ZkG$y@*;&-a)RVc6**yS1PxDw=!t`;eljMi5-@hm0D3c+8w=4yOM?@?#scHjZ zYr-y%2{9Tc%c=}CJMTly!Ban64&C%@i@V*>+rgvES*~N`8-)lZC|6jARJ_6Ix$)q+ z?d(C`aLH*PS6((Zmq%aU;%Tu-BB(p;u9~na;&);HI4|5bIy-X3btb=WG_tD>T zPirjaTJv*Ib-SPIe5CZQhXbab!FrHuh7w6*uj8k7E$2U!1KI^nO`HZb?MW_p9%Y$h zXjtTW6fb?y2G0`&i^i)HR3M4Hv3vG1-5&#bUZ#3jCZ7AUjWZw8Q+fZnl&e_mkb zfm}l3?S~O_UZ*pfbuBGZ$IPu)mS(Lcd&|*86)Kj`5X&*tKt4%89)+T`7j2+unL7mu z!l@gp$uUqYB3evJ=*Qg|H7CyK>Z+vah5Pta9?8%FU7p#Y_3Iip$`(HQN;)|B@P3MQa=+5p>7ij-Js zTiesovX>MbMEJ4!AYcHRG;ux9<-YQ#3OA^Ny&}kAQ_$V#U<9Yj!=F4+{q*Tm)7vX* z>*dG|O4d!2T1YHlcDDn@PQj~bo?zrteeC>0#ocwb$B}R^J=f3pjVVtt`$2VtG7p8E z@`d`Di0QmK#DTNc9CYN{kVTi~+HA=8gkkzHo#Pn5&%>aG1H%MZTGMM|skfP#3Ekb@ z`*UW~%l^HHD2?m1ybPPE+kNzEU+YR=+P%SMyr3C_n!hdsPrCpl| zwc)u?#8MRZHoDvQG$*+H7r2h;9{19()};lApq3*h1a|v{8NT8Ns!&o=BJq{$MEAO` zJ)&hb+5->wsXy~vfAdzK6w9QV7OlEEMeQrS>@2cf9yDr-Dad@?wVb8k*A=kKl^WR; zc_{|$FoW5aG(KQBY=J3Y-N$e#pKoSNdmm0b5a@dLf3@}9;as-i-(M6enH9-M5>d7a z5kh5DB9$T|87V>$a?4g($w(SVg^ZN4vnoOeO*12-Y!Tu8Ts^!YeE8VxngTWFZ4)*cJZg)9OJ21Xcn^PIZq(M2$X z`0J}L59o|iuAqtW=PU0p8cYQY*CfJ^UI6N|LAe~>N7>oz5G2D*C_IWqCJcKkPUkM= zEq#hVeZ_@YqxFV)Lm9E)8#2NS(pz>Qu`ScA_-YKF>E0~rJp!5towZR^v>S*(MVyS) z3~R`xrQk0s6K^!hqUigO@p!Wp#Gi0TsGp2%Vb=6lIPW&jP{zTyQ}Ge&4Cprn$fS8-*c{CbAadB0|r=$W3u`msD7` z`-n`JT)qOiZsuj}UE9vmpML*jvST$pdnppI{cLIZBOppopjg42IkQ?&8$ds>vt+Z{?> zC1LzT+FY~z<$qx`yCq|Vu`h6mQu}+9H$J?9dhGR1XQ}k96{Fn{%);PysdpYX4GM+| z++`M?Xla#8<4XvwgkJBV-Ys9W;>K$FJ)f1_7|a7ISO9H#=AlbQm>uw@X8WlXul88K zevKN-2Xx%Ts?Cc)xuw1Tx|7WVO(9&HyYP^EP}o={Pd^dgNTG-J!zss!O(Oe0#m2%+ zHye?!;K#}Tfexp`Vq7P~xEuLfW$?u_Kpp1rC1)u?hl5`Chjd02#_bv2l0Wel8aon9_G7bn7g;n=mBHk>7Y-D9SUXQs7cA1&meuFGNN!>2Z}f|L_$8t6 zZsl8eIa5TK(;!Rf&vhA9cnyhm7}zFa#>PH%YU!)A&1Ir)@AnK}o~4C_wdS&IbI!`C z)$9Cw>HgIekI>N2Gp^235;F=&W`h8a7`{;>e^=fTDC}SoxH~4>pMH6>mTAdu4(er2 zsP>1nP%V3B=Wxh^^3Hd|uN%_Pa1c-G1V3km%r@sA5i1?;`*Thi+#5MqnXx7R$oX%c z`g+6GOJ9LPxr%xpTJ9K;-@F>5)u!UnIZ-b51v&Sq)lr2=`ZE%b7NF-0+7m8NR$tF{ z&wF;W>f&D)7fah){B>42Dv5?sx$&TFWGI$qBkn+7Sp=+ zvdDD2w6m0?xymcnk@Gm|%4b4|8BI9wzqaSGz-Qiv4K!>UVU z!bltFRNff4$9GZxhu8Z%EiT+80xCX_#X1$VO5ah*N}|&_z9)$WFf8nJ$Q=%EXBe2@ z59Y79m#ffFqH^$c=r-QahhO>S$8c~!!5_2wuccY(L+c%rJmF859+zHfXwK$YQjt2A z_N{6j`s0EdMNp<|u!UVTk^8=Jro&Z6bb8eTbAdh~hP5cP@6T>@%QYb@H}Swd)lusA zZ{j=E{aL#Lw83usn|Iqbb1%Hi*M7{}BuhLBkV(Qw8vuo%IXZN&pi1VOozKGJq6!&P z&OhssWU&;?AAx%U(4W)Ai*G=xH=uaIm0O}W`f9+kJ<(*FbF6x6^6Np33$g3>VV_U)m~V&|e){{?n?1^O-kb;m4i#5G5c z1x@s{KVq}df2QcBvRRxyDfvpzDYX>(oE)7qMV~UL6-M_3@{iu|gii@n*%-(wi!$G( zB-B7t@lNHa?&z^SwN0|;oc!>_YM{F0h@Bd`vbezQ{x?rP#KtDJy*=aGv+y%bjCo%9 zt#wz0@&zIHL{0w~jT|nR9}2BeeE37Yf~>)c`P;Rv#1j3N>CLceov^g@F?>Y%s2X4joTC z9T3-Av)ua@6&FVeBsA1Nun)%nw*j*lE)~2yImqx-g!BHHBlmrizu^*>T@QZt>=~3B zJS2e(ieL?Q_Z{dR|FB+5)jsNM<8L?PRN6S!J6+vddS89@xg5C4&_o|*`B_I~2r#&> zmy?@FnMPXP@~tM%Nw@Db-b3vZi`Hq(cxB^%0IOFrc6Wxc%O|?@ACh{`iqdC{2kzCU zS9&SaP+cN}Oq;E&n4CsfTpXO7zSU8pyIf8lE6ak$g7}$<+1hdGqZpotMfl>O*RSX4 z4?nqE=(KccaZz9)R#U-+fZMtU4(tJvHOc-teRH&qCZ(lCM+3ae!N!tfdT;cI^mb|0;@T={X7KhXWlH-_%FyFBTmPOuiuZ#!6~uYzOk zz}=l!idEb$Od&{%s5;vQB{vXp=u^wr%SoAtQj!Qk(|f9T{IOd{Mn+P$QFDq36)$IE zMx-mxzEAjI7bk0SHs#?d@T_W@38AepR3cp?kNeyw`&{dm>w2kM_FMQY`qw>ccZk7#qBRV z6&za5+(T8=9T7K`OCHBgMQut^uomw}=|IcdN9a26J{1yKD^qcFAAiY;5Yh(E4web= zE%`2cS1P3UX0lZT&O07^@QO-_M-)vEVif1JY9X71hb8euKryk|z{u4P`{Je5Wo#0! zcmBw{5*XS#r7kJ8b)Ph)`oSbuLr0Hd`Ojv+Ov9P04v^R9!vGuDwl>4F4h{f(uA<&X zeLgfFR}45+NJ3&L)KdnBEJ?9KB!Sn$pC2BfS1io5M{i-F3#?&ua*e#EUvskqpr{j+ z5UsB$LD0P9XhwSu`JpeIIDR=fqnmvKxI~l0saGrUkU}4#j)wxu)#bp+QG^PjK@9@f z0lxU|B^UQQ06aROOrUz_tv%%tdMNER$Z;n!lPR}0;X zij57zO(N=PoW4YDXAdtw=ayotdXuO?QNua1E*=Xpr{R&p-bzP^CFEGl$!*=bl6)WM-j-*3V*Xc5upC~7VDK|-Be&2^Av!zo zbX|Pn;dEmJ#B!1EuyzWZO*b6OL7E%r=BY@zTY8Ke=s6OtshSw#=0bQ8u{l@Qd>oQv z3DS&geXX>$n?bOd?&a;%{*sX7;4=}+ACbPGgHum#ECro~9#azmMKWMT5zNL1smbBH zT(NsM*8kMlIud4-VSEj&r!clB;!cDj35k?MvIBQN2mw;S-ch115PLs~V8qpF>u*Kn zf|x5Gq|87qy@ZJ=NW7ReFjah6<8Ax&@iXZ2k0M3+!)M7u26$e|KxPvOlz%_~QQAUO z3kw{B(uKhXb4b-v?MRHdW|Ep~~1ssz_zSw85MP<8`xhtKL)ZwNYcr z*f)z2akaegMB0W8nwpyCs3(bGlK9!59Fv1EKxqAXp9u|XtU$aTTFvspkrIiom!=1` zGmNdsPa{W^s0whZb`?*zlRn~~5Zk?I!v^A{M}}M}`d2jV(%8a_up!uXl&Ex2T`~&S z#kR?$8i`+Q7N|E8Uk%Gu7;sc8B$Pv_Kr(qHdq{T?j<7hMV zpZ2^@JbX03YR4Nlp4HB%@<=fVj;$1lr@?RqBm%+k3Oali89I7*lvcxWsR?VNH1fc_ zkbaT$cmA}0sE8YDNXFmNMk;wvBjA^tH*c;N7hjHRByijCM=<2tq_x4Nvx0<^&}LAV zRk|J3Q`i6;=3=Qj^x5^1MkhF~YDTr$E0jLu_&Gga{na$cVdX&XGHy+35s2hkm6vCWRlo1y8;(z1PqW}#N zTe7r*f)=i{)ATS3cFQz6oH`I6(bTXj;WH%gJhVnX9)zX%9cHOA(A8h$Sq`B2c7hul z-cSpCE{8qa46V212L;eUB=^CU=L6{2N&b$sZNqv*4K=U7tZEweawpU$NXYet zh;)G|^xNmpooGEPA(@7XuF7r4^(vghE~;Cb9@Q^~IW@^8hPUpT?H5J^2rPuZS7}pb z4r<3yWJf1~i~$8GF+BlG^(a6XD>#)n`AX_gAJzeRiSlKshz5eOznkBMGXdga1gc#< zA1$$E%SKxBgMD0K8`2Yy3lRl`1q%ye$0QgQ8j*xE^>N|BzNo6UwmUfXrao>mq@FRC zIPn}*Qg~)j!HE6?z<*jOddUhH0?@Q59K8oN;Wc!F28|~bLo|JHtP*D|pO*1?{KZm^ z0qf@rvY=yan;&xk*$KFRfBpVh^R(?{2Wn_3)uz|et^Ef9ZjhD+6&MTZ*9-_0_e-e) zxU#~Xp$I;uIOLbwzJ#h3IH)ut`KCoyV+uc>48oIisWz9PU+V-Hp$3^5j=uYiwv#XD z0Xd2iPX!y@DykRxr>=TwB|%%vo7%O@uuL_3qTuU%+k{I;JdPqzdNjp@?qGD8F{D%;j0v zK{(iKex)0U55i%s4=@L?GY1D5$N(Ln_|6~uHS25hfAsVm1es`?IB&**qnYO{Ml~` zTi%HR*JUV-BhL-OWMU}Q0#$?vs1+ILCfd$I%?mGvl+%y*{9a%wXg*J#Y=H)cJZZqF zWI^BHJ}+w(zD(G91LCZ74o*w{y&by|7Kx#P*e)82J>4AI3Z_D* zmxbu0XaMs>0_O~C0EZCT4;izA^Gu88iZQ!abn&wt%`#KjKNKr}_UkH|6CCKpFHN^; zP$j=|Qm^5Jq|t~I33i1k8NqiTp#i z9Glkt`tpnk2NlpoM3Zc-hI(i=8ijZ}V=ZoMPz$K7iunghLGCJ%ihhD_qo>gW)} z4(Ap0pYxKKI!Q-@O`%VjPrDMuDp*xa19_|8%?EI0_Bjg;2{t4j8kIeXXA>RzaBEx{ zs4Ky~u-`n>iO`D<*^qzjd~7?V#!|_19eetA><49#e;`Gd70JYHj2qu%69A5dSDqQ) zjXV$?UD7Q+VmAt*Z38C@dZE@$talq7*C=Z{UclMzbG8r&4>}s+ok%P}Jx2?lray{s z3v4$eFyiT%Pm*oU`vX*H4;+1hKluJ{@ia_!m*f)k*QNrDC&ZD)YFq_pu7JdMQ9 z2k+rE;=m@Fsz-16WfiW8c}GDHM|bn%*y%hgkOf5Ug0LFW8=_{Z`Lb`ff+?Jn;8#LH zN+l?|Fk%nsCCxoLJ{w`Tipaeiar7~>vF(A*g)+&t55N(7!jb80vn#RnLz*@EUU`HF z_t8*m0e=RHo*IbD{H3aG6veeaIvbL&aC_TXU$jV&w{0q@JEd_R^#Mo$CIS!Rg>fZF zepp+z0ZX!D(%xYZ0iYdPum9VZ#<8S{TX1TK`ozJ`?t{&}6j^1^r`X&}dN%+;5Hkyl zCbn-UB+veZ_5v|r8d$-9SVL3$@JUh*LH@lZZ*BgkQjg))y`7!^G9(Z>Oq4i;Mu)-M z{N2sXDCv5Xcb07iZby44xCqcl50Xc>Gl>>Wv1xi-M3=Sz*lZ-W>5?_OmI{3=_GLkDw zVTZ%ftpz;-{;ZF^mh_d$P(OH6{_$hSLQcP*7ZR|fTeF^_#pUgzloUz?X(S=IQSb~T z!*hQ0g~M8;1jAn=>a{pO4WH!?Wxe+h2z4O`GEUBh^@OP@nQ;I?p%tXy1hD{X9ggzu zS#Kzdl%~NrHw|LVE2ssC9eDO^oZpRC2EiIkuBH5~>J0J?#|t$c9!hnZzb18XbZ&%) zO1KmH7Re|VFKWXA(g^xxW_I@Kf~F8KLLBM|NP8izA3`lqu)vPsLe2|NoZ0|@W>}*v zXu%J}H?Gf&^ptd_QyUGan0)w{S=*)W{?tSL(w|@4^BmS%e*fkOY)9 zX-na5yB@}m{Ar|n0N(CZo{inGp%n=bIE@HMf5 ziE0Z7g6EOi?ivGV`IXq;SJ{2+Wn1^f1otU9_^=4wx!QG=|(gHg)r41h?F>=z>P&JV{hy>)hkM}IY=|y5h!KnSZ_3kEqG!U>RqV8el zT>(`^+dV+W1@mm2A)3=}(K;#V>rH-7qq7E5wn0K-1@RKq)@lOPB&n%~QfW7htDYRm z5+ENL0cm>oRQXXMZQ+s=vI9u*lmG-}C-UmC6l0Fa=H3l>br61Kg?=1QXwV7CAd`$YSOM7=f0?v(ql>#BJdl%> zQ<(rKE$Eu{_&~VvK)=>+fXtxn<{kV-fR;wLZ+ z&T@qI1OU`HURQ1;k@V+mpTqm&1A@idZ(U*etP$r@?I9FNyLhp*y|}b0GV5|baGNpk zTf~@+VFMr9GL6&m{9ZvPuR7rlR$uodJ*sC$`uxrDW1~%}(&s zOQFvokQ8$Ix^8J$)r($CHChhc8ZLEPgOIFVs8D>*IV&p$JXf$*l}X-9uLh4xb^iLb z5@1ZO$^6&1_PHkHWW$YRyc%X#aQDFa#IV?|i{P4k4pKw+B_-$L=d}FIm5i|j3!u(gO?f|OhZ3smW*@KZeS?qc&5rhh{ zI-xTdkh2W5Fw>B)ZbPF2!hH4spw_v?1ZAe@jwj+dnW2~Bt6!nqBa9JW8Hn=hgf zCzx|8q$CN3P6hO>S3cZ$=0TC)^Gpq0cjaX}{nuOuNz=6EH<>uoO7X3bQwCk+6+nx1 zWi_W{iI4vA@t2t;!Ztj8BAU^yL(QqeGj_Mul-4awYN%-oTN)k zEM0W$%?c)5DX1S@lsD*Eh<^fWnL%tTTW5cNa5$wpySF1!j%8x`mi(b(TP&Zh(zRgW z*gCO-nZ_!}xMqu3+hr3v`&?cUDj<5#%a@HzL#m=KFyYI87COn!{-ebmRXo~HG-F#sn8CL;oJ_W`Pzx9EII};ZNw(09 z;f^wcgt1YoOW0@TwRtW`9M;W*QnD1L)wkgczkjdx_JEEQW!=LGu!0UeQh)l<<&|^F zJAGSuikGB|VS9N?qK<%Jmi?nyWLPS;8*Fm>$B1N|FVbW8b>iUT-X?b*WE*gebbQWiUMuP_K~{*TO7(aU9a3bth7&mX&@U@ z=1HdCKR27MHCzUSk7pa@zys-qQ(x^`m`&Bm&9B(r7}`o_VP>SSxq)_Hh}XGET~BYo zRz;30&z-}4->=DWlG}|d$_n2C=5al(d@tQ{Q!J_wwdlh9^vRM6ckKYSTD;tI-cglrRk4ygCwQ7WBLB$pNtzLEz)ERHcb<_3&5vW(Aag&jph8q z5ksC2hu#WaaqHN+_3vul} zw$QaH{|G^>xlejsAy^>7kO<^+A$N3bVCq`UTdn+jd?*|w;x4(-TZmq?WM>@XUk-{1 z{O>o%Zv5n12v4(7uOm5flz&ZyG~=dsif)}0_u+|&36k=G!uLe6$LbYpwx}g*2lA`y zu21!_l-jZ+&E3INPmcxJKXlj@UM(&F)4z~rqQ_DZ9D`mcfHRaIQ&skB^So`gSB*!{`!*%S4nhuw1;S8LV3(+p=;!KutfTL4{V?g^S38e8si z_$Tn)x2X8=4l~YFZx=28VH%xga(2;8KlI|vk*>bQKR*NsY(@BTRN};wq{3J)eLXJ3 zGFUXKIve98i|iPyv9gXnV%%zKqNKF=YrA=w?_!$(C16(2-TZEO@YOUU%U#IrhH;h| zLt^6Mc(A|&F9nL-P^O_Iw}hQBtL@l8SC_$9U_QF!n@gUUEVY&QF+Ls-?1ssEVJ#2? z*R%80Q9eXY{I~*^8g?Ha8%NMAp9{I&2YsbzKFptA3GztaFXujx40s8{9gykYi9V5e z`SLW&9u}&?$OfA|O&St8~q%zzC?xZ#};$$XRvV2xg7n`kFD^6bn%b6kOv3yt4CZ9vtx!zx6WKyFvefA~9(~5>KHB+uMXL3? z_Uk)UuC%0IsozBnR1hu(0)kRBq3DXpcFWZG?~7V;HPsMo9Zsp4PCvH$E%seKJ(F)3 z?gE(~>nvk9Wn+^JRT>;{?(6WiV`i6^b*ZOq6Vu&!j8n z=jrb0t(PcN?HoC2Vt?OlBsOf=#o3wUt%CtzBKcpSKkHdr8vJm@X=*5e+{64WSoRp@ zTHk$ycZWR>-Fq@RVIpq}W=6{E9_$Mcw-MtV;PnmQ#^79+dfv!XdYD!(6?n7yl#{C~ zg-4T#0B3Mu+$V%32s{*1bhL9$yV-rSX)AQ1e#dNGJT4?08~eUcS&1HSv78NkR_Zh- zb|s>l1i$kcJH_nuNY*{aLo1`BqWnnQC*D%q$Y>Rf(2URqyao(O8XXsZmWr(gqHgzH zn~_0j<4PfYml%AI@QE;Q7Fc8O6p}-eOwdDnWAO5v=?-OOf@A+rW)ITuz>+~JjEhd# z6-3EDc=(VQbxE`)`VaDjYisqelio*d(?4@@&yzU^{;<7aof+#c;Ct#kvg#TCuxtLW z4wpv?F8#q`pTJxr#j4N%W)y^KzO@fFx5T^rn>G(JcL)P{}72o{mfFS0y8UY$VB(};Gh?t7@d+9)t znH~WS4t3CsQ16ooFs=xFE!pjnF*!-O&LUZY%zy*>3qQa22mO~jJtl*pov(UmA-)%u z5;##qc@^N;x?Y7uL&SNB!K^uQnV2R2^A$uOz5$T?L7;w^L(^X@71(vdJjxSQ29W4b zoQyCh=#K82zZ5RG>mk%2{&=a!cr;h>@#!J|>OC?ZtcJ~k|5r?)^KatGP6MJA#J6<5 zaDhzp0;tJ2S4pPT;(J2GxQBSN5)u$`a1fISe_WQkuF5Q%4~{$vFa_4+f6kFdIzX&- z02PqwK9CDM04Y+koAWsm!)zYz9sHO<>B1@j5r<&ACF;NpPywUbt=C)H`{T#N8m@|> zkj@)(5Sj0jjE#Z|3X+xy{xCCQRY2M)df!GLI)RIF z${)(ok_wq%Y$4;ne{X^m+hb-_sH2NV^GjbMRxKyZ9s;`DZ##`%+)Bd@Ug8711QrJ4 z+XGtKC)^JVqedgPAtgwlI}&-C(fB?F|4u~y;#;=#DD%DFsgi~ev6j#e>4iDeC#=_B z_G1J79D-(1bHpEk0Z@>7R@(3s{lT6OJ83*TmX}L*H>QKPYrr*2g?O;Ob`Kb~QU3Tc z80L1mm?mPKJ$LHTD8NJ00b+;Gv04R@A81fX5QA61dS;~7lTqJzjUMHj;+MQdLLg{e z3ii|h=R`;1vNzZ|apB2GLxhj0XEIx+v9;frL({DRYR%!;dX7+tA9)q*H@-8Cz!pb* z2kL%|m>jIA@+zhSk*HV=^C)Er+b>+SaT=6R6t4a}GDp2!rFOvjKoSkHaq)K~-qc0e z2n)~$3cR2z=*0TF`p6N_xn>c^Y;{|kX~WcM$Stbv+M;2+X+oWJA;rBz6HX>9f@-Lw zPs!eDpKGztg-U&sJ%AwWR)+AP*aGiLII-$*dP{-b*8CIu0j7~B{z&GDM$eOab>^cz zW?;LZ7f%G#-e3F+);xMOM2R*~Z4Y7N00FQO=YTyJjMs199!dN4|0l0kD?v}pPE$Jn z-M{^7bI=coO$B**5y*sreZcAC$xBhn1_MiV0Zyh3HZGAGI)Y^cHq#C(O5XFg1M%X} zo&!4)pPJf)sqwghy7d%`yM3C3lvH?=_pQO#oz?RYJ7bgFaeV7&S|zOXI)DLs1_q9$ zh8H0UCJ{Z_kZ|P@^z~f&w`2pFRNtjP`IwLGnxraVg9z~V=Xvn)lGY7;l6q~yAMbaz zjR4=UgM*>Il|ue1y&6EJ!^7!2-$@=i0BJdOD}D;rOU8kF9NYawr@^fm;%0${y%0iT zD3{znbdMLaX2owhiCaMznlL@41VvV2XQw%Yrb4u4xOHJcMk6T!e@xZWvfB^0ku897k`kjP?D%8dxW_M%Lr<1Pw>Le7tp3 zTLeFzj1R@KR&Z=T;YJ;Ez6ZjBf0uUx(F?(Z0KZw9Cs!QCCF42jzuN@O5Jhacq6h94 z<#hZAz~O=XOQf$1{mL~CIH+nTQ+{f+=bW|m>w&Q5?ek0-0I*v!jvpcKkLieX_kmlu zeB1*a>c0arG?auAAzDl*?$qJFKO<8x-jktinnp@&zq-2Ao%8>FC`!oyn0q{pOr_JJ zG&TE;4OA8{z^HrV_nbVuTAKNyA`0)BE+)YA zNuYu4sodFswm9qKz>Y+?`L?o>MBHkk*g{Qd)MXc(!*OBXz7TKv%K_-&(F9C_Fo8<`~5I z8==_Qy&;k&Hm~^Q>sR04Z2J~*5=9EFxtOG6AYo#mqJ&(Qq=JCdCv6`F1(o2bhNxXV z)rf?Z6>O*5K)5UmBzHoG=)6d+yhM!Cs4D_do)Mx3&1v~v6;F~ZT-D3CI8w`eD^c{% z5fo0?&pb}PxbXYuF>D_>!y3on6Mc*{DKZohfaN0`;79ELSe`hMaBlMhsHRXRU^e0t zGb<@M^pV&vyB_=+_c)f{j?={u>-Wdy#W`x^6)2f4ZS_thpA7a#9DAa$L=vO0DT0kj z4%f$zALSvQ!}%&hHWO%7AA?^dSv*j>uECJ9gWeO;I z(t&pV5WFX5mxMk>XqEhr+xLlsw4o68w)5wy^GkdH9G~gWT&kaPC=x`|<*AkKUN zd%1TRD#9{_5C~xsBMl%3{nCc@>(#PrF0SS1?B+*yeFPMWwm1PvQ3Hl@O`B26M9YSu zmh?R+r6DnS3oRtU0B|nhSBqo~ZU&rkhOL73I_T_vI7*hZ-9fQZW7 z#G+#&BGmBvc-@M9ccuq>{&X{EWMq)cIb>MXy4mU}R2(N&>LdWes7H@}$bEZrCLS7+ z2&f@!rW#ic{ORsSJ&$r;BjOVW%hvzvet+7{ywcEg}c`9drRb-QBz4fmIls$wNZc@@30}0ARJ9_y1WJCbP;cN|~b(QbQ8a z28r`)H!;Qw5CD@#@4&A4n3?^PPiTWurJ9& z$ZbEbbaGE}V*_MGxITN`yjsD{lRUbS2JtQ8;#aYq=|iJbaHtU57NcTVgoW82X}q3m z8WyxKG+D;KVw%yZH6W#$(uPr5b$+smSHY6HlcTSUGkZ;IqEObM7pdte%?o%(9SpgJ z<$ZOAH8oM8rr=vgLLQ1W96sVSiM;DbNHR1Lg-i0NM)||cRZNG~f-xC)?Bew}o}!+S z+eDa$U7Jj{AQ_5)hshv92vj!%kD*8)w8;yHkffvyXn=7TM2uuFf!%)vG)UN2-rNw; zMQxHIVOQwzMHaDRYj={AdIxBwv8sGAZXT+!F`x|u-$7?ca>2jds$u5p7?I3!ftra- zVFf5fx?C;8ldyL*VIh&B;An#S=SHYkSx+a1dI5KAg)nP1DB?!Q6)(IzS$HuN^*sI? zmhq;Hod&5!39rZhE*C`X5r6~E*7N=>6H31Ohc#1NTHx(l&&0$;?kT#kaEPX7Scg*Z zW27u5T0aDROp@8($Z_tlSk?|?K^>b4X^E%;q)O=uPW)xu3_1ZlWR{ngHN2Qnhf+5Z z5uWh`G8P6h*aBr%m5ENvBKMNSfe@!PglO1X9^#+nmYQBugNJ9)XaM64{GXt$vqqbl zP)%S>!4L?ID5^|@{JU=WqIeg$v}bCS#=m3!#PYZIjngkAyZ*nb9rf7ibJr~VHRY>| z_0Xq4-9=I@wD6>W+Oxx;3(|M;Iut)Q$#@80o@8_=oc(%}76B}6D~#O`R@z4JR7xU* z57;G1bU;>Hj83jWbfp(O5yi#o(%0F?4vilAlh>aPFGuOf9 zWoV6o_J^i<{XGi(3(>7Z`h!YCc;`+&wD^dCp$lXa48sje2Gtayv()XfNd!qVCHdqC z+OEX1gB`3!{N*i#V!=bPe#Z_TY}D#NrbwPU4r8hU#e>>FelnH{+4W%eq{EFMgHTZT z`V|&>Mq8Ql2yt9+rZDP&m%Mla21y_yOB=_QJnu?i8JzJP)W9 zp0I=PfTLtXvC)lFcdxGQZaf0R`$oD3ebW!J88L)YI`w=cXA7!wBC7!kIXa)C6%b66 z3|y%Hb(3xVp{&!Hy%df(kBhltGb3oC=9u1UD#`@hV6027%B-dh` zyKlH&LR=gn@bo149sCXg&Kcd&%tMladES2Dk{(k=;&{NyK7Z!H%Ry<4Rksu&6J6iG z`vxToexJ;4it}=lU9cJxBx82wb2KF|S_DTv;lKdBuS4MF`#C*b-3suaBwCb#MhJQs z$WgC3w{Nc)&G@=$S{4uc(GoAkc5r;lce$}Mb}@&Q3jE%yzWh36QLFCX(t&9$GVp3b zhHj1)rvGcM1k9;=xkoZ~MaAep!@MS=nS2qkGP?vu+Nlj(o==a|H@2|g{0aioh~tQi z+T>KO;MtqIe;7ppy^oH_G_&S?_5i>EOpA@jPbjFv)b?=yovTPYCFyBksvVSoA3vt- zYxXY56t7lcv5*dw(K$%J^?C4aKFLn{g8SD>DxO><@>+Z_m<>A7{Sd1P`Cy?H+QV5+ z%s~fzIon=^n)^o|O?~+}EqxcJ`7o;-R;G!Vo$cK<2&beESMVae|IQ4D(Hb`?@?{Lrigt^XL zU5!7f5nYjJZQc>jj4-9&PqT7UzNly!e9p06fjb0Bi%iB%%?b5k*WhR6A0|nztkafP z^WU{FnQgZ^Exc}|(4onIhlP=JN`P^&bAez^Ex|7&UErHFs6wh{%|nCmA)dDlNw56} zN1C=HG1Uo4l~_?)V%2wvYHc`r2zkt(EY*b>`S8lGK|j*<@mF+bv1Og-?%lmdB4S*>&n9vttVnPvyj7SIl3iB zlWQ;P7w89MA7CxsVtD-cx1UaKuUj~O`dEur30_RUxaz}MasT-nP88nEa^1JlT}@xr4A+-6%&CF-osrC`0sAjWH0o4mDQ_>s&NCsnLpwp|% zwn4>pttsSX>Pk4%Z#6T!c^;1;Uu{WM?R?}MmEiIGeMV1K-cS+aC zf;V}}m-^}R=e@gkGiWD|g_(b{mB#>$+H-!U*!}b8-_oq}7cZV3 z@2Ng}G};5G0nhexd;kUNH8@J^cWQ-4EY-3$dH5K*eOTG6Sj7}NOJ1xyaNKmL0 zFi#1ScPhiZCxz3Sh5Vg%vT_gOIELYML54T9@9r6?qEU@!Z|8iaW)kXK#C{2Y+kAi_ zAW_woFpBI&ZoE_aH&@t85|l20xM2CBt;XEwfENLp2d~fM-Q7xho4ByW)KLOizRSHv z4bpdBLs^L-d+JtJ!keN(ptA}?aBcvqdL!~(GWw+J^A`^yI$XtL@_uIK`|+=a2UN&V zka(4kw%T%WWe!%i?WYvv;-E>LO(=B0a(~ z6eCw!&ZK5#J-UCtbo`nKJ~`CKtF$ZtiTVcy%A&X<375v{z2VefVKg1Ib4v@pIJ^P% zz-{J!1(i6da1hGbGPFj+mG75QD}ss?QK%uO5s($!kGgm&Q0U^tUZnpzm!6hZMO9UG z0we$1Ex6d(vAD1uG#jed$hf$@SO>VM$N>s~*b6Ug|7V2;vrzNLVe$~6#VJQUj)JxV z`wYezk}Munn3JXYWnXWToJafscD+Bat`5LHh~5M1?k32T4sS7F^}Pgxda3j8ul>#Qpcz}bOOZEmsn5`kB>m8Gjs`OP2<_i)xYd)UCGD* zj{P)5&v@QYH&-HlM+?BJ!?Bpd&7VHGjCQI+F;a?rG&C0GkcV-OYcRKb+0wdNiwypH zaw7l6?5qa`(5f6gCQ8oqt?%p%Pd~c_2=V*(?>pRviOT8}r1s-Cud{0?Z2NZy(QPVT z8sA7bE=kEu{kzU~jZ z-;bY~a(V?XI8f_7S)W|MMksbte`K0lTF8e$cZ1iq0D%8Oav!sW%@f{X0Q(yODU=JE0phr}UxuXzN%4Fo-{zn9n-TLjtQRYUWV(DHLQ#~AW{Olb~i)!(iXtH;H`qN0o(usdpuUyKby<=Sj?qg zxu2ju--u}?z;tqh9G^la6bQ&-0>eSaYvt;lhFcYn$4-8Z*-9OMb8>O^2=SFez?V_O zq4C@$t-dwuM1DEG#T#6{Zx>hpq@hK%!2$F_GFc1B9;s<*mS|I6NNO=#Fd4B;Lwx^z zXs9O3drBfV$T;Bk?b{i;wj6+sKr=%dQkbF3q4AlnrD7hNdeP(u-4B=TmqO6ngB%^I z3%(}uLXGcJJ;t?;9b1bQzi4i_(4KWgtH*O^bgD#4(=w?H`nxdy15_ks(fOp_ieN_U16KQ$vxP|Q_{pFrxk91qj1 zRtWK3r;?JgN@jKFZo-SK5OjEzmH~{vY1msgV|*7jyLW)CSF8P*%A(qSKBe{Evbk%3 zpJ8G2^OiFP}Y-% z7oV>i!RD9K-xOa~9YT{k;@$==c&W3}2c(Z<1cxxSb7un5upzyz!{#_%b;cfK9UbG zkAseQL5b!b<=hjq;#pjMV-pkcE=hlPc?IrK$M&LB*WuP( zVPERk9}i!{Dx?{b=1dir?&;%_`O=%44V~O5H&GcGgE|Yv zi$8hrAc~_%a4Q=A^{Wm=NF1mw;kU$#LpFMl-ZVm4Ks6bQjdl!p+70Y^2dG&?l*~$Z z!r%-%THv6-yAre9grKef?t<#&BX`%VBQR9rbwTmk0MSf!q&aY}*%>%e%jkfaPf3qKjmJi3=xufslp@b7W zo4T+W`}lp2yuyNl93XqDe@AztJUYB~B0Z~w-W-Z0bQdOO^>uY32$=$rh9xPy>*s}& zm!5smL&pnvjKt%;klcf<=t)kmqoE-e89dz55)E%EBx1)E+R=y1w2gT0LFH^u*KUFPp>W2FA8uELYc2#L*W2)Q$-xl+Z!2XmRz5AYYu7zc0G#XA)c`ntGPZ_a z1F|ffAU5DjaOS=H*vl;R)BiAA)&9MEAt_x7SbZpL%6ZHkK6qkE#o+@^EVZS6a)po* zYx+YioNU06doy0Pm$$VQQ*Faz?E>g7u-f+H@k6=dv!swc>DZIIJS3_yGcycZoS})y zJ4f${b!T3sv=JjEP=ORUvY>lIl(Eh6*p2?#a>k15=guJo!5kFmc9XTIk2@TdNNbVB&IAq@t=G#7 z$26Fk8v43eU%p@^N?HJOgkwc6lq_&hR<7~I*-x*qLX-#yj3FkZdl_jz8_o5^pPfHS zzkGfhDqL8Ch_bL=WtxvXrPSgA$!jY%bFBGbWFvcPhjS0bjJ$>o5Y(Y&bAgK*y6`&W z2s)VFWiKx~nj4au|Nh>tdXSl9FF{`f#T^-HgnhDUa4U;LFGd^wMB*w=Uo@7Ov0#N5 zRFcmWs%5Fd2xXPxSQlk)slmhbnPYt(BurUvFW(XY7|Gh6)wV zf?7Om1k8kN085ojELKzV$-oOxrNA1E&uoRP({qhOQ>1A@1q%pT50?PPc?=nLiQ1*yVVetJ_`#u}Mthsw z7`c5Hl=u-FgEpocGFlR^;_bb2s@V|yb^{EVciq1)%U>(*Z~KW)vNeERI xM Date: Tue, 10 Oct 2017 13:51:40 -0700 Subject: [PATCH 322/342] gan design with graph --- doc/design/gan_api.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/design/gan_api.md b/doc/design/gan_api.md index 2fb30432cb..f9bf5939f4 100644 --- a/doc/design/gan_api.md +++ b/doc/design/gan_api.md @@ -29,13 +29,13 @@ In our GAN design, we wrap it as a user-friendly easily customized python API to


-The overall running logic of GAN. The black solid arrows indicate the forward pass; the green dashed arrows indicate the backward pass of generator training; the red dashed arrows indicate the backward pass of the discriminator training. The BP pass of the green (red) arrow should only update the parameters in the green (red) boxes. The diamonds indicate the data providers. d\_loss and g\_loss mared in red and green are the two targets we would like to run. +Figure 1. The overall running logic of GAN. The black solid arrows indicate the forward pass; the green dashed arrows indicate the backward pass of generator training; the red dashed arrows indicate the backward pass of the discriminator training. The BP pass of the green (red) arrow should only update the parameters in the green (red) boxes. The diamonds indicate the data providers. d\_loss and g\_loss marked in red and green are the two targets we would like to run.


-Photo borrowed from the original DC-GAN paper. +Figure 2. Photo borrowed from the original DC-GAN paper.

## The Conditional-GAN might be a class. From f185af8d7bbe45cf8029a5c3727e27a5d001c984 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Tue, 10 Oct 2017 13:55:01 -0700 Subject: [PATCH 323/342] Complete parameter --- python/paddle/v2/framework/graph.py | 50 ++++++++++++++++++- .../v2/framework/tests/test_parameter.py | 27 ++++++++++ 2 files changed, 75 insertions(+), 2 deletions(-) create mode 100644 python/paddle/v2/framework/tests/test_parameter.py diff --git a/python/paddle/v2/framework/graph.py b/python/paddle/v2/framework/graph.py index ba14885462..0f0a2847e5 100644 --- a/python/paddle/v2/framework/graph.py +++ b/python/paddle/v2/framework/graph.py @@ -1,13 +1,19 @@ import paddle.v2.framework.core as core import collections import numpy as np +import copy __all__ = ['Block', 'Variable', 'Program', 'Operator'] class Variable(object): - def __init__(self, block, name=None, shape=None, dtype=None, - lod_level=None): + def __init__(self, + block, + name=None, + shape=None, + dtype=None, + lod_level=None, + **kwargs): self.block = block if name is None: @@ -144,6 +150,10 @@ class Block(object): def create_var(self, *args, **kwargs): return Variable(self, *args, **kwargs) + def create_parameter(self, *args, **kwargs): + global_block = self.program.global_block() + return Parameter(global_block, *args, **kwargs) + def append_op(self, *args, **kwargs): op_desc = self.desc.append_op() op = Operator(self, op_desc, *args, **kwargs) @@ -190,5 +200,41 @@ class Program(object): self.current_block_idx = self.current_block().parent_idx +class Parameter(Variable): + def __init__(self, block, shape, dtype, **kwargs): + if shape is None or dtype is None: + raise ValueError("Parameter must set shape and dtype") + if len(shape) == 0: + raise ValueError("Parameter shape cannot be empty") + + for each in shape: + if each < 0: + raise ValueError("Parameter shape should not be related with " + "batch-size") + + Variable.__init__(self, block, shape=shape, dtype=dtype, **kwargs) + self.trainable = kwargs.get('trainable', True) + self.init_attr = kwargs.get('initialize_attr', { + 'type': 'uniform_random', + 'min': -1.0, + 'max': 1.0 + }) + + self.optimize_attr = kwargs.get('optimize_attr', {'learning_rate': 1.0}) + self._append_initialize_ops_() + + def _append_initialize_ops_(self): + attr = copy.deepcopy(self.init_attr) + op_type = attr.pop('type', None) + block = self.block + assert isinstance(block, Block) + shape = self.shape + attr['dims'] = shape + attr['data_type'] = int(self.data_type) + op = block.prepend_op( + type=op_type, inputs=None, outputs={'Out': [self]}, attrs=attr) + self.op = op + + # program is a global instance. g_program = Program.instance() diff --git a/python/paddle/v2/framework/tests/test_parameter.py b/python/paddle/v2/framework/tests/test_parameter.py new file mode 100644 index 0000000000..3b5d38f257 --- /dev/null +++ b/python/paddle/v2/framework/tests/test_parameter.py @@ -0,0 +1,27 @@ +import unittest +from paddle.v2.framework.graph import g_program +import paddle.v2.framework.core as core + + +class TestParameter(unittest.TestCase): + def test_param(self): + b = g_program.create_block() + param = b.create_parameter( + name='fc.w', + shape=[784, 100], + dtype='float32', + initialize_attr={ + 'type': 'uniform_random', + 'seed': 13, + 'min': -5.0, + 'max': 5.0 + }) + self.assertIsNotNone(param) + self.assertEqual('fc.w', param.name) + self.assertEqual((784, 100), param.shape) + self.assertEqual(core.DataType.FP32, param.data_type) + self.assertEqual(0, param.block.idx) + + +if __name__ == '__main__': + unittest.main() From e31cfcd2715feb16a5961020b9ae19c3e3013123 Mon Sep 17 00:00:00 2001 From: zchen0211 Date: Tue, 10 Oct 2017 14:06:29 -0700 Subject: [PATCH 324/342] gan --- doc/design/gan_api.md | 22 +--------------------- 1 file changed, 1 insertion(+), 21 deletions(-) diff --git a/doc/design/gan_api.md b/doc/design/gan_api.md index f9bf5939f4..5764112f3c 100644 --- a/doc/design/gan_api.md +++ b/doc/design/gan_api.md @@ -6,32 +6,12 @@ It applies several important concepts in machine learning system design, includi In our GAN design, we wrap it as a user-friendly easily customized python API to design different models. We take the conditional DC-GAN (Unsupervised Representation Learning with Deep Convolutional Generative Adversarial Networks [https://arxiv.org/abs/1511.06434]) as an example due to its good performance on image generation. -| important building blocks | People in Charge | Required | -|---------------------------|-------------------|----------| -| convolution 2d (done) | Chengduo | Y | -| cudnn conv 2d (missing) | Chengduo | N | -| deconv 2d (missing) | Zhuoyuan, Zhihong | Y | -| cudnn deconv 2d (missing) | Zhuoyuan, Zhihong | N | -| batch norm (missing) | Zhuoyuan, Jiayi | Y | -| cudnn batch norm (missing)| Zhuoyuan, Jiayi | N | -| max-pooling (done) | ? | Y | -| cudnn-max-pool (missing) | Chengduo | Y | -| fc (done) | ? | Y | -| softmax loss (done) | ? | Y | -| reshape op (done) | ? | Y | -| Dependency Engine (done) | Jiayi | Y * | -| Python API (done) | Longfei, Jiayi | Y * | -| Executor (done) | Tony | Y * | -| Multi optimizer (woking) | Longfei | Y * | -| Optimizer with any para | ? | Y * | -| Concat op (done) | ? | N (Cond) | -| Repmat op (done) | ? | N (Cond) | -


Figure 1. The overall running logic of GAN. The black solid arrows indicate the forward pass; the green dashed arrows indicate the backward pass of generator training; the red dashed arrows indicate the backward pass of the discriminator training. The BP pass of the green (red) arrow should only update the parameters in the green (red) boxes. The diamonds indicate the data providers. d\_loss and g\_loss marked in red and green are the two targets we would like to run.

+The operators, layers and functions required/optional to build a GAN demo is summarized in https://github.com/PaddlePaddle/Paddle/issues/4563.


From 16b2f963b8a45175d2caaf1fcdc02df4cca8e6d3 Mon Sep 17 00:00:00 2001 From: zchen0211 Date: Tue, 10 Oct 2017 14:08:45 -0700 Subject: [PATCH 325/342] gan --- doc/design/gan_api.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/design/gan_api.md b/doc/design/gan_api.md index 5764112f3c..fb41df8615 100644 --- a/doc/design/gan_api.md +++ b/doc/design/gan_api.md @@ -7,7 +7,7 @@ It applies several important concepts in machine learning system design, includi In our GAN design, we wrap it as a user-friendly easily customized python API to design different models. We take the conditional DC-GAN (Unsupervised Representation Learning with Deep Convolutional Generative Adversarial Networks [https://arxiv.org/abs/1511.06434]) as an example due to its good performance on image generation.

-
+
Figure 1. The overall running logic of GAN. The black solid arrows indicate the forward pass; the green dashed arrows indicate the backward pass of generator training; the red dashed arrows indicate the backward pass of the discriminator training. The BP pass of the green (red) arrow should only update the parameters in the green (red) boxes. The diamonds indicate the data providers. d\_loss and g\_loss marked in red and green are the two targets we would like to run.

From 2e7cd201a4337f49ce07de8cde11c3b8dd90f9ab Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Tue, 10 Oct 2017 21:30:03 +0000 Subject: [PATCH 326/342] remove log in backward --- paddle/framework/backward.cc | 1 - paddle/framework/executor_test.cc | 1 - 2 files changed, 2 deletions(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index 774d8e4918..0a4688db9c 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -378,7 +378,6 @@ std::vector> MakeBlockBackward( backward_descs[dup_op[i]]->Rename(out_name, new_name); sum_op_inputs.emplace_back(new_name); } - LOG(INFO) << "sum_op_inputs size " << sum_op_inputs.size(); std::unique_ptr sum_op(new OpDescBind( "sum", {{"X", sum_op_inputs}}, {{"Out", {out_name}}}, {})); pending_sum_ops.push_back({dup_op.back(), std::move(sum_op)}); diff --git a/paddle/framework/executor_test.cc b/paddle/framework/executor_test.cc index 5ad5b98e7b..1cd7270240 100644 --- a/paddle/framework/executor_test.cc +++ b/paddle/framework/executor_test.cc @@ -35,7 +35,6 @@ USE_OP(squared_l2_distance); USE_OP(fill_constant); USE_OP(sgd); -using std::string; using namespace paddle::platform; using namespace paddle::framework; From 436ea50d5fc8867848892fc53b7f82aa59ae3b41 Mon Sep 17 00:00:00 2001 From: qijun Date: Tue, 10 Oct 2017 14:31:47 -0700 Subject: [PATCH 327/342] follow comments --- paddle/framework/executor.cc | 4 +++- paddle/framework/executor_test.cc | 17 +++++++++-------- paddle/framework/scope.cc | 4 ++-- paddle/framework/scope.h | 2 +- paddle/operators/feed_op.h | 2 +- paddle/operators/fetch_op.h | 2 +- 6 files changed, 17 insertions(+), 14 deletions(-) diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc index def1d1fd06..1db5c878d6 100644 --- a/paddle/framework/executor.cc +++ b/paddle/framework/executor.cc @@ -44,7 +44,9 @@ Executor::Executor(const std::vector& places) { device_contexts_[i] = new platform::CUDADeviceContext( boost::get(places[i])); #else - PADDLE_THROW("'GPUPlace' is not supported in CPU only device."); + PADDLE_THROW( + "'GPUPlace' is not supported, Please re-compile with WITH_GPU " + "option"); #endif } } diff --git a/paddle/framework/executor_test.cc b/paddle/framework/executor_test.cc index 5ad5b98e7b..f36284b528 100644 --- a/paddle/framework/executor_test.cc +++ b/paddle/framework/executor_test.cc @@ -67,7 +67,7 @@ void AddOp(const std::string& type, const VariableNameMap& inputs, template void SetFeedVariable(const std::vector>& inputs, const std::vector>& dims) { - Variable* g_feed_value = GetGlobalScope()->FindVar("feed_value"); + Variable* g_feed_value = GetGlobalScope().FindVar("feed_value"); auto& feed_inputs = *(g_feed_value->GetMutable>()); size_t size = inputs.size(); @@ -82,7 +82,7 @@ void SetFeedVariable(const std::vector>& inputs, // So we can memcpy the data from fetch_value to vector template std::vector> GetFetchVariable() { - Variable* g_fetch_value = GetGlobalScope()->FindVar("fetch_value"); + Variable* g_fetch_value = GetGlobalScope().FindVar("fetch_value"); auto& fetch_outputs = *(g_fetch_value->GetMutable>()); @@ -232,8 +232,9 @@ TEST_F(ExecutorTesterRandom, CPU) { std::unique_ptr executor(new Executor(places)); - executor->Run(init_pdesc_, GetGlobalScope(), 0); - executor->Run(pdesc_, GetGlobalScope(), 0); + executor->Run(init_pdesc_, &GetGlobalScope(), 0); + SetFeedVariable(inputs_, dims_); + executor->Run(pdesc_, &GetGlobalScope(), 0); std::vector> result = GetFetchVariable(); } @@ -252,7 +253,7 @@ TEST_F(ExecutorTesterFeedAndFetch, CPU) { for (int batch_id = 0; batch_id < 3; batch_id++) { SetFeedVariable(inputs_, dims_); - executor->Run(pdesc_, GetGlobalScope(), 0); + executor->Run(pdesc_, &GetGlobalScope(), 0); std::vector> result = GetFetchVariable(); PADDLE_ENFORCE_EQ(result.size(), inputs_.size()); for (size_t i = 0; i < result.size(); ++i) { @@ -280,10 +281,10 @@ TEST_F(ExecutorTesterRandom, GPU) { std::unique_ptr executor(new Executor(places)); - executor->Run(init_pdesc_, GetGlobalScope(), 0); + executor->Run(init_pdesc_, &GetGlobalScope(), 0); for (int batch_id = 0; batch_id < 3; batch_id++) { SetFeedVariable(inputs_, dims_); - executor->Run(pdesc_, GetGlobalScope(), 0); + executor->Run(pdesc_, &GetGlobalScope(), 0); } } @@ -304,7 +305,7 @@ TEST_F(ExecutorTesterFeedAndFetch, GPU) { for (int batch_id = 0; batch_id < 3; batch_id++) { SetFeedVariable(inputs_, dims_); - executor->Run(pdesc_, GetGlobalScope(), 0); + executor->Run(pdesc_, &GetGlobalScope(), 0); std::vector> result = GetFetchVariable(); PADDLE_ENFORCE_EQ(result.size(), inputs_.size()); for (size_t i = 0; i < result.size(); ++i) { diff --git a/paddle/framework/scope.cc b/paddle/framework/scope.cc index c9e53a0d85..5821bac928 100644 --- a/paddle/framework/scope.cc +++ b/paddle/framework/scope.cc @@ -67,14 +67,14 @@ void Scope::DropKids() { std::once_flag feed_variable_flag; -framework::Scope* GetGlobalScope() { +framework::Scope& GetGlobalScope() { static std::unique_ptr g_scope{nullptr}; std::call_once(feed_variable_flag, [&]() { g_scope.reset(new framework::Scope()); g_scope->NewVar("feed_value"); g_scope->NewVar("fetch_value"); }); - return g_scope.get(); + return *(g_scope.get()); } } // namespace framework diff --git a/paddle/framework/scope.h b/paddle/framework/scope.h index 319d291efe..a8cfb107c2 100644 --- a/paddle/framework/scope.h +++ b/paddle/framework/scope.h @@ -73,7 +73,7 @@ class Scope { DISABLE_COPY_AND_ASSIGN(Scope); }; -framework::Scope* GetGlobalScope(); +framework::Scope& GetGlobalScope(); } // namespace framework } // namespace paddle diff --git a/paddle/operators/feed_op.h b/paddle/operators/feed_op.h index e406d22209..9d8158299f 100644 --- a/paddle/operators/feed_op.h +++ b/paddle/operators/feed_op.h @@ -26,7 +26,7 @@ class FeedKernel : public framework::OpKernel { framework::Tensor* out = ctx.Output("Out"); out->mutable_data(ctx.GetPlace()); framework::Variable* g_feed_variable = - framework::GetGlobalScope()->FindVar("feed_value"); + framework::GetGlobalScope().FindVar("feed_value"); const auto& tensors = g_feed_variable->Get>(); int col = ctx.template Attr("col"); diff --git a/paddle/operators/fetch_op.h b/paddle/operators/fetch_op.h index 6fee8b0589..eb9c3a7b59 100644 --- a/paddle/operators/fetch_op.h +++ b/paddle/operators/fetch_op.h @@ -25,7 +25,7 @@ class FetchKernel : public framework::OpKernel { void Compute(const framework::ExecutionContext& ctx) const override { const framework::Tensor* input = ctx.Input("Input"); framework::Variable* g_fetch_variable = - framework::GetGlobalScope()->FindVar("fetch_value"); + framework::GetGlobalScope().FindVar("fetch_value"); auto* tensors = g_fetch_variable->GetMutable>(); int col = ctx.template Attr("col"); From a528a9717ec5880f271b9d216cb5532cee9d4504 Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Tue, 10 Oct 2017 21:32:03 +0000 Subject: [PATCH 328/342] remove prune as member function to function --- paddle/framework/executor.cc | 120 +++++++++++++++++------------------ paddle/framework/executor.h | 23 ++++--- 2 files changed, 71 insertions(+), 72 deletions(-) diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc index def1d1fd06..3c35102ff9 100644 --- a/paddle/framework/executor.cc +++ b/paddle/framework/executor.cc @@ -32,66 +32,7 @@ namespace framework { const std::string kFeedOpType = "feed"; const std::string kFetchOpType = "fetch"; -Executor::Executor(const std::vector& places) { - PADDLE_ENFORCE_GT(places.size(), 0); - device_contexts_.resize(places.size()); - for (size_t i = 0; i < places.size(); i++) { - if (platform::is_cpu_place(places[i])) { - device_contexts_[i] = new platform::CPUDeviceContext( - boost::get(places[i])); - } else if (platform::is_gpu_place(places[i])) { -#ifdef PADDLE_WITH_CUDA - device_contexts_[i] = new platform::CUDADeviceContext( - boost::get(places[i])); -#else - PADDLE_THROW("'GPUPlace' is not supported in CPU only device."); -#endif - } - } -} - -Executor::~Executor() { - for (auto& device_context : device_contexts_) { - delete device_context; - } -} - -void Executor::Run(const ProgramDesc& pdesc, Scope* scope, int block_id) { - // TODO(tonyyang-svail): - // - only runs on the first device (i.e. no interdevice communication) - // - will change to use multiple blocks for RNN op and Cond Op - PADDLE_ENFORCE_GT(pdesc.blocks_size(), block_id); - auto& block = pdesc.blocks(block_id); - auto& device = device_contexts_[0]; - - // Instantiate all the vars in the global scope - for (auto& var : block.vars()) { - scope->NewVar(var.name()); - } - - Scope& local_scope = scope->NewScope(); - - std::vector should_run = Prune(pdesc, block_id); - PADDLE_ENFORCE_EQ(should_run.size(), static_cast(block.ops_size())); - for (size_t i = 0; i < should_run.size(); ++i) { - if (should_run[i]) { - for (auto& var : block.ops(i).outputs()) { - for (auto& argu : var.arguments()) { - if (local_scope.FindVar(argu) == nullptr) { - local_scope.NewVar(argu); - } - } - } - auto op = paddle::framework::OpRegistry::CreateOp(block.ops(i)); - op->Run(local_scope, *device); - } - } - - // TODO(tonyyang-svail): - // - Destroy local_scope -} - -std::vector Executor::Prune(const ProgramDesc& pdesc, int block_id) { +std::vector Prune(const ProgramDesc& pdesc, int block_id) { // TODO(tonyyang-svail): // - will change to use multiple blocks for RNN op and Cond Op @@ -159,5 +100,64 @@ std::vector Executor::Prune(const ProgramDesc& pdesc, int block_id) { return should_run; } +Executor::Executor(const std::vector& places) { + PADDLE_ENFORCE_GT(places.size(), 0); + device_contexts_.resize(places.size()); + for (size_t i = 0; i < places.size(); i++) { + if (platform::is_cpu_place(places[i])) { + device_contexts_[i] = new platform::CPUDeviceContext( + boost::get(places[i])); + } else if (platform::is_gpu_place(places[i])) { +#ifdef PADDLE_WITH_CUDA + device_contexts_[i] = new platform::CUDADeviceContext( + boost::get(places[i])); +#else + PADDLE_THROW("'GPUPlace' is not supported in CPU only device."); +#endif + } + } +} + +Executor::~Executor() { + for (auto& device_context : device_contexts_) { + delete device_context; + } +} + +void Executor::Run(const ProgramDesc& pdesc, Scope* scope, int block_id) { + // TODO(tonyyang-svail): + // - only runs on the first device (i.e. no interdevice communication) + // - will change to use multiple blocks for RNN op and Cond Op + PADDLE_ENFORCE_GT(pdesc.blocks_size(), block_id); + auto& block = pdesc.blocks(block_id); + auto& device = device_contexts_[0]; + + // Instantiate all the vars in the global scope + for (auto& var : block.vars()) { + scope->NewVar(var.name()); + } + + Scope& local_scope = scope->NewScope(); + + std::vector should_run = Prune(pdesc, block_id); + PADDLE_ENFORCE_EQ(should_run.size(), static_cast(block.ops_size())); + for (size_t i = 0; i < should_run.size(); ++i) { + if (should_run[i]) { + for (auto& var : block.ops(i).outputs()) { + for (auto& argu : var.arguments()) { + if (local_scope.FindVar(argu) == nullptr) { + local_scope.NewVar(argu); + } + } + } + auto op = paddle::framework::OpRegistry::CreateOp(block.ops(i)); + op->Run(local_scope, *device); + } + } + + // TODO(tonyyang-svail): + // - Destroy local_scope +} + } // namespace framework } // namespace paddle diff --git a/paddle/framework/executor.h b/paddle/framework/executor.h index 7fac4f4f46..4e3bc2c0a5 100644 --- a/paddle/framework/executor.h +++ b/paddle/framework/executor.h @@ -36,21 +36,20 @@ class Executor { */ void Run(const ProgramDesc&, Scope*, int); - protected: - /* @Brief - * Pruning the graph - * - * @param - * ProgramDesc - * - * @return - * vector Same size as ops. Indicates whether an op should be run. - */ - std::vector Prune(const ProgramDesc& pdesc, int block_id); - private: std::vector device_contexts_; }; +/* @Brief + * Pruning the graph + * + * @param + * ProgramDesc + * + * @return + * vector Same size as ops. Indicates whether an op should be run. + */ +std::vector Prune(const ProgramDesc& pdesc, int block_id); + } // namespace framework } // namespace paddle From 434949ca2d23a2fec5c3b4ab8e6bcb0ea18921fc Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Tue, 10 Oct 2017 21:51:43 +0000 Subject: [PATCH 329/342] clean up for merge --- paddle/framework/executor.cc | 61 +----------------------------------- 1 file changed, 1 insertion(+), 60 deletions(-) diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc index 69c21d7457..886e9ab33e 100644 --- a/paddle/framework/executor.cc +++ b/paddle/framework/executor.cc @@ -93,7 +93,7 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope, int block_id) { // - Destroy local_scope } -std::vector Executor::Prune(const ProgramDesc& pdesc, int block_id) { +std::vector Prune(const ProgramDesc& pdesc, int block_id) { // TODO(tonyyang-svail): // - will change to use multiple blocks for RNN op and Cond Op @@ -161,64 +161,5 @@ std::vector Executor::Prune(const ProgramDesc& pdesc, int block_id) { return should_run; } -Executor::Executor(const std::vector& places) { - PADDLE_ENFORCE_GT(places.size(), 0); - device_contexts_.resize(places.size()); - for (size_t i = 0; i < places.size(); i++) { - if (platform::is_cpu_place(places[i])) { - device_contexts_[i] = new platform::CPUDeviceContext( - boost::get(places[i])); - } else if (platform::is_gpu_place(places[i])) { -#ifdef PADDLE_WITH_CUDA - device_contexts_[i] = new platform::CUDADeviceContext( - boost::get(places[i])); -#else - PADDLE_THROW("'GPUPlace' is not supported in CPU only device."); -#endif - } - } -} - -Executor::~Executor() { - for (auto& device_context : device_contexts_) { - delete device_context; - } -} - -void Executor::Run(const ProgramDesc& pdesc, Scope* scope, int block_id) { - // TODO(tonyyang-svail): - // - only runs on the first device (i.e. no interdevice communication) - // - will change to use multiple blocks for RNN op and Cond Op - PADDLE_ENFORCE_GT(pdesc.blocks_size(), block_id); - auto& block = pdesc.blocks(block_id); - auto& device = device_contexts_[0]; - - // Instantiate all the vars in the global scope - for (auto& var : block.vars()) { - scope->NewVar(var.name()); - } - - Scope& local_scope = scope->NewScope(); - - std::vector should_run = Prune(pdesc, block_id); - PADDLE_ENFORCE_EQ(should_run.size(), static_cast(block.ops_size())); - for (size_t i = 0; i < should_run.size(); ++i) { - if (should_run[i]) { - for (auto& var : block.ops(i).outputs()) { - for (auto& argu : var.arguments()) { - if (local_scope.FindVar(argu) == nullptr) { - local_scope.NewVar(argu); - } - } - } - auto op = paddle::framework::OpRegistry::CreateOp(block.ops(i)); - op->Run(local_scope, *device); - } - } - - // TODO(tonyyang-svail): - // - Destroy local_scope -} - } // namespace framework } // namespace paddle From 843ed8e3207583a8009526cbdac43e31e917de66 Mon Sep 17 00:00:00 2001 From: Yan Chunwei Date: Tue, 10 Oct 2017 18:08:33 -0400 Subject: [PATCH 330/342] dynamic recurrent op forward c++ implentation (#4597) --- cmake/configure.cmake | 4 + paddle/framework/operator.h | 6 +- paddle/framework/tensor_array.h | 4 +- paddle/operators/CMakeLists.txt | 1 + paddle/operators/dynamic_recurrent_op.cc | 276 ++++++++++++++++++ paddle/operators/dynamic_recurrent_op.h | 158 ++++++++++ paddle/operators/dynamic_recurrent_op_test.cc | 222 ++++++++++++++ 7 files changed, 666 insertions(+), 5 deletions(-) create mode 100644 paddle/operators/dynamic_recurrent_op.cc create mode 100644 paddle/operators/dynamic_recurrent_op.h create mode 100644 paddle/operators/dynamic_recurrent_op_test.cc diff --git a/cmake/configure.cmake b/cmake/configure.cmake index c1c93e17fd..db8f5ab045 100644 --- a/cmake/configure.cmake +++ b/cmake/configure.cmake @@ -24,6 +24,10 @@ if(WITH_DOUBLE) add_definitions(-DPADDLE_TYPE_DOUBLE) endif(WITH_DOUBLE) +if(WITH_TESTING) + add_definitions(-DPADDLE_WITH_TESTING) +endif(WITH_TESTING) + if(NOT WITH_TIMER) add_definitions(-DPADDLE_DISABLE_TIMER) endif(NOT WITH_TIMER) diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h index 1e9ace9987..15f80b5720 100644 --- a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -142,9 +142,9 @@ class OperatorBase { // Macro for define a clone method. // If you are writing an kernel operator, `Clone` will be defined when you // register it. i.e. `Clone` method is not needed to define by yourself. -#define DEFINE_OP_CLONE_METHOD(cls) \ - std::unique_ptr Clone() const final { \ - return std::unique_ptr(new cls(*this)); \ +#define DEFINE_OP_CLONE_METHOD(cls) \ + std::unique_ptr<::paddle::framework::OperatorBase> Clone() const final { \ + return std::unique_ptr<::paddle::framework::OperatorBase>(new cls(*this)); \ } // Macro for define a default constructor for Operator. diff --git a/paddle/framework/tensor_array.h b/paddle/framework/tensor_array.h index 94a14c2df4..293da04997 100644 --- a/paddle/framework/tensor_array.h +++ b/paddle/framework/tensor_array.h @@ -87,12 +87,12 @@ class TensorArray { LoDTensor Stack() const; /* - * Unpacks the given division of a rank-`R` tensor into rank-`(R-1)` tensors. + * Unstacks the given division of a rank-`R` tensor into rank-`(R-1)` tensors. */ void Unstack(const LoDTensor &source) const; /* - * Unpacks the given division of a rank-`R` tensor into rank-`(R-1)` tensors, + * Unstacks the given division of a rank-`R` tensor into rank-`(R-1)` tensors, * with memory of tensors shared. */ void UnstackShared(const LoDTensor &source) const; diff --git a/paddle/operators/CMakeLists.txt b/paddle/operators/CMakeLists.txt index d132c1813e..7dae8fe2f9 100644 --- a/paddle/operators/CMakeLists.txt +++ b/paddle/operators/CMakeLists.txt @@ -133,3 +133,4 @@ cc_test(gather_test SRCS gather_test.cc DEPS tensor) cc_test(net_op_test SRCS net_op_test.cc DEPS net_op) cc_test(scatter_test SRCS scatter_test.cc DEPS tensor) cc_test(strided_memcpy_test SRCS strided_memcpy_test.cc DEPS tensor paddle_memory) +cc_test(dynamic_recurrent_op_test SRCS dynamic_recurrent_op_test.cc DEPS dynamic_recurrent_op recurrent_op tensor_array) diff --git a/paddle/operators/dynamic_recurrent_op.cc b/paddle/operators/dynamic_recurrent_op.cc new file mode 100644 index 0000000000..b919aef8fb --- /dev/null +++ b/paddle/operators/dynamic_recurrent_op.cc @@ -0,0 +1,276 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve . + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#include "paddle/operators/dynamic_recurrent_op.h" + +#include "paddle/framework/op_registry.h" + +namespace paddle { +namespace operators { + +using framework::Scope; +using framework::TensorArray; +using framework::LoDTensor; +using framework::Variable; + +namespace detail { + +inline void CreateVariables(Scope& scope, + const std::vector& var_names) { + for (const auto& name : var_names) { + scope.NewVar(name); + } +} + +} // namespace detail + +class DynamicRecurrentOpProtoAndCheckerMaker + : public framework::OpProtoAndCheckerMaker { + public: + DynamicRecurrentOpProtoAndCheckerMaker(framework::OpProto* proto, + framework::OpAttrChecker* op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + const auto& name = DynamicRecurrentOp::kArgName; + // inputs and outputs stored in proto + AddInput(name.inlinks, + "the inputs that need to be segmented for each step.") + .AsDuplicable(); + AddInput(name.boot_memories, "variables to initialize memories.") + .AsDuplicable(); + + AddOutput(name.outlinks, "the outputs that need to concated for all steps.") + .AsDuplicable(); + AddOutput(name.step_scopes, "step scopes"); + + // Attributes stored in AttributeMap + AddAttr>(name.pre_memories, + "names of pre-memories"); + AddAttr>(name.memories, "names of memories"); + + AddComment("This is a RNN operator for varience-length sequences."); + } +}; + +void DynamicRecurrentOp::Run(const Scope& scope, + const platform::DeviceContext& dev_ctx) const { + cache_.Init(kArgName, *this, scope, &arg_); + SplitInputs(); + CreateScopes(); + WriteStepInputs(); + InitStates(); + + // call stepnet in all the time steps + for (size_t step = 0; step < cache_.num_steps; step++) { + auto& step_scope = cache_.GetScope(step); + stepnet_->Run(step_scope, dev_ctx); + } + + WriteStepOutputs(); + ConcatOutputs(); +} + +void DynamicRecurrentOp::SplitInputs() const { + // TODO(superjom) make level a config + // TODO(superjom) check all the inputs has the same LoD + int level = 0; + const auto& inlinks = cache_.inlinks; + for (const auto& item : inlinks) { + const auto& var = item.second; + const auto& tensor = var->Get(); + TensorArray& ta = step_inputs_[item.first]; + dy_seq_metas_[item.first] = + ta.Unpack(tensor, level, true /*length_descend*/); + + if (cache_.num_steps) { + PADDLE_ENFORCE_EQ(ta.size(), cache_.num_steps, + "inputs should have the same steps"); + } else { + cache_.num_steps = ta.size(); + } + } +} + +void DynamicRecurrentOp::WriteStepInputs() const { + for (const auto& item : cache_.inlinks) { + auto ta_it = step_inputs_.find(item.first); + PADDLE_ENFORCE(ta_it != step_inputs_.end(), + "step_inputs_ not compatible with memory set"); + TensorArray& ta = ta_it->second; + for (size_t step = 0; step < ta.size(); step++) { + auto tensor = ta.Read(step); + auto& step_scope = cache_.GetScope(step); + Variable* var = step_scope.FindVar(item.first); + if (var == nullptr) { + var = step_scope.NewVar(item.first); + } + var->GetMutable()->ShareDataWith(tensor); + } + } +} + +void DynamicRecurrentOp::WriteStepOutputs() const { + for (size_t step = 0; step < cache_.scopes->size(); step++) { + auto& scope = cache_.GetScope(step); + for (auto& item : step_outputs_) { + auto* var = scope.FindVar(item.first); + if (var == nullptr) { + var = scope.NewVar(item.first); + } + auto* tensor = var->GetMutable(); + item.second.WriteShared(step, *tensor); + } + } +} + +void DynamicRecurrentOp::CreateScopes() const { + PADDLE_ENFORCE_GT(cache_.num_steps, 0); + // resize scopes + size_t num_scopes_need_create = cache_.num_steps - cache_.scopes->size(); + for (size_t i = 0; i < num_scopes_need_create; i++) { + cache_.scopes->emplace_back(&cache_.scope->NewScope()); + } + + // init temporary inputs + PADDLE_ENFORCE_NOT_NULL(stepnet_, "stepnet should be set first"); + std::vector memories; + std::vector pre_memories; + std::transform(arg_.memories.begin(), arg_.memories.end(), + std::back_inserter(memories), + [](const rnn::MemoryAttr& m) { return m.var; }); + std::transform(arg_.memories.begin(), arg_.memories.end(), + std::back_inserter(pre_memories), + [](const rnn::MemoryAttr& m) { return m.pre_var; }); + + for (size_t step = 0; step < cache_.num_steps; step++) { + auto& scope = cache_.GetScope(step); + detail::CreateVariables(scope, arg_.inlinks); + detail::CreateVariables(scope, arg_.outlinks); + detail::CreateVariables(scope, memories); + detail::CreateVariables(scope, pre_memories); + } +} + +void DynamicRecurrentOp::ConcatOutputs() const { + // TODO(superjom) transform this to a config + int level = 0; + // TODO(superjom) pass in some lod + // just a placeholder + framework::LoD lod; + for (auto& item : step_outputs_) { + auto tensor = item.second.Pack(level, dy_seq_metas_[item.first], lod); + auto& output = cache_.outlinks[item.first]->Get(); + const_cast(&output)->ShareDataWith(tensor); + } +} + +void DynamicRecurrentOp::InitStates() const { + // init the first state + // TODO(superjom) parepare the scenerio that boot state not exists + for (auto memory : arg_.memories) { + auto* boot_state_var = cache_.scope->FindVar(memory.boot_var); + PADDLE_ENFORCE_NOT_NULL(boot_state_var); + auto& boot_state = boot_state_var->Get(); + const auto& dims = boot_state.dims(); + + for (size_t step = 0; step < cache_.num_steps; step++) { + auto& cur_scope = cache_.GetScope(step); + // link pre-state to boot_state + // init state and pre-state + auto* pre_state = cur_scope.FindVar(memory.pre_var); + PADDLE_ENFORCE_NOT_NULL(pre_state); + pre_state->GetMutable(); + + auto* state = cur_scope.FindVar(memory.var); + PADDLE_ENFORCE_NOT_NULL(state); + state->GetMutable()->Resize(dims); + state->GetMutable()->mutable_data( + platform::CPUPlace()); + + if (step == 0) { + auto* pre_state_tensor = pre_state->GetMutable(); + pre_state_tensor->Resize(boot_state.dims()); + pre_state_tensor->ShareDataWith(boot_state); + } else { + auto& pre_scope = cache_.GetScope(step - 1); + auto* state_pre = pre_scope.FindVar(memory.var); + PADDLE_ENFORCE_NOT_NULL(state_pre); + pre_state->GetMutable()->ShareDataWith( + *state_pre->GetMutable()); + } + } + } +} + +void DynamicRecurrentOp::ArgCache::Init( + const rnn::ArgumentName& name, const paddle::framework::OperatorBase& op, + const paddle::framework::Scope& scope, rnn::Argument* arg) { + this->scope = &scope; + InitArgument(name, op, arg); + CacheScopes(scope, *arg); + CacheInlinks(scope, arg->inlinks); + CacheOutlinks(scope, arg->outlinks); +} + +void DynamicRecurrentOp::ArgCache::InitArgument(const rnn::ArgumentName& name, + const OperatorBase& op, + rnn::Argument* arg) { + rnn::InitArgument(name, arg, op, false /*is_grad*/); +} + +void DynamicRecurrentOp::ArgCache::CacheScopes(const Scope& scope, + const rnn::Argument& arg) { + auto scopes_var = scope.FindVar(arg.step_scopes); + PADDLE_ENFORCE(scopes_var != nullptr, + "the step_scopes output argument [%s] should be created first " + "by framework.", + arg.step_scopes); + this->scopes = scopes_var->GetMutable>(); +} + +void DynamicRecurrentOp::ArgCache::CacheInlinks( + const Scope& scope, const std::vector& names) { + for (auto name : names) { + auto* var = GetVariable(scope, name); + inlinks[name] = var; + } +} + +void DynamicRecurrentOp::ArgCache::CacheOutlinks( + const Scope& scope, const std::vector& names) { + for (auto name : names) { + auto* var = GetVariable(scope, name); + outlinks[name] = var; + } +} + +Variable* DynamicRecurrentOp::ArgCache::GetVariable(const Scope& scope, + const std::string& name) { + auto* var = scope.FindVar(name); + PADDLE_ENFORCE_NOT_NULL(var, "variable [%s] not exist in scope", name); + return var; +} + +const rnn::ArgumentName DynamicRecurrentOp::kArgName{ + "step_net", "step_scopes", "inlinks", "outlinks", + "memories", "pre_memories", "boot_memories"}; + +void DynamicRecurrentGradientOp::Run( + const Scope& scope, const platform::DeviceContext& dev_ctx) const {} + +} // namespace operators +} // namespace paddle + +REGISTER_OP_WITHOUT_GRADIENT( + dynamic_recurrent, paddle::operators::DynamicRecurrentOp, + paddle::operators::DynamicRecurrentOpProtoAndCheckerMaker); diff --git a/paddle/operators/dynamic_recurrent_op.h b/paddle/operators/dynamic_recurrent_op.h new file mode 100644 index 0000000000..6a2970f27f --- /dev/null +++ b/paddle/operators/dynamic_recurrent_op.h @@ -0,0 +1,158 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#pragma once + +#ifdef PADDLE_WITH_TESTING +#include "gtest/gtest.h" +#endif + +#include "paddle/framework/lod_tensor.h" +#include "paddle/framework/operator.h" +#include "paddle/framework/tensor_array.h" +#include "paddle/framework/variable.h" +#include "paddle/operators/rnn/recurrent_op_utils.h" + +namespace paddle { +namespace operators { + +class DynamicRecurrentOp : public framework::OperatorBase { + public: + static const rnn::ArgumentName kArgName; + using value_type = float; + + DynamicRecurrentOp(const std::string& type, + const framework::VariableNameMap& inputs, + const framework::VariableNameMap& outputs, + const framework::AttributeMap& attrs) + : OperatorBase(type, inputs, outputs, attrs) {} + + DynamicRecurrentOp(const DynamicRecurrentOp& o) + : framework::OperatorBase( + static_cast(o)) { + // TODO(yuyang18): Implement copy ctor well. + PADDLE_THROW("Not implemented"); + } + + void Run(const framework::Scope& scope, + const platform::DeviceContext& dev_ctx) const override; + + /* + * Split the inputs(LoDTensors) to segments for each time step. + */ + void SplitInputs() const; + + /* + * Create step-scopes to store temporary outputs in each time steps. + */ + void CreateScopes() const; + + /* + * Link TensorArray steps to the corresponding variables located in + * step-scopes. + */ + void WriteStepInputs() const; + + /* + * Write output of each step to the corresponding TensorArray. + */ + void WriteStepOutputs() const; + + /* + * Initialize the states, each state will have a corresponding pre-state, + * which share the memory with the state in the previous time state. The + * pre-state in the first time step will be initialized with an zero tensor or + * a tensor in parent scope if is provided. + */ + void InitStates() const; + + /* + * Concatenate outputs in each time step and generate a LoDTensor. + */ + void ConcatOutputs() const; + + /* + * set a stepnet that is created according to a RecurrentOp's stepnet. + */ + void SetStepNet(std::unique_ptr net) { + PADDLE_ENFORCE_NOT_NULL(net); + stepnet_ = std::move(net); + } + const OperatorBase& GetStepNet() const { return *stepnet_; } + + protected: + struct ArgCache { + framework::Scope const* scope; + std::vector* scopes; + std::map inlinks; + std::map outlinks; + + size_t num_steps{0}; + + void Init(const rnn::ArgumentName& name, const OperatorBase& op, + const framework::Scope& scope, rnn::Argument* arg); + + framework::Scope& GetScope(size_t index) { + PADDLE_ENFORCE_LT(index, num_steps); + return *scopes->at(index); + } + + private: + void InitArgument(const rnn::ArgumentName& name, const OperatorBase& op, + rnn::Argument* arg); + void CacheScopes(const framework::Scope& scope, const rnn::Argument& arg); + void CacheInlinks(const framework::Scope& scope, + const std::vector& names); + void CacheOutlinks(const framework::Scope& scope, + const std::vector& names); + framework::Variable* GetVariable(const framework::Scope& scope, + const std::string& name); + }; + + private: + std::unique_ptr stepnet_; + mutable framework::TensorArray states_; + mutable std::map step_inputs_; + mutable std::map step_outputs_; + mutable std::map> + dy_seq_metas_; + mutable rnn::Argument arg_; + mutable ArgCache cache_; + +#ifdef PADDLE_WITH_TESTING + friend class DynamicRecurrentOpTestHelper; + FRIEND_TEST(DynamicRecurrentOpTestHelper, SplitInputs); + FRIEND_TEST(DynamicRecurrentOpTestHelper, CreateCache); + FRIEND_TEST(DynamicRecurrentOpTestHelper, CreateScopes); + FRIEND_TEST(DynamicRecurrentOpTestHelper, WriteStepInputs); + FRIEND_TEST(DynamicRecurrentOpTestHelper, WriteStepOutputs); + FRIEND_TEST(DynamicRecurrentOpTestHelper, InitStates); + FRIEND_TEST(DynamicRecurrentOpTestHelper, ConcatOutputs); +#endif +}; + +class DynamicRecurrentGradientOp : public framework::OperatorBase { + public: + DynamicRecurrentGradientOp(const std::string& type, + const framework::VariableNameMap& inputs, + const framework::VariableNameMap& outputs, + const framework::AttributeMap& attrs) + : OperatorBase(type, inputs, outputs, attrs) {} + + void Run(const framework::Scope& scope, + const platform::DeviceContext& dev_ctx) const override; +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/dynamic_recurrent_op_test.cc b/paddle/operators/dynamic_recurrent_op_test.cc new file mode 100644 index 0000000000..675a7890f3 --- /dev/null +++ b/paddle/operators/dynamic_recurrent_op_test.cc @@ -0,0 +1,222 @@ +#include "paddle/operators/dynamic_recurrent_op.h" + +#include + +#include "paddle/framework/ddim.h" +#include "paddle/framework/lod_tensor.h" +#include "paddle/framework/op_desc.h" +#include "paddle/framework/op_registry.h" +#include "paddle/operators/net_op.h" + +namespace paddle { +namespace operators { + +using framework::Scope; +using framework::TensorArray; +using framework::LoDTensor; +using framework::Variable; + +class TestOp : public framework::OperatorBase { + public: + using framework::OperatorBase::OperatorBase; + DEFINE_OP_CLONE_METHOD(TestOp); + void Run(const Scope& scope, + const platform::DeviceContext& dev_ctx) const override {} +}; + +void OpDescNewVar(const std::string& param_name, + std::initializer_list arguments, + paddle::framework::OpDesc::Var* var) { + var->set_parameter(param_name); + for (auto& arg_name : arguments) { + var->add_arguments(arg_name); + } +} + +// create a LoD tensor in scope with specific dims +LoDTensor* CreateVar(Scope& scope, std::string name, framework::DDim dims, + const platform::Place& place) { + auto* var = scope.NewVar(name); + auto* tensor = var->GetMutable(); + tensor->Resize(dims); + tensor->mutable_data(place); + return tensor; +} + +class DynamicRecurrentOpTestHelper : public ::testing::Test { + protected: + const rnn::ArgumentName argname = DynamicRecurrentOp::kArgName; + + virtual void SetUp() override { + CreateGlobalVariables(); + + auto op_desc = CreateOpDesc(); + op = paddle::framework::OpRegistry::CreateOp(op_desc); + dop = dynamic_cast(op.get()); + InitCacheManually(); + InitStepNet(); + } + + framework::OpDesc CreateOpDesc() { + // create op + paddle::framework::OpDesc op_desc; + op_desc.set_type("dynamic_recurrent"); + + OpDescNewVar(argname.inlinks, {"in0"}, op_desc.add_inputs()); + OpDescNewVar(argname.boot_memories, {"boot_mem"}, op_desc.add_inputs()); + OpDescNewVar(argname.step_scopes, {"step_scopes"}, op_desc.add_outputs()); + OpDescNewVar(argname.outlinks, {"out0"}, op_desc.add_outputs()); + + // set pre-memories + auto pre_memories = op_desc.mutable_attrs()->Add(); + pre_memories->set_name(argname.pre_memories); + pre_memories->set_type(paddle::framework::AttrType::STRINGS); + auto pre_memories_item = pre_memories->add_strings(); + *pre_memories_item = "mem@pre"; + + // set memories + auto memories = op_desc.mutable_attrs()->Add(); + memories->set_name(argname.memories); + memories->set_type(paddle::framework::AttrType::STRINGS); + auto memories_item = memories->add_strings(); + *memories_item = "mem"; + return op_desc; + } + + void CreateGlobalVariables() { + platform::CPUPlace place; + scope.NewVar("step_scopes"); + CreateVar(scope, "boot_mem", framework::make_ddim({10, 20}), place); + // auto* out0 = + CreateVar(scope, "out0", framework::make_ddim({10, 20}), place); + auto* in0 = CreateVar(scope, "in0", framework::make_ddim({10, 8}), place); + // 10 instanes with 4 sentences, length is 4, 3, 2, 1 respectively. + framework::LoD in0_lod(1); + for (int x : std::vector{0, 4, 7, 9, 10}) { + in0_lod[0].push_back(x); + } + in0->set_lod(in0_lod); + in0->Resize(framework::make_ddim({10, 8})); + // set the content, each sentence content is seqid.batchid + // the seqid starts from 0 + int start = 0; + for (size_t seqid = 0; seqid < in0_lod.size() - 1; seqid++) { + for (size_t batchid = 0; + batchid < in0_lod[0][seqid + 1] - in0_lod[0][seqid]; batchid++) { + float v = seqid + batchid * 0.1; + + for (size_t dim = 0; dim < 8; dim++) { + in0->data()[start * 8 + dim] = v; + } + start++; + } + } + } + + void InitCacheManually() { + dop->cache_.Init(DynamicRecurrentOp::kArgName, *dop, scope, &dop->arg_); + } + + void InitStepNet() { + std::unique_ptr stepnet{new NetOp}; + dynamic_cast(stepnet.get()) + ->AppendOp(std::unique_ptr(new TestOp( + "test", {{"inlinks", {"in0"}}, {"boot_memories", {"boot_mem"}}}, + {{"outlinks", {"out0"}}, {"step_scopes", {"step_scopes"}}}, {}))); + dop->SetStepNet(std::move(stepnet)); + } + + protected: + DynamicRecurrentOp* dop; + std::unique_ptr op; + paddle::platform::CPUDeviceContext device_context; + paddle::framework::Scope scope; +}; + +TEST_F(DynamicRecurrentOpTestHelper, CreateCache) { + const rnn::Argument& arg = dop->arg_; + ASSERT_EQ(arg.inlinks.size(), 1UL); + ASSERT_EQ(arg.outlinks.size(), 1UL); +} + +TEST_F(DynamicRecurrentOpTestHelper, SplitInputs) { + dop->SplitInputs(); + auto& in0_ta = dop->step_inputs_["in0"]; + ASSERT_EQ(in0_ta.size(), 4UL); + + const auto& batch0 = in0_ta.Read(0); + const auto& batch1 = in0_ta.Read(1); + const auto& batch2 = in0_ta.Read(2); + const auto& batch3 = in0_ta.Read(3); + EXPECT_EQ(batch0.dims()[0], 4); + EXPECT_EQ(batch1.dims()[0], 3); + EXPECT_EQ(batch2.dims()[0], 2); + EXPECT_EQ(batch3.dims()[0], 1); +} + +TEST_F(DynamicRecurrentOpTestHelper, CreateScopes) { + dop->SplitInputs(); + dop->CreateScopes(); + ASSERT_EQ(dop->cache_.num_steps, 4UL); + ASSERT_EQ(dop->cache_.scopes->size(), 4UL); +} + +TEST_F(DynamicRecurrentOpTestHelper, WriteStepInputs) { + dop->SplitInputs(); + dop->CreateScopes(); + dop->WriteStepInputs(); + + for (size_t step = 0; step < dop->cache_.num_steps; step++) { + auto& scope = dop->cache_.GetScope(step); + for (auto name : std::vector({"in0"})) { + ASSERT_TRUE(scope.FindVar(name) != nullptr); + } + } +} + +TEST_F(DynamicRecurrentOpTestHelper, WriteStepOutputs) { + dop->SplitInputs(); + dop->CreateScopes(); + dop->WriteStepInputs(); + dop->WriteStepOutputs(); + + for (size_t step = 0; step < dop->cache_.num_steps; step++) { + auto& scope = dop->cache_.GetScope(step); + for (auto name : std::vector({"out0"})) { + ASSERT_TRUE(scope.FindVar(name)); + } + } +} + +TEST_F(DynamicRecurrentOpTestHelper, ConcatOutputs) { + // Let's leave this test to python unittest. +} + +TEST_F(DynamicRecurrentOpTestHelper, InitStates) { + dop->SplitInputs(); + dop->CreateScopes(); + dop->WriteStepInputs(); + dop->WriteStepOutputs(); + dop->InitStates(); + + for (size_t step = 0; step < dop->cache_.num_steps; step++) { + auto& scope = dop->cache_.GetScope(step); + auto state = scope.FindVar("mem"); + ASSERT_TRUE(state != nullptr); + + auto* pre_state = scope.FindVar("mem@pre"); + ASSERT_TRUE(pre_state != nullptr); + + auto* boot_state = scope.FindVar("boot_mem"); + ASSERT_TRUE(boot_state != nullptr); + + if (step == 0) { + // check pre_state is a reference of boot_state + ASSERT_EQ(boot_state->Get().data(), + pre_state->Get().data()); + } + } +} + +} // operators +} // namespace paddle From d1479d930eb2067ae08b63f3ad41706cc7265dc8 Mon Sep 17 00:00:00 2001 From: Abhinav Arora Date: Tue, 10 Oct 2017 16:57:50 -0700 Subject: [PATCH 331/342] Fixing errors in the refactorization doc (#4680) --- doc/design/refactorization.md | 45 ++++++++++++++++++----------------- 1 file changed, 23 insertions(+), 22 deletions(-) diff --git a/doc/design/refactorization.md b/doc/design/refactorization.md index 629422e774..ec51aa1a0e 100644 --- a/doc/design/refactorization.md +++ b/doc/design/refactorization.md @@ -17,22 +17,22 @@ The goals of refactoring include: 1. A graph is composed of *variables* and *operators*. -1. The description of graphs must be capable of being serialized/deserialized, so that: +1. The description of graphs must be serializable/deserializable, so that: - 1. It can to be sent to the cloud for distributed execution, and + 1. It can be sent to the cloud for distributed execution, and 1. It can be sent to clients for mobile or enterprise deployment. -1. The Python program does the following steps +1. The Python program does two things - 1. *compilation*: run a Python program to generate a protobuf message representation of the graph and send it to + 1. *Compilation* runs a Python program to generate a protobuf message representation of the graph and send it to 1. the C++ library `libpaddle.so` for local execution, 1. the master process of a distributed training job for training, or 1. the server process of a Kubernetes serving job for distributed serving. - 1. *execution*: execute the graph by constructing instances of class [`Variable`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/variable.h#L24) and [`OperatorBase`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/operator.h#L70), according to the protobuf message. + 1. *Execution* executes the graph by constructing instances of class [`Variable`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/variable.h#L24) and [`OperatorBase`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/operator.h#L70), according to the protobuf message. ## Description and Realization of Computation Graph -At compile time, the Python program generates a protobuf message representation of the graph, or the description of the graph. +At compile time, the Python program generates a protobuf message representation of the graph, or a description of the graph. At runtime, the C++ program realizes the graph and runs it. @@ -42,11 +42,11 @@ At runtime, the C++ program realizes the graph and runs it. |Operation|[OpDesc](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/framework.proto#L35)|[Operator](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/operator.h#L64)| |Block|BlockDesc|Block| -The word *graph* is interchangeable with *block* in this document. A graph represents computation steps and local variables similar to a C++/Java program block, or a pair of parentheses(`{` and `}`). +The word *graph* is interchangeable with *block* in this document. A graph consists of computation steps and local variables similar to a C++/Java program block, or a pair of parentheses(`{` and `}`). ## Compilation and Execution -1. Run an application Python program to describe the graph. In particular, the Python application program does the following: +1. Run a Python program to describe the graph. In particular, the Python application program does the following: 1. Create `VarDesc` to represent local/intermediate variables, 1. Create operators and set attributes, @@ -54,10 +54,10 @@ The word *graph* is interchangeable with *block* in this document. A graph repr 1. Infer the type and the shape of variables, 1. Plan memory-reuse for variables, 1. Generate the backward graph - 1. Optimize the computation graph. - 1. Potentially, split the graph for distributed training. + 1. Add optimization operators to the computation graph. + 1. Optionally, split the graph for distributed training. -1. The invocation of `train` or [`infer`](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/inference.py#L108) methods in the application Python program does the following: +1. The invocation of `train` or [`infer`](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/inference.py#L108) methods in the Python program does the following: 1. Create a new Scope instance in the [scope hierarchy](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/scope.md) for each run of a block, 1. realize local variables defined in the BlockDesc message in the new scope, @@ -107,8 +107,8 @@ Compile Time -> IR -> Runtime ![class_diagram](http://api.paddlepaddle.org/graphviz?dot=https://gist.githubusercontent.com/reyoung/53df507f6749762675dff3e7ce53372f/raw/dd598e8f1976f5759f58af5e5ef94738a6b2e661/op.dot) * `Operator` is the fundamental building block of the user interface. - * Operator stores input/output variable names, and attributes. - * The `InferShape` interface is used to infer the shape of the output variable shapes based on the shapes of the input variables. + * Operator stores input/output variable names and attributes. + * The `InferShape` interface is used to infer the shape of the output variables based on the shapes of the input variables. * Use `Run` to compute the `output` variables from the `input` variables. --- @@ -139,7 +139,7 @@ Compile Time -> IR -> Runtime * Limit the number of `tensor.device(dev) = ` in your code. * `thrust::transform` and `std::transform`. * `thrust` has the same API as C++ standard library. Using `transform`, one can quickly implement customized element-wise kernels. - * `thrust` also has more complex APIs, like `scan`, `reduce`, `reduce_by_key`. + * `thrust`, in addition, supports more complex APIs, like `scan`, `reduce`, `reduce_by_key`. * Hand-writing `GPUKernel` and `CPU` code * Do not write in header (`.h`) files. CPU Kernel should be in cpp source (`.cc`) and GPU kernels should be in cuda (`.cu`) files. (GCC cannot compile GPU code.) --- @@ -185,10 +185,10 @@ Make sure the registration process is executed and linked. 1. Write an Op class and its gradient Op class, if required. 2. Write an Op maker class. In the constructor of this class, describe the inputs, outputs and attributes of the operator. 3. Invoke the macro `REGISTER_OP`. This macro will - 1. Call maker class to complete the `proto` and the `checker` + 1. Call maker class to complete `proto` and `checker` 2. Using the completed `proto` and `checker`, it will add a new key-value pair to the `OpInfoMap` -4. Invoke the `USE` macro in which the Op is used, to make sure that it is linked. +4. Invoke the `USE` macro in which the Op is used to make sure that it is linked. --- # Backward Module (1/2) @@ -199,13 +199,14 @@ Make sure the registration process is executed and linked. --- # Backward Module (2/2) ### Build Backward Network -- **Input**: graph of forward operators -- **Output**: graph of backward operators +- **Input**: a graph of forward operators +- **Output**: a graph of backward operators - **Corner cases in construction** - Shared Variables => insert an `Add` operator to combine gradients - No Gradient => insert a `fill_zero_grad` operator - Recursive NetOp => call `Backward` recursively - RNN Op => recursively call `Backward` on stepnet + - RNN Op => recursively call `Backward` on stepnet --- @@ -215,10 +216,10 @@ Make sure the registration process is executed and linked. * Only dims and data pointers are stored in `Tensor`. * All operations on `Tensor` are written in `Operator` or global functions. * Variable length Tensor design [LoDTensor](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/lod_tensor.md) -* `Variable` instances are the inputs and the outputs of an operator. Not just `Tensor`. +* `Variable` instances are the inputs and the outputs of an operator, not just `Tensor`. * `step_scopes` in RNN is a variable and not a tensor. -* `Scope` is where variables are stores. - * map +* `Scope` is where variables are stored. + * map * `Scope` has a hierarchical structure. The local scope can get variables from its parent scope. --- @@ -246,7 +247,7 @@ Make sure the registration process is executed and linked. --- # Control the migration quality - Compare the performance of migrated models with old ones. -- Follow the google C++ style +- Follow the google C++ style guide. - Build the automatic workflow of generating Python/C++ documentations. - The documentation of layers and ops should be written inside the code. - Take the documentation quality into account when submitting pull requests. From 0a74fed181bea14e6b797e59261631556401d29b Mon Sep 17 00:00:00 2001 From: kavyasrinet Date: Tue, 10 Oct 2017 17:08:01 -0700 Subject: [PATCH 332/342] Correcting few mistakes in the block doc (#4681) --- doc/design/block.md | 74 ++++++++++++++++++++++----------------------- 1 file changed, 36 insertions(+), 38 deletions(-) diff --git a/doc/design/block.md b/doc/design/block.md index 4d5dd4ba95..9c812732d6 100644 --- a/doc/design/block.md +++ b/doc/design/block.md @@ -5,12 +5,12 @@ Both deep learning systems and programming languages help users describe computation procedures. These systems use various representations of computation: - Caffe, Torch, and Paddle: sequences of layers. -- TensorFlow, Caffe2, Mxnet: graphs of operators. +- TensorFlow, Caffe2, Mxnet: graph of operators. - PaddlePaddle: nested blocks, like C++ and Java programs. ## Block in Programming Languages and Deep Learning -In programming languages, a block is a pair of curly braces that includes local variables definitions and a sequence of instructions, or operators. +In programming languages, a block is a pair of curly braces that includes local variables definitions and a sequence of instructions or operators. Blocks work with control flow structures like `if`, `else`, and `for`, which have equivalents in deep learning: @@ -24,14 +24,14 @@ A key difference is that a C++ program describes a one pass computation, whereas ## Stack Frames and the Scope Hierarchy -The existence of the backward makes the execution of a block of traditional programs and PaddlePaddle different to each other: +The existence of the backward pass makes the execution of a block of PaddlePaddle different from traditional programs: -| programming languages | PaddlePaddle | -|-----------------------|-------------------------------| -| stack | scope hierarchy | -| stack frame | scope | -| push at entering block| push at entering block | -| pop at leaving block | destroy at minibatch completes| +| programming languages | PaddlePaddle | +|-----------------------|---------------------------------| +| stack | scope hierarchy | +| stack frame | scope | +| push at entering block| push at entering block | +| pop at leaving block | destroy when minibatch completes| 1. In traditional programs: @@ -42,9 +42,9 @@ The existence of the backward makes the execution of a block of traditional prog 1. In PaddlePaddle - When the execution enters a block, PaddlePaddle adds a new scope, where it realizes variables. - - PaddlePaddle doesn't pop a scope after the execution of the block because variables therein are to be used by the backward pass. So it has a stack forest known as a *scope hierarchy*. + - PaddlePaddle doesn't pop a scope after the execution of the block because variables therein are used by the backward pass. So it has a stack forest known as a *scope hierarchy*. - The height of the highest tree is the maximum depth of nested blocks. - - After the process of a minibatch, PaddlePaddle destroys the scope hierarchy. + - After the processing of a minibatch, PaddlePaddle destroys the scope hierarchy. ## Use Blocks in C++ and PaddlePaddle Programs @@ -94,14 +94,14 @@ with ie.false_block(): o1, o2 = ie(cond) ``` -In both examples, the left branch computes `x+y` and `softmax(x+y)`, the right branch computes `x+1` and `fc(x)`. +In both examples, the left branch computes `x+y` and `softmax(x+y)`, the right branch computes `fc(x)` and `x+1` . -A difference is that variables in the C++ program contain scalar values, whereas those in the PaddlePaddle programs are mini-batches of instances. The `ie.input(true, 0)` invocation returns instances in the 0-th input, `x`, that corresponds to true values in `cond` as the local variable `x`, where `ie.input(false, 0)` returns instances corresponding to false values. +The difference is that variables in the C++ program contain scalar values, whereas those in the PaddlePaddle programs are mini-batches of instances. ### Blocks with `for` and `RNNOp` -The following RNN model from the [RNN design doc](./rnn.md) +The following RNN model in PaddlePaddle from the [RNN design doc](./rnn.md) : ```python x = sequence([10, 20, 30]) # shape=[None, 1] @@ -112,9 +112,9 @@ U = var(0.375, param=true) # shape=[1] rnn = pd.rnn() with rnn.step(): h = rnn.memory(init = m) - hh = rnn.previous_memory(h) + h_prev = rnn.previous_memory(h) a = layer.fc(W, x) - b = layer.fc(U, hh) + b = layer.fc(U, h_prev) s = pd.add(a, b) act = pd.sigmoid(s) rnn.update_memory(h, act) @@ -147,9 +147,9 @@ for (int i = 1; i <= sizeof(x)/sizeof(x[0]); ++i) { ## Compilation and Execution -Like TensorFlow programs, a PaddlePaddle program is written in Python. The first part describes a neural network as a protobuf message, and the rest part executes the message for training or inference. +Like TensorFlow, a PaddlePaddle program is written in Python. The first part describes a neural network as a protobuf message, and the rest executes the message for training or inference. -The generation of this protobuf message is like what a compiler generates a binary executable file. The execution of the message that the OS executes the binary file. +The generation of this protobuf message is similar to how a compiler generates a binary executable file. The execution of the message is similar to how the OS executes the binary file. ## The "Binary Executable File Format" @@ -186,8 +186,8 @@ Also, the RNN operator in above example is serialized into a protobuf message of ``` OpDesc { - inputs = {0} // the index of x - outputs = {5, 3} // indices of act and hidden_out + inputs = {0} // the index of x in vars of BlockDesc above + outputs = {5, 3} // indices of act and hidden_out in vars of BlockDesc above attrs { "memories" : {1} // the index of h "step_net" : @@ -203,14 +203,14 @@ This `OpDesc` value is in the `ops` field of the `BlockDesc` value representing During the generation of the Protobuf message, the Block should store VarDesc (the Protobuf message which describes Variable) and OpDesc (the Protobuf message which describes Operator). VarDesc in a block should have its name scope to avoid local variables affect parent block's name scope. -Child block's name scopes should inherit the parent's so that OpDesc in child block can reference a VarDesc that stored in parent block. For example +Child block's name scopes should inherit the parent's so that OpDesc in child block can reference a VarDesc that stored in parent block. For example: ```python -a = pd.Varaible(shape=[20, 20]) +a = pd.Variable(shape=[20, 20]) b = pd.fc(a, params=["fc.w", "fc.b"]) rnn = pd.create_rnn() -with rnn.stepnet() +with rnn.stepnet(): x = a.as_step_input() # reuse fc's parameter fc_without_b = pd.get_variable("fc.w") @@ -218,17 +218,17 @@ with rnn.stepnet() out = rnn() ``` -the method `pd.get_variable` can help retrieve a Variable by a name, a Variable may store in a parent block, but might be retrieved in a child block, so block should have a variable scope that supports inheritance. +The method `pd.get_variable` can help retrieve a Variable by the name. The Variable may be stored in a parent block, but might be retrieved in a child block, so block should have a variable scope that supports inheritance. In compiler design, the symbol table is a data structure created and maintained by compilers to store information about the occurrence of various entities such as variable names, function names, classes, etc. To store the definition of variables and operators, we define a C++ class `SymbolTable`, like the one used in compilers. -`SymbolTable` can do the following stuff: +`SymbolTable` can do the following: - store the definitions (some names and attributes) of variables and operators, -- to verify if a variable was declared, -- to make it possible to implement type checking (offer Protobuf message pointers to `InferShape` handlers). +- verify if a variable was declared, +- make it possible to implement type checking (offer Protobuf message pointers to `InferShape` handlers). ```c++ @@ -240,19 +240,18 @@ class SymbolTable { OpDesc* NewOp(const string& name=""); - // TODO determine whether name is generated by python or C++ - // currently assume that a unique name will be generated by C++ if the - // argument name left default. + // TODO determine whether name is generated by python or C++. + // Currently assume that a unique name will be generated by C++ if the + // argument name is left default. VarDesc* NewVar(const string& name=""); - // find a VarDesc by name, if recursive true, find parent's SymbolTable + // find a VarDesc by name, if recursive is true, find parent's SymbolTable // recursively. // this interface is introduced to support InferShape, find protobuf messages // of variables and operators, pass pointers into InferShape. - // operator // // NOTE maybe some C++ classes such as VarDescBuilder and OpDescBuilder should - // be proposed and embedded into pybind to enable python operate on C++ pointers. + // be proposed and embedded into pybind to enable python operation on C++ pointers. VarDesc* FindVar(const string& name, bool recursive=true); OpDesc* FindOp(const string& name); @@ -270,7 +269,7 @@ class SymbolTable { After all the description of variables and operators is added into SymbolTable, the block has enough information to run. -The `Block` class takes a `BlockDesc` as input, and provide `Run` and `InferShape` functions. +The `Block` class takes a `BlockDesc` as input, and provides `Run` and `InferShape` functions. ```c++ @@ -302,7 +301,7 @@ public: void CreateVariables(const framework::Scope& scope); void CreateOperators(); - // some other necessary interfaces of NetOp are list below + // some other necessary interfaces of NetOp are listed below // ... private: @@ -316,15 +315,14 @@ private: Block inherits from OperatorBase, which has a Run method. Block's Run method will run its operators sequentially. -There is another important interface called `Eval`, which take some arguments called targets, and generate a minimal graph which takes targets as the end points and creates a new Block, -after `Run`, `Eval` will get the latest value and return the targets. +There is another important interface called `Eval`, which takes some arguments called targets and generates a minimal graph which treats targets as the end points and creates a new Block. After `Run`, `Eval` will get the latest value and return the targets. The definition of Eval is as follows: ```c++ // clean a block description by targets using the corresponding dependency graph. // return a new BlockDesc with minimal number of operators. -// NOTE not return a Block but the block's description so that this can be distributed +// NOTE: The return type is not a Block but the block's description so that this can be distributed // to a cluster. BlockDesc Prune(const BlockDesc& desc, vector targets); From 6604d7cda295cc7978ff227a91a0128a497f14be Mon Sep 17 00:00:00 2001 From: Siddharth Goyal Date: Tue, 10 Oct 2017 17:57:02 -0700 Subject: [PATCH 333/342] Add logsigmoid (numerically stable) and softshrink (#4663) * Add numerically-stable logsigmoid activation * Add softshrink operator * Adjust relative tolerance for grad-check * Address review comments --- paddle/operators/activation_op.cc | 35 ++++++++++ paddle/operators/activation_op.h | 70 ++++++++++++++++++- .../v2/framework/tests/test_activation_op.py | 35 ++++++++++ 3 files changed, 139 insertions(+), 1 deletion(-) diff --git a/paddle/operators/activation_op.cc b/paddle/operators/activation_op.cc index 92db629079..a6bb738af3 100644 --- a/paddle/operators/activation_op.cc +++ b/paddle/operators/activation_op.cc @@ -49,6 +49,18 @@ class SigmoidOpMaker : public framework::OpProtoAndCheckerMaker { } }; +class LogSigmoidOpMaker : public framework::OpProtoAndCheckerMaker { + public: + LogSigmoidOpMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "Input of LogSigmoid operator"); + AddOutput("Y", "Output of LogSigmoid operator"); + AddComment( + "Logsigmoid activation operator, logsigmoid = log (1 / (1 + exp(-x)))"); + } +}; + class ExpOpMaker : public framework::OpProtoAndCheckerMaker { public: ExpOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) @@ -85,6 +97,23 @@ class LeakyReluOpMaker : public framework::OpProtoAndCheckerMaker { } }; +template +class SoftShrinkOpMaker : public framework::OpProtoAndCheckerMaker { + public: + SoftShrinkOpMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "Input of Softshrink operator"); + AddOutput("Y", "Output of Softshrink operator"); + AddComment( + "Softshrink activation operator, " + "softshrink = x - lambda, if x > lambda;" + " x + lambda, if x < lambda; 0 otherwise"); + AddAttr("lambda", "non-negative offset") + .SetDefault(static_cast(0.5f)); + } +}; + class TanhOpMaker : public framework::OpProtoAndCheckerMaker { public: TanhOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) @@ -271,6 +300,9 @@ namespace ops = paddle::operators; REGISTER_OP(sigmoid, ops::ActivationOp, ops::SigmoidOpMaker, sigmoid_grad, ops::ActivationOpGrad); +REGISTER_OP(logsigmoid, ops::ActivationOp, ops::LogSigmoidOpMaker, + logsigmoid_grad, ops::ActivationOpGrad); + REGISTER_OP(exp, ops::ActivationOp, ops::ExpOpMaker, exp_grad, ops::ActivationOpGrad); @@ -283,6 +315,9 @@ REGISTER_OP(tanh, ops::ActivationOp, ops::TanhOpMaker, tanh_grad, REGISTER_OP(tanh_shrink, ops::ActivationOp, ops::TanhShrinkOpMaker, tanh_shrink_grad, ops::ActivationOpGrad); +REGISTER_OP(softshrink, ops::ActivationOp, ops::SoftShrinkOpMaker, + softshrink_grad, ops::ActivationOpGrad); + REGISTER_OP(sqrt, ops::ActivationOp, ops::SqrtOpMaker, sqrt_grad, ops::ActivationOpGrad); diff --git a/paddle/operators/activation_op.h b/paddle/operators/activation_op.h index 123f0c4dbc..70d5a62052 100644 --- a/paddle/operators/activation_op.h +++ b/paddle/operators/activation_op.h @@ -95,6 +95,41 @@ struct SigmoidGradFunctor : public BaseActivationFunctor { } }; +// Originally: logsigmoid(x) = -log (1 + exp(-x)) +// For numerical stability, we can use the log-sum-exp trick: +// https://hips.seas.harvard.edu/blog/2013/01/09/computing-log-sum-exp/ +// We can rewrite the above equation as: +// y = -log( exp(0) + exp(-x)) [since exp(0) = 1] +// = -log( exp(max(-x, 0) - max(-x, 0)) + exp(-x + max(-x, 0) - max(-x, 0))) +// = -log( exp(max(-x, 0)) * exp(-max(-x, 0)) - exp(max(-x, 0)) * exp(-x - +// max(-x, 0))) +// = -log( exp(max(-x, 0)) * (exp(-max(-x, 0)) + exp(-x - max(-x, 0)))) +// = -log( exp(max(-x, 0)) - log(exp(-max(-x, 0)) + exp(-x - max(-x, 0))) +// +// Hence, logsigmoid(x) = - (max(-x, 0) + log(exp(-max(-x, 0)) +// + exp(-x - max(-x, 0)))) +template +struct LogSigmoidFunctor : public BaseActivationFunctor { + template + void operator()(Device d, X x, Y y) const { + auto temp = (-x).cwiseMax(static_cast(0)); // temp = max(-x, 0) + y.device(d) = -temp - (((-temp).exp() + (-x - temp).exp()).log()); + } +}; + +// Originally: f' = exp(-x) / (1 + exp(-x)) +// For numerical stability: f' = exp(-x - max(-x, 0)) / (exp(-max(-x, 0)) + +// exp(-x - max(-x, 0))) +template +struct LogSigmoidGradFunctor : public BaseActivationFunctor { + template + void operator()(Device d, X x, Y y, dY dy, dX dx) const { + auto temp = (-x).cwiseMax(static_cast(0)); // temp = max(-x, 0) + dx.device(d) = + dy * ((-x - temp).exp() / ((-temp).exp() + (-x - temp).exp())); + } +}; + // exp(x) = e^x template struct ExpFunctor : public BaseActivationFunctor { @@ -164,6 +199,37 @@ struct TanhShrinkGradFunctor : public BaseActivationFunctor { } }; +// softshrink(x) = x - lambda, if x > lambda; x + lambda, if x < lambda; 0 +// otherwise +template +struct SoftShrinkFunctor : public BaseActivationFunctor { + float lambda; + typename BaseActivationFunctor::AttrPair GetAttrs() { + return {{"lambda", &lambda}}; + } + + template + void operator()(Device d, X x, Y y) const { + auto temp1 = (x > lambda).template cast().eval(); + auto temp2 = (x < -lambda).template cast().eval(); + y.device(d) = temp1 * (x - lambda) + temp2 * (x + lambda); + } +}; + +template +struct SoftShrinkGradFunctor : public BaseActivationFunctor { + float lambda; + typename BaseActivationFunctor::AttrPair GetAttrs() { + return {{"lambda", &lambda}}; + } + template + void operator()(Device d, X x, Y y, dY dy, dX dx) const { + auto temp1 = (x > lambda).template cast().eval(); + auto temp2 = (x < -lambda).template cast().eval(); + dx.device(d) = dy * (temp1 + temp2).template cast(); + } +}; + // sqrt(x) = x^(1/2) template struct SqrtFunctor : public BaseActivationFunctor { @@ -471,9 +537,11 @@ struct STanhGradFunctor : public BaseActivationFunctor { #define FOR_EACH_KERNEL_FUNCTOR(__macro) \ __macro(sigmoid, SigmoidFunctor, SigmoidGradFunctor); \ + __macro(logsigmoid, LogSigmoidFunctor, LogSigmoidGradFunctor); \ __macro(exp, ExpFunctor, ExpGradFunctor); \ __macro(relu, ReluFunctor, ReluGradFunctor); \ __macro(tanh, TanhFunctor, TanhGradFunctor); \ + __macro(softshrink, SoftShrinkFunctor, SoftShrinkGradFunctor); \ __macro(sqrt, SqrtFunctor, SqrtGradFunctor); \ __macro(abs, AbsFunctor, AbsGradFunctor); \ __macro(reciprocal, ReciprocalFunctor, ReciprocalGradFunctor); \ @@ -484,7 +552,7 @@ struct STanhGradFunctor : public BaseActivationFunctor { __macro(pow, PowFunctor, PowGradFunctor); \ __macro(stanh, STanhFunctor, STanhGradFunctor); \ __macro(softsign, SoftsignFunctor, SoftsignGradFunctor); \ - __macro(leaky_relu, LeakyReluFunctor, LeakyReluGradFunctor); \ __macro(relu6, Relu6Functor, Relu6GradFunctor); \ + __macro(leaky_relu, LeakyReluFunctor, LeakyReluGradFunctor); \ __macro(tanh_shrink, TanhShrinkFunctor, TanhShrinkGradFunctor); \ __macro(elu, ELUFunctor, ELUGradFunctor) diff --git a/python/paddle/v2/framework/tests/test_activation_op.py b/python/paddle/v2/framework/tests/test_activation_op.py index 4528ed555d..9157e00f6e 100644 --- a/python/paddle/v2/framework/tests/test_activation_op.py +++ b/python/paddle/v2/framework/tests/test_activation_op.py @@ -33,6 +33,21 @@ class TestSigmoid(OpTest): self.check_grad(['X'], 'Y', max_relative_error=0.008) +class TestLogSigmoid(OpTest): + def setUp(self): + self.op_type = "logsigmoid" + self.inputs = { + 'X': np.random.uniform(-1, 1, [11, 17]).astype("float32") + } + self.outputs = {'Y': np.log(1 / (1 + np.exp(-self.inputs['X'])))} + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['X'], 'Y', max_relative_error=0.008) + + class TestTanh(OpTest): def setUp(self): self.op_type = "tanh" @@ -63,6 +78,26 @@ class TestTanhShrink(OpTest): self.check_grad(['X'], 'Y', max_relative_error=0.008) +class TestSoftShrink(OpTest): + def setUp(self): + self.op_type = "softshrink" + lambda_val = 0.1 + self.attrs = {'lambda': lambda_val} + self.inputs = { + 'X': np.random.uniform(0.25, 10, [4, 4]).astype("float32") + } + y = np.copy(self.inputs['X']) + y = (y < -lambda_val) * (y + lambda_val) + (y > lambda_val) * ( + y - lambda_val) + self.outputs = {'Y': y} + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['X'], 'Y', max_relative_error=0.007) + + class TestSqrt(OpTest): def setUp(self): self.op_type = "sqrt" From c85d777f879e128a3a9b00ddfc243879a747f5da Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Tue, 10 Oct 2017 22:35:55 +0800 Subject: [PATCH 334/342] follow comments --- paddle/operators/math/CMakeLists.txt | 8 ++++-- paddle/operators/math/vol2col.cc | 2 +- paddle/operators/math/vol2col_test.cc | 40 +++++++-------------------- 3 files changed, 16 insertions(+), 34 deletions(-) diff --git a/paddle/operators/math/CMakeLists.txt b/paddle/operators/math/CMakeLists.txt index d6e8373210..575e89eed8 100644 --- a/paddle/operators/math/CMakeLists.txt +++ b/paddle/operators/math/CMakeLists.txt @@ -1,15 +1,17 @@ if(WITH_GPU) - nv_library(math_function SRCS math_function.cc math_function.cu im2col.cc im2col.cu vol2col.cc vol2col.cu pooling.cc pooling.cu DEPS cblas device_context operator) + nv_library(math_function SRCS math_function.cc math_function.cu im2col.cc im2col.cu pooling.cc pooling.cu DEPS cblas device_context operator) nv_test(math_function_test SRCS math_function_test.cc DEPS math_function tensor) nv_library(softmax SRCS softmax.cc softmax.cu DEPS operator) nv_library(cross_entropy SRCS cross_entropy.cc cross_entropy.cu DEPS operator) + nv_library(vol2col SRCS vol2col.cc vol2col.cu DEPS cblas device_context operator) else() - cc_library(math_function SRCS math_function.cc im2col.cc vol2col.cc pooling.cc DEPS cblas device_context operator) + cc_library(math_function SRCS math_function.cc im2col.cc pooling.cc DEPS cblas device_context operator) cc_test(math_function_test SRCS math_function_test.cc DEPS math_function tensor) cc_library(softmax SRCS softmax.cc DEPS operator) cc_library(cross_entropy SRCS cross_entropy.cc DEPS operator) + cc_library(vol2col SRCS vol2col.cc DEPS cblas device_context operator) endif() cc_test(im2col_test SRCS im2col_test.cc DEPS math_function tensor) -cc_test(vol2col_test SRCS vol2col_test.cc DEPS math_function tensor) +cc_test(vol2col_test SRCS vol2col_test.cc DEPS vol2col tensor) diff --git a/paddle/operators/math/vol2col.cc b/paddle/operators/math/vol2col.cc index 5bad2e8073..e9718a0473 100644 --- a/paddle/operators/math/vol2col.cc +++ b/paddle/operators/math/vol2col.cc @@ -67,7 +67,7 @@ class Vol2ColFunctor { ((c * output_depth + d) * output_height + h) * output_width + w; if (h_pad < 0 || h_pad >= input_height || w_pad < 0 || w_pad >= input_width || d_pad < 0 || d_pad >= input_depth) { - col_data[col_idx] = T(0); + col_data[col_idx] = static_cast(0); } else { int vol_idx = ((c_in * input_depth + d_pad) * input_height + h_pad) * diff --git a/paddle/operators/math/vol2col_test.cc b/paddle/operators/math/vol2col_test.cc index 107a94511f..e3c599da87 100644 --- a/paddle/operators/math/vol2col_test.cc +++ b/paddle/operators/math/vol2col_test.cc @@ -30,12 +30,12 @@ void testVol2col() { context = new paddle::platform::CPUDeviceContext(paddle::platform::CPUPlace()); } else { -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_CUDA context = new paddle::platform::CUDADeviceContext(paddle::platform::GPUPlace()); #else PADDLE_THROW("no GPU support"); -#endif // PADDLE_ONLY_CPU +#endif // PADDLE_WITH_CUDA } /** @@ -89,6 +89,7 @@ void testVol2col() { vol2col(*context, input, output_cfo, stride, stride, stride, padding, padding, padding); + float vol_2_col[] = {0, 1, 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11}; float* out_cfo_ptr; if (paddle::platform::is_cpu_place(*place)) { out_cfo_ptr = output_cfo.data(); @@ -97,24 +98,12 @@ void testVol2col() { out_cfo_ptr = output_tmp.data(); } - EXPECT_EQ(out_cfo_ptr[0], 0); - EXPECT_EQ(out_cfo_ptr[1], 1); - EXPECT_EQ(out_cfo_ptr[2], 1); - EXPECT_EQ(out_cfo_ptr[3], 2); - EXPECT_EQ(out_cfo_ptr[4], 3); - EXPECT_EQ(out_cfo_ptr[5], 4); - EXPECT_EQ(out_cfo_ptr[6], 4); - EXPECT_EQ(out_cfo_ptr[7], 5); - EXPECT_EQ(out_cfo_ptr[8], 6); - EXPECT_EQ(out_cfo_ptr[9], 7); - EXPECT_EQ(out_cfo_ptr[10], 7); - EXPECT_EQ(out_cfo_ptr[11], 8); - EXPECT_EQ(out_cfo_ptr[12], 9); - EXPECT_EQ(out_cfo_ptr[13], 10); - EXPECT_EQ(out_cfo_ptr[14], 10); - EXPECT_EQ(out_cfo_ptr[15], 11); + for (int i = 0; i < 16; ++i) { + EXPECT_EQ(out_cfo_ptr[i], vol_2_col[i]); + } // Col2Vol test + float col_2_vol[] = {0, 2, 2, 3, 8, 5, 6, 14, 8, 9, 20, 11}; memset(input_ptr, 0, 12 * sizeof(float)); if (paddle::platform::is_cpu_place(*place)) { input = input_tmp; @@ -134,18 +123,9 @@ void testVol2col() { in_cfo_ptr = input_tmp.data(); } - EXPECT_EQ(in_cfo_ptr[0], 0); - EXPECT_EQ(in_cfo_ptr[1], 2); - EXPECT_EQ(in_cfo_ptr[2], 2); - EXPECT_EQ(in_cfo_ptr[3], 3); - EXPECT_EQ(in_cfo_ptr[4], 8); - EXPECT_EQ(in_cfo_ptr[5], 5); - EXPECT_EQ(in_cfo_ptr[6], 6); - EXPECT_EQ(in_cfo_ptr[7], 14); - EXPECT_EQ(in_cfo_ptr[8], 8); - EXPECT_EQ(in_cfo_ptr[9], 9); - EXPECT_EQ(in_cfo_ptr[10], 20); - EXPECT_EQ(in_cfo_ptr[11], 11); + for (int i = 0; i < 12; ++i) { + EXPECT_EQ(in_cfo_ptr[i], col_2_vol[i]); + } } TEST(math, vol2col) { From 1d41a6d45a917010f21088d1917040565649f2a5 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Wed, 11 Oct 2017 10:11:45 +0800 Subject: [PATCH 335/342] update paddle/operators/math/CMakeLists.txt --- paddle/operators/math/CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/paddle/operators/math/CMakeLists.txt b/paddle/operators/math/CMakeLists.txt index 575e89eed8..d32924db85 100644 --- a/paddle/operators/math/CMakeLists.txt +++ b/paddle/operators/math/CMakeLists.txt @@ -3,13 +3,13 @@ if(WITH_GPU) nv_test(math_function_test SRCS math_function_test.cc DEPS math_function tensor) nv_library(softmax SRCS softmax.cc softmax.cu DEPS operator) nv_library(cross_entropy SRCS cross_entropy.cc cross_entropy.cu DEPS operator) - nv_library(vol2col SRCS vol2col.cc vol2col.cu DEPS cblas device_context operator) + nv_library(vol2col SRCS vol2col.cc vol2col.cu DEPS device_context operator) else() cc_library(math_function SRCS math_function.cc im2col.cc pooling.cc DEPS cblas device_context operator) cc_test(math_function_test SRCS math_function_test.cc DEPS math_function tensor) cc_library(softmax SRCS softmax.cc DEPS operator) cc_library(cross_entropy SRCS cross_entropy.cc DEPS operator) - cc_library(vol2col SRCS vol2col.cc DEPS cblas device_context operator) + cc_library(vol2col SRCS vol2col.cc DEPS device_context operator) endif() From 1397e17f6b1fe1088af6ab3117eb7b6c5f4adea3 Mon Sep 17 00:00:00 2001 From: kavyasrinet Date: Tue, 10 Oct 2017 19:50:34 -0700 Subject: [PATCH 336/342] Implemented the hardShrink activation (#4653) * Implemented the hardShrink activation * Fixing the unit test --- paddle/operators/activation_op.cc | 21 ++++++++++ paddle/operators/activation_op.h | 38 +++++++++++++++++-- .../v2/framework/tests/test_activation_op.py | 20 ++++++++++ 3 files changed, 76 insertions(+), 3 deletions(-) diff --git a/paddle/operators/activation_op.cc b/paddle/operators/activation_op.cc index a6bb738af3..61a201b6cd 100644 --- a/paddle/operators/activation_op.cc +++ b/paddle/operators/activation_op.cc @@ -137,6 +137,24 @@ class TanhShrinkOpMaker : public framework::OpProtoAndCheckerMaker { } }; +template +class HardShrinkOpMaker : public framework::OpProtoAndCheckerMaker { + public: + HardShrinkOpMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "Input of HardShrink operator"); + AddOutput("Y", "Output of HardShrink operator"); + AddComment( + "HardShrink activation operator, " + "hard_shrink(x) = x if x > lambda" + "hard_shrink(x) = x if x < -lambda" + "hard_shrink(x) = 0 otherwise"); + AddAttr("threshold", "The value of threshold for HardShrink") + .SetDefault(static_cast(0.5)); + } +}; + class SqrtOpMaker : public framework::OpProtoAndCheckerMaker { public: SqrtOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) @@ -357,6 +375,9 @@ REGISTER_OP(pow, ops::ActivationOp, ops::PowOpMaker, pow_grad, REGISTER_OP(stanh, ops::ActivationOp, ops::STanhOpMaker, stanh_grad, ops::ActivationOpGrad); +REGISTER_OP(hard_shrink, ops::ActivationOp, ops::HardShrinkOpMaker, + hard_shrink_grad, ops::ActivationOpGrad); + #define REGISTER_ACTIVATION_CPU_KERNEL(act_type, functor, grad_functor) \ REGISTER_OP_CPU_KERNEL( \ act_type, \ diff --git a/paddle/operators/activation_op.h b/paddle/operators/activation_op.h index 70d5a62052..29f159bbae 100644 --- a/paddle/operators/activation_op.h +++ b/paddle/operators/activation_op.h @@ -199,6 +199,39 @@ struct TanhShrinkGradFunctor : public BaseActivationFunctor { } }; +// tanhshrink(x) = x - tanh(x) +// where tanh(x) = (exp(x) - exp(-x)) / (exp(x) + exp(-x)) +template +struct HardShrinkFunctor : public BaseActivationFunctor { + float threshold; + + typename BaseActivationFunctor::AttrPair GetAttrs() { + return {{"threshold", &threshold}}; + } + template + void operator()(Device d, X x, Y y) const { + auto temp1 = (x < (threshold * -1)).template cast().eval(); + auto temp2 = (x > threshold).template cast().eval(); + y.device(d) = x * (temp1 + temp2); + } +}; + +template +struct HardShrinkGradFunctor : public BaseActivationFunctor { + float threshold; + + typename BaseActivationFunctor::AttrPair GetAttrs() { + return {{"threshold", &threshold}}; + } + + template + void operator()(Device d, X x, Y y, dY dy, dX dx) const { + auto temp1 = (x < (threshold * -1)).template cast().eval(); + auto temp2 = (x > threshold).template cast().eval(); + dx.device(d) = dy * (temp1 + temp2).template cast(); + } +}; + // softshrink(x) = x - lambda, if x > lambda; x + lambda, if x < lambda; 0 // otherwise template @@ -351,8 +384,6 @@ template struct Relu6Functor : public BaseActivationFunctor { float threshold; - // NOTE: Explicit hides the `BaseActivationFunctor::GetAttrs` - // not polymorphism for speed. typename BaseActivationFunctor::AttrPair GetAttrs() { return {{"threshold", &threshold}}; } @@ -555,4 +586,5 @@ struct STanhGradFunctor : public BaseActivationFunctor { __macro(relu6, Relu6Functor, Relu6GradFunctor); \ __macro(leaky_relu, LeakyReluFunctor, LeakyReluGradFunctor); \ __macro(tanh_shrink, TanhShrinkFunctor, TanhShrinkGradFunctor); \ - __macro(elu, ELUFunctor, ELUGradFunctor) + __macro(elu, ELUFunctor, ELUGradFunctor); \ + __macro(hard_shrink, HardShrinkFunctor, HardShrinkGradFunctor) diff --git a/python/paddle/v2/framework/tests/test_activation_op.py b/python/paddle/v2/framework/tests/test_activation_op.py index 9157e00f6e..52e027bd54 100644 --- a/python/paddle/v2/framework/tests/test_activation_op.py +++ b/python/paddle/v2/framework/tests/test_activation_op.py @@ -78,6 +78,26 @@ class TestTanhShrink(OpTest): self.check_grad(['X'], 'Y', max_relative_error=0.008) +class TestHardShrink(OpTest): + def setUp(self): + self.op_type = "hard_shrink" + x = np.random.uniform(-1, 1, [4, 4]).astype("float32") + threshold = 0.5 + + self.inputs = {'X': x} + self.attrs = {'lambda': threshold} + + t = np.copy(x) + t[(t >= -threshold) & (t <= threshold)] = 0 + self.outputs = {'Y': t} + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['X'], 'Y', max_relative_error=0.005) + + class TestSoftShrink(OpTest): def setUp(self): self.op_type = "softshrink" From 696874ac6ee1b2b284d9817988aa4c99f74c0c76 Mon Sep 17 00:00:00 2001 From: Qiao Longfei Date: Tue, 10 Oct 2017 19:54:01 -0700 Subject: [PATCH 337/342] Optimizer Design (#4656) * init optimizer design * fix index * optimize the interface * add a link to python_api.md * optimize the code of Optimizer --- doc/design/optimizer.md | 105 +++++++++++++++++++++++++++++++++++++++ doc/design/python_api.md | 4 ++ 2 files changed, 109 insertions(+) create mode 100644 doc/design/optimizer.md diff --git a/doc/design/optimizer.md b/doc/design/optimizer.md new file mode 100644 index 0000000000..17440fae50 --- /dev/null +++ b/doc/design/optimizer.md @@ -0,0 +1,105 @@ +## Optimizer Design + +### The Problem + +A PaddlePaddle program, or a block, is a sequence of operators operating variables. A training program needs to do three kinds of works: + +1. the forward pass, which computes intermediate results and the cost(s), +1. the backward pass, which derives gradients from intermediate results and costs, and +1. the optimization pass, which update model parameters to optimize the cost(s). + +These works rely on three kinds of operators: + +1. forward operators, +1. gradient operators, and +1. optimization operators. + +It's true that users should be able to create all these operators manually by calling some low-level API, but it would be much more convenient if they could only describe the forward pass and let PaddlePaddle create the backward and optimization operators automatically. + +In this design, we propose a high-level API that automatically derives the optimisation pass and operators from the forward pass. + + +### High-level Python API to describe the training process + +1. User write code to describe the network: + + ```python + images = layer.data("images") + labels = layer.data("labels") + w1 = pd.var("w1") + b1 = pd.var("b1") + hidden = layer.fc(images, w=w1, b=b1) + cost = layer.mse(hidden, labels) + ``` + + The above code snippet will create forward operators in [Block](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/block.md). + + +2. Users create a certain kind of Optimizer with some argument. + + ```python + optimizer = AdagradOptimizer(learing_rate=0.001) + ``` + +3. Users use the optimizer to `minimize` a certain `cost` through updating parameters in parameter_list. + + ```python + opt_op_list = optimizer.minimize(cost, parameter_list=[w1, b1]) + ``` + The above code snippet will create gradient and optimization operators in Block. The return value of `minimize()` is list of optimization operators that will be run by session. + +4. Users use Session/Executor to run this opt_op_list as target to do training. + + ```python + sess.run(target= opt_op_list, ...) + ``` + +#### Optimizer Python interface: + +```python +class Optimizer(object): + """Optimizer Base class. + + """ + + def __init__(self): + pass + + def create_backward_pass(self, loss, parameter_list=None): + """ + create and add gradient Operators in BlockDesc to Compute gradients of `loss` + for parameters in parameter_list + + Args: + loss: an variable generated by cost function. + parameter_list: parameters that need to compute gradient and update to optimize the lost. + + Returns: + list of (parameters, gradients) pair. + """ + return None + + def create_optimization_pass(self, parameters_and_grads): + """Add optimization operators to update gradients to variables. + + Args: + parameters_and_grads: a list of (variable, gradient) pair to update. + + Returns: + optmization_op_list: a list of optimization operator that will update parameter using gradient. + """ + return None + + def minimize(self, loss, parameter_list): + """Add operations to minimize `loss` by updating `parameter_list`. + + This method combines interface `create_backward_pass()` and + `create_optimization_pass()` into one. + """ + params_grads = self.create_backward_pass(loss, parameter_list) + update_ops = self.create_optimization_pass(params_grads) + return update_ops + +``` + +Users can inherit the Optimizer above to create their own Optimizer with some special logic, such as AdagradOptimizer. diff --git a/doc/design/python_api.md b/doc/design/python_api.md index c4665e44fc..56ae1d925a 100644 --- a/doc/design/python_api.md +++ b/doc/design/python_api.md @@ -214,3 +214,7 @@ def fc_layer(input, size, ...): out.writer = op return out ``` + +## Optimizer + +[Optimizer Design Doc](./optimizer.md) From 23407e7a649d07d0b92219d926179f1dcf6f9326 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Tue, 10 Oct 2017 19:57:44 -0700 Subject: [PATCH 338/342] Design doc of SelectedRows (#4652) * Design doc of SelectedRows * Follow comments * Update protobuf message * Follow comments, seperate LoDTensorDesc and SelectedRows Desc --- doc/design/selected_rows.md | 74 +++++++++++++++++++++++++++++++++++++ 1 file changed, 74 insertions(+) create mode 100644 doc/design/selected_rows.md diff --git a/doc/design/selected_rows.md b/doc/design/selected_rows.md new file mode 100644 index 0000000000..9e6f3b20cb --- /dev/null +++ b/doc/design/selected_rows.md @@ -0,0 +1,74 @@ +# Design Doc: Selected Rows + +`SelectedRows` is a kind of sparse tensor data type, which is designed to support `embedding` operators. The gradient of embedding table is a sparse tensor. Only a few rows are non-zero values in that tensor. It is straightforward to represent the sparse tensor by the following sparse tensor data structure: + +```cpp +class SelectedRows { + private: + vector rows_; + Tensor value_; + int height_; +}; +``` + +The field `height_` shows the first dimension of `SelectedRows`. The `rows` are the indices of which rows of `SelectedRows` are non-zeros. The `value_` field is an N-dim tensor and shape is `[rows.size() /* NUM_ROWS */, ...]`, which supplies values for each row. The dimension of `SelectedRows` satisfies `[height_] + value_.shape[1:]`. + +Suppose that a SelectedRows-typed variable `x` has many rows, but only two of them have values -- row 73 is `[1, 2]` and row 84 is `[3, 4]`, the `SelectedRows` representation would be: + +``` +x = SelectedRow { + rows = [73, 84], + value = [[1, 2], [3,4]] +} +``` + + +## SelectedRows in Protobuf + +`SelectedRows` is a kind of `Variable`. `VarDesc` in protobuf should describe the `SelectedRows` information. Only the tensor dimension of a `SelectedRows` will be described in compile-time since the `rows_` and `value_` are related to training data. +So we use `TensorDesc` to unify `data_type` and `dims`. A LodTensorDesc contains a `TensorDesc` and `lod_level`. The description of `SelectedRows` is a Tensor description. + +```proto +message TensorDesc { + required DataType data_type = 1; + repeated int64 dims = 2; // [UNK, 640, 480] is saved as [-1, 640, 480] +} + +message LodTensorDesc { + required TensorDesc tensor = 1; + optional int lod_level = 2; +} + +message VarDesc { + required string name = 1; + enum VarType { + LOD_TENSOR = 0; + SELECTED_ROWS = 1; + } + required VarType type = 2; + optional LodTensorDesc lod_desc = 3; + optional TensorDesc selected_rows_desc = 4; + optional bool persistable = 5 [ default = false ]; +} +``` + +## InferShape for Selected Rows + +Just like `LoD` information, `InferShape` method will inference output tensor type as well. The operator should decide whether its output is a `SelectedRows` or `Dense` tensor. + +For example, the gradient operator of `TableLookup` will always generate `SelectedRows`. Its `InferShape` method should be like following + +```cpp +void TableLookupGrad::InferShape(context) { + ... + context.SetDataType("Embedding.Grad", kSelectedRows); +} +``` + + +## Sparse Operators + +There are several operators should be written to support `SelectedRows`. They are: + +1. Operators which generates `SelectedRows` gradient. e.g. Gradient of `TableLookupOp`. +2. Optimize operators which support `SelectedRows` gradient. e.g. `SGD` or `AdaGrad` for `SelectedRows`. However, there should be only one `SGD` operator. `OpWithKernel::Run` should select a suitable kernel for both `dense` tensor or `SelectedRows`. From f5ac335046feb81529e85cd0c386379746771157 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Wed, 11 Oct 2017 11:02:26 +0800 Subject: [PATCH 339/342] follow comments --- paddle/operators/math/CMakeLists.txt | 5 ++- paddle/operators/math/vol2col_test.cc | 47 +++++++++++++-------------- 2 files changed, 25 insertions(+), 27 deletions(-) diff --git a/paddle/operators/math/CMakeLists.txt b/paddle/operators/math/CMakeLists.txt index d32924db85..2fd559e90a 100644 --- a/paddle/operators/math/CMakeLists.txt +++ b/paddle/operators/math/CMakeLists.txt @@ -3,14 +3,13 @@ if(WITH_GPU) nv_test(math_function_test SRCS math_function_test.cc DEPS math_function tensor) nv_library(softmax SRCS softmax.cc softmax.cu DEPS operator) nv_library(cross_entropy SRCS cross_entropy.cc cross_entropy.cu DEPS operator) - nv_library(vol2col SRCS vol2col.cc vol2col.cu DEPS device_context operator) + nv_library(vol2col SRCS vol2col.cc vol2col.cu DEPS device_context) else() cc_library(math_function SRCS math_function.cc im2col.cc pooling.cc DEPS cblas device_context operator) cc_test(math_function_test SRCS math_function_test.cc DEPS math_function tensor) cc_library(softmax SRCS softmax.cc DEPS operator) cc_library(cross_entropy SRCS cross_entropy.cc DEPS operator) - cc_library(vol2col SRCS vol2col.cc DEPS device_context operator) - + cc_library(vol2col SRCS vol2col.cc DEPS device_context) endif() cc_test(im2col_test SRCS im2col_test.cc DEPS math_function tensor) diff --git a/paddle/operators/math/vol2col_test.cc b/paddle/operators/math/vol2col_test.cc index e3c599da87..81225e9a98 100644 --- a/paddle/operators/math/vol2col_test.cc +++ b/paddle/operators/math/vol2col_test.cc @@ -18,10 +18,9 @@ limitations under the License. */ template void testVol2col() { - paddle::framework::Tensor input_tmp; paddle::framework::Tensor input; - paddle::framework::Tensor output_cfo; - paddle::framework::Tensor output_ocf; + paddle::framework::Tensor input_tmp; + paddle::framework::Tensor output; paddle::framework::Tensor output_tmp; auto* place = new Place(); @@ -44,14 +43,14 @@ void testVol2col() { * [6, 7, 8, * 9, 10, 11]] * - * output_cfo = [0, 1 - * 1, 2 - * 3, 4 - * 4, 5 - * 6, 7 - * 7, 8 - * 9, 10 - * 10, 11] + * output = [0, 1 + * 1, 2 + * 3, 4 + * 4, 5 + * 6, 7 + * 7, 8 + * 9, 10 + * 10, 11] * * col2vol = [[0, 2, 2, * 3, 8, 5] @@ -81,20 +80,20 @@ void testVol2col() { } else { input.CopyFrom(input_tmp, *place); } - output_cfo.mutable_data({1, filter_size, filter_size, filter_size, - output_depth, output_height, output_width}, - *place); + output.mutable_data({1, filter_size, filter_size, filter_size, + output_depth, output_height, output_width}, + *place); paddle::operators::math::Vol2ColFunctor vol2col; - vol2col(*context, input, output_cfo, stride, stride, stride, padding, padding, + vol2col(*context, input, output, stride, stride, stride, padding, padding, padding); float vol_2_col[] = {0, 1, 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11}; float* out_cfo_ptr; if (paddle::platform::is_cpu_place(*place)) { - out_cfo_ptr = output_cfo.data(); + out_cfo_ptr = output.data(); } else { - output_tmp.CopyFrom(output_cfo, paddle::platform::CPUPlace()); + output_tmp.CopyFrom(output, paddle::platform::CPUPlace()); out_cfo_ptr = output_tmp.data(); } @@ -112,25 +111,25 @@ void testVol2col() { } paddle::operators::math::Col2VolFunctor col2vol; - col2vol(*context, input, output_cfo, stride, stride, stride, padding, padding, + col2vol(*context, input, output, stride, stride, stride, padding, padding, padding); - float* in_cfo_ptr; + float* in_ptr; if (paddle::platform::is_cpu_place(*place)) { - in_cfo_ptr = input.data(); + in_ptr = input.data(); } else { input_tmp.CopyFrom(input, paddle::platform::CPUPlace()); - in_cfo_ptr = input_tmp.data(); + in_ptr = input_tmp.data(); } for (int i = 0; i < 12; ++i) { - EXPECT_EQ(in_cfo_ptr[i], col_2_vol[i]); + EXPECT_EQ(in_ptr[i], col_2_vol[i]); } } TEST(math, vol2col) { testVol2col(); -#ifndef PADDLE_ONLY_CPU +#ifdef PADDLE_WITH_CUDA testVol2col(); -#endif +#endif // PADDLE_WITH_CUDA } From 7454ec0400429676edaa46578b024ff4bd4c028e Mon Sep 17 00:00:00 2001 From: Liu Yiqun Date: Wed, 11 Oct 2017 03:48:43 +0000 Subject: [PATCH 340/342] Simplify backward when inserting a sum operator to accumulate all duplicated variables. --- paddle/framework/backward.cc | 28 ++++++---------------------- 1 file changed, 6 insertions(+), 22 deletions(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index 0a4688db9c..063b108500 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -172,30 +172,14 @@ static std::unique_ptr BackwardRecursive( std::to_string(i)); net->ops_[op_offset]->Rename(name, dup_outputs.back()); } - // collect all the offset to append `add` op for each alias - // - // one variable is shared between multiple operators. - // insert add operator one by one, then add it to output - for (size_t output_idx = 0; output_idx < dup_outputs.size() - 1; - ++output_idx) { - auto insert_add_x = dup_outputs[output_idx]; - auto insert_add_y = dup_outputs[output_idx + 1]; - auto insert_add_out = name + "@SHARED@" + std::to_string(output_idx); - // first add op inserted - if (output_idx == dup_outputs.size() - 2) { - insert_add_out = name; - } - if (output_idx != 0) { - insert_add_y = name + "@SHARED@" + std::to_string(output_idx - 1); - } - insert_position.push_back( - {dup_op.back(), - OpRegistry::CreateOp("sum", {{"X", {insert_add_x, insert_add_y}}}, - {{"Out", {insert_add_out}}}, {})}); - } + // collect all the offset for each alias, + // insert a sum operator to add all aliases to output + insert_position.push_back( + {dup_op.back(), OpRegistry::CreateOp("sum", {{"X", dup_outputs}}, + {{"Out", {name}}}, {})}); } - // make sure the inserted `add` ops follow the BFS order. + // make sure the inserted `sum` ops follow the BFS order. insert_position.sort( [](const Pos& l, const Pos& r) { return l.first > r.first; }); From 9995aed114148ff96b8e06f1548cff0445fce628 Mon Sep 17 00:00:00 2001 From: kexinzhao <19hskevin87@gmail.com> Date: Tue, 10 Oct 2017 21:14:48 -0700 Subject: [PATCH 341/342] Implementing Softplus operator (#4690) * implementing softplus * small fix * small fix * small fix * small fix --- paddle/operators/activation_op.cc | 14 ++++++++++ paddle/operators/activation_op.h | 28 +++++++++++++++++++ .../v2/framework/tests/test_activation_op.py | 15 ++++++++++ 3 files changed, 57 insertions(+) diff --git a/paddle/operators/activation_op.cc b/paddle/operators/activation_op.cc index 61a201b6cd..ced14a8923 100644 --- a/paddle/operators/activation_op.cc +++ b/paddle/operators/activation_op.cc @@ -206,6 +206,17 @@ class SquareOpMaker : public framework::OpProtoAndCheckerMaker { } }; +class SoftplusOpMaker : public framework::OpProtoAndCheckerMaker { + public: + SoftplusOpMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "Input of Softplus operator"); + AddOutput("Y", "Output of Softplus operator"); + AddComment("Softplus activation operator, softplus(x) = log(1 + exp(x))"); + } +}; + class SoftsignOpMaker : public framework::OpProtoAndCheckerMaker { public: SoftsignOpMaker(framework::OpProto *proto, @@ -351,6 +362,9 @@ REGISTER_OP(log, ops::ActivationOp, ops::LogOpMaker, log_grad, REGISTER_OP(square, ops::ActivationOp, ops::SquareOpMaker, square_grad, ops::ActivationOpGrad); +REGISTER_OP(softplus, ops::ActivationOp, ops::SoftplusOpMaker, softplus_grad, + ops::ActivationOpGrad); + REGISTER_OP(softsign, ops::ActivationOp, ops::SoftsignOpMaker, softsign_grad, ops::ActivationOpGrad); diff --git a/paddle/operators/activation_op.h b/paddle/operators/activation_op.h index 29f159bbae..f88c9c48eb 100644 --- a/paddle/operators/activation_op.h +++ b/paddle/operators/activation_op.h @@ -407,6 +407,33 @@ struct Relu6GradFunctor : public BaseActivationFunctor { } }; +// softplus(x) = log(1 + exp(x)) +// When x is a very large positive number, exp(x) may explode to inf, +// Using trick below for numerical stability +// https://hips.seas.harvard.edu/blog/2013/01/09/computing-log-sum-exp/ +// Then: softplus(x) = max(x, 0) + log(exp(-max(x, 0)) + exp(x - max(x, 0))) +template +struct SoftplusFunctor : public BaseActivationFunctor { + template + void operator()(Device d, X x, Y y) { + auto temp = x.cwiseMax(static_cast(0)); // temp = max(x, 0) + y.device(d) = temp + (((-temp).exp() + (x - temp).exp()).log()); + } +}; + +// d(softplus(x))/dx = exp(x) / (1 + exp(x)) +// For numerical stability: +// d(softplus(x))/dx = exp(x - max(x, 0)) / (exp(-max(x, 0)) + +// exp(x - max(x, 0))) +template +struct SoftplusGradFunctor : public BaseActivationFunctor { + template + void operator()(Device d, X x, Y y, dY dy, dX dx) { + auto temp = x.cwiseMax(static_cast(0)); // temp = max(x, 0) + dx.device(d) = dy * ((x - temp).exp() / ((-temp).exp() + (x - temp).exp())); + } +}; + // softsign(x) = x / (1 + |x|) template struct SoftsignFunctor : public BaseActivationFunctor { @@ -582,6 +609,7 @@ struct STanhGradFunctor : public BaseActivationFunctor { __macro(soft_relu, SoftReluFunctor, SoftReluGradFunctor); \ __macro(pow, PowFunctor, PowGradFunctor); \ __macro(stanh, STanhFunctor, STanhGradFunctor); \ + __macro(softplus, SoftplusFunctor, SoftplusGradFunctor); \ __macro(softsign, SoftsignFunctor, SoftsignGradFunctor); \ __macro(relu6, Relu6Functor, Relu6GradFunctor); \ __macro(leaky_relu, LeakyReluFunctor, LeakyReluGradFunctor); \ diff --git a/python/paddle/v2/framework/tests/test_activation_op.py b/python/paddle/v2/framework/tests/test_activation_op.py index 52e027bd54..a28c4431e1 100644 --- a/python/paddle/v2/framework/tests/test_activation_op.py +++ b/python/paddle/v2/framework/tests/test_activation_op.py @@ -331,6 +331,21 @@ class TestSTanh(OpTest): self.check_grad(['X'], 'Y', max_relative_error=0.007) +class TestSoftplus(OpTest): + def setUp(self): + self.op_type = "softplus" + self.inputs = { + 'X': np.random.uniform(-1, 1, [11, 17]).astype("float32") + } + self.outputs = {'Y': np.log(1 + np.exp(self.inputs['X']))} + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['X'], 'Y', max_relative_error=0.007) + + class TestSoftsign(OpTest): def setUp(self): self.op_type = "softsign" From 82a2b1a92db573f0021d145a96f4bddbaf0606e8 Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Wed, 11 Oct 2017 15:29:48 +0800 Subject: [PATCH 342/342] fix Compile error [fatal error: boost/range/adaptor/reversed.hpp No such file or directory] --- paddle/framework/executor.cc | 2 -- 1 file changed, 2 deletions(-) diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc index 886e9ab33e..c388b2198e 100644 --- a/paddle/framework/executor.cc +++ b/paddle/framework/executor.cc @@ -24,8 +24,6 @@ limitations under the License. */ #include "paddle/framework/op_registry.h" #include "paddle/framework/scope.h" -#include - namespace paddle { namespace framework {