From 5be454bf330864c7cf5c4a331ded9c0a9c211cad Mon Sep 17 00:00:00 2001
From: "yi.wu" 
Date: Fri, 8 Jun 2018 16:51:06 +0800
Subject: [PATCH 01/56] polish docs

---
 .../framework/details/fuse_vars_op_handle.cc  |  2 +-
 paddle/fluid/operators/crf_decoding_op.cc     | 16 ++---
 paddle/fluid/operators/roi_pool_op.cc         | 10 +++
 paddle/fluid/operators/scale_op.cc            |  1 +
 python/paddle/fluid/layers/io.py              | 31 ++++++++-
 python/paddle/fluid/layers/nn.py              | 67 ++++++++++---------
 python/paddle/fluid/layers/ops.py             |  1 +
 7 files changed, 86 insertions(+), 42 deletions(-)

diff --git a/paddle/fluid/framework/details/fuse_vars_op_handle.cc b/paddle/fluid/framework/details/fuse_vars_op_handle.cc
index 32415c192f..018c9bff71 100644
--- a/paddle/fluid/framework/details/fuse_vars_op_handle.cc
+++ b/paddle/fluid/framework/details/fuse_vars_op_handle.cc
@@ -42,7 +42,7 @@ void FuseVarsOpHandle::RunImpl() {
     out_t->ShareDataWith(out_tensor->Slice(s, s + numel));
     s += numel;
   }
-  this->RunAndRecordEvent([this] {});
+  this->RunAndRecordEvent([] {});
 }
 
 std::string FuseVarsOpHandle::Name() const { return "fuse vars"; }
diff --git a/paddle/fluid/operators/crf_decoding_op.cc b/paddle/fluid/operators/crf_decoding_op.cc
index 40f43936db..a8d7831122 100644
--- a/paddle/fluid/operators/crf_decoding_op.cc
+++ b/paddle/fluid/operators/crf_decoding_op.cc
@@ -54,20 +54,20 @@ The output of this operator changes according to whether Input(Label) is given:
 
 1. Input(Label) is given:
 
-This happens in training. This operator is used to co-work with the chunk_eval
-operator.
+   This happens in training. This operator is used to co-work with the chunk_eval
+   operator.
 
-When Input(Label) is given, the crf_decoding operator returns a row vector
-with shape [N x 1] whose values are fixed to be 0, indicating an incorrect
-prediction, or 1 indicating a tag is correctly predicted. Such an output is the
-input to chunk_eval operator.
+   When Input(Label) is given, the crf_decoding operator returns a row vector
+   with shape [N x 1] whose values are fixed to be 0, indicating an incorrect
+   prediction, or 1 indicating a tag is correctly predicted. Such an output is the
+   input to chunk_eval operator.
 
 2. Input(Label) is not given:
 
-This is the standard decoding process.
+   This is the standard decoding process.
 
 The crf_decoding operator returns a row vector with shape [N x 1] whose values
 range from 0 to maximum tag number - 1. Each element indicates an index of a
 predicted tag.
 )DOC");
   }
diff --git a/paddle/fluid/operators/roi_pool_op.cc b/paddle/fluid/operators/roi_pool_op.cc
index 293abb0ea4..cb8982e57f 100644
--- a/paddle/fluid/operators/roi_pool_op.cc
+++ b/paddle/fluid/operators/roi_pool_op.cc
@@ -141,6 +141,16 @@ class ROIPoolOpMaker : public framework::OpProtoAndCheckerMaker {
     AddComment(R"DOC(
 ROIPool operator
 
+Region of interest pooling (also known as RoI pooling) is to perform
+max pooling on inputs of nonuniform sizes to obtain
+fixed-size feature maps (e.g. 7*7).
+
+The operator has three steps:
+1. Dividing each region proposal into equal-sized sections with
+   the pooled_width and pooled_height
+2. Finding the largest value in each section
+3. Copying these max values to the output buffer
+
 ROI Pooling for Faster-RCNN.
 The link below is a further introduction:
 https://stackoverflow.com/questions/43430056/what-is-roi-layer-in-fast-rcnn
 )DOC");
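For reference, the three steps in the ROIPool DOC above map directly onto array operations. A minimal NumPy sketch of RoI max pooling (illustrative only; the bin rounding and ROI scaling here are simplified assumptions, not the operator's exact kernel):

.. code-block:: python

    import numpy as np

    def roi_max_pool(feature, roi, pooled_h=2, pooled_w=2):
        """Naive RoI max pooling over one (H, W) feature map.

        roi is (x1, y1, x2, y2) in feature-map coordinates; each output
        cell is the max over its (roughly) equal-sized section.
        """
        x1, y1, x2, y2 = roi
        out = np.zeros((pooled_h, pooled_w), dtype=feature.dtype)
        bin_h = (y2 - y1 + 1) / float(pooled_h)  # step 1: divide the proposal
        bin_w = (x2 - x1 + 1) / float(pooled_w)
        for ph in range(pooled_h):
            for pw in range(pooled_w):
                hs = int(y1 + ph * bin_h)
                he = int(np.ceil(y1 + (ph + 1) * bin_h))
                ws = int(x1 + pw * bin_w)
                we = int(np.ceil(x1 + (pw + 1) * bin_w))
                # steps 2-3: find the largest value and copy it to the output
                out[ph, pw] = feature[hs:he, ws:we].max()
        return out

    feature = np.arange(36, dtype=np.float32).reshape(6, 6)
    print(roi_max_pool(feature, roi=(0, 0, 3, 3)))  # 2x2 map of section maxima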
diff --git a/paddle/fluid/operators/scale_op.cc b/paddle/fluid/operators/scale_op.cc
index 4687e21e71..24c217adcb 100644
--- a/paddle/fluid/operators/scale_op.cc
+++ b/paddle/fluid/operators/scale_op.cc
@@ -42,6 +42,7 @@ class ScaleOpMaker : public framework::OpProtoAndCheckerMaker {
     AddOutput("Out", "(Tensor) Output tensor of scale operator.");
     AddComment(R"DOC(
 Scale operator
+Multiply the input tensor by a float scalar to scale it.
 
 $$Out = scale*X$$
 )DOC");
diff --git a/python/paddle/fluid/layers/io.py b/python/paddle/fluid/layers/io.py
index a56f3ea9db..cff074faff 100644
--- a/python/paddle/fluid/layers/io.py
+++ b/python/paddle/fluid/layers/io.py
@@ -108,10 +108,35 @@ class BlockGuardServ(BlockGuard):
 
 class ListenAndServ(object):
     """
-    ListenAndServ class.
+    ListenAndServ layer.
 
-    ListenAndServ class is used to wrap listen_and_serv op to create a server
-    which can receive variables from clients and run a block.
+    ListenAndServ is used to create an RPC server that binds and listens
+    on a specific TCP port. The server will run the sub-block when it
+    receives variables from clients.
+
+    Args:
+        endpoint(string): IP:port string which the server will listen on.
+        inputs(list): a list of variables that the server will get from clients.
+        fan_in(int): how many clients are expected to report to this server, default: 1.
+        optimizer_mode(bool): whether to run the server as a parameter server, default: True.
+
+    Examples:
+        .. code-block:: python
+
+            with fluid.program_guard(main):
+                serv = layers.ListenAndServ(
+                    "127.0.0.1:6170", ["X"], optimizer_mode=False)
+                with serv.do():
+                    x = layers.data(
+                        shape=[32, 32],
+                        dtype='float32',
+                        name="X",
+                        append_batch_size=False)
+                    fluid.initializer.Constant(value=1.0)(x, main.global_block())
+                    layers.scale(x=x, scale=10.0, out=out_var)
+
+            self.server_exe = fluid.Executor(place)
+            self.server_exe.run(main)
     """
 
     def __init__(self, endpoint, inputs, fan_in=1, optimizer_mode=True):
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index ddaeb415af..a1b3503746 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -869,10 +869,17 @@ def crf_decoding(input, param_attr, label=None):
     return viterbi_path
 
 
+@templatedoc()
 def cos_sim(X, Y):
     """
-    This function performs the cosine similarity between two tensors
-    X and Y and returns that as the output.
+    ${comment}
+
+    Args:
+        X(${X_type}): ${X_comment}
+        Y(${Y_type}): ${Y_comment}
+
+    Returns:
+        A Variable contains the output of this layer.
     """
     helper = LayerHelper('cos_sim', **locals())
     out = helper.create_tmp_variable(dtype=X.dtype)
@@ -1059,14 +1066,25 @@ def square_error_cost(input, label):
     return square_out
 
 
+@templatedoc()
 def chunk_eval(input,
                label,
                chunk_scheme,
                num_chunk_types,
                excluded_chunk_types=None):
     """
-    This function computes and outputs the precision, recall and
-    F1-score of chunk detection.
+ ${comment} + + Args: + input(Variable): ${Inference_comment} + label(Variable): ${Label_comment} + chunk_scheme(${chunk_scheme_type}): ${chunk_scheme_comment} + num_chunk_types(${num_chunk_types_type}): ${num_chunk_types_comment} + excluded_chunk_types(${excluded_chunk_types_type}): ${excluded_chunk_types_comment} + + Returns(typle): a tuple of variables: + (precision, recall, f1_score, num_infer_chunks, num_label_chunks, num_correct_chunks) + """ helper = LayerHelper("chunk_eval", **locals()) @@ -1737,6 +1755,7 @@ def beam_search_decode(ids, scores, name=None): return sentence_ids, sentence_scores +@templatedoc() def conv2d_transpose(input, num_filters, output_size=None, @@ -1760,7 +1779,7 @@ def conv2d_transpose(input, Parameters(dilations, strides, paddings) are two elements. These two elements represent height and width, respectively. The details of convolution transpose layer, please refer to the following explanation and references - `therein `_. + `here `_. For each input :math:`X`, the equation is: @@ -1774,7 +1793,7 @@ def conv2d_transpose(input, * :math:`W`: Filter value, a tensor with MCHW format. * :math:`\\ast` : Convolution transpose operation. * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be - different. + different. Example: @@ -2781,6 +2800,7 @@ def edit_distance(input, label, normalized=True, ignored_tokens=None, def ctc_greedy_decoder(input, blank, name=None): """ This op is used to decode sequences by greedy policy by below steps: + 1. Get the indexes of max value for each row in input. a.k.a. numpy.argmax(input, axis=0). 2. For each sequence in result of step1, merge repeated tokens between two @@ -3451,8 +3471,9 @@ def one_hot(input, depth): def autoincreased_step_counter(counter_name=None, begin=1, step=1): """ - NOTE: The counter will be automatically increased by 1 every mini-batch - Return the run counter of the main program, which is started with 1. + Create an auto-increase variable + which will be automatically increased by 1 every mini-batch + Return the run counter of the main program, default is started from 1. Args: counter_name(str): The counter name, default is '@STEP_COUNTER@'. @@ -3866,34 +3887,20 @@ def label_smooth(label, return smooth_label +@templatedoc() def roi_pool(input, rois, pooled_height=1, pooled_width=1, spatial_scale=1.0): """ - Region of interest pooling (also known as RoI pooling) is to perform - is to perform max pooling on inputs of nonuniform sizes to obtain - fixed-size feature maps (e.g. 7*7). - The operator has three steps: - 1. Dividing each region proposal into equal-sized sections with - the pooled_width and pooled_height - 2. Finding the largest value in each section - 3. Copying these max values to the output buffer + ${comment} Args: - input (Variable): The input for ROI pooling. - rois (Variable): ROIs (Regions of Interest) to pool over. It should - be a 2-D one level LoTensor of shape [num_rois, 4]. - The layout is [x1, y1, x2, y2], where (x1, y1) - is the top left coordinates, and (x2, y2) is the - bottom right coordinates. The num_rois is the - total number of ROIs in this batch data. - pooled_height (integer): The pooled output height. Default: 1 - pooled_width (integer): The pooled output width. Default: 1 - spatial_scale (float): Multiplicative spatial scale factor. To - translate ROI coords from their input scale - to the scale used when pooling. 
Default: 1.0
+        input (Variable): ${X_comment}
+        rois (Variable): ${ROIs_comment}
+        pooled_height (integer): ${pooled_height_comment} Default: 1
+        pooled_width (integer): ${pooled_width_comment} Default: 1
+        spatial_scale (float): ${spatial_scale_comment} Default: 1.0
 
     Returns:
-        pool_out (Variable): The output is a 4-D tensor of the shape
-                             (num_rois, channels, pooled_h, pooled_w).
+        pool_out (Variable): ${Out_comment}.
 
     Examples:
         .. code-block:: python
diff --git a/python/paddle/fluid/layers/ops.py b/python/paddle/fluid/layers/ops.py
index 69cfde852d..24b8b86dcf 100644
--- a/python/paddle/fluid/layers/ops.py
+++ b/python/paddle/fluid/layers/ops.py
@@ -73,6 +73,7 @@ __all__ = [
     'sum',
     'polygon_box_transform',
     'shape',
+    'iou_similarity',
 ] + __activations__
 
 for _OP in set(__all__):

From efcff3d9e50ef75e854e8945c3b829e94ddffa50 Mon Sep 17 00:00:00 2001
From: "yi.wu" 
Date: Fri, 8 Jun 2018 17:48:14 +0800
Subject: [PATCH 02/56] polish api ref docs

---
 doc/fluid/api/layers.rst                          | 6 ++++
 .../operators/detection/iou_similarity_op.cc      | 7 +++--
 paddle/fluid/operators/roi_pool_op.cc             | 1 +
 python/paddle/fluid/layers/io.py                  | 4 +--
 python/paddle/fluid/layers/nn.py                  | 28 +++++++++++++------
 5 files changed, 32 insertions(+), 14 deletions(-)

diff --git a/doc/fluid/api/layers.rst b/doc/fluid/api/layers.rst
index f78e6db326..ef8ada407b 100644
--- a/doc/fluid/api/layers.rst
+++ b/doc/fluid/api/layers.rst
@@ -790,6 +790,12 @@ shape
 .. autofunction:: paddle.fluid.layers.shape
     :noindex:
 
+iou_similarity
+--------------
+
+.. autofunction:: paddle.fluid.layers.iou_similarity
+    :noindex:
+
 sigmoid
 -------
 
diff --git a/paddle/fluid/operators/detection/iou_similarity_op.cc b/paddle/fluid/operators/detection/iou_similarity_op.cc
index 8e58605fce..b08c2cdf53 100644
--- a/paddle/fluid/operators/detection/iou_similarity_op.cc
+++ b/paddle/fluid/operators/detection/iou_similarity_op.cc
@@ -69,10 +69,11 @@ class IOUSimilarityOpMaker : public framework::OpProtoAndCheckerMaker {
 
     AddComment(R"DOC(
 IOU Similarity Operator.
+
 Computes intersection-over-union (IOU) between two box lists.
-  Box list 'X' should be a LoDTensor and 'Y' is a common Tensor,
-  boxes in 'Y' are shared by all instance of the batched inputs of X.
-  Given two boxes A and B, the calculation of IOU is as follows:
+Box list 'X' should be a LoDTensor and 'Y' is a common Tensor,
+boxes in 'Y' are shared by all instance of the batched inputs of X.
+Given two boxes A and B, the calculation of IOU is as follows:
 
 $$
 IOU(A, B) =
 \frac{area(A\cap B)}{area(A)+area(B)-area(A\cap B)}
 $$
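As a concrete check of the IOU formula just reconstructed above, a small pure-Python sketch (assumes the usual [x1, y1, x2, y2] box layout):

.. code-block:: python

    def iou(box_a, box_b):
        """Intersection-over-union of two [x1, y1, x2, y2] boxes."""
        ax1, ay1, ax2, ay2 = box_a
        bx1, by1, bx2, by2 = box_b
        # intersection rectangle; width/height clamp to 0 when disjoint
        iw = max(0.0, min(ax2, bx2) - max(ax1, bx1))
        ih = max(0.0, min(ay2, by2) - max(ay1, by1))
        inter = iw * ih
        area_a = (ax2 - ax1) * (ay2 - ay1)
        area_b = (bx2 - bx1) * (by2 - by1)
        return inter / (area_a + area_b - inter)

    print(iou([0, 0, 2, 2], [1, 1, 3, 3]))  # 1 / 7, about 0.143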
diff --git a/paddle/fluid/operators/roi_pool_op.cc b/paddle/fluid/operators/roi_pool_op.cc
index cb8982e57f..a6247a467a 100644
--- a/paddle/fluid/operators/roi_pool_op.cc
+++ b/paddle/fluid/operators/roi_pool_op.cc
@@ -146,6 +146,7 @@ max pooling on inputs of nonuniform sizes to obtain
 fixed-size feature maps (e.g. 7*7).
 
 The operator has three steps:
+
 1. Dividing each region proposal into equal-sized sections with
    the pooled_width and pooled_height
 2. Finding the largest value in each section
diff --git a/python/paddle/fluid/layers/io.py b/python/paddle/fluid/layers/io.py
index cff074faff..ecfaf35296 100644
--- a/python/paddle/fluid/layers/io.py
+++ b/python/paddle/fluid/layers/io.py
@@ -135,8 +135,8 @@ class ListenAndServ(object):
             fluid.initializer.Constant(value=1.0)(x, main.global_block())
             layers.scale(x=x, scale=10.0, out=out_var)
 
-            self.server_exe = fluid.Executor(place)
-            self.server_exe.run(main)
+            exe = fluid.Executor(place)
+            exe.run(main)
     """
 
     def __init__(self, endpoint, inputs, fan_in=1, optimizer_mode=True):
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index a1b3503746..10dba6cafd 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -850,7 +850,9 @@ def crf_decoding(input, param_attr, label=None):
 
     Args:
         input(${emission_type}): ${emission_comment}
+
         param_attr(ParamAttr): The parameter attribute for training.
+
         label(${label_type}): ${label_comment}
 
     Returns:
@@ -875,8 +877,8 @@ def cos_sim(X, Y):
     ${comment}
 
     Args:
-        X(${X_type}): ${X_comment}
-        Y(${Y_type}): ${Y_comment}
+        X(${x_type}): ${x_comment}
+        Y(${y_type}): ${x_comment}
@@ -1076,13 +1078,18 @@ def chunk_eval(input,
     ${comment}
 
     Args:
-        input(Variable): ${Inference_comment}
-        label(Variable): ${Label_comment}
+        input(Variable): ${inference_comment}
+
+        label(Variable): ${label_comment}
+
         chunk_scheme(${chunk_scheme_type}): ${chunk_scheme_comment}
+
         num_chunk_types(${num_chunk_types_type}): ${num_chunk_types_comment}
+
         excluded_chunk_types(${excluded_chunk_types_type}): ${excluded_chunk_types_comment}
 
-    Returns(typle): a tuple of variables:
-        (precision, recall, f1_score, num_infer_chunks, num_label_chunks, num_correct_chunks)
+    Returns:
+        chunk_eval(tuple): a tuple of variables:
+        (precision, recall, f1_score, num_infer_chunks, num_label_chunks, num_correct_chunks)
 
     """
 
@@ -1755,7 +1762,6 @@ def beam_search_decode(ids, scores, name=None):
     return sentence_ids, sentence_scores
 
 
-@templatedoc()
 def conv2d_transpose(input,
                      num_filters,
                      output_size=None,
@@ -3893,14 +3899,18 @@ def roi_pool(input, rois, pooled_height=1, pooled_width=1, spatial_scale=1.0):
     ${comment}
 
     Args:
-        input (Variable): ${X_comment}
-        rois (Variable): ${ROIs_comment}
+        input (Variable): ${x_comment}
+
+        rois (Variable): ROIs (Regions of Interest) to pool over.
+
         pooled_height (integer): ${pooled_height_comment} Default: 1
+
         pooled_width (integer): ${pooled_width_comment} Default: 1
+
         spatial_scale (float): ${spatial_scale_comment} Default: 1.0
 
     Returns:
-        pool_out (Variable): ${Out_comment}.
+        roi_pool (Variable): ${out_comment}.
 
     Examples:
         ..
code-block:: python From e2433207d3575d46cd4a503d98b763252861e3a7 Mon Sep 17 00:00:00 2001 From: "yi.wu" Date: Mon, 11 Jun 2018 14:44:23 +0800 Subject: [PATCH 03/56] update --- python/paddle/fluid/layers/nn.py | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 4485e91237..5f9b1db0f2 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -1079,17 +1079,13 @@ def chunk_eval(input, Args: input(Variable): ${inference_comment} - label(Variable): ${label_comment} - chunk_scheme(${chunk_scheme_type}): ${chunk_scheme_comment} - num_chunk_types(${num_chunk_types_type}): ${num_chunk_types_comment} - excluded_chunk_types(${excluded_chunk_types_type}): ${excluded_chunk_types_comment} Returns: - chunk_eval(tuple): a tuple of variables: + tuple: a tuple of variables: (precision, recall, f1_score, num_infer_chunks, num_label_chunks, num_correct_chunks) """ @@ -3900,13 +3896,9 @@ def roi_pool(input, rois, pooled_height=1, pooled_width=1, spatial_scale=1.0): Args: input (Variable): ${x_comment} - rois (Variable): ROIs (Regions of Interest) to pool over. - pooled_height (integer): ${pooled_height_comment} Default: 1 - pooled_width (integer): ${pooled_width_comment} Default: 1 - spatial_scale (float): ${spatial_scale_comment} Default: 1.0 Returns: From 8e19c324ab82feeea87cd582737ebae5bec95fb8 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Tue, 12 Jun 2018 12:47:11 +0800 Subject: [PATCH 04/56] update split_lod_tensor, create_array and array_length doc --- python/paddle/fluid/layers/control_flow.py | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py index 80e8ff484a..114c1f0ed4 100644 --- a/python/paddle/fluid/layers/control_flow.py +++ b/python/paddle/fluid/layers/control_flow.py @@ -62,6 +62,8 @@ def split_lod_tensor(input, mask, level=0): The output is the true branch and the false branch with the mask applied to the input at a certain level in the tensor. + Mainly used in IfElse to split data into two parts. Related API: IfElse. + Args: input(tuple|list|None): The input tensor that contains complete lod information needed to construct the output. @@ -83,6 +85,7 @@ def split_lod_tensor(input, mask, level=0): out_true, out_false = layers.split_lod_tensor( input=x, mask=y, level=level) + """ helper = LayerHelper('split_lod_tensor', **locals()) out_true = helper.create_tmp_variable(dtype=input.dtype) @@ -887,14 +890,18 @@ def array_write(x, i, array=None): def create_array(dtype): - """This function creates an array of type :math:`LOD_TENSOR_ARRAY` using the - LayerHelper. + """ + **Create LoDTensor Array** + + This function creates an array of type :math:`LOD_TENSOR_ARRAY` using the + LayerHelper. It is mainly used to implement RNN with array_write, array_read + and While. Args: dtype (int|float): The data type of the elements in the array. Returns: - Variable: The tensor variable storing the elements of data type. + Variable: The lod_tensor_array variable storing the elements of data type. Examples: .. code-block:: python @@ -1020,9 +1027,14 @@ def shrink_memory(x, i, table): def array_length(array): - """This function performs the operation to find the length of the input + """ + **Get the length of Input LoDTensorArray** + + This function performs the operation to find the length of the input LOD_TENSOR_ARRAY. 
+ Related API: array_read, array_write, While. + Args: array (LOD_TENSOR_ARRAY): The input array that will be used to compute the length. From 2c1e2caa7d8c225040fdc3674df72afd7313f219 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Tue, 12 Jun 2018 13:35:00 +0800 Subject: [PATCH 05/56] update document --- python/paddle/fluid/layers/control_flow.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py index 114c1f0ed4..15f294698c 100644 --- a/python/paddle/fluid/layers/control_flow.py +++ b/python/paddle/fluid/layers/control_flow.py @@ -60,15 +60,14 @@ def split_lod_tensor(input, mask, level=0): This function takes in an input that contains the complete lod information, and takes in a mask which is used to mask certain parts of the input. The output is the true branch and the false branch with the mask applied to - the input at a certain level in the tensor. - - Mainly used in IfElse to split data into two parts. Related API: IfElse. + the input at a certain level in the tensor. Mainly used in IfElse to split + data into two parts. Args: input(tuple|list|None): The input tensor that contains complete lod information needed to construct the output. mask(list): A bool column vector which masks the input. - level(int): The specific lod level to rank. + level(int): The specific lod level to split. Returns: Variable: The true branch of tensor as per the mask applied to input. @@ -108,8 +107,9 @@ def merge_lod_tensor(in_true, in_false, x, mask, level=0): This function takes in an input :math:`x`, the True branch, the False branch and a binary :math:`mask`. Using this information, this function - merges the True and False branches of the tensor into a single Output - at a certain lod level indiacted by :math:`level`. + merges the True and False branches of the tensor into a single tensor as + output at a certain lod level indicated by :math:`level`. Used in IfElse + to merge the output if True block and False Block. Args: in_true(tuple|list|None): The True branch to be merged. @@ -117,7 +117,7 @@ def merge_lod_tensor(in_true, in_false, x, mask, level=0): x(tuple|list|None): The input tensor that contains complete lod information needed to construct the output. mask(list): A bool column vector which masks the input. - level(int): The specific lod level to rank. + level(int): The specific lod level to merge. Returns: Variable: The merged output tensor. From 4d0fd7e725b259509896f6d891965feec7effee8 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Tue, 12 Jun 2018 14:50:56 +0800 Subject: [PATCH 06/56] add API reference for create_tensor --- python/paddle/fluid/layers/tensor.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/python/paddle/fluid/layers/tensor.py b/python/paddle/fluid/layers/tensor.py index 62b01d595a..6ce486d70d 100644 --- a/python/paddle/fluid/layers/tensor.py +++ b/python/paddle/fluid/layers/tensor.py @@ -39,6 +39,25 @@ __all__ = [ def create_tensor(dtype, name=None, persistable=False): + """ + **Create a Tensor with certain data type and name** + + Args: + dtype (string): 'float32'|'int32'|..., the data type of the + created tensor. + name (string|None): The name of the created tensor, if not set, + the name will be a random unique one. + persistable (bool): Set the persistable flag of the create tensor, + default value is False. + + Returns: + Variable: The tensor variable storing the created tensor. + + Examples: + .. 
code-block:: python
+
+            tensor = fluid.layers.create_tensor(dtype='float32')
+    """
     helper = LayerHelper("create_tensor", **locals())
     return helper.create_variable(
         name=helper.name, dtype=dtype, persistable=persistable)

From 7b54b30be5c4f22f7a96d068f75c04ac04521057 Mon Sep 17 00:00:00 2001
From: "yi.wu" 
Date: Tue, 12 Jun 2018 16:08:26 +0800
Subject: [PATCH 07/56] follow comments

---
 paddle/fluid/operators/linear_chain_crf_op.cc |  2 +
 paddle/fluid/operators/lstm_op.cc             | 28 +++----
 python/paddle/fluid/layers/nn.py              | 79 ++++---------------
 3 files changed, 30 insertions(+), 79 deletions(-)

diff --git a/paddle/fluid/operators/linear_chain_crf_op.cc b/paddle/fluid/operators/linear_chain_crf_op.cc
index a711da3627..ea1ca7f59d 100644
--- a/paddle/fluid/operators/linear_chain_crf_op.cc
+++ b/paddle/fluid/operators/linear_chain_crf_op.cc
@@ -84,6 +84,7 @@ CRF. Please refer to http://www.cs.columbia.edu/~mcollins/fb.pdf and
 http://cseweb.ucsd.edu/~elkan/250Bwinter2012/loglinearCRFs.pdf for details.
 
 Equation:
+
 1. Denote Input(Emission) to this operator as $x$ here.
 2. The first D values of Input(Transition) to this operator are for starting
 weights, denoted as $a$ here.
@@ -106,6 +107,7 @@ Finally, the linear chain CRF operator outputs the logarithm of the conditional
 likelihood of each training sample in a mini-batch.
 
 NOTE:
+
 1. The feature function for a CRF is made up of the emission features and the
 transition features. The emission feature weights are NOT computed in this
 operator. They MUST be computed first before this operator is called.
diff --git a/paddle/fluid/operators/lstm_op.cc b/paddle/fluid/operators/lstm_op.cc
index 4751e3e802..29cec6ae10 100644
--- a/paddle/fluid/operators/lstm_op.cc
+++ b/paddle/fluid/operators/lstm_op.cc
@@ -198,20 +198,20 @@ c_t = f_t \odot c_{t-1} + i_t \odot \tilde{c_t} \\
 h_t = o_t \odot act_h(c_t)
 $$
 
-where the W terms denote weight matrices (e.g. $W_{xi}$ is the matrix
-of weights from the input gate to the input), $W_{ic}, W_{fc}, W_{oc}$
-are diagonal weight matrices for peephole connections. In our implementation,
-we use vectors to reprenset these diagonal weight matrices. The b terms
-denote bias vectors ($b_i$ is the input gate bias vector), $\sigma$
-is the non-line activations, such as logistic sigmoid function, and
-$i, f, o$ and $c$ are the input gate, forget gate, output gate,
-and cell activation vectors, respectively, all of which have the same size as
-the cell output activation vector $h$.
-
-The $\odot$ is the element-wise product of the vectors. $act_g$ and $act_h$
-are the cell input and cell output activation functions and `tanh` is usually
-used for them. $\tilde{c_t}$ is also called candidate hidden state,
-which is computed based on the current input and the previous hidden state.
+- W terms denote weight matrices (e.g. $W_{xi}$ is the matrix
+  of weights from the input gate to the input), $W_{ic}, W_{fc}, W_{oc}$
+  are diagonal weight matrices for peephole connections. In our implementation,
+  we use vectors to represent these diagonal weight matrices.
+- The b terms denote bias vectors ($b_i$ is the input gate bias vector).
+- $\sigma$ is the non-linear activation, such as the logistic sigmoid function.
+- $i, f, o$ and $c$ are the input gate, forget gate, output gate,
+  and cell activation vectors, respectively, all of which have the same size as
+  the cell output activation vector $h$.
+- The $\odot$ is the element-wise product of the vectors.
+- $act_g$ and $act_h$ are the cell input and cell output activation functions + and `tanh` is usually used for them. +- $\tilde{c_t}$ is also called candidate hidden state, + which is computed based on the current input and the previous hidden state. Set `use_peepholes` False to disable peephole connection. The formula is omitted here, please refer to the paper diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 56d25e18f4..40134ed42f 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -262,6 +262,7 @@ def embedding(input, # TODO(qijun): expose H0 and C0 +@templatedoc(op_type="lstm") def dynamic_lstm(input, size, param_attr=None, @@ -274,64 +275,19 @@ def dynamic_lstm(input, dtype='float32', name=None): """ - **Dynamic LSTM Layer** - - The defalut implementation is diagonal/peephole connection - (https://arxiv.org/pdf/1402.1128.pdf), the formula is as follows: - - .. math:: - - i_t & = \sigma(W_{ix}x_{t} + W_{ih}h_{t-1} + W_{ic}c_{t-1} + b_i) - - f_t & = \sigma(W_{fx}x_{t} + W_{fh}h_{t-1} + W_{fc}c_{t-1} + b_f) - - \\tilde{c_t} & = act_g(W_{cx}x_t + W_{ch}h_{t-1} + b_c) - - o_t & = \sigma(W_{ox}x_{t} + W_{oh}h_{t-1} + W_{oc}c_t + b_o) - - c_t & = f_t \odot c_{t-1} + i_t \odot \\tilde{c_t} - - h_t & = o_t \odot act_h(c_t) - - where the :math:`W` terms denote weight matrices (e.g. :math:`W_{xi}` is - the matrix of weights from the input gate to the input), :math:`W_{ic}, \ - W_{fc}, W_{oc}` are diagonal weight matrices for peephole connections. In - our implementation, we use vectors to reprenset these diagonal weight - matrices. The :math:`b` terms denote bias vectors (:math:`b_i` is the input - gate bias vector), :math:`\sigma` is the non-linear activations, such as - logistic sigmoid function, and :math:`i, f, o` and :math:`c` are the input - gate, forget gate, output gate, and cell activation vectors, respectively, - all of which have the same size as the cell output activation vector :math:`h`. - - The :math:`\odot` is the element-wise product of the vectors. :math:`act_g` - and :math:`act_h` are the cell input and cell output activation functions - and `tanh` is usually used for them. :math:`\\tilde{c_t}` is also called - candidate hidden state, which is computed based on the current input and - the previous hidden state. - - Set `use_peepholes` to `False` to disable peephole connection. The formula - is omitted here, please refer to the paper - http://www.bioinf.jku.at/publications/older/2604.pdf for details. - - Note that these :math:`W_{xi}x_{t}, W_{xf}x_{t}, W_{xc}x_{t}, W_{xo}x_{t}` - operations on the input :math:`x_{t}` are NOT included in this operator. - Users can choose to use fully-connect layer before LSTM layer. + ${comment} Args: - input(Variable): The input of dynamic_lstm layer, which supports - variable-time length input sequence. The underlying - tensor in this Variable is a matrix with shape - (T X 4D), where T is the total time steps in this - mini-batch, D is the hidden size. - size(int): 4 * hidden size. - param_attr(ParamAttr|None): The parameter attribute for the learnable + input (Variable): ${input_comment} + size (int): 4 * hidden size. + param_attr (ParamAttr|None): The parameter attribute for the learnable hidden-hidden weights. - Weights = {:math:`W_{ch}, W_{ih}, \ W_{fh}, W_{oh}`} - The shape is (D x 4D), where D is the hidden size. 
- bias_attr(ParamAttr|None): The bias attribute for the learnable bias + bias_attr (ParamAttr|None): The bias attribute for the learnable bias weights, which contains two parts, input-hidden bias weights and peephole connections weights if setting `use_peepholes` to `True`. @@ -343,21 +299,14 @@ def dynamic_lstm(input, - Biases = { :math:`b_c, b_i, b_f, b_o, W_{ic}, \ W_{fc}, W_{oc}`}. - The shape is (1 x 7D). - use_peepholes(bool): Whether to enable diagonal/peephole connections, - default `True`. - is_reverse(bool): Whether to compute reversed LSTM, default `False`. - gate_activation(str): The activation for input gate, forget gate and - output gate. Choices = ["sigmoid", "tanh", "relu", - "identity"], default "sigmoid". - cell_activation(str): The activation for cell output. Choices = ["sigmoid", - "tanh", "relu", "identity"], default "tanh". - candidate_activation(str): The activation for candidate hidden state. - Choices = ["sigmoid", "tanh", - "relu", "identity"], - default "tanh". - dtype(str): Data type. Choices = ["float32", "float64"], default "float32". - name(str|None): A name for this layer(optional). If set None, the layer - will be named automatically. + use_peepholes (bool): ${use_peepholes_comment} + is_reverse (bool): ${is_reverse_comment} + gate_activation (str): ${gate_activation_comment} + cell_activation (str): ${cell_activation_comment} + candidate_activation (str): ${candidate_activation_comment} + dtype (str): Data type. Choices = ["float32", "float64"], default "float32". + name (str|None): A name for this layer(optional). If set None, the layer + will be named automatically. Returns: tuple: The hidden state, and cell state of LSTM. The shape of both \ From d82422997a7c21a9d440fae18c6c60c84a5bff7a Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Tue, 12 Jun 2018 20:07:17 +0800 Subject: [PATCH 08/56] add doc for batch norm --- python/paddle/fluid/layers/nn.py | 51 ++++++++++++++++++++++++++++++-- 1 file changed, 49 insertions(+), 2 deletions(-) diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 9e2c06d26f..6719a4d7ec 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -1541,8 +1541,55 @@ def batch_norm(input, moving_variance_name=None, do_model_average_for_mean_and_var=False): """ - This function helps create an operator to implement - the BatchNorm layer using the configurations from the input parameters. + **Batch Normalization Layer** + + Can be used as a normalizer function for conv2d and fully_connected operations. + The required data format for this layer is one of the following: + 1. NHWC `[batch, in_height, in_width, in_channels]` + 2. NCHW `[batch, in_channels, in_height, in_width]` + + Refer to `Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift + `_ for more details. + + :math:`input` is the input features over a mini-batch. + + .. math:: + + \\mu_{\\beta} &\\gets \\frac{1}{m} \\sum_{i=1}^{m} x_i \\qquad &//\\ + \ mini-batch\ mean \\\\ + \\sigma_{\\beta}^{2} &\\gets \\frac{1}{m} \\sum_{i=1}^{m}(x_i - \\ + \\mu_{\\beta})^2 \\qquad &//\ mini-batch\ variance \\\\ + \\hat{x_i} &\\gets \\frac{x_i - \\mu_\\beta} {\\sqrt{\\ + \\sigma_{\\beta}^{2} + \\epsilon}} \\qquad &//\ normalize \\\\ + y_i &\\gets \\gamma \\hat{x_i} + \\beta \\qquad &//\ scale\ and\ shift + + Args: + input(variable): The input variable which is a LoDTensor. + act(string, default None): Activation type, linear|relu|prelu|... 
+        is_test(bool, default False): Used for training or testing.
+        momentum(float, default 0.9): The value used for the moving_mean and
+            moving_var computation.
+        epsilon(float, default 1e-05): A small value added to the variance to
+            avoid dividing by zero.
+        param_attr(ParamAttr): The parameter attribute for Parameter `scale`.
+        bias_attr(ParamAttr): The parameter attribute for Parameter `bias`.
+        data_layout(string, default NCHW): NCHW|NHWC
+        in_place(bool, default False): Make the input and output of batch norm reuse memory.
+        use_mkldnn(bool, Default false): ${use_mkldnn_comment}
+        name(string, Default None): A name for this layer(optional). If set None, the layer
+            will be named automatically.
+        moving_mean_name(string, Default None): The name of moving_mean which store the global Mean.
+        moving_variance_name(string, Default None): The name of the moving_variance which store the global Variance.
+        do_model_average_for_mean_and_var(bool, Default False): Whether to do
+            model average for mean and variance.
+
+    Returns:
+        Variable: A tensor variable which is the result after applying batch
+        normalization on the input.
+
+    Examples:
+
+        .. code-block:: python
+
+            hidden1 = fluid.layers.fc(input=x, size=200, param_attr='fc1.w')
+            hidden2 = fluid.layers.batch_norm(input=hidden1)
+
     """
     helper = LayerHelper('batch_norm', **locals())
     dtype = helper.input_dtype()

From f3e631cd9e59741f3d2531706080e3a94c979f35 Mon Sep 17 00:00:00 2001
From: qiaolongfei 
Date: Tue, 12 Jun 2018 20:21:18 +0800
Subject: [PATCH 09/56] small update

---
 python/paddle/fluid/layers/nn.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 1a010ab3ac..d5db75ebea 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -4078,7 +4078,7 @@ def image_resize(input,
                  name=None,
                  resample='BILINEAR'):
     """
-    Resize a batch of images.
+    **Resize a batch of images**
 
     The input must be a tensor of the shape (num_batches, channels, in_h, in_w),
     and the resizing only applies on the last two dimensions(hight and width).
@@ -4208,6 +4208,8 @@ def image_resize_short(input, out_short_len, resample='BILINEAR'):
 
 def gather(input, index):
     """
+    **Gather Layer**
+
     Output is obtained by gathering entries of the outer-most dimension
     of X indexed by `index` and concatenate them together.

From e72eb0edec589ce6bee07ec5eb34ee7ceca2af33 Mon Sep 17 00:00:00 2001
From: qiaolongfei 
Date: Tue, 12 Jun 2018 20:23:47 +0800
Subject: [PATCH 10/56] small update

---
 paddle/fluid/operators/detection/polygon_box_transform_op.cc | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/paddle/fluid/operators/detection/polygon_box_transform_op.cc b/paddle/fluid/operators/detection/polygon_box_transform_op.cc
index 335e8dd470..568d50d457 100644
--- a/paddle/fluid/operators/detection/polygon_box_transform_op.cc
+++ b/paddle/fluid/operators/detection/polygon_box_transform_op.cc
@@ -83,11 +83,13 @@ class PolygonBoxTransformOpMaker : public framework::OpProtoAndCheckerMaker {
     AddComment(R"DOC(
 PolygonBoxTransform Operator.
+
+PolygonBoxTransform Operator is used to transform the coordinate shift to the real coordinate.
+
 The input is the final geometry output in detection network.
 We use 2*n numbers to denote the coordinate shift from n corner vertices of
 the polygon_box to the pixel location. As each distance offset contains two numbers (xi, yi),
 the geometry output contains 2*n channels.
-PolygonBoxTransform Operator is used to transform the coordinate shift to the real coordinate.
 )DOC");
   }
 };
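To picture the transform described in the DOC above, a rough NumPy sketch (the channel layout and the sign of the shift are assumptions for illustration, not the operator's exact kernel):

.. code-block:: python

    import numpy as np

    def polygon_box_transform(geometry):
        """geometry: (2n, H, W) coordinate shifts; returns absolute coords.

        Assumes channel 2i holds the x shift and channel 2i+1 the y shift
        from vertex i to the pixel, so the vertex is pixel minus shift.
        """
        two_n, h, w = geometry.shape
        ys, xs = np.mgrid[0:h, 0:w]
        out = np.empty_like(geometry)
        out[0::2] = xs - geometry[0::2]  # x coordinates of the n vertices
        out[1::2] = ys - geometry[1::2]  # y coordinates of the n vertices
        return out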
From d76f8a8f5d06419f3db2647fec8956444ca7c1fe Mon Sep 17 00:00:00 2001
From: qiaolongfei 
Date: Tue, 12 Jun 2018 20:51:56 +0800
Subject: [PATCH 11/56] add doc for Switch

---
 python/paddle/fluid/layers/control_flow.py | 25 +++++++++++++++++++++-
 1 file changed, 24 insertions(+), 1 deletion(-)

diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py
index 114c1f0ed4..7999ee0f80 100644
--- a/python/paddle/fluid/layers/control_flow.py
+++ b/python/paddle/fluid/layers/control_flow.py
@@ -1132,6 +1132,28 @@ class ConditionalBlock(object):
 
 class Switch(object):
+    """
+    **Switch Class**
+
+    Many programming languages provide `switch` as a generalization of `if-elif-else`.
+    The Switch class works just like an `if-elif-else`.
+
+    The semantics:
+
+    1. A `switch` control-flow checks cases one-by-one.
+    2. The condition of each case is a boolean value, which is a scalar.
+    3. It runs the first matched case, or the default case if there is one.
+    4. Once it matches a case, it runs the corresponding branch and only that branch.
+
+    Examples:
+        .. code-block:: python
+
+            with control_flow.Switch() as switch:
+                with switch.case(global_step == zero_var):
+                    tensor.assign(input=one_var, output=div_res)
+
+    """
+
     def __init__(self, name=None):
         self.helper = LayerHelper('switch', name=name)
         self.inside_scope = False
@@ -1161,7 +1183,8 @@ class Switch(object):
         return ConditionalBlockGuard(cond_block)
 
     def default(self):
-        """create a default case for this switch
+        """
+        create a default case for this switch
         """
         pre_cond_num = len(self.pre_not_conditions)
         if pre_cond_num == 0:

From 2c1e2caa7d8c225040fdc3674df72afd7313f219 Mon Sep 17 00:00:00 2001
From: qiaolongfei 
Date: Tue, 12 Jun 2018 21:24:39 +0800
Subject: [PATCH 12/56] refine doc of polynomial_decay

---
 .../fluid/layers/learning_rate_scheduler.py | 31 +++++++++++--------
 1 file changed, 18 insertions(+), 13 deletions(-)

diff --git a/python/paddle/fluid/layers/learning_rate_scheduler.py b/python/paddle/fluid/layers/learning_rate_scheduler.py
index 716cc7824e..2e5cff74c1 100644
--- a/python/paddle/fluid/layers/learning_rate_scheduler.py
+++ b/python/paddle/fluid/layers/learning_rate_scheduler.py
@@ -162,22 +162,27 @@ def polynomial_decay(learning_rate,
                      end_learning_rate=0.0001,
                      power=1.0,
                      cycle=False):
-    """Applies polynomial decay to the initial learning rate.
+    """
+    **polynomial_decay**
+
+    Applies polynomial decay to the initial learning rate.
+
+    .. code-block:: python
+
+        if cycle:
+            decay_steps = decay_steps * ceil(global_step / decay_steps)
+        else:
+            global_step = min(global_step, decay_steps)
+        decayed_learning_rate = (learning_rate - end_learning_rate) *
+            (1 - global_step / decay_steps) ^ power + end_learning_rate
 
-    >>> if cycle:
-    >>>     decay_steps = decay_steps * ceil(global_step / decay_steps)
-    >>> else:
-    >>>     global_step = min(global_step, decay_steps)
-    >>> decayed_learning_rate = (learning_rate - end_learning_rate) *
-    >>>                   (1 - global_step / decay_steps) ^ power +
-    >>>                   end_learning_rate
 
     Args:
-        learning_rate: A scalar float32 value or a Variable. This
+        learning_rate(Variable|float32): A scalar float32 value or a Variable. This
           will be the initial learning rate during training
-        decay_steps: A Python `int32` number.
-        end_learning_rate: A Python `float` number.
-        power: A Python `float` number
-        cycle: Boolean. If set true, decay the learning rate every decay_steps.
+        decay_steps(int32): A Python `int32` number.
+        end_learning_rate(float): A Python `float` number.
+        power(float): A Python `float` number
+        cycle(bool, Default False): Boolean. If set true, decay the learning rate every decay_steps.
 
     Returns:
         The decayed learning rate
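Worked through with hypothetical numbers, the schedule documented above behaves like this plain-Python mirror of the pseudo-code (not the fluid implementation itself):

.. code-block:: python

    import math

    def poly_decay(lr, step, decay_steps, end_lr=0.0001, power=1.0, cycle=False):
        if cycle:
            # guard added here: ceil(0 / decay_steps) would zero out decay_steps
            decay_steps = decay_steps * max(1, int(math.ceil(step / float(decay_steps))))
        else:
            step = min(step, decay_steps)
        return (lr - end_lr) * (1 - step / float(decay_steps)) ** power + end_lr

    for step in (0, 5000, 10000):
        print(step, poly_decay(0.1, step, decay_steps=10000))
    # 0.1 at step 0, about 0.05 halfway, and end_learning_rate at step 10000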
From c6c9c657e0c4d9d2c17129511f6a3ab30a190486 Mon Sep 17 00:00:00 2001
From: fengjiayi 
Date: Wed, 13 Jun 2018 15:35:23 +0800
Subject: [PATCH 13/56] update doc

---
 python/paddle/fluid/layers/control_flow.py    | 36 ++++++---
 .../fluid/layers/learning_rate_scheduler.py   | 70 +++++++++++++-----
 python/paddle/fluid/layers/nn.py              | 73 +++++++++++++------
 python/paddle/fluid/layers/tensor.py          | 42 +++++++----
 4 files changed, 155 insertions(+), 66 deletions(-)

diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py
index 80e8ff484a..f4013b61d0 100644
--- a/python/paddle/fluid/layers/control_flow.py
+++ b/python/paddle/fluid/layers/control_flow.py
@@ -748,16 +748,25 @@ def max_sequence_len(rank_table):
 
 def lod_tensor_to_array(x, table):
-    """ Convert a LOD_TENSOR to an LOD_TENSOR_ARRAY.
+    """
+    Convert a LoDTensor to a LoDTensorArray.
+
+    This function splits a LoDTensor to a LoDTensorArray according to its LoD
+    information. LoDTensorArray is an alias of C++ std::vector<LoDTensor> in
+    Paddle. The generated LoDTensorArray of this function can be further read
+    or written by 'read_from_array()' and 'write_to_array()' operators. However,
+    this function is generally an internal component of Paddle 'DynamicRNN'.
+    Users should not use it directly.
 
     Args:
-        x (Variable|list): The LOD tensor to be converted to a LOD tensor array.
+        x (Variable|list): The LoDTensor to be converted to a LoDTensorArray.
         table (ParamAttr|list): The variable that stores the level of lod
                                 which is ordered by sequence length in
-                                descending order.
+                                descending order. It is generally generated
+                                by 'layers.lod_rank_table()' API.
 
     Returns:
-        Variable: The variable of type array that has been converted from a
+        Variable: The LoDTensorArray that has been converted from the input
                   tensor.
 
     Examples:
@@ -1047,6 +1056,13 @@ def array_length(array):
 
 class ConditionalBlockGuard(BlockGuard):
+    """
+    ConditionalBlockGuard is derived from BlockGuard. It is dedicated for
+    holding a ConditionalBlock, and helping users enter and exit the
+    ConditionalBlock via Python's 'with' keyword. However, ConditionalBlockGuard
+    is generally an internal component of IfElse; users should not use it directly.
+    """
+
     def __init__(self, block):
         if not isinstance(block, ConditionalBlock):
             raise TypeError("block should be conditional block")
@@ -1563,17 +1579,15 @@ def reorder_lod_tensor_by_rank(x, rank_table):
 
 def is_empty(x, cond=None, **ignored):
     """
-    **Is Empty**
-
-    This layer returns the truth value of whether the variable is empty.
+    Test whether a Variable is empty.
 
     Args:
-        x(Variable): Operand of *is_empty*
-        cond(Variable|None): Optional output variable to store the result
-            of *is_empty*
+        x (Variable): The Variable to be tested.
+        cond (Variable|None): Output parameter. Returns the test result
+                              of given 'x'.
 
     Returns:
-        Variable: The tensor variable storing the output of *is_empty*.
+        Variable: The tensor variable storing the test result of 'x'.
Raises: TypeError: If input cond is not a variable, or cond's dtype is diff --git a/python/paddle/fluid/layers/learning_rate_scheduler.py b/python/paddle/fluid/layers/learning_rate_scheduler.py index 716cc7824e..9cbb559093 100644 --- a/python/paddle/fluid/layers/learning_rate_scheduler.py +++ b/python/paddle/fluid/layers/learning_rate_scheduler.py @@ -70,21 +70,40 @@ def noam_decay(d_model, warmup_steps): def exponential_decay(learning_rate, decay_steps, decay_rate, staircase=False): - """Applies exponential decay to the learning rate. + """ + Applies exponential decay to the learning rate. + + When training a model, it is often recommended to lower the learning rate as the + training progresses. By using this function, the learning rate will be decayed by + 'decay_rate' every 'decay_steps' steps. + + >>> if staircase == True: + >>> decayed_learning_rate = learning_rate * decay_rate ^ floor(global_step / decay_steps) + >>> else: + >>> decayed_learning_rate = learning_rate * decay_rate ^ (global_step / decay_steps) - ```python - decayed_learning_rate = learning_rate * - decay_rate ^ (global_step / decay_steps) - ``` Args: - learning_rate: A scalar float32 value or a Variable. This - will be the initial learning rate during training - decay_steps: A Python `int32` number. - decay_rate: A Python `float` number. - staircase: Boolean. If set true, decay the learning rate every decay_steps. + learning_rate(Variable|float): The initial learning rate. + decay_steps(int): See the decay computation above. + decay_rate(float): The decay rate. See the decay computation above. + staircase(Boolean): If True, decay the learning rate at discrete intervals. + Default: False Returns: The decayed learning rate + + Examples: + .. code-block:: python + + base_lr = 0.1 + sgd_optimizer = fluid.optimizer.SGD( + learning_rate=fluid.layers.exponential_decay( + learning_rate=base_lr, + decay_steps=10000, + decay_rate=0.5, + staircase=True)) + sgd_optimizer.minimize(avg_cost) + """ global_step = _decay_step_counter() @@ -128,22 +147,39 @@ def natural_exp_decay(learning_rate, decay_steps, decay_rate, staircase=False): def inverse_time_decay(learning_rate, decay_steps, decay_rate, staircase=False): - """Applies inverse time decay to the initial learning rate. + """ + Applies inverse time decay to the initial learning rate. - >>> if staircase: + When training a model, it is often recommended to lower the learning rate as the + training progresses. By using this function, an inverse decay function will be + applied to the initial learning rate. + + >>> if staircase == True: >>> decayed_learning_rate = learning_rate / (1 + decay_rate * floor(global_step / decay_step)) >>> else: >>> decayed_learning_rate = learning_rate / (1 + decay_rate * global_step / decay_step) Args: - learning_rate: A scalar float32 value or a Variable. This - will be the initial learning rate during training. - decay_steps: A Python `int32` number. - decay_rate: A Python `float` number. - staircase: Boolean. If set true, decay the learning rate every decay_steps. + learning_rate(Variable|float): The initial learning rate. + decay_steps(int): See the decay computation above. + decay_rate(float): The decay rate. See the decay computation above. + staircase(Boolean): If True, decay the learning rate at discrete intervals. + Default: False Returns: The decayed learning rate + + Examples: + .. 
code-block:: python
+
+            base_lr = 0.1
+            sgd_optimizer = fluid.optimizer.SGD(
+                learning_rate=fluid.layers.inverse_time_decay(
+                      learning_rate=base_lr,
+                      decay_steps=10000,
+                      decay_rate=0.5,
+                      staircase=True))
+            sgd_optimizer.minimize(avg_cost)
     """
     global_step = _decay_step_counter()
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index c8cbb5ef00..047c3aa2b7 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -102,14 +102,15 @@ def fc(input,
     """
     **Fully Connected Layer**
 
-    The fully connected layer can take multiple tensors as its inputs. It
-    creates a variable called weights for each input tensor, which represents
-    a fully connected weight matrix from each input unit to each output unit.
-    The fully connected layer multiplies each input tensor with its coresponding
-    weight to produce an output Tensor. If multiple input tensors are given,
-    the results of multiple multiplications will be sumed up. If bias_attr is
-    not None, a bias variable will be created and added to the output. Finally,
-    if activation is not None, it will be applied to the output as well.
+    This function creates a fully connected layer in the network. It can take
+    multiple tensors as its inputs. It creates a variable called weights for
+    each input tensor, which represents a fully connected weight matrix from
+    each input unit to each output unit. The fully connected layer multiplies
+    each input tensor with its corresponding weight to produce an output Tensor.
+    If multiple input tensors are given, the results of multiple multiplications
+    will be summed up. If bias_attr is not None, a bias variable will be created
+    and added to the output. Finally, if activation is not None, it will be applied
+    to the output as well.
 
     This process can be formulated as follows:
 
@@ -878,7 +879,7 @@ def cos_sim(X, Y):
     Args:
         X (Variable): The input X.
         Y (Variable): The input Y.
-
+
     Returns:
         Variable: the output of cosine(X, Y).
     """
@@ -1083,7 +1084,7 @@ def chunk_eval(input,
         chunk_scheme (str): ${chunk_scheme_comment}
         num_chunk_types (int): ${num_chunk_types_comment}
         excluded_chunk_types (list): ${excluded_chunk_types_comment}
-
+
     Returns:
         tuple: tuple containing: (precision, recall, f1_score,
         num_infer_chunks, num_label_chunks,
@@ -1143,7 +1144,7 @@ def sequence_conv(input,
         bias_attr (ParamAttr|None): attributes for bias
         param_attr (ParamAttr|None): attributes for parameter
         act (str): the activation type
-
+
     Returns:
         Variable: output of sequence_conv
     """
@@ -1509,6 +1510,7 @@ def sequence_last_step(input):
     return sequence_pool(input=input, pool_type="last")
 
 
+@templatedoc()
 def pool2d(input,
            pool_size=-1,
           pool_type="max",
           pool_stride=1,
           pool_padding=0,
           global_pooling=False,
           use_cudnn=True,
           ceil_mode=False,
           use_mkldnn=False,
           name=None):
     """
-    This function adds the operator for pooling in 2 dimensions, using the
-    pooling configurations mentioned in input parameters.
+    ${comment}
 
     Args:
         input (Variable): ${input_comment}
-        pool_size (int): ${ksize_comment}
+        pool_size (int): The side length of pooling windows. All pooling
+                         windows are squares with pool_size on a side.
         pool_type (str): ${pooling_type_comment}
         pool_stride (int): stride of the pooling layer.
         pool_padding (int): padding size.
         global_pooling (bool): ${global_pooling_comment}
         use_cudnn (bool): ${use_cudnn_comment}
         ceil_mode (bool): ${ceil_mode_comment}
         use_mkldnn (bool): ${use_mkldnn_comment}
-        name (str): A name for this layer(optional). If set None, the layer
-             will be named automatically.
-
+        name (str|None): A name for this layer(optional).
If set None, the + layer will be named automatically. + Returns: Variable: output of pool2d layer. + + Raises: + ValueError: If 'pool_type' is not "max" nor "avg" + ValueError: If 'global_pooling' is False and 'pool_size' is -1 + ValueError: If 'use_cudnn' is not a bool value. + + Examples: + + .. code-block:: python + + data = fluid.layers.data( + name='data', shape=[3, 32, 32], dtype='float32') + conv2d = fluid.layers.pool2d( + input=data, + pool_size=2, + pool_type='max', + pool_stride=1, + global_pooling=False) """ if pool_type not in ["max", "avg"]: raise ValueError( @@ -1800,7 +1820,7 @@ def beam_search_decode(ids, scores, name=None): ids (Variable): ${ids_comment} scores (Variable): ${scores_comment} name (str): The name of this layer. It is optional. - + Returns: tuple: a tuple of two output variable: sentence_ids, sentence_scores """ @@ -2063,7 +2083,7 @@ def beam_search(pre_ids, ids, scores, beam_size, end_id, level=0): beam_size (int): ${beam_size_comment} end_id (int): ${end_id_comment} level (int): ${level_comment} - + Returns: tuple: a tuple of beam_search output variables: selected_ids, selected_scores ''' @@ -2719,7 +2739,7 @@ def topk(input, k, name=None): This operator is used to find values and indices of the k largest entries for the last dimension. - If the input is a vector (rank=1), finds the k largest entries in the vector + If the input is a vector (1-D Tensor), finds the k largest entries in the vector and outputs their values and indices as vectors. Thus values[j] is the j-th largest entry in input, and its index is indices[j]. @@ -2729,9 +2749,11 @@ def topk(input, k, name=None): Args: input(Variable): The input variable which can be a vector or Tensor with higher rank. - k(int): An integer value to specify the top k largest elements. + k(int): The number of top elements to look for along the last dimension + of input. name(str|None): A name for this layer(optional). If set None, the layer - will be named automatically. + will be named automatically. + Default: None Returns: values(Variable): The k largest elements along each last dimensional @@ -2739,13 +2761,16 @@ def topk(input, k, name=None): indices(Variable): The indices of values within the last dimension of input. + Raises: + ValueError: If k < 1 or k is not less than the last dimension of input + Examples: .. code-block:: python top5_values, top5_indices = layers.topk(input, k=5) """ shape = input.shape - if k < 1 and k >= shape[-1]: + if k < 1 or k >= shape[-1]: raise ValueError("k must be greater than 0 and less than %d." % (shape[-1])) @@ -3045,7 +3070,7 @@ def nce(input, param_attr (ParamAttr|None): attributes for parameter bias_attr (ParamAttr|None): attributes for bias num_neg_samples (int): ${num_neg_samples_comment} - + Returns: Variable: output of nce layer. """ diff --git a/python/paddle/fluid/layers/tensor.py b/python/paddle/fluid/layers/tensor.py index 62b01d595a..e03c8ca914 100644 --- a/python/paddle/fluid/layers/tensor.py +++ b/python/paddle/fluid/layers/tensor.py @@ -79,20 +79,33 @@ def create_global_var(shape, force_cpu=False, name=None): """ - Create a global variable. such as global_step + Create a new variable in the global block(block 0). + Args: shape(list[int]): shape of the variable - value(float): the value of the variable - dtype(string): element type of the parameter - persistable(bool): if this variable is persistable - force_cpu(bool): force this variable to be on CPU + value(float): the value of the variable. The new created + variable will be filled with it. 
+        dtype(string): data type of the variable
+        persistable(bool): if this variable is persistable.
+            Default: False
+        force_cpu(bool): force this variable to be on CPU.
+            Default: False
+        name(str|None): The name of the variable. If set to None the variable
+            name will be generated automatically.
+            Default: None
 
     Returns:
         Variable: the created Variable
+
+    Examples:
+        .. code-block:: python
+
+            var = fluid.create_global_var(shape=[2,3], value=1.0, dtype='float32',
+                                 persistable=True, force_cpu=True, name='new_var')
     """
     helper = LayerHelper("global_var", **locals())
     var = helper.create_global_variable(
-        dtype=dtype, shape=shape, persistable=persistable, name=name)
+        dtype=dtype, shape=shape, persistable=persistable)
     helper.set_variable_initializer(
         var, initializer=Constant(
             value=float(value), force_cpu=force_cpu))
@@ -152,10 +165,11 @@ def sums(input, out=None):
     Args:
         input (Variable|list): The input tensor that has the elements
                                that need to be summed up.
+        out (Variable|None): Output parameter. Returns the sum result.
+                             Default: None
 
     Returns:
-        Variable: The tensor type variable that has the sum of input
-                  written to it.
+        Variable: the sum of input. The same as the argument 'out'
 
     Examples:
         .. code-block:: python
@@ -328,13 +342,13 @@ def argmin(x, axis=0):
         x(Variable): The input to compute the indices of
                      the min elements.
         axis(int): Axis to compute indices along.
-
+
     Returns:
         Variable: The tensor variable storing the output
-
+
     Examples:
         .. code-block:: python
-
+
             out = fluid.layers.argmin(x=in, axis=0)
             out = fluid.layers.argmin(x=in, axis=-1)
     """
@@ -359,13 +373,13 @@ def argmax(x, axis=0):
         x(Variable): The input to compute the indices of
                      the max elements.
         axis(int): Axis to compute indices along.
-
+
     Returns:
         Variable: The tensor variable storing the output
-
+
     Examples:
         .. code-block:: python
-
+
             out = fluid.layers.argmax(x=in, axis=0)
             out = fluid.layers.argmax(x=in, axis=-1)
     """

From c58ba827bb26435628d2e65e632cb98ad760dae8 Mon Sep 17 00:00:00 2001
From: "yi.wu" 
Date: Wed, 13 Jun 2018 21:15:16 +0800
Subject: [PATCH 14/56] update

---
 paddle/fluid/operators/conv_transpose_op.cc |  2 +-
 paddle/fluid/operators/crf_decoding_op.cc   |  3 ---
 paddle/fluid/operators/roi_pool_op.cc       |  2 ++
 python/paddle/fluid/layers/io.py            |  5 +++--
 python/paddle/fluid/layers/nn.py            | 14 ++++++--------
 5 files changed, 12 insertions(+), 14 deletions(-)

diff --git a/paddle/fluid/operators/conv_transpose_op.cc b/paddle/fluid/operators/conv_transpose_op.cc
index 0b363f5c43..2e9e957ebd 100644
--- a/paddle/fluid/operators/conv_transpose_op.cc
+++ b/paddle/fluid/operators/conv_transpose_op.cc
@@ -156,7 +156,7 @@ Parameters(strides, paddings) are two elements. These two elements represent hei
 and width, respectively.
 The input(X) size and output(Out) size may be different.
 
-Example:
+For example:
   Input:
     Input shape: $(N, C_{in}, H_{in}, W_{in})$
    Filter shape: $(C_{in}, C_{out}, H_f, W_f)$
diff --git a/paddle/fluid/operators/crf_decoding_op.cc b/paddle/fluid/operators/crf_decoding_op.cc
index a8d7831122..c27befe114 100644
--- a/paddle/fluid/operators/crf_decoding_op.cc
+++ b/paddle/fluid/operators/crf_decoding_op.cc
@@ -53,17 +53,14 @@ sequence of observed tags.
 The output of this operator changes according to whether Input(Label) is given:
 
 1. Input(Label) is given:
-
    This happens in training. This operator is used to co-work with the chunk_eval
    operator.
-
   When Input(Label) is given, the crf_decoding operator returns a row vector
   with shape [N x 1] whose values are fixed to be 0, indicating an incorrect
   prediction, or 1 indicating a tag is correctly predicted. Such an output is the
   input to chunk_eval operator.
 
 2. Input(Label) is not given:
-
   This is the standard decoding process.
 
 The crf_decoding operator returns a row vector with shape [N x 1] whose values
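In fluid, the training-time pairing that this DOC describes looks roughly like the sketch below (hedged: 'emission' and 'label' are assumed variables from an upstream sequence-tagging network, and the chunk scheme is an arbitrary choice):

.. code-block:: python

    # emission: per-tag scores from the network; label: ground-truth tag ids
    crf_cost = fluid.layers.linear_chain_crf(
        input=emission, label=label,
        param_attr=fluid.ParamAttr(name='crfw'))
    # reuse the learned transition weights; with label given, the decoder
    # emits 0/1 correctness indicators that feed chunk_eval
    crf_decode = fluid.layers.crf_decoding(
        input=emission, label=label,
        param_attr=fluid.ParamAttr(name='crfw'))
    precision, recall, f1, _, _, _ = fluid.layers.chunk_eval(
        input=crf_decode, label=label,
        chunk_scheme='IOB', num_chunk_types=3)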
diff --git a/paddle/fluid/operators/roi_pool_op.cc b/paddle/fluid/operators/roi_pool_op.cc
index a6247a467a..3dec59e247 100644
--- a/paddle/fluid/operators/roi_pool_op.cc
+++ b/paddle/fluid/operators/roi_pool_op.cc
@@ -149,7 +149,9 @@ The operator has three steps:
 
 1. Dividing each region proposal into equal-sized sections with
    the pooled_width and pooled_height
+
 2. Finding the largest value in each section
+
 3. Copying these max values to the output buffer
 
 ROI Pooling for Faster-RCNN.
 The link below is a further introduction:
diff --git a/python/paddle/fluid/layers/io.py b/python/paddle/fluid/layers/io.py
index 65b9c68885..76ef82ddb0 100644
--- a/python/paddle/fluid/layers/io.py
+++ b/python/paddle/fluid/layers/io.py
@@ -109,8 +109,6 @@ class BlockGuardServ(BlockGuard):
 
 class ListenAndServ(object):
     """
-    ListenAndServ layer.
-
     ListenAndServ is used to create an RPC server that binds and listens
     on a specific TCP port. The server will run the sub-block when it
     receives variables from clients.
@@ -121,6 +119,9 @@ class ListenAndServ(object):
         fan_in(int): how many clients are expected to report to this server, default: 1.
         optimizer_mode(bool): whether to run the server as a parameter server, default: True.
 
+    Returns:
+        None
+
     Examples:
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 40134ed42f..4fb3ac4a92 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -806,7 +806,7 @@ def crf_decoding(input, param_attr, label=None):
         label(${label_type}): ${label_comment}
 
     Returns:
-        ${viterbi_path_comment}
+        Variable: ${viterbi_path_comment}
     """
     helper = LayerHelper('crf_decoding', **locals())
     transition = helper.get_parameter(param_attr.name)
@@ -828,7 +828,7 @@ def cos_sim(X, Y):
 
     Args:
         X (Variable): ${x_comment}
-        Y (Variable): ${x_comment}
+        Y (Variable): ${y_comment}
 
     Returns:
         Variable: the output of cosine(X, Y).
@@ -1036,9 +1036,9 @@ def chunk_eval(input,
         excluded_chunk_types (list): ${excluded_chunk_types_comment}
 
     Returns:
-        tuple: tuple containing: (precision, recall, f1_score,
-                                  num_infer_chunks, num_label_chunks,
-                                  num_correct_chunks)
+        tuple: tuple containing: precision, recall, f1_score,
+               num_infer_chunks, num_label_chunks,
+               num_correct_chunks
 
     """
 
@@ -3050,8 +3050,6 @@ def nce(input,
 
 def transpose(x, perm, name=None):
     """
-    **transpose Layer**
-
     Permute the dimensions of `input` according to `perm`.
 
     The `i`-th dimension of the returned tensor will correspond to the
@@ -3918,7 +3916,7 @@ def roi_pool(input, rois, pooled_height=1, pooled_width=1, spatial_scale=1.0):
         spatial_scale (float): ${spatial_scale_comment} Default: 1.0
 
     Returns:
-        roi_pool (Variable): ${out_comment}.
+        Variable: ${out_comment}.
 
     Examples:
         ..
code-block:: python From 76129f03314afb26acae18e7a5838612f6fb28f0 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Wed, 13 Jun 2018 22:16:23 +0800 Subject: [PATCH 15/56] update comment --- python/paddle/fluid/layers/control_flow.py | 7 +++---- python/paddle/fluid/layers/nn.py | 5 +++-- python/paddle/fluid/layers/tensor.py | 7 +++---- 3 files changed, 9 insertions(+), 10 deletions(-) diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py index 7999ee0f80..feac42d94e 100644 --- a/python/paddle/fluid/layers/control_flow.py +++ b/python/paddle/fluid/layers/control_flow.py @@ -893,12 +893,11 @@ def create_array(dtype): """ **Create LoDTensor Array** - This function creates an array of type :math:`LOD_TENSOR_ARRAY` using the - LayerHelper. It is mainly used to implement RNN with array_write, array_read - and While. + This function creates an array of LOD_TENSOR_ARRAY . It is mainly used to + implement RNN with array_write, array_read and While. Args: - dtype (int|float): The data type of the elements in the array. + dtype (int|float): The data type of the elements in the lod_tensor_array. Returns: Variable: The lod_tensor_array variable storing the elements of data type. diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 80452a1e8b..3f3b7e20ef 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -1618,8 +1618,9 @@ def batch_norm(input, 1. NHWC `[batch, in_height, in_width, in_channels]` 2. NCHW `[batch, in_channels, in_height, in_width]` - Refer to `Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift - `_ for more details. + Refer to `Batch Normalization: Accelerating Deep Network Training by Reducing + Internal Covariate Shift `_ + for more details. :math:`input` is the input features over a mini-batch. diff --git a/python/paddle/fluid/layers/tensor.py b/python/paddle/fluid/layers/tensor.py index 6ce486d70d..6b7f69807b 100644 --- a/python/paddle/fluid/layers/tensor.py +++ b/python/paddle/fluid/layers/tensor.py @@ -40,15 +40,14 @@ __all__ = [ def create_tensor(dtype, name=None, persistable=False): """ - **Create a Tensor with certain data type and name** + **Create a Tensor** Args: dtype (string): 'float32'|'int32'|..., the data type of the created tensor. - name (string|None): The name of the created tensor, if not set, + name (string, Default: None): The name of the created tensor, if not set, the name will be a random unique one. - persistable (bool): Set the persistable flag of the create tensor, - default value is False. + persistable (bool, Default: False): Set the persistable flag of the create tensor. Returns: Variable: The tensor variable storing the created tensor. From 0ae670917489d24e25e648c85df6a0f8a110f979 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Thu, 14 Jun 2018 10:49:07 +0800 Subject: [PATCH 16/56] update document --- python/paddle/fluid/layers/control_flow.py | 16 +++++++++------- .../fluid/layers/learning_rate_scheduler.py | 10 +++++----- python/paddle/fluid/layers/nn.py | 2 ++ 3 files changed, 16 insertions(+), 12 deletions(-) diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py index feac42d94e..5354582aaa 100644 --- a/python/paddle/fluid/layers/control_flow.py +++ b/python/paddle/fluid/layers/control_flow.py @@ -76,13 +76,13 @@ def split_lod_tensor(input, mask, level=0): Examples: .. 
code-block:: python - x = layers.data(name='x', shape=[1]) + x = fluid.layers.data(name='x', shape=[1]) x.persistable = True - y = layers.data(name='y', shape=[1]) + y = fluid.layers.data(name='y', shape=[1]) y.persistable = True - out_true, out_false = layers.split_lod_tensor( + out_true, out_false = fluid.layers.split_lod_tensor( input=x, mask=y, level=level) """ @@ -891,7 +891,7 @@ def array_write(x, i, array=None): def create_array(dtype): """ - **Create LoDTensor Array** + **Create LoDTensorArray** This function creates an array of LOD_TENSOR_ARRAY . It is mainly used to implement RNN with array_write, array_read and While. @@ -989,7 +989,8 @@ def array_read(array, i): Returns: Variable: The tensor type variable that has the data written to it. Examples: - .. code-block::python + .. code-block:: python + tmp = fluid.layers.zeros(shape=[10], dtype='int32') i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10) arr = layers.array_read(tmp, i=i) @@ -1027,7 +1028,7 @@ def shrink_memory(x, i, table): def array_length(array): """ - **Get the length of Input LoDTensorArray** + **Get the Length of Input LoDTensorArray** This function performs the operation to find the length of the input LOD_TENSOR_ARRAY. @@ -1042,12 +1043,13 @@ def array_length(array): Variable: The length of the input LoDTensorArray. Examples: - .. code-block::python + .. code-block:: python tmp = fluid.layers.zeros(shape=[10], dtype='int32') i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10) arr = fluid.layers.array_write(tmp, i=i) arr_len = fluid.layers.array_length(arr) + """ helper = LayerHelper('array_length', **locals()) tmp = helper.create_tmp_variable(dtype='int64') diff --git a/python/paddle/fluid/layers/learning_rate_scheduler.py b/python/paddle/fluid/layers/learning_rate_scheduler.py index 2e5cff74c1..2dbc51c23f 100644 --- a/python/paddle/fluid/layers/learning_rate_scheduler.py +++ b/python/paddle/fluid/layers/learning_rate_scheduler.py @@ -163,11 +163,11 @@ def polynomial_decay(learning_rate, power=1.0, cycle=False): """ - **polynomial_decay** + **Polynomial Decay** Applies polynomial decay to the initial learning rate. - .. code-block::python + .. code-block:: python if cycle: decay_steps = decay_steps * ceil(global_step / decay_steps) @@ -180,9 +180,9 @@ def polynomial_decay(learning_rate, learning_rate(Variable|float32): A scalar float32 value or a Variable. This will be the initial learning rate during training decay_steps(int32): A Python `int32` number. - end_learning_rate(float): A Python `float` number. - power(float): A Python `float` number - cycle(bool, Default False): Boolean. If set true, decay the learning rate every decay_steps. + end_learning_rate(float, Default: 0.0001): A Python `float` number. + power(float, Default: 1.0): A Python `float` number + cycle(bool, Default: False): Boolean. If set true, decay the learning rate every decay_steps. Returns: The decayed learning rate diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 3f3b7e20ef..7c4393c4d9 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -1615,7 +1615,9 @@ def batch_norm(input, Can be used as a normalizer function for conv2d and fully_connected operations. The required data format for this layer is one of the following: + 1. NHWC `[batch, in_height, in_width, in_channels]` + 2. 
NCHW `[batch, in_channels, in_height, in_width]`
 
     Refer to `Batch Normalization: Accelerating Deep Network Training by Reducing
     Internal Covariate Shift <https://arxiv.org/abs/1502.03167>`_
     for more details.
 
     :math:`input` is the input features over a mini-batch.
From 21ecd357cffb7165813ffa65b2ab7c810eddfece Mon Sep 17 00:00:00 2001
From: qiaolongfei 
Date: Thu, 14 Jun 2018 11:01:07 +0800
Subject: [PATCH 17/56] little optimize
---
 python/paddle/fluid/layers/nn.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 7c4393c4d9..627718f87e 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -4099,7 +4099,7 @@ def image_resize(input,
                  name=None,
                  resample='BILINEAR'):
     """
-    **Resize a batch of images**
+    **Resize a Batch of Images**
 
     The input must be a tensor of the shape (num_batches, channels, in_h, in_w),
     and the resizing only applies on the last two dimensions (height and width).
From 62bf672eddfdbd8c9287292c5ddae80d4eae2af4 Mon Sep 17 00:00:00 2001
From: qiaolongfei 
Date: Thu, 14 Jun 2018 11:20:46 +0800
Subject: [PATCH 18/56] update document for Switch
---
 python/paddle/fluid/layers/control_flow.py | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py
index 5354582aaa..db5b07558a 100644
--- a/python/paddle/fluid/layers/control_flow.py
+++ b/python/paddle/fluid/layers/control_flow.py
@@ -1142,16 +1142,19 @@ class Switch(object):
     The Semantics:
 
     1. A `switch` control-flow checks cases one-by-one.
-    1. The condition of each case is a boolean value, which is a scalar.
-    1. It runs the first matched case, or the default case if there is one.
-    1. Once it matches a case, it runs the corresponding branch and only that branch.
+
+    2. The condition of each case is a boolean value, which is a scalar.
+
+    3. It runs the first matched case, or the default case if there is one.
+
+    4. Once it matches a case, it runs the corresponding branch and only that branch.
 
     Examples:
         .. code-block:: python
 
-        with control_flow.Switch() as switch:
+        with fluid.layers.Switch() as switch:
             with switch.case(global_step == zero_var):
-                tensor.assign(input=one_var, output=div_res)
+                fluid.layers.assign(input=one_var, output=div_res)
 
From 980499faf15e869f237bfd94171f4aea079865e5 Mon Sep 17 00:00:00 2001
From: fengjiayi 
Date: Thu, 14 Jun 2018 13:54:23 +0800
Subject: [PATCH 19/56] fix errors
---
 paddle/fluid/operators/pool_op.cc             | 15 ++++--
 python/paddle/fluid/layers/control_flow.py    | 23 +++++----
 python/paddle/fluid/layers/io.py              | 26 ++++++++++
 .../fluid/layers/learning_rate_scheduler.py   |  4 +-
 python/paddle/fluid/layers/nn.py              | 51 ++++++++++++++-----
 python/paddle/fluid/layers/tensor.py          |  7 +--
 6 files changed, 92 insertions(+), 34 deletions(-)

diff --git a/paddle/fluid/operators/pool_op.cc b/paddle/fluid/operators/pool_op.cc
index 6707cdded4..d94ddc7a53 100644
--- a/paddle/fluid/operators/pool_op.cc
+++ b/paddle/fluid/operators/pool_op.cc
@@ -204,8 +204,6 @@ void Pool2dOpMaker::Make() {
   // TODO(dzhwinter): need to register layout transform function
 
   AddComment(R"DOC(
-Pool2d Operator.
-
 The pooling2d operation calculates the output based on
 the input, pooling_type and ksize, strides, paddings parameters.
 Input(X) and output(Out) are in NCHW format, where N is batch size, C is the
 number of channels, H is the height of the feature, and W is the width of the feature.
 Parameters(ksize, strides, paddings) are two elements.
 These two elements represent height and width, respectively.
 The input(X) size and output(Out) size may be different.
 Example:
+
   Input:
+
        X shape: $(N, C, H_{in}, W_{in})$
+
   Output:
+
        Out shape: $(N, C, H_{out}, W_{out})$
+
   For ceil_mode = false:
       $$
-       H_{out} = \frac{(H_{in} - ksize[0] + 2 * paddings[0])}{strides[0]} + 1 \\
+       H_{out} = \frac{(H_{in} - ksize[0] + 2 * paddings[0])}{strides[0]} + 1
+       $$
+       $$
        W_{out} = \frac{(W_{in} - ksize[1] + 2 * paddings[1])}{strides[1]} + 1
       $$
   For ceil_mode = true:
       $$
-       H_{out} = \frac{(H_{in} - ksize[0] + 2 * paddings[0] + strides[0] - 1)}{strides[0]} + 1 \\
+       H_{out} = \frac{(H_{in} - ksize[0] + 2 * paddings[0] + strides[0] - 1)}{strides[0]} + 1
+       $$
+       $$
        W_{out} = \frac{(W_{in} - ksize[1] + 2 * paddings[1] + strides[1] - 1)}{strides[1]} + 1
       $$
 
diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py
index f4013b61d0..3ffebb960b 100644
--- a/python/paddle/fluid/layers/control_flow.py
+++ b/python/paddle/fluid/layers/control_flow.py
@@ -753,9 +753,9 @@ def lod_tensor_to_array(x, table):
     This function splits a LoDTensor into a LoDTensorArray according to its LoD
     information. LoDTensorArray is an alias of C++ std::vector<LoDTensor> in
-    Paddle. The generated LoDTensorArray of this function can be further read
-    or written by 'read_from_array()' and 'write_to_array()' operators. However,
-    this function is generally an internal component of Paddle 'DynamicRNN'.
+    PaddlePaddle. The generated LoDTensorArray of this function can be further read
+    or written by `read_from_array()` and `write_to_array()` operators. However,
+    this function is generally an internal component of PaddlePaddle `DynamicRNN`.
     Users should not use it directly.
 
     Args:
         x (Variable|list): The LoDTensor to be converted to a LoDTensorArray.
         table (ParamAttr|list): The variable that stores the level of lod
                                 which is ordered by sequence length in
                                 descending order. It is generally generated
-                                by 'layers.lod_rank_table()' API.
+                                by `layers.lod_rank_table()` API.
 
     Returns:
-        Variable: The LoDTensorArray that has been converted from the input
-        tensor.
+        Variable: The LoDTensorArray that has been converted from the input tensor.
 
     Examples:
         .. code-block:: python
@@ -1579,24 +1578,26 @@ def reorder_lod_tensor_by_rank(x, rank_table):
 
 def is_empty(x, cond=None, **ignored):
     """
-    Test whether an Variable is empty.
+    Test whether a Variable is empty.
 
     Args:
         x (Variable): The Variable to be tested.
         cond (Variable|None): Output parameter. Returns the test result
-            of given 'x'.
+            of given 'x'. Default: None
 
     Returns:
-        Variable: The tensor variable storing the test result of 'x'.
+        Variable: A bool scalar. True if 'x' is an empty Variable.
 
     Raises:
         TypeError: If input cond is not a variable, or cond's dtype is
-            not bool
+            not bool.
 
     Examples:
         .. code-block:: python
 
-            less = fluid.layers.is_empty(x=input)
+            res = fluid.layers.is_empty(x=input)
+            # or:
+            fluid.layers.is_empty(x=input, cond=res)
diff --git a/python/paddle/fluid/layers/io.py b/python/paddle/fluid/layers/io.py
index 9de88e2c32..6d6cdffe27 100644
--- a/python/paddle/fluid/layers/io.py
+++ b/python/paddle/fluid/layers/io.py
@@ -572,6 +572,32 @@ def parallel(reader):
 
 
 def read_file(file_obj):
+    """
+    Read data from a file object.
+
+    A file object is also a Variable. It can be a raw file object generated by
+    `fluid.layers.open_files()` or a decorated one generated by
+    `fluid.layers.double_buffer()` and so on.
+
+    Args:
+
+        file_obj(Variable): The file object from which to read data.
+
+    Returns:
+        Tuple[Variable]: Data read from the given file object.
+
+    Examples:
+        .. code-block:: python
+
+            data_file = fluid.layers.open_files(
+                filenames=['mnist.recordio'],
+                shapes=[(-1, 748), (-1, 1)],
+                lod_levels=[0, 0],
+                dtypes=["float32", "int64"])
+            data_file = fluid.layers.double_buffer(
+                fluid.layers.batch(data_file, batch_size=64))
+            input, label = fluid.layers.read_file(data_file)
+    """
     helper = LayerHelper('read_file')
     out = [
         helper.create_tmp_variable(
diff --git a/python/paddle/fluid/layers/learning_rate_scheduler.py b/python/paddle/fluid/layers/learning_rate_scheduler.py
index 9cbb559093..0efa726331 100644
--- a/python/paddle/fluid/layers/learning_rate_scheduler.py
+++ b/python/paddle/fluid/layers/learning_rate_scheduler.py
@@ -90,7 +90,7 @@ def exponential_decay(learning_rate, decay_steps, decay_rate, staircase=False):
             Default: False
 
     Returns:
-        The decayed learning rate
+        Variable: The decayed learning rate
 
     Examples:
         .. code-block:: python
@@ -167,7 +167,7 @@ def inverse_time_decay(learning_rate, decay_steps, decay_rate, staircase=False):
             Default: False
 
     Returns:
-        The decayed learning rate
+        Variable: The decayed learning rate
 
     Examples:
         .. code-block:: python
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 3194b72da6..030681999d 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -151,7 +151,7 @@ def fc(input,
         name (str, default None): The name of this layer.
 
     Returns:
-        A tensor variable storing the transformation result.
+        Variable: The transformation result.
 
     Raises:
         ValueError: If rank of the input tensor is less than 2.
@@ -159,8 +159,7 @@ def fc(input,
     Examples:
         .. code-block:: python
 
-            data = fluid.layers.data(
-                name="data", shape=[32, 32], dtype="float32")
+            data = fluid.layers.data(name="data", shape=[32, 32], dtype="float32")
             fc = fluid.layers.fc(input=data, size=1000, act="tanh")
     """
@@ -1543,21 +1542,24 @@ def pool2d(input,
     ${comment}
 
     Args:
-        input (Variable): ${input_comment}
+        input (Variable): The input tensor of pooling operator. The format of
+            input tensor is NCHW, where N is batch size, C is the number of
+            channels, H is the height of the feature, and W is the width of
+            the feature.
         pool_size (int): The side length of pooling windows. All pooling
-            windows are squares with pool_size on a side.
-        pool_type (str): ${pooling_type_comment}
+            windows are squares with pool_size on a side.
+        pool_type: ${pooling_type_comment}
         pool_stride (int): stride of the pooling layer.
         pool_padding (int): padding size.
-        global_pooling (bool): ${global_pooling_comment}
-        use_cudnn (bool): ${use_cudnn_comment}
-        ceil_mode (bool): ${ceil_mode_comment}
-        use_mkldnn (bool): ${use_mkldnn_comment}
+        global_pooling: ${global_pooling_comment}
+        use_cudnn: ${use_cudnn_comment}
+        ceil_mode: ${ceil_mode_comment}
+        use_mkldnn: ${use_mkldnn_comment}
         name (str|None): A name for this layer(optional). If set None, the layer
             will be named automatically.
 
     Returns:
-        Variable: output of pool2d layer.
+        Variable: The pooling result.
 
     Raises:
         ValueError: If 'pool_type' is not "max" nor "avg"
@@ -2764,6 +2766,27 @@ def topk(input, k, name=None):
     If the input is a Tensor with higher rank, this operator computes the top k
     entries along the last dimension.
 
+    For example:
+
+    .. code-block:: text
+
+        If:
+            input = [[5, 4, 2, 3],
+                     [9, 7, 10, 25],
+                     [6, 2, 10, 1]]
+            k = 2
+
+        Then:
+            The first output:
+            values = [[5, 4],
+                      [10, 25],
+                      [6, 10]]
+
+            The second output:
+            indices = [[0, 1],
+                       [2, 3],
+                       [0, 2]]
+
     Args:
         input(Variable): The input variable which can be a vector or Tensor with
            higher rank.
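
The worked `topk` example above maps onto a single call in user code. A minimal
usage sketch follows (the tensor name, shape and the value of `k` below are
illustrative, not part of this patch):

.. code-block:: python

    import paddle.fluid as fluid

    # A batch of row vectors, matching the 2-D input in the example above.
    scores = fluid.layers.data(name='scores', shape=[4], dtype='float32')
    # values holds the k largest entries of each row; indices holds their positions.
    values, indices = fluid.layers.topk(scores, k=2)
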
@@ -2774,10 +2797,10 @@ def topk(input, k, name=None):
         Default: None
 
     Returns:
-        values(Variable): The k largest elements along each last dimensional
-            slice.
-        indices(Variable): The indices of values within the last dimension of
-            input.
+        Tuple[Variable]: A tuple with two elements. Each element is a Variable.
+            The first one is the k largest elements along each last
+            dimensional slice. The second one is the indices of values
+            within the last dimension of input.
 
     Raises:
         ValueError: If k < 1 or k is not less than the last dimension of input
diff --git a/python/paddle/fluid/layers/tensor.py b/python/paddle/fluid/layers/tensor.py
index e03c8ca914..392fa6a422 100644
--- a/python/paddle/fluid/layers/tensor.py
+++ b/python/paddle/fluid/layers/tensor.py
@@ -159,20 +159,21 @@ def concat(input, axis=0, name=None):
 
 
 def sums(input, out=None):
-    """This function performs the sum operation on the input and returns the
+    """
+    This function performs the sum operation on the input and returns the
     result as the output.
 
     Args:
         input (Variable|list): The input tensor that has the elements that need
                                to be summed up.
-        out (Variable|None): Output parameter. Returns the sum result.
+        out (Variable|None): Output parameter. The sum result.
             Default: None
 
     Returns:
         Variable: the sum of input. If 'out' is provided, it is the returned value as well.
 
     Examples:
-        .. code-block::python
+        .. code-block:: python
 
             tmp = fluid.layers.zeros(shape=[10], dtype='int32')
             i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
From 2f9ed97eb66250a788702e21240bc09fea93b85d Mon Sep 17 00:00:00 2001
From: qiaolongfei 
Date: Thu, 14 Jun 2018 13:54:44 +0800
Subject: [PATCH 20/56] follow comment
---
 python/paddle/fluid/layers/control_flow.py |  2 --
 python/paddle/fluid/layers/nn.py           | 14 +++++++-------
 2 files changed, 7 insertions(+), 9 deletions(-)

diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py
index db5b07558a..5394ac3278 100644
--- a/python/paddle/fluid/layers/control_flow.py
+++ b/python/paddle/fluid/layers/control_flow.py
@@ -55,8 +55,6 @@ __all__ = [
 
 def split_lod_tensor(input, mask, level=0):
     """
-    **split_lod_tensor**
-
     This function takes in an input that contains the complete lod information,
     and takes in a mask which is used to mask certain parts of the input.
     The output is the true branch and the false branch with the mask applied to
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 627718f87e..d3899cd442 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -1638,23 +1638,23 @@ def batch_norm(input,
 
     Args:
         input(variable): The input variable which is a LoDTensor.
-        act(string, default None): Activation type, linear|relu|prelu|...
-        is_test(bool, default False): Used for training or training.
-        momentum(float, default 0.9):
-        epsilon(float, default 1e-05):
+        act(string, Default None): Activation type, linear|relu|prelu|...
+        is_test(bool, Default False): Used for training or testing.
+        momentum(float, Default 0.9):
+        epsilon(float, Default 1e-05):
         param_attr(ParamAttr): The parameter attribute for Parameter `scale`.
         bias_attr(ParamAttr): The parameter attribute for Parameter `bias`.
         data_layout(string, default NCHW): NCHW|NHWC
-        in_place(bool, default False): Make the input and output of batch norm reuse memory.
+        in_place(bool, Default False): Make the input and output of batch norm reuse memory.
         use_mkldnn(bool, Default false): ${use_mkldnn_comment}
         name(string, Default None): A name for this layer(optional). 
If set None, the layer will be named automatically. moving_mean_name(string, Default None): The name of moving_mean which store the global Mean. moving_variance_name(string, Default None): The name of the moving_variance which store the global Variance. - do_model_average_for_mean_and_var(bool, Default False): + do_model_average_for_mean_and_var(bool, Default False): Do model average for mean and variance or not. Returns: - The sequence's last step variable which is a Tensor. + Variable: A tensor variable which is the result after applying batch normalization on the input. Examples: From b3cb536402c8e3cc332c6c6da5de13a28ff78acc Mon Sep 17 00:00:00 2001 From: wanghaoshuang Date: Thu, 14 Jun 2018 11:53:35 +0000 Subject: [PATCH 21/56] Fix doc of relu, log and zeros. --- python/paddle/fluid/layers/nn.py | 130 ++++++++++++++------------- python/paddle/fluid/layers/ops.py | 2 - python/paddle/fluid/layers/tensor.py | 7 +- 3 files changed, 74 insertions(+), 65 deletions(-) diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index bd6ed0f30e..97a8af69ea 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -24,66 +24,20 @@ from tensor import concat import utils __all__ = [ - 'fc', - 'embedding', - 'dynamic_lstm', - 'dynamic_lstmp', - 'dynamic_gru', - 'gru_unit', - 'linear_chain_crf', - 'crf_decoding', - 'cos_sim', - 'cross_entropy', - 'square_error_cost', - 'chunk_eval', - 'sequence_conv', - 'conv2d', - 'sequence_pool', - 'sequence_softmax', - 'softmax', - 'pool2d', - 'batch_norm', - 'beam_search_decode', - 'conv2d_transpose', - 'sequence_expand', - 'lstm_unit', - 'reduce_sum', - 'reduce_mean', - 'reduce_max', - 'reduce_min', - 'reduce_prod', - 'sequence_first_step', - 'sequence_last_step', - 'dropout', - 'split', - 'ctc_greedy_decoder', - 'edit_distance', - 'l2_normalize', - 'matmul', - 'topk', - 'warpctc', - 'sequence_reshape', - 'transpose', - 'im2sequence', - 'nce', - 'beam_search', - 'row_conv', - 'multiplex', - 'layer_norm', - 'softmax_with_cross_entropy', - 'smooth_l1', - 'one_hot', - 'autoincreased_step_counter', - 'reshape', - 'lod_reset', - 'lrn', - 'pad', - 'label_smooth', - 'roi_pool', - 'dice_loss', - 'resize_bilinear', - 'gather', - 'random_crop', + 'fc', 'embedding', 'dynamic_lstm', 'dynamic_lstmp', 'dynamic_gru', + 'gru_unit', 'linear_chain_crf', 'crf_decoding', 'cos_sim', 'cross_entropy', + 'square_error_cost', 'chunk_eval', 'sequence_conv', 'conv2d', + 'sequence_pool', 'sequence_softmax', 'softmax', 'pool2d', 'batch_norm', + 'beam_search_decode', 'conv2d_transpose', 'sequence_expand', 'lstm_unit', + 'reduce_sum', 'reduce_mean', 'reduce_max', 'reduce_min', 'reduce_prod', + 'sequence_first_step', 'sequence_last_step', 'dropout', 'split', + 'ctc_greedy_decoder', 'edit_distance', 'l2_normalize', 'matmul', 'topk', + 'warpctc', 'sequence_reshape', 'transpose', 'im2sequence', 'nce', + 'beam_search', 'row_conv', 'multiplex', 'layer_norm', + 'softmax_with_cross_entropy', 'smooth_l1', 'one_hot', + 'autoincreased_step_counter', 'reshape', 'lod_reset', 'lrn', 'pad', + 'label_smooth', 'roi_pool', 'dice_loss', 'resize_bilinear', 'gather', + 'random_crop', 'relu', 'log' ] @@ -4075,3 +4029,59 @@ def random_crop(input, shape, seed=1): "SeedOut": seed_out}, attrs={"shape": shape}) return out + + +def log(x): + """ + Calculates the natural log of the given input tensor, element-wise. + + .. math:: + + Out = \\ln(x) + + Args: + x (Variable): Input tensor. + + Returns: + Variable: The natural log of the input tensor computed element-wise. 
+
+
+    Examples:
+
+        .. code-block:: python
+
+            output = fluid.layers.log(x)
+    """
+    helper = LayerHelper('log', **locals())
+    dtype = helper.input_dtype(input_param_name='x')
+    out = helper.create_tmp_variable(dtype)
+    helper.append_op(type="log", inputs={"X": x}, outputs={"Out": out})
+    return out
+
+
+def relu(x):
+    """
+    Relu takes one input data (Tensor) and produces one output data (Tensor)
+    where the rectified linear function, y = max(0, x), is applied to
+    the tensor elementwise.
+
+    .. math::
+
+        Out = \\max(0, x)
+
+    Args:
+        x (Variable): The input tensor.
+
+    Returns:
+        Variable: The output tensor with the same shape as input.
+
+    Examples:
+
+        .. code-block:: python
+
+            output = fluid.layers.relu(x)
+    """
+    helper = LayerHelper('relu', **locals())
+    dtype = helper.input_dtype(input_param_name='x')
+    out = helper.create_tmp_variable(dtype)
+    helper.append_op(type="relu", inputs={"X": x}, outputs={"Out": out})
+    return out
diff --git a/python/paddle/fluid/layers/ops.py b/python/paddle/fluid/layers/ops.py
index 60f8cbbfa7..7d3c44a389 100644
--- a/python/paddle/fluid/layers/ops.py
+++ b/python/paddle/fluid/layers/ops.py
@@ -17,7 +17,6 @@ __activations__ = [
     'sigmoid',
     'logsigmoid',
     'exp',
-    'relu',
     'tanh',
     'tanh_shrink',
     'softshrink',
@@ -29,7 +28,6 @@ __activations__ = [
     'sin',
     'round',
     'reciprocal',
-    'log',
     'square',
     'softplus',
     'softsign',
diff --git a/python/paddle/fluid/layers/tensor.py b/python/paddle/fluid/layers/tensor.py
index be34cc81a5..c7e9813c49 100644
--- a/python/paddle/fluid/layers/tensor.py
+++ b/python/paddle/fluid/layers/tensor.py
@@ -349,11 +349,12 @@ def zeros(shape, dtype, force_cpu=False):
     It also sets *stop_gradient* to True.
 
     Args:
-        shape(tuple|list|None): Shape of output tensor
-        dtype(np.dtype|core.VarDesc.VarType|str): Data type of output tensor
+        shape(tuple|list|None): Shape of output tensor.
+        dtype(np.dtype|core.VarDesc.VarType|str): Data type of output tensor.
+        force_cpu(bool, default False): Whether to make output stay on CPU.
 
     Returns:
-        Variable: The tensor variable storing the output
+        Variable: The tensor variable storing the output.
 
     Examples:
         .. code-block:: python
From 297a16986d3ed700c2d02b6a00b2012ab5bdba3b Mon Sep 17 00:00:00 2001
From: wanghaoshuang 
Date: Thu, 14 Jun 2018 20:14:08 +0800
Subject: [PATCH 22/56] Fix doc of warpctc, array_read, edit_distance and
 sequence_reshape.
---
 python/paddle/fluid/layers/control_flow.py |  63 +-
 python/paddle/fluid/layers/nn.py           | 742 ++++++++++++++-------
 2 files changed, 522 insertions(+), 283 deletions(-)

diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py
index d1ea9f1485..8cd389910c 100644
--- a/python/paddle/fluid/layers/control_flow.py
+++ b/python/paddle/fluid/layers/control_flow.py
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 import contextlib
-from layer_function_generator import autodoc
+from layer_function_generator import autodoc, templatedoc
 from tensor import assign, fill_constant
 from .. import core
 from ..framework import Program, Variable, Operator
@@ -721,26 +721,22 @@ def lod_rank_table(x, level=0):
     return table
 
 
+@templatedoc()
 def max_sequence_len(rank_table):
-    """Max Sequence Len Operator. Given a LoDRankTable object, this layer
-    returns the max length of a batch of sequences. In fact, a LoDRankTable
-    object contains a list of tuples(<sequence index, sequence length>) and
-    the list is already sorted by sequence length in descending order, so the
-    operator just returns the sequence length of the first tuple element.
+    """
+    ${comment}
+
+    >>> import paddle.fluid as fluid
+    >>> x = fluid.layers.data(name='x', shape=[10], dtype='float32',
+    >>>                       lod_level=1)
+    >>> rank_table = fluid.layers.lod_rank_table(x=x, level=0)
+    >>> max_seq_len = fluid.layers.max_sequence_len(rank_table)
 
     Args:
-        rank_table (Variable): Input variable which is a LoDRankTable object.
+        rank_table(${rank_table_type}): ${rank_table_comment}.
 
     Returns:
-        Variable: The max length of sequence.
-
-    Examples:
-        .. code-block:: python
-
-            x = fluid.layers.data(name='x', shape=[10],
-                                  dtype='float32', lod_level=1)
-            rank_table = layers.lod_rank_table(x=x, level=0)
-            max_seq_len = layers.max_sequence_len(rank_table)
+        ${out_comment}.
     """
     helper = LayerHelper("max_seqence_len", **locals())
     res = helper.create_tmp_variable(dtype="int64")
@@ -978,19 +974,38 @@ def equal(x, y, cond=None, **ignored):
 
 def array_read(array, i):
-    """This function performs the operation to read the data in as an
+    """
+    This function performs the operation to read the data in as an
     LOD_TENSOR_ARRAY.
+
+    .. code-block:: text
+
+        Given:
+
+        array = [0.6, 0.1, 0.3, 0.1]
+
+        And:
+
+        i = 2
+
+        Then:
+
+        output = 0.3
+
     Args:
-        array (Variable|list): The input tensor that will be written to an array.
-        i (Variable|list): The subscript index in tensor array, that points the
-            place where data will be written to.
+        array (Variable|list): The input tensor that stores the data to be read.
+        i (Variable|list): The index of the data to be read from the input array.
+
     Returns:
-        Variable: The tensor type variable that has the data written to it.
+        Variable: The tensor variable that stores the data read from the input array.
+
     Examples:
-        .. code-block::python
-            tmp = fluid.layers.zeros(shape=[10], dtype='int32')
-            i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
-            arr = layers.array_read(tmp, i=i)
+
+        .. code-block:: python
+
+            tmp = fluid.layers.zeros(shape=[10], dtype='int32')
+            i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
+            arr = fluid.layers.array_read(tmp, i=i)
+
     """
     helper = LayerHelper('array_read', **locals())
     if not isinstance(
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index bd6ed0f30e..eba22f5962 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -12,78 +12,33 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """
-All layers just related to the neural network.
+All layers just related to the neural network. 
""" from ..layer_helper import LayerHelper from ..initializer import Normal, Constant from ..framework import Variable from ..param_attr import ParamAttr -from layer_function_generator import autodoc +from layer_function_generator import autodoc, templatedoc from tensor import concat import utils +import random __all__ = [ - 'fc', - 'embedding', - 'dynamic_lstm', - 'dynamic_lstmp', - 'dynamic_gru', - 'gru_unit', - 'linear_chain_crf', - 'crf_decoding', - 'cos_sim', - 'cross_entropy', - 'square_error_cost', - 'chunk_eval', - 'sequence_conv', - 'conv2d', - 'sequence_pool', - 'sequence_softmax', - 'softmax', - 'pool2d', - 'batch_norm', - 'beam_search_decode', - 'conv2d_transpose', - 'sequence_expand', - 'lstm_unit', - 'reduce_sum', - 'reduce_mean', - 'reduce_max', - 'reduce_min', - 'reduce_prod', - 'sequence_first_step', - 'sequence_last_step', - 'dropout', - 'split', - 'ctc_greedy_decoder', - 'edit_distance', - 'l2_normalize', - 'matmul', - 'topk', - 'warpctc', - 'sequence_reshape', - 'transpose', - 'im2sequence', - 'nce', - 'beam_search', - 'row_conv', - 'multiplex', - 'layer_norm', - 'softmax_with_cross_entropy', - 'smooth_l1', - 'one_hot', - 'autoincreased_step_counter', - 'reshape', - 'lod_reset', - 'lrn', - 'pad', - 'label_smooth', - 'roi_pool', - 'dice_loss', - 'resize_bilinear', - 'gather', - 'random_crop', + 'fc', 'embedding', 'dynamic_lstm', 'dynamic_lstmp', 'dynamic_gru', + 'gru_unit', 'linear_chain_crf', 'crf_decoding', 'cos_sim', 'cross_entropy', + 'square_error_cost', 'chunk_eval', 'sequence_conv', 'conv2d', + 'sequence_pool', 'sequence_softmax', 'softmax', 'pool2d', 'batch_norm', + 'beam_search_decode', 'conv2d_transpose', 'sequence_expand', 'lstm_unit', + 'reduce_sum', 'reduce_mean', 'reduce_max', 'reduce_min', 'reduce_prod', + 'sequence_first_step', 'sequence_last_step', 'dropout', 'split', + 'ctc_greedy_decoder', 'edit_distance', 'l2_normalize', 'matmul', 'topk', + 'warpctc', 'sequence_reshape', 'transpose', 'im2sequence', 'nce', + 'beam_search', 'row_conv', 'multiplex', 'layer_norm', + 'softmax_with_cross_entropy', 'smooth_l1', 'one_hot', + 'autoincreased_step_counter', 'reshape', 'lod_reset', 'lrn', 'pad', + 'label_smooth', 'roi_pool', 'dice_loss', 'image_resize', + 'image_resize_short', 'resize_bilinear', 'gather', 'random_crop', 'mean_iou' ] @@ -92,7 +47,6 @@ def fc(input, num_flatten_dims=1, param_attr=None, bias_attr=None, - use_cudnn=False, use_mkldnn=False, act=None, is_test=False, @@ -219,6 +173,7 @@ def embedding(input, have two elements which indicate the size of the dictionary of embeddings and the size of each embedding vector respectively. is_sparse(bool): The flag indicating whether to use sparse update. + is_distributed (bool): Whether to run lookup table from remote parameter server. padding_idx(int|long|None): If :attr:`None`, it makes no effect to lookup. Otherwise the given :attr:`padding_idx` indicates padding the output with zeros whenever lookup encounters it in :attr:`input`. If @@ -258,9 +213,10 @@ def embedding(input, return tmp -# TODO(qijun): expose H0 and C0 def dynamic_lstm(input, size, + h_0=None, + c_0=None, param_attr=None, bias_attr=None, use_peepholes=True, @@ -321,6 +277,13 @@ def dynamic_lstm(input, (T X 4D), where T is the total time steps in this mini-batch, D is the hidden size. size(int): 4 * hidden size. + h_0(Variable): The initial hidden state is an optional input, default is zero. + This is a tensor with shape (N x D), where N is the + batch size and D is the hidden size. 
+ c_0(Variable): The initial cell state is an optional input, default is zero. + This is a tensor with shape (N x D), where N is the + batch size. `h_0` and `c_0` can be NULL but only at the same time. + param_attr(ParamAttr|None): The parameter attribute for the learnable hidden-hidden weights. @@ -384,12 +347,20 @@ def dynamic_lstm(input, cell = helper.create_tmp_variable(dtype) batch_gate = helper.create_tmp_variable(dtype) batch_cell_pre_act = helper.create_tmp_variable(dtype) + inputs = {'Input': input, 'Weight': weight, 'Bias': bias} + batch_size = input.shape[0] + if h_0: + assert h_0.shape == (batch_size, size), \ + 'The shape of h0 should be (batch_size, %d)' % size + inputs['H0'] = h_0 + if c_0: + assert c_0.shape == (batch_size, size), \ + 'The shape of c0 should be (batch_size, %d)' % size + inputs['C0'] = c_0 helper.append_op( type='lstm', - inputs={'Input': input, - 'Weight': weight, - 'Bias': bias}, + inputs=inputs, outputs={ 'Hidden': hidden, 'Cell': cell, @@ -651,8 +622,9 @@ def dynamic_gru(input, :attr:`False`. gate_activation(str): The activation for update gate and reset gate. Choices = ["sigmoid", "tanh", "relu", "identity"], default "sigmoid". - activation(str): The activation for candidate hidden state. + candidate_activation(str): The activation for candidate hidden state. Choices = ["sigmoid", "tanh", "relu", "identity"], default "tanh". + h_0 (Variable): The hidden output of the first time step. Returns: Variable: The hidden state of GRU. The shape is :math:`(T \\times D)`, \ @@ -673,11 +645,13 @@ def dynamic_gru(input, attr=helper.param_attr, shape=[size, 3 * size], dtype=dtype) bias = helper.create_parameter( attr=helper.bias_attr, shape=[1, 3 * size], dtype=dtype, is_bias=True) + batch_size = input.shape[0] inputs = {'Input': input, 'Weight': weight, 'Bias': bias} if h_0 != None: assert h_0.shape == ( - size, size), 'The shape of h0 should be(%d, %d)' % (size, size) - inputs['h0'] = h_0 + batch_size, size + ), 'The shape of h0 should be(batch_size, %d)' % size + inputs['H0'] = h_0 hidden = helper.create_tmp_variable(dtype) batch_gate = helper.create_tmp_variable(dtype) @@ -799,7 +773,22 @@ def gru_unit(input, return updated_hidden, reset_hidden_pre, gate +@templatedoc() def linear_chain_crf(input, label, param_attr=None): + """ + Linear Chain CRF. + + ${comment} + + Args: + input(${emission_type}): ${emission_comment} + label(${label_type}): ${label_comment} + param_attr(ParamAttr): The attribute of the learnable parameter. + + Returns: + ${log_likelihood_comment} + + """ helper = LayerHelper('linear_chain_crf', **locals()) size = input.shape[1] transition = helper.create_parameter( @@ -825,7 +814,19 @@ def linear_chain_crf(input, label, param_attr=None): return log_likelihood +@templatedoc() def crf_decoding(input, param_attr, label=None): + """ + ${comment} + + Args: + input(${emission_type}): ${emission_comment} + param_attr(ParamAttr): The parameter attribute for training. + label(${label_type}): ${label_comment} + + Returns: + ${viterbi_path_comment} + """ helper = LayerHelper('crf_decoding', **locals()) transition = helper.get_parameter(param_attr.name) viterbi_path = helper.create_tmp_variable(dtype=helper.input_dtype()) @@ -843,6 +844,13 @@ def cos_sim(X, Y): """ This function performs the cosine similarity between two tensors X and Y and returns that as the output. + + Args: + X (Variable): The input X. + Y (Variable): The input Y. + + Returns: + Variable: the output of cosine(X, Y). 
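+
+    Examples:
+        A minimal usage sketch; the input names and shapes are illustrative:
+
+        .. code-block:: python
+
+            x = fluid.layers.data(name='x', shape=[3], dtype='float32')
+            y = fluid.layers.data(name='y', shape=[3], dtype='float32')
+            out = fluid.layers.cos_sim(x, y)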
""" helper = LayerHelper('cos_sim', **locals()) out = helper.create_tmp_variable(dtype=X.dtype) @@ -869,15 +877,15 @@ def dropout(x, dropout_prob, is_test=False, seed=None, name=None): unchanged. Args: - x(variable): The input tensor. - dropout_prob(float): Probability of setting units to zero. - is_test(bool): A flag indicating whether it is in test phrase or not. - seed(int): A Python integer used to create random seeds. If this - parameter is set to None, a random seed is used. - NOTE: If an integer seed is given, always the same output - units will be dropped. DO NOT use a fixed seed in training. - name(str|None): A name for this layer(optional). If set None, the layer - will be named automatically. + x (Variable): The input tensor. + dropout_prob (float): Probability of setting units to zero. + is_test (bool): A flag indicating whether it is in test phrase or not. + seed (int): A Python integer used to create random seeds. If this + parameter is set to None, a random seed is used. + NOTE: If an integer seed is given, always the same output + units will be dropped. DO NOT use a fixed seed in training. + name (str|None): A name for this layer(optional). If set None, the layer + will be named automatically. Returns: Variable: A tensor variable. @@ -999,8 +1007,8 @@ def square_error_cost(input, label): * :math:`Out`: Output value, same shape with :math:`X`. Args: - input(Variable): Input tensor, has predictions. - label(Variable): Label tensor, has target labels. + input (Variable): Input tensor, has predictions. + label (Variable): Label tensor, has target labels. Returns: Variable: The tensor variable storing the element-wise squared error \ @@ -1029,6 +1037,7 @@ def square_error_cost(input, label): return square_out +@templatedoc() def chunk_eval(input, label, chunk_scheme, @@ -1037,6 +1046,18 @@ def chunk_eval(input, """ This function computes and outputs the precision, recall and F1-score of chunk detection. + + Args: + input (Variable): prediction output of the network. + label (Variable): label of the test data set. + chunk_scheme (str): ${chunk_scheme_comment} + num_chunk_types (int): ${num_chunk_types_comment} + excluded_chunk_types (list): ${excluded_chunk_types_comment} + + Returns: + tuple: tuple containing: (precision, recall, f1_score, + num_infer_chunks, num_label_chunks, + num_correct_chunks) """ helper = LayerHelper("chunk_eval", **locals()) @@ -1069,6 +1090,7 @@ def chunk_eval(input, num_correct_chunks) +@templatedoc() def sequence_conv(input, num_filters, filter_size=3, @@ -1081,6 +1103,19 @@ def sequence_conv(input, This function creates the op for sequence_conv, using the inputs and other convolutional configurations for the filters and stride as given in the input parameters to the function. + + Args: + input (Variable): ${x_comment} + num_filters (int): number of filters. + filter_size (int): the filter size (H and W). + filter_stride (int): stride of the filter. + padding (bool): if True, add paddings. 
+ bias_attr (ParamAttr|None): attributes for bias + param_attr (ParamAttr|None): attributes for parameter + act (str): the activation type + + Returns: + Variable: output of sequence_conv """ # FIXME(dzh) : want to unify the argument of python layer @@ -1180,48 +1215,49 @@ def conv2d(input, - Input: - Input shape: $(N, C_{in}, H_{in}, W_{in})$ + Input shape: :math:`(N, C_{in}, H_{in}, W_{in})` - Filter shape: $(C_{out}, C_{in}, H_f, W_f)$ + Filter shape: :math:`(C_{out}, C_{in}, H_f, W_f)` - Output: - Output shape: $(N, C_{out}, H_{out}, W_{out})$ + Output shape: :math:`(N, C_{out}, H_{out}, W_{out})` Where .. math:: - H_{out}&= \\frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (H_f - 1) + 1))}{strides[0]} + 1 \\\\ - W_{out}&= \\frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]} + 1 + H_{out}&= \\frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (H_f - 1) + 1))}{strides[0]} + 1 \\\\ + W_{out}&= \\frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]} + 1 Args: - input(Variable): The input image with [N, C, H, W] format. - num_filters(int): The number of filter. It is as same as the output - image channel. - filter_size(int|tuple|None): The filter size. If filter_size is a tuple, - it must contain two integers, (filter_size_H, filter_size_W). - Otherwise, the filter will be a square. - stride(int|tuple): The stride size. If stride is a tuple, it must - contain two integers, (stride_H, stride_W). Otherwise, the - stride_H = stride_W = stride. Default: stride = 1. - padding(int|tuple): The padding size. If padding is a tuple, it must - contain two integers, (padding_H, padding_W). Otherwise, the - padding_H = padding_W = padding. Default: padding = 0. - dilation(int|tuple): The dilation size. If dilation is a tuple, it must - contain two integers, (dilation_H, dilation_W). Otherwise, the - dilation_H = dilation_W = dilation. Default: dilation = 1. - groups(int): The groups number of the Conv2d Layer. According to grouped - convolution in Alex Krizhevsky's Deep CNN paper: when group=2, - the first half of the filters is only connected to the first half - of the input channels, while the second half of the filters is only - connected to the second half of the input channels. Default: groups=1 - param_attr(ParamAttr): The parameters to the Conv2d Layer. Default: None - bias_attr(ParamAttr): Bias parameter for the Conv2d layer. Default: None - use_cudnn(bool): Use cudnn kernel or not, it is valid only when the cudnn - library is installed. Default: True - act(str): Activation type. Default: None - name(str|None): A name for this layer(optional). If set None, the layer - will be named automatically. + input (Variable): The input image with [N, C, H, W] format. + num_filters(int): The number of filter. It is as same as the output + image channel. + filter_size (int|tuple|None): The filter size. If filter_size is a tuple, + it must contain two integers, (filter_size_H, filter_size_W). + Otherwise, the filter will be a square. + stride (int|tuple): The stride size. If stride is a tuple, it must + contain two integers, (stride_H, stride_W). Otherwise, the + stride_H = stride_W = stride. Default: stride = 1. + padding (int|tuple): The padding size. If padding is a tuple, it must + contain two integers, (padding_H, padding_W). Otherwise, the + padding_H = padding_W = padding. Default: padding = 0. + dilation (int|tuple): The dilation size. If dilation is a tuple, it must + contain two integers, (dilation_H, dilation_W). 
Otherwise, the + dilation_H = dilation_W = dilation. Default: dilation = 1. + groups (int): The groups number of the Conv2d Layer. According to grouped + convolution in Alex Krizhevsky's Deep CNN paper: when group=2, + the first half of the filters is only connected to the first half + of the input channels, while the second half of the filters is only + connected to the second half of the input channels. Default: groups=1 + param_attr (ParamAttr): The parameters to the Conv2d Layer. Default: None + bias_attr (ParamAttr): Bias parameter for the Conv2d layer. Default: None + use_cudnn (bool): Use cudnn kernel or not, it is valid only when the cudnn + library is installed. Default: True + use_mkldnn (bool): Use mkldnn kernels or not. + act (str): Activation type. Default: None + name (str|None): A name for this layer(optional). If set None, the layer + will be named automatically. Returns: Variable: The tensor variable storing the convolution and \ @@ -1379,7 +1415,7 @@ def sequence_pool(input, pool_type): def sequence_first_step(input): """ - This funciton get the first step of sequence. + This function gets the first step of sequence. .. code-block:: text @@ -1412,7 +1448,7 @@ def sequence_first_step(input): def sequence_last_step(input): """ - This funciton get the last step of sequence. + This function gets the last step of sequence. .. code-block:: text @@ -1456,6 +1492,22 @@ def pool2d(input, """ This function adds the operator for pooling in 2 dimensions, using the pooling configurations mentioned in input parameters. + + Args: + input (Variable): ${input_comment} + pool_size (int): ${ksize_comment} + pool_type (str): ${pooling_type_comment} + pool_stride (int): stride of the pooling layer. + pool_padding (int): padding size. + global_pooling (bool): ${global_pooling_comment} + use_cudnn (bool): ${use_cudnn_comment} + ceil_mode (bool): ${ceil_mode_comment} + use_mkldnn (bool): ${use_mkldnn_comment} + name (str): A name for this layer(optional). If set None, the layer + will be named automatically. + + Returns: + Variable: output of pool2d layer. """ if pool_type not in ["max", "avg"]: raise ValueError( @@ -1513,6 +1565,25 @@ def batch_norm(input, """ This function helps create an operator to implement the BatchNorm layer using the configurations from the input parameters. + + Args: + input (Variable): the input variable. + act (str): activation type + is_test (bool): whether to run batch_norm as test mode. + momentum (float): momentum + epsilon (float): epsilon, default 1e-05 + param_attr (ParamAttr|None): attributes for parameter + bias_attr (ParamAttr|None): attributes for bias + data_layout (str): data layout, default NCHW + in_place (bool): if True, do not create tmp variable + use_mkldnn (bool): ${use_mkldnn_comment} + name (str): The name of this layer. It is optional. + moving_mean_name (str): The name of moving mean variable name, optional. + moving_variance_name (str): The name of moving variance name, optional. + do_model_average_for_mean_and_var (bool): + + Returns: + Variable: output of batch_norm layer. """ helper = LayerHelper('batch_norm', **locals()) dtype = helper.input_dtype() @@ -1640,6 +1711,7 @@ def layer_norm(input, bias_attr(ParamAttr|None): The parameter attribute for the learnable bias :math:`b`. act(str): Activation to be applied to the output of layer normalizaiton. + name (str): The name of this layer. It is optional. Returns: Variable: A tensor variable with the same shape as the input. 
@@ -1691,6 +1763,17 @@ def layer_norm(input, def beam_search_decode(ids, scores, name=None): + """ + ${beam_search_decode} + + Args: + ids (Variable): ${ids_comment} + scores (Variable): ${scores_comment} + name (str): The name of this layer. It is optional. + + Returns: + tuple: a tuple of two output variable: sentence_ids, sentence_scores + """ helper = LayerHelper('beam_search_decode', **locals()) sentence_ids = helper.create_tmp_variable(dtype=ids.dtype) sentence_scores = helper.create_tmp_variable(dtype=ids.dtype) @@ -1766,46 +1849,46 @@ def conv2d_transpose(input, W_{out} &= (W_{in} - 1) * strides[1] - 2 * paddings[1] + dilations[1] * (W_f - 1) + 1 Args: - input(Variable): The input image with [N, C, H, W] format. - num_filters(int): The number of the filter. It is as same as the output - image channel. - output_size(int|tuple|None): The output image size. If output size is a - tuple, it must contain two integers, (image_H, image_W). This - parameter only works when filter_size is None. - filter_size(int|tuple|None): The filter size. If filter_size is a tuple, - it must contain two integers, (filter_size_H, filter_size_W). - Otherwise, the filter will be a square. None if use output size to - calculate filter_size. - padding(int|tuple): The padding size. If padding is a tuple, it must - contain two integers, (padding_H, padding_W). Otherwise, the - padding_H = padding_W = padding. Default: padding = 0. - stride(int|tuple): The stride size. If stride is a tuple, it must - contain two integers, (stride_H, stride_W). Otherwise, the - stride_H = stride_W = stride. Default: stride = 1. - dilation(int|tuple): The dilation size. If dilation is a tuple, it must - contain two integers, (dilation_H, dilation_W). Otherwise, the - dilation_H = dilation_W = dilation. Default: dilation = 1. - groups(int): The groups number of the Conv2d transpose layer. Inspired by - grouped convolution in Alex Krizhevsky's Deep CNN paper, in which - when group=2, the first half of the filters is only connected to the - first half of the input channels, while the second half of the - filters is only connected to the second half of the input channels. - Default: groups=1 - param_attr(ParamAttr): The parameters to the Conv2d_transpose Layer. - Default: None - bias_attr(ParamAttr): Bias parameter for the Conv2d layer. Default: None - use_cudnn(bool): Use cudnn kernel or not, it is valid only when the cudnn - library is installed. Default: True - act(str): Activation type. Default: None - name(str|None): A name for this layer(optional). If set None, the layer - will be named automatically. + input(Variable): The input image with [N, C, H, W] format. + num_filters(int): The number of the filter. It is as same as the output + image channel. + output_size(int|tuple|None): The output image size. If output size is a + tuple, it must contain two integers, (image_H, image_W). This + parameter only works when filter_size is None. + filter_size(int|tuple|None): The filter size. If filter_size is a tuple, + it must contain two integers, (filter_size_H, filter_size_W). + Otherwise, the filter will be a square. None if use output size to + calculate filter_size. + padding(int|tuple): The padding size. If padding is a tuple, it must + contain two integers, (padding_H, padding_W). Otherwise, the + padding_H = padding_W = padding. Default: padding = 0. + stride(int|tuple): The stride size. If stride is a tuple, it must + contain two integers, (stride_H, stride_W). Otherwise, the + stride_H = stride_W = stride. Default: stride = 1. 
+    dilation(int|tuple): The dilation size. If dilation is a tuple, it must
+        contain two integers, (dilation_H, dilation_W). Otherwise, the
+        dilation_H = dilation_W = dilation. Default: dilation = 1.
+    groups(int): The groups number of the Conv2d transpose layer. Inspired by
+        grouped convolution in Alex Krizhevsky's Deep CNN paper, in which
+        when group=2, the first half of the filters is only connected to the
+        first half of the input channels, while the second half of the
+        filters is only connected to the second half of the input channels.
+        Default: groups=1
+    param_attr(ParamAttr): The parameters to the Conv2d_transpose Layer.
+        Default: None
+    bias_attr(ParamAttr): Bias parameter for the Conv2d layer. Default: None
+    use_cudnn(bool): Use cudnn kernel or not, it is valid only when the cudnn
+        library is installed. Default: True
+    act(str): Activation type. Default: None
+    name(str|None): A name for this layer(optional). If set None, the layer
+        will be named automatically.
 
     Returns:
-        Variable: The tensor variable storing the convolution transpose result.
+        Variable: The tensor variable storing the convolution transpose result.
 
     Raises:
-        ValueError: If the shapes of input, filter_size, stride, padding and
-            groups mismatch.
+        ValueError: If the shapes of input, filter_size, stride, padding and
+            groups mismatch.
 
     Examples:
         .. code-block:: python
@@ -1942,6 +2025,17 @@ def sequence_expand(x, y, ref_level=-1, name=None):
 def beam_search(pre_ids, ids, scores, beam_size, end_id, level=0):
     '''
     This function implements the beam search algorithm.
+
+    Args:
+        pre_ids (Variable): ${pre_ids_comment}
+        ids (Variable): ${ids_comment}
+        scores (Variable): ${scores_comment}
+        beam_size (int): ${beam_size_comment}
+        end_id (int): ${end_id_comment}
+        level (int): ${level_comment}
+
+    Returns:
+        tuple: a tuple of beam_search output variables: selected_ids, selected_scores
     '''
     helper = LayerHelper('beam_search', **locals())
     score_type = scores.dtype
@@ -2437,19 +2531,21 @@ def l2_normalize(x, axis, epsilon=1e-12, name=None):
     The l2 normalize layer normalizes `x` along dimension `axis` using an L2
     norm. For a 1-D tensor (`dim` is fixed to 0), this layer computes
 
-    output = x / sqrt(max(sum(x**2), epsilon))
+    .. math::
+
+        y = \frac{x}{ \sqrt{\sum {x^2} + \epsilon }}
 
     For `x` with more dimensions, this layer independently normalizes each 1-D
     slice along dimension `axis`.
 
     Args:
-        x(Variable|list): The input tensor to l2_normalize layer.
-        axis(int): Dimension along which to normalize the input.
-        epsilon(float): A lower bound value for `x`'s l2 norm. sqrt(epsilon) will
-            be used as the divisor if the l2 norm of `x` is less than
-            sqrt(epsilon).
-        name(str|None): A name for this layer(optional). If set None, the layer
-            will be named automatically.
+        x(Variable|list): The input tensor to l2_normalize layer.
+        axis(int): The axis on which to apply normalization. If `axis < 0`,
+            the dimension to normalize is rank(X) + axis. -1 is the
+            last dimension.
+        epsilon(float): The epsilon value is used to avoid division by zero,
+            the default value is 1e-12.
+        name(str|None): A name for this layer(optional). If set None, the layer
+            will be named automatically. 
Returns: @@ -2468,46 +2564,17 @@ def l2_normalize(x, axis, epsilon=1e-12, name=None): axis = 0 helper = LayerHelper("l2_normalize", **locals()) - square = helper.create_tmp_variable(dtype=x.dtype) - helper.append_op(type="square", inputs={"X": x}, outputs={"Out": square}) - - reduced_sum = helper.create_tmp_variable(dtype=x.dtype) + out = helper.create_tmp_variable(dtype=x.dtype) + norm = helper.create_tmp_variable(dtype=x.dtype) helper.append_op( - type="reduce_sum", - inputs={"X": square}, - outputs={"Out": reduced_sum}, + type="norm", + inputs={"X": x}, + outputs={"Out": out, + "Norm": norm}, attrs={ - "dim": [1] if axis is None else [axis], - "keep_dim": True, - "reduce_all": False + "axis": 1 if axis is None else axis, + "epsilon": epsilon, }) - - # TODO(caoying) A lower bound value epsilon for the norm is needed to - # imporve the numeric stability of reciprocal. This requires a maximum_op. - rsquare = helper.create_tmp_variable(dtype=x.dtype) - helper.append_op( - type="reciprocal", inputs={"X": reduced_sum}, outputs={"Out": rsquare}) - - # TODO(caoying) the current elementwise_mul operator does not support a - # general broadcast rule which broadcasts input(Y) to have the same - # dimension with Input(X) starting from a specified dimension. So this - # exanpsion is requred. Once a general broadcast rule is spported, this - # expanding canbe removed. - rsquare_expanded = helper.create_tmp_variable(dtype=x.dtype) - expand_times = [1] * len(x.shape) - expand_times[axis] = int(x.shape[axis]) - helper.append_op( - type="expand", - inputs={"X": rsquare}, - outputs={"Out": rsquare_expanded}, - attrs={"expand_times": expand_times}) - - out = helper.create_tmp_variable(dtype=x.dtype) - helper.append_op( - type="elementwise_mul", - inputs={"X": x, - "Y": rsquare_expanded}, - outputs={"Out": out}) return out @@ -2666,8 +2733,7 @@ def topk(input, k, name=None): return values, indices -def edit_distance(input, label, normalized=True, ignored_tokens=None, - name=None): +def edit_distance(input, label, normalized=True, ignored_tokens=None): """ EditDistance operator computes the edit distances between a batch of hypothesis strings and their references. Edit distance, also called @@ -2681,26 +2747,23 @@ def edit_distance(input, label, normalized=True, ignored_tokens=None, "kitten" -> "sitten" -> "sittin" -> "sitting" - Input(Hyps) is a LoDTensor consisting of all the hypothesis strings with + The input is a LoDTensor consisting of all the hypothesis strings with the total number denoted by `batch_size`, and the separation is specified by the LoD information. And the `batch_size` reference strings are arranged - in order in the same way in the LoDTensor Input(Refs). + in order in the same way in the input LoDTensor. - Output(Out) contains the `batch_size` results and each stands for the edit + The output contains the `batch_size` results and each stands for the edit distance for a pair of strings respectively. If Attr(normalized) is true, the edit distance will be divided by the length of reference string. Args: - input(Variable): The indices for hypothesis strings. - label(Variable): The indices for reference strings. - - normalized(bool): Indicated whether to normalize the edit distance by + normalized(bool, default True): Indicated whether to normalize the edit distance by the length of reference string. - - ignored_tokens(list of int): Tokens that should be removed before + ignored_tokens(list, default None): Tokens that should be removed before calculating edit distance. 
+        name (str): The name of this layer. It is optional.

    Returns:
        Variable: sequence-to-sequence edit distance in shape [batch_size, 1].

@@ -2710,7 +2773,6 @@ def edit_distance(input, label, normalized=True, ignored_tokens=None,

            x = fluid.layers.data(name='x', shape=[8], dtype='float32')
            y = fluid.layers.data(name='y', shape=[7], dtype='float32')
-
            cost = fluid.layers.edit_distance(input=x,label=y)
    """
    helper = LayerHelper("edit_distance", **locals())
@@ -2790,10 +2852,11 @@ def ctc_greedy_decoder(input, blank, name=None):
             where Lp is the sum of all input sequences' length and
             num_classes is the true number of classes. (not including the blank label).

-        blank(int): the blank label index of Connectionist Temporal Classification (CTC) loss, which is in thehalf-opened interval [0, num_classes + 1).
+        blank(int): the blank label index of Connectionist Temporal Classification (CTC) loss, which is in the half-opened interval [0, num_classes + 1).
+        name (str): The name of this layer. It is optional.

    Returns:
        Variable: CTC greedy decode result. If all the sequences in result were
@@ -2830,35 +2892,33 @@ def warpctc(input, label, blank=0, norm_by_times=False):
    input tensor.

    Args:
-       input(Variable): (LodTensor, default: LoDTensor),
-         the unscaled probabilities of variable-length sequences,
+       input (Variable): The unscaled probabilities of variable-length sequences,
         which is a 2-D Tensor with LoD information.
         It's shape is [Lp, num_classes + 1], where Lp is the sum of all
         input sequences' length and num_classes is the true number of classes.
         (not including the blank label).
-       label(Variable): (LodTensor, default: LoDTensor), the ground truth
-         of variable-length sequence, which is a 2-D Tensor with LoD
-         information. It is of the shape [Lg, 1], where Lg is th sum of
-         all labels' length.
+       label (Variable): The ground truth of variable-length sequence,
+         which is a 2-D Tensor with LoD information. It is of the shape [Lg, 1],
+         where Lg is the sum of all labels' length.
-       blank: (int, default: 0), the blank label index of Connectionist
+       blank (int, default 0): The blank label index of Connectionist
         Temporal Classification (CTC) loss, which is in the half-opened
         interval [0, num_classes + 1).
-       norm_by_times: (bool, default: false), whether to normalize
-       the gradients by the number of time-step, which is also the
-       sequence's length. There is no need to normalize the gradients
-       if warpctc layer was follewed by a mean_op.
+       norm_by_times(bool, default false): Whether to normalize the gradients
+         by the number of time-step, which is also the sequence's length.
+         There is no need to normalize the gradients if warpctc layer was
+         followed by a mean_op.

    Returns:
        Variable: The Connectionist Temporal Classification (CTC) loss,
       which is a 2-D Tensor of the shape [batch_size, 1].

    Examples:
+
       ..
code-block:: python - y = layers.data( - name='y', shape=[11, 8], dtype='float32', lod_level=1) - y_predict = layers.data( - name='y_predict', shape=[11, 1], dtype='float32') - cost = layers.warpctc(input=y_predict, label=y) + + label = layers.data(shape=[11, 8], dtype='float32', lod_level=1) + predict = layers.data(shape=[11, 1], dtype='float32') + cost = layers.warpctc(input=predict, label=label) """ helper = LayerHelper('warpctc', **locals()) @@ -2888,16 +2948,21 @@ def sequence_reshape(input, new_dim): x is a LoDTensor: x.lod = [[0, 2, 6]] - x.data = [[1, 2], [3, 4], - [5, 6], [7, 8], [9, 10], [11, 12]] + x.data = [[1, 2], [3, 4], + [5, 6], [7, 8], + [9, 10], [11, 12]] x.dims = [6, 2] set new_dim = 4 then out is a LoDTensor: + out.lod = [[0, 1, 3]] - out.data = [[1, 2, 3, 4], - [5, 6, 7, 8], [9, 10, 11, 12]] + + out.data = [[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12]] + out.dims = [3, 4] Currently, only 1-level LoDTensor is supported and please make sure @@ -2905,18 +2970,18 @@ def sequence_reshape(input, new_dim): no remainder for each sequence. Args: - input (Variable): (LodTensor, default: LoDTensor), a 2-D LoDTensor - with shape being [N, M] where M for dimension. - new_dim (int): New dimension which the input LoDTensor is reshaped to. + + input (Variable): A 2-D LoDTensor with shape being [N, M] where M for dimension. + new_dim (int): New dimension that the input LoDTensor is reshaped to. Returns: + Variable: Reshaped LoDTensor according to new dimension. Examples: .. code-block:: python - x = fluid.layers.data(name='x', shape=[5, 20], - dtype='float32', lod_level=1) + x = fluid.layers.data(shape=[5, 20], dtype='float32', lod_level=1) x_reshaped = layers.sequence_reshape(input=x, new_dim=10) """ helper = LayerHelper('sequence_reshape', **locals()) @@ -2929,7 +2994,10 @@ def sequence_reshape(input, new_dim): return out -@autodoc() +# FIXME(wuyi): let docstring_checker.py understand @autodoc. +# For now, the comments in c++ use types like Tensor, but in python side +# the type is often "Variable", and arguments may vary. +@templatedoc(op_type="nce") def nce(input, label, num_total_classes, @@ -2937,6 +3005,21 @@ def nce(input, param_attr=None, bias_attr=None, num_neg_samples=None): + """ + ${comment} + + Args: + input (Variable): input variable. + label (Variable): label. + num_total_classes (int):${num_total_classes_comment} + sample_weight (int): ${sample_weight_comment} + param_attr (ParamAttr|None): attributes for parameter + bias_attr (ParamAttr|None): attributes for bias + num_neg_samples (int): ${num_neg_samples_comment} + + Returns: + Variable: output of nce layer. + """ helper = LayerHelper('nce', **locals()) assert isinstance(input, Variable) dim = input.shape[1] @@ -2994,8 +3077,9 @@ def transpose(x, perm, name=None): perm[i]-th dimension of `input`. Args: - input (Variable): (Tensor), A Tensor. - perm (list): A permutation of the dimensions of `input`. + x (Variable): The input Tensor. + perm (list): A permutation of the dimensions of `input`. + name (str): The name of this layer. It is optional. Returns: Variable: A transposed Tensor. @@ -3228,9 +3312,9 @@ def multiplex(inputs, index): row of the matrix, then `O[i]` is equal to :math:`I_{ID[i]}[i]`. Args: - inputs (list): A list of variables to gather from. All variables have the + inputs (list): A list of variables to gather from. All variables have the same shape and the rank is at least 2. 
- index (Variable): Tensor, index variable which is a 2-D tensor + index (Variable): Tensor, index variable which is a 2-D tensor with shape [M, 1] where M is the batch size. Returns: @@ -3429,7 +3513,8 @@ def autoincreased_step_counter(counter_name=None, begin=1, step=1): begin(int): The first value of this counter. step(int): The increment step between each execution. - Returns(Variable): The global run counter. + Returns: + Variable: The global run counter. """ helper = LayerHelper('global_step_counter') if counter_name is None: @@ -3490,7 +3575,7 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=True, name=None): the corresponding dimension of x. Args: - input(variable): The input tensor. + x(variable): The input tensor. shape(list): The new shape. At most one dimension of the new shape can be -1. actual_shape(variable): An optional input. If provided, reshape @@ -3502,8 +3587,10 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=True, name=None): inplace(bool): If this flag is set true, a new output tensor is created whose data is copied from input x, otherwise the output shares data with input without copying. + name (str): The name of this layer. It is optional. - Returns(variable): The output tensor. + Returns: + Variable: The output tensor. Examples: .. code-block:: python @@ -3929,22 +4016,25 @@ def dice_loss(input, label, epsilon=0.00001): return reduce_mean(dice_score) -def resize_bilinear(input, out_shape=None, scale=None, name=None): +def image_resize(input, + out_shape=None, + scale=None, + name=None, + resample='BILINEAR'): """ - The mathematical meaning of resize bilinear layer is - Bilinear interpolation. - Bilinear interpolation is an extension of linear interpolation for - interpolating functions of two variables (e.g. H-direction and - W-direction in this layer) on a rectilinear 2D grid. + Resize a batch of images. - For details, please refer to Wikipedia: - https://en.wikipedia.org/wiki/Bilinear_interpolation + The input must be a tensor of the shape (num_batches, channels, in_h, in_w), + and the resizing only applies on the last two dimensions(hight and width). + + Supporting resample methods: + 'BILINEAR' : Bilinear interpolation Args: - input (Variable): The input tensor of resize bilinear layer, + input (Variable): The input tensor of image resize layer, This is a 4-D tensor of the shape (num_batches, channels, in_h, in_w). - out_shape(list|tuple|Variable|None): Output shape of resize bilinear + out_shape(list|tuple|Variable|None): Output shape of image resize layer, the shape is (out_h, out_w). Default: None scale(float|None): The multiplier for the input height or width. @@ -3953,6 +4043,8 @@ def resize_bilinear(input, out_shape=None, scale=None, name=None): Default: None name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. + resample(str): The resample method. It can only be 'BILINEAR' currently. + Default: 'BILINEAR' Returns: out (Variable): The output is a 4-D tensor of the shape @@ -3961,8 +4053,12 @@ def resize_bilinear(input, out_shape=None, scale=None, name=None): Examples: .. 
code-block:: python - out = fluid.layers.resize_bilinear(input, out_shape=[12, 12]) + out = fluid.layers.image_resize(input, out_shape=[12, 12]) """ + resample_methods = {'BILINEAR': 'bilinear_interp'} + if resample not in resample_methods: + raise ValueError( + "The 'resample' of image_resize can only be 'BILINEAR' currently.") if out_shape is None and scale is None: raise ValueError("One of out_shape and scale must not be None") helper = LayerHelper('bilinear_interp', **locals()) @@ -3990,7 +4086,7 @@ def resize_bilinear(input, out_shape=None, scale=None, name=None): out = helper.create_tmp_variable(dtype) helper.append_op( - type="bilinear_interp", + type=resample_methods[resample], inputs=inputs, outputs={"Out": out}, attrs={"out_h": out_h, @@ -3998,6 +4094,62 @@ def resize_bilinear(input, out_shape=None, scale=None, name=None): return out +@templatedoc(op_type="bilinear_interp") +def resize_bilinear(input, out_shape=None, scale=None, name=None): + """ + ${comment} + + Args: + input(${x_type}): ${x_comment}. + + out_shape(${out_size_type}): ${out_size_comment}. + + scale(float|None): The multiplier for the input height or width. At + least one of out_shape or scale must be set. And out_shape has + a higher priority than scale. Default: None. + + name(str|None): The output variable name. + + Returns: + ${out_comment}. + """ + + return image_resize(input, out_shape, scale, name, 'BILINEAR') + + +def image_resize_short(input, out_short_len, resample='BILINEAR'): + """ + Resize a batch of images. The short edge of input images will be + resized to the given 'out_short_len'. The long edge of input images + will be resized proportionately to make images' length-width ratio + constant. + + Args: + input (Variable): The input tensor of image resize layer, + This is a 4-D tensor of the shape + (num_batches, channels, in_h, in_w). + out_short_len(int): The length of output images' short edge. + resample (str): resample method, default: BILINEAR. + + Returns: + out (Variable): The output is a 4-D tensor of the shape + (num_batches, channls, out_h, out_w). + """ + in_shape = input.shape + if len(in_shape) != 4: + raise ValueError( + "The rank of input must be 4 (num_batches, channels, in_h, in_w).") + hw = in_shape[2:4] + short_idx = hw.index(min(hw)) + long_idx = 1 - short_idx + out_shape = list(hw) + out_shape[short_idx] = out_short_len + out_shape[long_idx] = int( + float(out_shape[long_idx]) * (float(out_short_len) / float(hw[ + short_idx])) + 0.5) + return image_resize(input=input, out_shape=out_shape, resample=resample) + + def gather(input, index): """ Output is obtained by gathering entries of the outer-most dimension @@ -4005,7 +4157,7 @@ def gather(input, index): .. math:: - Out = X[Index] + Out = X[Index] .. code-block:: text @@ -4013,8 +4165,8 @@ def gather(input, index): Given: - X = [[1, 2], - [3, 4], + X = [[1, 2], + [3, 4], [5, 6]] Index = [1, 2] @@ -4032,6 +4184,7 @@ def gather(input, index): output (Variable): The output is a tensor with the same rank as input. Examples: + .. 
code-block:: python output = fluid.layers.gather(x, index) @@ -4047,10 +4200,31 @@ def gather(input, index): return out -def random_crop(input, shape, seed=1): +@templatedoc() +def random_crop(x, shape, seed=None): + """ + ${comment} + + Examples: + >>> img = fluid.layers.data("img", [3, 256, 256]) + >>> cropped_img = fluid.layers.random_crop(img, shape=[3, 224, 224]) + + Args: + x(${x_type}): ${x_comment} + shape(${shape_type}): ${shape_comment} + seed(int|${seed_type}|None): ${seed_comment} By default, the seed will + get from `random.randint(-65536, 65535)`. + + Returns: + ${out_comment} + + """ helper = LayerHelper("random_crop", **locals()) dtype = helper.input_dtype() out = helper.create_tmp_variable(dtype) + if seed is None: + seed = random.randint(-65536, 65535) + if isinstance(seed, int): seed_value = seed seed = helper.create_tmp_variable(dtype="int64") @@ -4069,9 +4243,59 @@ def random_crop(input, shape, seed=1): seed_out = helper.create_tmp_variable(dtype="int64") helper.append_op( type="random_crop", - inputs={"X": input, + inputs={"X": x, "Seed": seed}, outputs={"Out": out, "SeedOut": seed_out}, attrs={"shape": shape}) return out + + +def mean_iou(input, label, num_classes): + """ + Mean Intersection-Over-Union is a common evaluation metric for + semantic image segmentation, which first computes the IOU for each + semantic class and then computes the average over classes. + IOU is defined as follows: + + .. math:: + + IOU = true_positive / (true_positive + false_positive + false_negative). + + The predictions are accumulated in a confusion matrix and mean-IOU + is then calculated from it. + + + Args: + input (Variable): A Tensor of prediction results for semantic labels with type int32 or int64. + label (Variable): A Tensor of ground truth labels with type int32 or int64. + Its shape should be the same as input. + + Returns: + mean_iou (Variable): A Tensor representing the mean intersection-over-union with shape [1]. + out_wrong(Variable): A Tensor with shape [num_classes]. The wrong numbers of each class. + out_correct(Variable): A Tensor with shape [num_classes]. The correct numbers of each class. + + + Examples: + + .. code-block:: python + + iou, wrongs, corrects = fluid.layers.mean_iou(predict, label, num_classes) + """ + helper = LayerHelper('mean_iou', **locals()) + dtype = helper.input_dtype() + out_mean_iou = helper.create_tmp_variable(dtype='float32') + out_wrong = helper.create_tmp_variable(dtype='int32') + out_correct = helper.create_tmp_variable(dtype='int32') + helper.append_op( + type="mean_iou", + inputs={"predictions": input, + "labels": label}, + outputs={ + "out_mean_iou": out_mean_iou, + "out_wrong": out_wrong, + "out_correct": out_correct + }, + attrs={"num_classes": num_classes}) + return out_mean_iou, out_wrong, out_correct From a6e69d3cc12c922b9cfc13b653fa845050390e68 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Thu, 14 Jun 2018 20:40:02 +0800 Subject: [PATCH 23/56] Add doc for 'batch' --- python/paddle/fluid/layers/io.py | 45 ++++++++++++++++++++++++++++---- 1 file changed, 40 insertions(+), 5 deletions(-) diff --git a/python/paddle/fluid/layers/io.py b/python/paddle/fluid/layers/io.py index 6d6cdffe27..e22257ea34 100644 --- a/python/paddle/fluid/layers/io.py +++ b/python/paddle/fluid/layers/io.py @@ -549,6 +549,41 @@ def shuffle(reader, buffer_size): def batch(reader, batch_size): + """ + This layer is a reader decorator. It takes a reader and adds + 'batching' decoration on it. 
When reading with the result
+    decorated reader, output data will be automatically organized
+    to the form of batches.
+
+    Args:
+        reader(Variable): The reader to be decorated with 'batching'.
+        batch_size(int): The batch size.
+
+    Returns:
+        Variable: The reader which has been decorated with 'batching'.
+
+    Examples:
+        .. code-block:: python
+
+            raw_reader = fluid.layers.io.open_files(filenames=['./data1.recordio',
+                                                               './data2.recordio'],
+                                                    shapes=[(3,224,224), (1,)],
+                                                    lod_levels=[0, 0],
+                                                    dtypes=['float32', 'int64'],
+                                                    thread_num=2,
+                                                    buffer_size=2)
+            batch_reader = fluid.layers.batch(reader=raw_reader, batch_size=5)
+
+            # If we read data with the raw_reader:
+            #     data = fluid.layers.read_file(raw_reader)
+            # We can only get data instance by instance.
+            #
+            # However, if we read data with the batch_reader:
+            #     data = fluid.layers.read_file(batch_reader)
+            # Every 5 adjacent instances will be automatically combined together
+            # to become a batch. So what we get ('data') is a batch of data instead
+            # of an instance.
+    """
    return __create_unshared_decorated_reader__(
        'create_batch_reader', reader, {'batch_size': int(batch_size)})

@@ -571,20 +606,20 @@ def parallel(reader):
    {})

-def read_file(file_obj):
+def read_file(reader):
    """
-    Read data from a file object.
+    Execute the given reader and get data via it.

-    A file object is also a Variable. It can be a raw file object generated by
+    A reader is also a Variable. It can be a raw reader generated by
    `fluid.layers.open_files()` or a decorated one generated by
    `fluid.layers.double_buffer()` and so on.

    Args:
-        file_obj(Variable): The file object from where to read data.
+        reader(Variable): The reader to execute.

    Returns:
-        Tuple[Variable]: Data read from the given file object.
+        Tuple[Variable]: Data read via the given reader.

    Examples:
       .. code-block:: python
From 16a3d88a20b4c8e029efb837e6523ac651722003 Mon Sep 17 00:00:00 2001
From: dzhwinter 
Date: Thu, 14 Jun 2018 06:25:33 -0700
Subject: [PATCH 24/56] fix typo
---
 paddle/fluid/operators/clip_by_norm_op.cc  | 11 ++++++++++-
 python/paddle/fluid/layers/control_flow.py |  1 +
 python/paddle/fluid/layers/nn.py           |  6 ++----
 3 files changed, 13 insertions(+), 5 deletions(-)
diff --git a/paddle/fluid/operators/clip_by_norm_op.cc b/paddle/fluid/operators/clip_by_norm_op.cc
index c87bded034..eae86a373b 100644
--- a/paddle/fluid/operators/clip_by_norm_op.cc
+++ b/paddle/fluid/operators/clip_by_norm_op.cc
@@ -54,10 +54,19 @@ be linearly scaled to make the L2 norm of $Out$ equal to $max\_norm$, as
shown in the following formula:

$$
-Out = \frac{max\_norm * X}{norm(X)},
+Out = \\frac{max\\_norm * X}{norm(X)},
$$

where $norm(X)$ represents the L2 norm of $X$.
+
+Examples:
+    .. code-block:: python
+
+        data = fluid.layers.data(
+            name='data', shape=[2, 4, 6], dtype='float32')
+        reshaped = fluid.layers.clip_by_norm(
+            x=data, max_norm=0.5)
+
)DOC");
 }
};
diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py
index 80e8ff484a..be4dd41577 100644
--- a/python/paddle/fluid/layers/control_flow.py
+++ b/python/paddle/fluid/layers/control_flow.py
@@ -866,6 +866,7 @@ def array_write(x, i, array=None):
    Variable: The output LOD_TENSOR_ARRAY where the input tensor is written.

    Examples:
+
    ..
code-block::python tmp = fluid.layers.zeros(shape=[10], dtype='int32') diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 2c1f988828..2c7e04c1e6 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -3159,8 +3159,6 @@ def im2sequence(input, filter_size=1, stride=1, padding=0, name=None): Examples: - As an example: - .. code-block:: text Given: @@ -3204,7 +3202,7 @@ def im2sequence(input, filter_size=1, stride=1, padding=0, name=None): output.lod = [[0, 4, 8]] - The simple usage is: + Examples: .. code-block:: python @@ -3738,7 +3736,7 @@ def lrn(input, n=5, k=1.0, alpha=1e-4, beta=0.75, name=None): Output(i, x, y) = Input(i, x, y) / \left( k + \alpha \sum\limits^{\min(C, c + n/2)}_{j = \max(0, c - n/2)} - (Input(j, x, y))^2 \right)^{\beta} + (Input(j, x, y))^2\right)^{\beta} In the above equation: From d91060d300edf3c908f25b741adc999b065887da Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Fri, 15 Jun 2018 11:38:31 +0800 Subject: [PATCH 25/56] fix errors --- paddle/fluid/operators/activation_op.cc | 2 +- paddle/fluid/operators/pool_op.cc | 8 ++++---- python/paddle/fluid/layers/nn.py | 6 +++--- python/paddle/fluid/layers/tensor.py | 3 ++- 4 files changed, 10 insertions(+), 9 deletions(-) diff --git a/paddle/fluid/operators/activation_op.cc b/paddle/fluid/operators/activation_op.cc index af1d85047e..790c012fdb 100644 --- a/paddle/fluid/operators/activation_op.cc +++ b/paddle/fluid/operators/activation_op.cc @@ -444,7 +444,7 @@ class SwishOpMaker : public framework::OpProtoAndCheckerMaker { AddComment(R"DOC( Swish Activation Operator. -$$out = \frac{x}{1 + e^{- \beta x}}$$ +$$out = \\frac{x}{1 + e^{- \beta x}}$$ )DOC"); } diff --git a/paddle/fluid/operators/pool_op.cc b/paddle/fluid/operators/pool_op.cc index d94ddc7a53..f8ad63690e 100644 --- a/paddle/fluid/operators/pool_op.cc +++ b/paddle/fluid/operators/pool_op.cc @@ -224,17 +224,17 @@ Example: For ceil_mode = false: $$ - H_{out} = \frac{(H_{in} - ksize[0] + 2 * paddings[0])}{strides[0]} + 1 + H_{out} = \\frac{(H_{in} - ksize[0] + 2 * paddings[0])}{strides[0]} + 1 $$ $$ - W_{out} = \frac{(W_{in} - ksize[1] + 2 * paddings[1])}{strides[1]} + 1 + W_{out} = \\frac{(W_{in} - ksize[1] + 2 * paddings[1])}{strides[1]} + 1 $$ For ceil_mode = true: $$ - H_{out} = \frac{(H_{in} - ksize[0] + 2 * paddings[0] + strides[0] - 1)}{strides[0]} + 1 + H_{out} = \\frac{(H_{in} - ksize[0] + 2 * paddings[0] + strides[0] - 1)}{strides[0]} + 1 $$ $$ - W_{out} = \frac{(W_{in} - ksize[1] + 2 * paddings[1] + strides[1] - 1)}{strides[1]} + 1 + W_{out} = \\frac{(W_{in} - ksize[1] + 2 * paddings[1] + strides[1] - 1)}{strides[1]} + 1 $$ )DOC"); diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 1218766e8d..b073955e2f 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -1495,9 +1495,9 @@ def pool2d(input, Args: input (Variable): The input tensor of pooling operator. The format of - input tensor is NCHW, where N is batch size, C is the number of - channels, H is the height of the feature, and W is the width of - the feature. + input tensor is NCHW, where N is batch size, C is + the number of channels, H is the height of the + feature, and W is the width of the feature. pool_size (int): The side length of pooling windows. All pooling windows are squares with pool_size on a side. 
pool_type: ${pooling_type_comment} diff --git a/python/paddle/fluid/layers/tensor.py b/python/paddle/fluid/layers/tensor.py index 392fa6a422..81f42ff470 100644 --- a/python/paddle/fluid/layers/tensor.py +++ b/python/paddle/fluid/layers/tensor.py @@ -146,7 +146,8 @@ def concat(input, axis=0, name=None): Examples: .. code-block:: python - out = fluid.layers.concat(input=[Efirst, Esecond, Ethird, Efourth]) + + out = fluid.layers.concat(input=[Efirst, Esecond, Ethird, Efourth]) """ helper = LayerHelper('concat', **locals()) out = helper.create_tmp_variable(dtype=helper.input_dtype()) From 3380737cb7a23f5670e5fe9a7b3d4de193b7a1e0 Mon Sep 17 00:00:00 2001 From: "yi.wu" Date: Fri, 15 Jun 2018 12:18:17 +0800 Subject: [PATCH 26/56] update by comment --- paddle/fluid/operators/chunk_eval_op.cc | 67 +++++++------ paddle/fluid/operators/cos_sim_op.cc | 4 +- .../operators/detection/iou_similarity_op.cc | 4 +- paddle/fluid/operators/roi_pool_op.cc | 2 +- paddle/fluid/operators/scale_op.cc | 7 +- python/paddle/fluid/layers/io.py | 2 + python/paddle/fluid/layers/nn.py | 94 ++++++++++++++++++- 7 files changed, 133 insertions(+), 47 deletions(-) diff --git a/paddle/fluid/operators/chunk_eval_op.cc b/paddle/fluid/operators/chunk_eval_op.cc index 62636bb2f9..dc43c69be0 100644 --- a/paddle/fluid/operators/chunk_eval_op.cc +++ b/paddle/fluid/operators/chunk_eval_op.cc @@ -91,32 +91,31 @@ class ChunkEvalOpMaker : public framework::OpProtoAndCheckerMaker { "(int64_t). The number of chunks both in Inference and Label on the " "given mini-batch."); AddAttr("num_chunk_types", - "(int). The number of chunk type. See below for details."); - AddAttr( - "chunk_scheme", - "(string, default IOB). The labeling scheme indicating " - "how to encode the chunks. Must be IOB, IOE, IOBES or plain. See below " - "for details.") + "The number of chunk type. See the description for details."); + AddAttr("chunk_scheme", + "The labeling scheme indicating " + "how to encode the chunks. Must be IOB, IOE, IOBES or " + "plain. See the description" + "for details.") .SetDefault("IOB"); AddAttr>("excluded_chunk_types", - "(list) A list including chunk type ids " + "A list including chunk type ids " "indicating chunk types that are not counted. " - "See below for details.") + "See the description for details.") .SetDefault(std::vector{}); AddComment(R"DOC( For some basics of chunking, please refer to -‘Chunking with Support Vector Machines ’. +'Chunking with Support Vector Machines '. - -CheckEvalOp computes the precision, recall, and F1-score of chunk detection, +ChunkEvalOp computes the precision, recall, and F1-score of chunk detection, and supports IOB, IOE, IOBES and IO (also known as plain) tagging schemes. Here is a NER example of labeling for these tagging schemes: - - Li Ming works at Agricultural Bank of China in Beijing. - IO: I-PER I-PER O O I-ORG I-ORG I-ORG I-ORG O I-LOC - IOB: B-PER I-PER O O B-ORG I-ORG I-ORG I-ORG O B-LOC - IOE: I-PER E-PER O O I-ORG I-ORG I-ORG E-ORG O E-LOC - IOBES: B-PER E-PER O O I-ORG I-ORG I-ORG E-ORG O S-LOC + + Li Ming works at Agricultural Bank of China in Beijing. + IO I-PER I-PER O O I-ORG I-ORG I-ORG I-ORG O I-LOC + IOB B-PER I-PER O O B-ORG I-ORG I-ORG I-ORG O B-LOC + IOE I-PER E-PER O O I-ORG I-ORG I-ORG E-ORG O E-LOC + IOBES B-PER E-PER O O I-ORG I-ORG I-ORG E-ORG O S-LOC There are three chunk types(named entity types) including PER(person), ORG(organization) and LOC(LOCATION), and we can see that the labels have the form -. 
@@ -124,31 +123,31 @@ and LOC(LOCATION), and we can see that the labels have the form -("scale", - "(float, default 1.0)" - "The scaling factor of the scale operator.") + AddAttr("scale", "The scaling factor of the scale operator.") .SetDefault(1.0); } }; diff --git a/python/paddle/fluid/layers/io.py b/python/paddle/fluid/layers/io.py index 76ef82ddb0..15be646f47 100644 --- a/python/paddle/fluid/layers/io.py +++ b/python/paddle/fluid/layers/io.py @@ -109,6 +109,8 @@ class BlockGuardServ(BlockGuard): class ListenAndServ(object): """ + ***ListenAndServ Layer*** + ListenAndServ is used to create a rpc server bind and listen on specific TCP port, this server will run the sub-block when received variables from clients. diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 70b7aba27d..8e4780837b 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -825,6 +825,12 @@ def crf_decoding(input, param_attr, label=None): Returns: Variable: ${viterbi_path_comment} + + Examples: + .. code-block:: python + + crf_decode = layers.crf_decoding( + input=hidden, param_attr=ParamAttr(name="crfw")) """ helper = LayerHelper('crf_decoding', **locals()) transition = helper.get_parameter(param_attr.name) @@ -1043,9 +1049,70 @@ def chunk_eval(input, num_chunk_types, excluded_chunk_types=None): """ + ***Chunk Evaluator*** + This function computes and outputs the precision, recall and F1-score of chunk detection. + For some basics of chunking, please refer to + 'Chunking with Support Vector Machines '. + + ChunkEvalOp computes the precision, recall, and F1-score of chunk detection, + and supports IOB, IOE, IOBES and IO (also known as plain) tagging schemes. + Here is a NER example of labeling for these tagging schemes: + + .. code-block:: python + + ====== ====== ====== ===== == ============ ===== ===== ===== == ========= + Li Ming works at Agricultural Bank of China in Beijing. + ====== ====== ====== ===== == ============ ===== ===== ===== == ========= + IO I-PER I-PER O O I-ORG I-ORG I-ORG I-ORG O I-LOC + IOB B-PER I-PER O O B-ORG I-ORG I-ORG I-ORG O B-LOC + IOE I-PER E-PER O O I-ORG I-ORG I-ORG E-ORG O E-LOC + IOBES B-PER E-PER O O I-ORG I-ORG I-ORG E-ORG O S-LOC + ====== ====== ====== ===== == ============ ===== ===== ===== == ========= + + There are three chunk types(named entity types) including PER(person), ORG(organization) + and LOC(LOCATION), and we can see that the labels have the form -. + + Since the calculations actually use label ids rather than labels, extra attention + should be paid when mapping labels to ids to make CheckEvalOp work. The key point + is that the listed equations are satisfied by ids. + + .. code-block:: python + + tag_type = label % num_tag_type + chunk_type = label / num_tag_type + + where `num_tag_type` is the num of tag types in the tagging scheme, `num_chunk_type` + is the num of chunk types, and `tag_type` get its value from the following table. + + .. code-block:: python + + Scheme Begin Inside End Single + plain 0 - - - + IOB 0 1 - - + IOE - 0 1 - + IOBES 0 1 2 3 + + Still use NER as example, assuming the tagging scheme is IOB while chunk types are ORG, + PER and LOC. To satisfy the above equations, the label map can be like this: + + .. code-block:: python + + B-ORG 0 + I-ORG 1 + B-PER 2 + I-PER 3 + B-LOC 4 + I-LOC 5 + O 6 + + It's not hard to verify the equations noting that the num of chunk types + is 3 and the num of tag types in IOB scheme is 2. 
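+
+    A quick check of the two equations in plain Python (an illustrative
+    sketch, not part of the API; the values are taken from the IOB label
+    map above):
+
+    .. code-block:: python
+
+        num_tag_type = 2                      # IOB scheme: Begin and Inside
+        label = 5                             # the id of I-LOC in the map above
+        tag_type = label % num_tag_type       # 1, i.e. an Inside tag
+        chunk_type = label // num_tag_type    # 2 (integer division), i.e. LOC
+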
For example, the label + id of I-LOC is 5, the tag type id of I-LOC is 1, and the chunk type id of + I-LOC is 2, which consistent with the results from the equations. + Args: input (Variable): prediction output of the network. label (Variable): label of the test data set. @@ -1057,6 +1124,19 @@ def chunk_eval(input, tuple: tuple containing: precision, recall, f1_score, num_infer_chunks, num_label_chunks, num_correct_chunks + + Examples: + .. code-block:: python + + crf = fluid.layers.linear_chain_crf( + input=hidden, label=label, param_attr=ParamAttr(name="crfw")) + crf_decode = fluid.layers.crf_decoding( + input=hidden, param_attr=ParamAttr(name="crfw")) + fluid.layers.chunk_eval( + input=crf_decode, + label=label, + chunk_scheme="IOB", + num_chunk_types=(label_dict_len - 1) / 2) """ helper = LayerHelper("chunk_eval", **locals()) @@ -1803,7 +1883,7 @@ def conv2d_transpose(input, act=None, name=None): """ - **Convlution2D transpose layer** + ***Convlution2D Transpose Layer**** The convolution2D transpose layer calculates the output based on the input, filter, and dilations, strides, paddings. Input(Input) and output(Output) @@ -1832,13 +1912,13 @@ def conv2d_transpose(input, - Input: - Input shape: $(N, C_{in}, H_{in}, W_{in})$ + Input shape: :math:`(N, C_{in}, H_{in}, W_{in})` - Filter shape: $(C_{in}, C_{out}, H_f, W_f)$ + Filter shape: :math:`(C_{in}, C_{out}, H_f, W_f)` - Output: - Output shape: $(N, C_{out}, H_{out}, W_{out})$ + Output shape: :math:`(N, C_{out}, H_{out}, W_{out})` Where @@ -3513,6 +3593,12 @@ def autoincreased_step_counter(counter_name=None, begin=1, step=1): Returns: Variable: The global run counter. + + Examples: + .. code-block:: python + + global_step = fluid.layers.autoincreased_step_counter( + counter_name='@LR_DECAY_COUNTER@', begin=begin, step=1) """ helper = LayerHelper('global_step_counter') if counter_name is None: From 1f38cbf79b8369fbfc6fa626446b9c98a7314426 Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Thu, 14 Jun 2018 21:26:51 -0700 Subject: [PATCH 27/56] "fix some typo" --- paddle/fluid/operators/uniform_random_batch_size_like_op.cc | 2 +- python/paddle/fluid/layers/control_flow.py | 3 +-- python/paddle/fluid/layers/nn.py | 2 +- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/paddle/fluid/operators/uniform_random_batch_size_like_op.cc b/paddle/fluid/operators/uniform_random_batch_size_like_op.cc index 78fee77df8..192366fd44 100644 --- a/paddle/fluid/operators/uniform_random_batch_size_like_op.cc +++ b/paddle/fluid/operators/uniform_random_batch_size_like_op.cc @@ -35,7 +35,7 @@ class UniformRandomBatchSizeLikeOpMaker : public BatchSizeLikeOpMaker { protected: void Apply() override { AddComment(R"DOC( -Uniform random operator +UniformRandomBatchSizeLike operator. This operator initializes a tensor with the same batch_size as the Input tensor with random values sampled from a uniform distribution. diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py index 850945001c..f4e95dd363 100644 --- a/python/paddle/fluid/layers/control_flow.py +++ b/python/paddle/fluid/layers/control_flow.py @@ -866,8 +866,7 @@ def array_write(x, i, array=None): Variable: The output LOD_TENSOR_ARRAY where the input tensor is written. Examples: - - .. code-block::python + .. 
code-block:: python tmp = fluid.layers.zeros(shape=[10], dtype='int32') i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10) diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index b5745e20f1..dc1124a512 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -2935,7 +2935,7 @@ def split(input, num_or_sections, dim=-1, name=None): will be named automatically. Returns: - List: The list of segmented tensor variables. + list(Variable): The list of segmented tensor variables. Examples: .. code-block:: python From 9397a2e0c8128e2de2c8eeb8fa5d446f1a6f2f13 Mon Sep 17 00:00:00 2001 From: "yi.wu" Date: Fri, 15 Jun 2018 12:38:36 +0800 Subject: [PATCH 28/56] fix format --- python/paddle/fluid/layers/io.py | 5 +---- python/paddle/fluid/layers/nn.py | 4 ++-- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/python/paddle/fluid/layers/io.py b/python/paddle/fluid/layers/io.py index 15be646f47..906364f693 100644 --- a/python/paddle/fluid/layers/io.py +++ b/python/paddle/fluid/layers/io.py @@ -109,7 +109,7 @@ class BlockGuardServ(BlockGuard): class ListenAndServ(object): """ - ***ListenAndServ Layer*** + **ListenAndServ Layer** ListenAndServ is used to create a rpc server bind and listen on specific TCP port, this server will run the sub-block when @@ -120,9 +120,6 @@ class ListenAndServ(object): inputs(list): a list of variables that the server will get from clients. fan_in(int): how many client are expected to report to this server, default: 1. optimizer_mode(bool): whether to run the server as a parameter server, default: True. - - Returns: - None Examples: .. code-block:: python diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 8e4780837b..3704e1149a 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -1049,7 +1049,7 @@ def chunk_eval(input, num_chunk_types, excluded_chunk_types=None): """ - ***Chunk Evaluator*** + **Chunk Evaluator** This function computes and outputs the precision, recall and F1-score of chunk detection. @@ -1883,7 +1883,7 @@ def conv2d_transpose(input, act=None, name=None): """ - ***Convlution2D Transpose Layer**** + **Convlution2D Transpose Layer** The convolution2D transpose layer calculates the output based on the input, filter, and dilations, strides, paddings. Input(Input) and output(Output) From dbe0fe6d181fb8e747856d93e60ae78216e6734c Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Thu, 14 Jun 2018 21:44:17 -0700 Subject: [PATCH 29/56] 'fix typo' --- python/paddle/fluid/layers/nn.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index dc1124a512..3456a1bde8 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -2051,15 +2051,25 @@ def layer_norm(input, def beam_search_decode(ids, scores, name=None): """ + This layers is to pack the output of beam search layer into sentences and + associated scores. It is usually called after the beam search layer. + ${beam_search_decode} Args: ids (Variable): ${ids_comment} scores (Variable): ${scores_comment} name (str): The name of this layer. It is optional. - + Returns: - tuple: a tuple of two output variable: sentence_ids, sentence_scores + tuple(Variable): a tuple of two output variable: sentence_ids, sentence_scores + + Examples: + .. 
code-block:: python + ids, scores = fluid.layers.beam_search( + pre_ids, ids, scores, beam_size, end_id) + sentence_ids, sentence_scores = fluid.layers.beam_search_decode( + ids, scores) """ helper = LayerHelper('beam_search_decode', **locals()) sentence_ids = helper.create_tmp_variable(dtype=ids.dtype) From dd711c3754ddce6bf1e5ee9b52964870aaa7f944 Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Thu, 14 Jun 2018 21:55:21 -0700 Subject: [PATCH 30/56] "add beam search" --- python/paddle/fluid/layers/nn.py | 26 +++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 3456a1bde8..bdbb6c47e2 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -834,11 +834,14 @@ def linear_chain_crf(input, label, param_attr=None): Args: input(${emission_type}): ${emission_comment} + input(${transition_type}): ${transition_comment} label(${label_type}): ${label_comment} param_attr(ParamAttr): The attribute of the learnable parameter. Returns: ${log_likelihood_comment} + ${transitionexps_comment} + ${emissionexps_comment} """ helper = LayerHelper('linear_chain_crf', **locals()) @@ -1170,10 +1173,6 @@ def sequence_conv(input, Variable: output of sequence_conv """ - # FIXME(dzh) : want to unify the argument of python layer - # function. So we ignore some unecessary attributes. - # such as, padding_trainable, context_start. - helper = LayerHelper('sequence_conv', **locals()) dtype = helper.input_dtype() filter_shape = [filter_size * input.shape[1], num_filters] @@ -2051,18 +2050,31 @@ def layer_norm(input, def beam_search_decode(ids, scores, name=None): """ + Beam Search Decode + This layers is to pack the output of beam search layer into sentences and associated scores. It is usually called after the beam search layer. + Typically, the output of beam search layer is a tensor of selected ids, with + a tensor of the score of each id. Beam search layer's output ids, however, + are generated directly during the tree search, and they are stacked by each + level of the search tree. Thus we need to reorganize them into sentences, + based on the score of each id. This layer takes the output of beam search + layer as input and repack them into sentences. ${beam_search_decode} Args: - ids (Variable): ${ids_comment} - scores (Variable): ${scores_comment} + ids (Variable): The selected ids, output of beam search layer. + scores (Variable): The associated scores of the ids, out put of beam + search layer. name (str): The name of this layer. It is optional. Returns: - tuple(Variable): a tuple of two output variable: sentence_ids, sentence_scores + tuple(Variable): a tuple of two output tensors: sentence_ids, sentence_scores. + sentence_ids is a tensor with shape [size, length], where size is the + beam size of beam search, and length is the length of each sentence. + Note that the length of sentences may vary. + sentence_scores is a tensor with the same shape as sentence_ids. Examples: .. 
code-block:: python
+
+            ids, scores = fluid.layers.beam_search(
+                pre_ids, ids, scores, beam_size, end_id)
+            sentence_ids, sentence_scores = fluid.layers.beam_search_decode(
+                ids, scores)
    """
    helper = LayerHelper('beam_search_decode', **locals())
    sentence_ids = helper.create_tmp_variable(dtype=ids.dtype)
From 6e3d168b9d3b6147efa7740023286b866d7d6e4a Mon Sep 17 00:00:00 2001
From: dzhwinter 
Date: Thu, 14 Jun 2018 22:00:59 -0700
Subject: [PATCH 31/56] "fix latex"
---
 python/paddle/fluid/layers/nn.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index bdbb6c47e2..283d3c4b2a 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -4210,8 +4210,8 @@ def lrn(input, n=5, k=1.0, alpha=1e-4, beta=0.75, name=None):

    .. math::

-        Output(i, x, y) = Input(i, x, y) / \left(
-        k + \alpha \sum\limits^{\min(C, c + n/2)}_{j = \max(0, c - n/2)}
+        Output(i, x, y) = Input(i, x, y) / \left( \\
+        k + \alpha \sum\limits^{\min(C, c + n/2)}_{j = \max(0, c - n/2)} \\
        (Input(j, x, y))^2\right)^{\beta}

    In the above equation:
From 541ddf7e24e263abbee3b342a69100b99ec413d7 Mon Sep 17 00:00:00 2001
From: dzhwinter 
Date: Thu, 14 Jun 2018 22:05:36 -0700
Subject: [PATCH 32/56] squash to one pr
---
 python/paddle/fluid/framework.py     | 31 ++++++++++++++++++++++++++++
 python/paddle/fluid/layers/metric.py | 26 ++++++++++++++++++++++-
 2 files changed, 56 insertions(+), 1 deletion(-)
diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index f6438c82ac..595f67961f 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -1034,6 +1034,37 @@ class Block(object):

class Program(object):
+    """
+    Python Program. Beneath it is a ProgramDesc, which is used to
+    create the C++ Program. A Program is a self-contained,
+    programming-language-like container. It has at least one Block;
+    when control flow ops like conditional_block or while_op are
+    included, it will contain nested blocks.
+    Please reference the framework.proto for details.
+
+    Notes: we have default_startup_program and default_main_program
+    by default, and the pair shares the parameters.
+    The default_startup_program runs only once to initialize parameters,
+    while default_main_program runs in every mini-batch and adjusts the weights.
+
+    Args:
+        None
+
+    Returns:
+        Python Program
+
+    Examples:
+        .. code-block:: python
+
+            main_program = Program()
+            startup_program = Program()
+            with fluid.program_guard(main_program=main_program, startup_program=startup_program):
+                x = fluid.layers.data(name="x", shape=[-1, 784], dtype='float32')
+                y = fluid.layers.data(name="y", shape=[-1, 1], dtype='int32')
+                fc_out = fluid.layers.fc(input=x, size=10, act="relu")
+
+    """
+
    def __init__(self):
        self.desc = core.ProgramDesc()
        self.blocks = [Block(self, 0)]
diff --git a/python/paddle/fluid/layers/metric.py b/python/paddle/fluid/layers/metric.py
index a1c64ce277..069c060da4 100644
--- a/python/paddle/fluid/layers/metric.py
+++ b/python/paddle/fluid/layers/metric.py
@@ -27,8 +27,32 @@ __all__ = ['accuracy', 'auc']

def accuracy(input, label, k=1, correct=None, total=None):
    """
+    accuracy layer.
+    Refer to https://en.wikipedia.org/wiki/Precision_and_recall
+
    This function computes the accuracy using the input and label.
-    The output is the top k inputs and their indices.
+    If the correct label occurs in the top k predictions, then correct will increment by one.
+    Note: the dtype of accuracy is determined by the input; the input and label dtypes can be different.
+
+    Args:
+        input(Variable): The input of accuracy layer, which is the predictions of the network.
+            Carrying LoD information is supported.
+        label(Variable): The label of the dataset.
+        k(int): The top k predictions for each class will be checked.
+        correct(Variable): The correct predictions count.
+        total(Variable): The total entries count.
+
+    Returns:
+        Variable: The correct rate.
+
+    Examples:
+        .. code-block:: python
+
+            data = fluid.layers.data(name="data", shape=[-1, 32, 32], dtype="float32")
+            label = fluid.layers.data(name="label", shape=[-1,1], dtype="int32")
+            predict = fluid.layers.fc(input=data, size=10)
+            acc = fluid.layers.accuracy(input=predict, label=label, k=5)
+
    """
    helper = LayerHelper("accuracy", **locals())
    topk_out, topk_indices = nn.topk(input, k=k)
From 9de779f1cfae9daba1b38b8df829cb0e56247592 Mon Sep 17 00:00:00 2001
From: qiaolongfei 
Date: Fri, 15 Jun 2018 13:18:33 +0800
Subject: [PATCH 33/56] update switch class
---
 python/paddle/fluid/layers/control_flow.py | 25 ++++++++++++++++------
 1 file changed, 18 insertions(+), 7 deletions(-)
diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py
index 209a767e73..2bc43c5ce9 100644
--- a/python/paddle/fluid/layers/control_flow.py
+++ b/python/paddle/fluid/layers/control_flow.py
@@ -1156,16 +1156,14 @@ class ConditionalBlock(object):

class Switch(object):
    """
-    **Switch Class**
-
-    Many programming languages provide `switch` as a generalization of `if-elif-else`.
-    Switch class works just like a `if-elif-else`.
+    Switch class works just like an `if-elif-else`. It can be used in a learning
+    rate scheduler to modify the learning rate.

    The Semantics:

    1. A `switch` control-flow checks cases one-by-one.

-    2. The condition of each case is a boolean value, which is a scalar.
+    2. The condition of each case is a boolean value, which is a scalar Variable.

    3. It runs the first matched case, or the default case if there is one.

@@ -1174,9 +1172,22 @@ class Switch(object):
    Examples:
        .. code-block:: python

-            with fluid.control_flow.Switch() as switch:
+            lr = fluid.layers.tensor.create_global_var(
+                shape=[1],
+                value=0.0,
+                dtype='float32',
+                persistable=True,
+                name="learning_rate")
+            one_var = fluid.layers.tensor.fill_constant(
+                shape=[1], dtype='float32', value=1.0)
+            two_var = fluid.layers.tensor.fill_constant(
+                shape=[1], dtype='float32', value=2.0)
+
+            with fluid.layers.control_flow.Switch() as switch:
                with switch.case(global_step == zero_var):
-                    fluid.tensor.assign(input=one_var, output=div_res)
+                    fluid.layers.tensor.assign(input=one_var, output=lr)
+                with switch.default():
+                    fluid.layers.tensor.assign(input=two_var, output=lr)

    """
From e2783bb6afeb4e5b4160ff4283c18672f2f0632e Mon Sep 17 00:00:00 2001
From: qiaolongfei 
Date: Fri, 15 Jun 2018 14:22:21 +0800
Subject: [PATCH 34/56] update split_lod_tensor doc
---
 python/paddle/fluid/layers/control_flow.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py
index 2bc43c5ce9..e261e3f63a 100644
--- a/python/paddle/fluid/layers/control_flow.py
+++ b/python/paddle/fluid/layers/control_flow.py
@@ -69,8 +69,10 @@ def split_lod_tensor(input, mask, level=0):
        level(int): The specific lod level to split.

    Returns:
-        Variable: The true branch of tensor as per the mask applied to input.
-        Variable: The false branch of tensor as per the mask applied to input.
+        tuple(Variable, Variable):
+        The true branch of tensor as per the mask applied to input.
+
+        The false branch of tensor as per the mask applied to input.

    Examples:
       ..
code-block:: python From f2c9c33f158890e0cfba827db3a328c317962d4c Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Thu, 14 Jun 2018 23:24:32 -0700 Subject: [PATCH 35/56] "fix lrn" --- python/paddle/fluid/layers/nn.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 283d3c4b2a..2d33742e73 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -839,9 +839,9 @@ def linear_chain_crf(input, label, param_attr=None): param_attr(ParamAttr): The attribute of the learnable parameter. Returns: - ${log_likelihood_comment} - ${transitionexps_comment} - ${emissionexps_comment} + output(${emission_exps_type}): ${emission_exps_comment} \n + output(${transition_exps_type}): ${transition_exps_comment} \n + output(${log_likelihood_type}): ${log_likelihood_comment} """ helper = LayerHelper('linear_chain_crf', **locals()) @@ -4210,7 +4210,7 @@ def lrn(input, n=5, k=1.0, alpha=1e-4, beta=0.75, name=None): .. math:: - Output(i, x, y) = Input(i, x, y) / \left( \\ + Output(i, x, y) = Input(i, x, y) / \left( \\ k + \alpha \sum\limits^{\min(C, c + n/2)}_{j = \max(0, c - n/2)} \\ (Input(j, x, y))^2\right)^{\beta} From 0d86f13ce2455a1ae4f24d4e5550d6256530122f Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Thu, 14 Jun 2018 23:43:18 -0700 Subject: [PATCH 36/56] "fix math" --- python/paddle/fluid/layers/nn.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 2d33742e73..8c8d6328eb 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -4210,9 +4210,7 @@ def lrn(input, n=5, k=1.0, alpha=1e-4, beta=0.75, name=None): .. math:: - Output(i, x, y) = Input(i, x, y) / \left( \\ - k + \alpha \sum\limits^{\min(C, c + n/2)}_{j = \max(0, c - n/2)} \\ - (Input(j, x, y))^2\right)^{\beta} + Output(i, x, y) = Input(i, x, y) / \\left(k + \\alpha \\sum\\limits^{\\min(C, c + n/2)}_{j = \\max(0, c - n/2)}(Input(j, x, y))^2\\right)^{\\beta} In the above equation: From 1c9fc655d0b4745f74940f99acc8421faf8656f5 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Fri, 15 Jun 2018 15:16:14 +0800 Subject: [PATCH 37/56] update --- python/paddle/fluid/layers/detection.py | 73 ++++++++++--------- .../fluid/layers/learning_rate_scheduler.py | 12 ++- python/paddle/fluid/layers/tensor.py | 8 +- 3 files changed, 48 insertions(+), 45 deletions(-) diff --git a/python/paddle/fluid/layers/detection.py b/python/paddle/fluid/layers/detection.py index edf528a595..dacb31f8b6 100644 --- a/python/paddle/fluid/layers/detection.py +++ b/python/paddle/fluid/layers/detection.py @@ -603,7 +603,7 @@ def prior_box(input, offset=0.5, name=None): """ - **Prior box operator** + **Prior Box Operator** Generate prior boxes for SSD(Single Shot MultiBox Detector) algorithm. Each position of the input produce N prior boxes, N is determined by @@ -632,26 +632,30 @@ def prior_box(input, name(str): Name of the prior box op. Default: None. Returns: - boxes(Variable): the output prior boxes of PriorBox. - The layout is [H, W, num_priors, 4]. - H is the height of input, W is the width of input, - num_priors is the total - box count of each position of input. - Variances(Variable): the expanded variances of PriorBox. - The layout is [H, W, num_priors, 4]. 
- H is the height of input, W is the width of input - num_priors is the total - box count of each position of input + tuple: A tuple with two Variable (boxes, variances) + + boxes: the output prior boxes of PriorBox. + The layout is [H, W, num_priors, 4]. + H is the height of input, W is the width of input, + num_priors is the total + box count of each position of input. + + variances: the expanded variances of PriorBox. + The layout is [H, W, num_priors, 4]. + H is the height of input, W is the width of input + num_priors is the total + box count of each position of input Examples: .. code-block:: python - box, var = prior_box( - input=conv1, - image=images, - min_sizes=[100.], - flip=True, - clip=True) + + box, var = fluid.layers.prior_box( + input=conv1, + image=images, + min_sizes=[100.], + flip=True, + clip=True) """ helper = LayerHelper("prior_box", **locals()) dtype = helper.input_dtype() @@ -721,11 +725,9 @@ def multi_box_head(inputs, stride=1, name=None): """ - **Prior_boxes** - Generate prior boxes for SSD(Single Shot MultiBox Detector) algorithm. The details of this algorithm, please refer the - section 2.2 of SSD paper (SSD: Single Shot MultiBox Detector) + section 2.2 of SSD paper `SSD: Single Shot MultiBox Detector `_ . Args: @@ -766,24 +768,27 @@ def multi_box_head(inputs, name(str): Name of the prior box layer. Default: None. Returns: - mbox_loc(Variable): The predicted boxes' location of the inputs. - The layout is [N, H*W*Priors, 4]. where Priors - is the number of predicted boxes each position of each input. - mbox_conf(Variable): The predicted boxes' confidence of the inputs. - The layout is [N, H*W*Priors, C]. where Priors - is the number of predicted boxes each position of each input - and C is the number of Classes. - boxes(Variable): the output prior boxes of PriorBox. - The layout is [num_priors, 4]. num_priors is the total - box count of each position of inputs. - Variances(Variable): the expanded variances of PriorBox. - The layout is [num_priors, 4]. num_priors is the total - box count of each position of inputs + tuple: A tuple with four Variables. (mbox_loc, mbox_conf, boxes, variances) + + mbox_loc: The predicted boxes' location of the inputs. The layout + is [N, H*W*Priors, 4]. where Priors is the number of predicted + boxes each position of each input. + + mbox_conf: The predicted boxes' confidence of the inputs. The layout + is [N, H*W*Priors, C]. where Priors is the number of predicted boxes + each position of each input and C is the number of Classes. + + boxes: the output prior boxes of PriorBox. The layout is [num_priors, 4]. + num_priors is the total box count of each position of inputs. + + variances: the expanded variances of PriorBox. The layout is + [num_priors, 4]. num_priors is the total box count of each position of inputs Examples: .. code-block:: python - mbox_locs, mbox_confs, box, var = layers.multi_box_head( + + mbox_locs, mbox_confs, box, var = fluid.layers.multi_box_head( inputs=[conv1, conv2, conv3, conv4, conv5, conv5], image=images, num_classes=21, diff --git a/python/paddle/fluid/layers/learning_rate_scheduler.py b/python/paddle/fluid/layers/learning_rate_scheduler.py index 2dbc51c23f..e76f15d838 100644 --- a/python/paddle/fluid/layers/learning_rate_scheduler.py +++ b/python/paddle/fluid/layers/learning_rate_scheduler.py @@ -163,8 +163,6 @@ def polynomial_decay(learning_rate, power=1.0, cycle=False): """ - **Polynomial Decay** - Applies polynomial decay to the initial learning rate. .. 
code-block:: python @@ -178,14 +176,14 @@ def polynomial_decay(learning_rate, Args: learning_rate(Variable|float32): A scalar float32 value or a Variable. This - will be the initial learning rate during training + will be the initial learning rate during training. decay_steps(int32): A Python `int32` number. - end_learning_rate(float, Default: 0.0001): A Python `float` number. - power(float, Default: 1.0): A Python `float` number - cycle(bool, Default: False): Boolean. If set true, decay the learning rate every decay_steps. + end_learning_rate(float): A Python `float` number. + power(float): A Python `float` number. + cycle(bool): If set true, decay the learning rate every decay_steps. Returns: - The decayed learning rate + Variable: The decayed learning rate """ global_step = _decay_step_counter() diff --git a/python/paddle/fluid/layers/tensor.py b/python/paddle/fluid/layers/tensor.py index 25505e4427..978f7dde29 100644 --- a/python/paddle/fluid/layers/tensor.py +++ b/python/paddle/fluid/layers/tensor.py @@ -40,14 +40,14 @@ __all__ = [ def create_tensor(dtype, name=None, persistable=False): """ - **Create a Tensor** + Create an variable, which will hold a LoDTensor with data type dtype. Args: - dtype (string): 'float32'|'int32'|..., the data type of the + dtype(string): 'float32'|'int32'|..., the data type of the created tensor. - name (string, Default: None): The name of the created tensor, if not set, + name(string): The name of the created tensor, if not set, the name will be a random unique one. - persistable (bool, Default: False): Set the persistable flag of the create tensor. + persistable(bool): Set the persistable flag of the create tensor. Returns: Variable: The tensor variable storing the created tensor. From 6ac8383f28a572cbfcd6d2ec87a87174faa26a12 Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Fri, 15 Jun 2018 00:28:02 -0700 Subject: [PATCH 38/56] "fix based comments" --- .../operators/uniform_random_batch_size_like_op.cc | 2 +- python/paddle/fluid/framework.py | 10 ++++++++-- python/paddle/fluid/layers/control_flow.py | 3 +-- python/paddle/fluid/layers/nn.py | 2 -- 4 files changed, 10 insertions(+), 7 deletions(-) diff --git a/paddle/fluid/operators/uniform_random_batch_size_like_op.cc b/paddle/fluid/operators/uniform_random_batch_size_like_op.cc index 192366fd44..75d6181749 100644 --- a/paddle/fluid/operators/uniform_random_batch_size_like_op.cc +++ b/paddle/fluid/operators/uniform_random_batch_size_like_op.cc @@ -38,7 +38,7 @@ class UniformRandomBatchSizeLikeOpMaker : public BatchSizeLikeOpMaker { UniformRandomBatchSizeLike operator. This operator initializes a tensor with the same batch_size as the Input tensor - with random values sampled from a uniform distribution. +with random values sampled from a uniform distribution. )DOC"); AddAttr("min", diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py index 595f67961f..df0625649d 100644 --- a/python/paddle/fluid/framework.py +++ b/python/paddle/fluid/framework.py @@ -1130,6 +1130,8 @@ class Program(object): def clone(self, for_test=False): """Clone the Program object + Args: + for_test(bool): indicate whether clone for test. Set for_test to False when we want to clone the program for training. Set for_test to True when we want to clone the program for testing. @@ -1140,8 +1142,9 @@ class Program(object): the is_test attributes in these operators will be set to True for testing purposes, otherwise, they remain unchanged. - Returns(Program): - The cloned Program object. 
+ Returns: + Program: The cloned Program object. + """ if for_test: p = self.inference_optimize() @@ -1259,6 +1262,7 @@ class Program(object): def copy_param_info_from(self, other): """ Copy the information of parameters from other program. + Args: other(Program): Other program @@ -1277,6 +1281,7 @@ class Program(object): def copy_data_info_from(self, other): """ Copy the information of data variables from other program. + Args: other(Program): Other program @@ -1330,6 +1335,7 @@ class Parameter(Variable): def to_string(self, throw_on_error, with_details=False): """ To debug string. + Args: throw_on_error(bool): raise exception when self is not initialized when throw_on_error is True diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py index f4e95dd363..df2f9dedab 100644 --- a/python/paddle/fluid/layers/control_flow.py +++ b/python/paddle/fluid/layers/control_flow.py @@ -823,8 +823,7 @@ def increment(x, value=1.0, in_place=True): in_place (bool): If the increment should be performed in-place. Returns: - Variable: The tensor variable storing the transformation of - element-wise increment of each value in the input. + Variable: The elementwise-incremented object. Examples: .. code-block:: python diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 8c8d6328eb..4fd353171a 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -2061,8 +2061,6 @@ def beam_search_decode(ids, scores, name=None): based on the score of each id. This layer takes the output of beam search layer as input and repack them into sentences. - ${beam_search_decode} - Args: ids (Variable): The selected ids, output of beam search layer. scores (Variable): The associated scores of the ids, out put of beam From b00fbd962c0bff30307bcc833761e6a0e2ca075a Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Fri, 15 Jun 2018 15:30:04 +0800 Subject: [PATCH 39/56] fix error --- python/paddle/fluid/layers/io.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/paddle/fluid/layers/io.py b/python/paddle/fluid/layers/io.py index e397515ff6..b004312d2d 100644 --- a/python/paddle/fluid/layers/io.py +++ b/python/paddle/fluid/layers/io.py @@ -637,10 +637,10 @@ def read_file(reader): out = [ helper.create_tmp_variable( stop_gradient=True, dtype='float32') - for _ in range(len(file_obj.desc.shapes())) + for _ in range(len(reader.desc.shapes())) ] helper.append_op( - type='read', inputs={'Reader': [file_obj]}, outputs={'Out': out}) + type='read', inputs={'Reader': [reader]}, outputs={'Out': out}) if len(out) == 1: return out[0] else: From 7ad46ec03ccc1961bb1a8b22e178ce5406cfbee2 Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Fri, 15 Jun 2018 00:36:19 -0700 Subject: [PATCH 40/56] "show example" --- python/paddle/fluid/layers/nn.py | 1 + 1 file changed, 1 insertion(+) diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 4fd353171a..b49560a526 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -2076,6 +2076,7 @@ def beam_search_decode(ids, scores, name=None): Examples: .. 
code-block:: python + ids, scores = fluid.layers.beam_search( pre_ids, ids, scores, beam_size, end_id) sentence_ids, sentence_scores = fluid.layers.beam_search_decode( From 7b82353010976533ad10df80637fd88b4d26c627 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Fri, 15 Jun 2018 15:06:57 +0800 Subject: [PATCH 41/56] fix conv3d/conv3d_trans/slice/mean_iou doc --- paddle/fluid/operators/slice_op.cc | 37 ++++++++------- python/paddle/fluid/layers/nn.py | 72 +++++++++++++++--------------- 2 files changed, 56 insertions(+), 53 deletions(-) diff --git a/paddle/fluid/operators/slice_op.cc b/paddle/fluid/operators/slice_op.cc index 61bb445e8b..4bd23d5941 100644 --- a/paddle/fluid/operators/slice_op.cc +++ b/paddle/fluid/operators/slice_op.cc @@ -95,23 +95,26 @@ of that dimension. If the value passed to start or end is larger than the n (the number of elements in this dimension), it represents n. For slicing to the end of a dimension with unknown size, it is recommended to pass in INT_MAX. If axes are omitted, they are set to [0, ..., ndim-1]. - - Example 1: - Given: - data = [ [1, 2, 3, 4], [5, 6, 7, 8], ] - axes = [0, 1] - starts = [1, 0] - ends = [2, 3] - Then: - result = [ [5, 6, 7], ] - - Example 2: - Given: - data = [ [1, 2, 3, 4], [5, 6, 7, 8], ] - starts = [0, 1] - ends = [-1, 1000] - Then: - result = [ [2, 3, 4], ] +Following examples will explain how slice works: + + .. code-block:: text + + Cast1: + Given: + data = [ [1, 2, 3, 4], [5, 6, 7, 8], ] + axes = [0, 1] + starts = [1, 0] + ends = [2, 3] + Then: + result = [ [5, 6, 7], ] + + Cast2: + Given: + data = [ [1, 2, 3, 4], [5, 6, 7, 8], ] + starts = [0, 1] + ends = [-1, 1000] + Then: + result = [ [2, 3, 4], ] )DOC"); } }; diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 5e162a4ae0..d1985efc58 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -1326,10 +1326,8 @@ def conv2d(input, Examples: .. code-block:: python - data = fluid.layers.data( - name='data', shape=[3, 32, 32], dtype='float32') - conv2d = fluid.layers.conv2d( - input=data, num_filters=2, filter_size=3, act="relu") + data = fluid.layers.data(name='data', shape=[3, 32, 32], dtype='float32') + conv2d = fluid.layers.conv2d(input=data, num_filters=2, filter_size=3, act="relu") """ num_channels = input.shape[1] @@ -1431,8 +1429,7 @@ def conv3d(input, * :math:`\\ast`: Convolution operation. * :math:`b`: Bias value, a 2-D tensor with shape [M, 1]. * :math:`\\sigma`: Activation function. - * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be - different. + * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different. Example: @@ -1494,10 +1491,8 @@ def conv3d(input, Examples: .. code-block:: python - data = fluid.layers.data( - name='data', shape=[3, 12, 32, 32], dtype='float32') - conv2d = fluid.layers.conv3d( - input=data, num_filters=2, filter_size=3, act="relu") + data = fluid.layers.data(name='data', shape=[3, 12, 32, 32], dtype='float32') + conv3d = fluid.layers.conv3d(input=data, num_filters=2, filter_size=3, act="relu") """ l_type = 'conv3d' @@ -2105,32 +2100,36 @@ def conv2d_transpose(input, represent height and width, respectively. The details of convolution transpose layer, please refer to the following explanation and references `therein `_. + If bias attribution and activation type are provided, bias is added to + the output of the convolution, and the corresponding activation function + is applied to the final result. 
For each input :math:`X`, the equation is: .. math:: - Out = W \\ast X + Out = \sigma (W \\ast X + b) - In the above equation: + Where: * :math:`X`: Input value, a tensor with NCHW format. * :math:`W`: Filter value, a tensor with MCHW format. - * :math:`\\ast` : Convolution transpose operation. - * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be - different. + * :math:`\\ast`: Convolution operation. + * :math:`b`: Bias value, a 2-D tensor with shape [M, 1]. + * :math:`\\sigma`: Activation function. + * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different. Example: - Input: - Input shape: $(N, C_{in}, H_{in}, W_{in})$ + Input shape: :math:`(N, C_{in}, H_{in}, W_{in})` - Filter shape: $(C_{in}, C_{out}, H_f, W_f)$ + Filter shape: :math:`(C_{in}, C_{out}, H_f, W_f)` - Output: - Output shape: $(N, C_{out}, H_{out}, W_{out})$ + Output shape: :math:`(N, C_{out}, H_{out}, W_{out})` Where @@ -2184,10 +2183,8 @@ def conv2d_transpose(input, Examples: .. code-block:: python - data = fluid.layers.data( - name='data', shape=[3, 32, 32], dtype='float32') - conv2d_transpose = fluid.layers.conv2d_transpose( - input=data, num_filters=2, filter_size=3) + data = fluid.layers.data(name='data', shape=[3, 32, 32], dtype='float32') + conv2d_transpose = fluid.layers.conv2d_transpose(input=data, num_filters=2, filter_size=3) """ helper = LayerHelper("conv2d_transpose", **locals()) if not isinstance(input, Variable): @@ -2267,32 +2264,36 @@ def conv3d_transpose(input, two elements. These two elements represent height and width, respectively. The details of convolution transpose layer, please refer to the following explanation and references `therein `_. + If bias attribution and activation type are provided, bias is added to + the output of the convolution, and the corresponding activation function + is applied to the final result. For each input :math:`X`, the equation is: .. math:: - Out = W \\ast X + Out = \sigma (W \\ast X + b) In the above equation: * :math:`X`: Input value, a tensor with NCDHW format. * :math:`W`: Filter value, a tensor with MCDHW format. - * :math:`\\ast` : Convolution transpose operation. - * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be - different. + * :math:`\\ast`: Convolution operation. + * :math:`b`: Bias value, a 2-D tensor with shape [M, 1]. + * :math:`\\sigma`: Activation function. + * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different. Example: - Input: - Input shape: $(N, C_{in}, D_{in}, H_{in}, W_{in})$ + Input shape: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})` - Filter shape: $(C_{in}, C_{out}, D_f, H_f, W_f)$ + Filter shape: :math:`(C_{in}, C_{out}, D_f, H_f, W_f)` - Output: - Output shape: $(N, C_{out}, D_{out}, H_{out}, W_{out})$ + Output shape: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})` Where @@ -2347,10 +2348,8 @@ def conv3d_transpose(input, Examples: .. code-block:: python - data = fluid.layers.data( - name='data', shape=[3, 12, 32, 32], dtype='float32') - conv2d_transpose = fluid.layers.conv3d_transpose( - input=data, num_filters=2, filter_size=3) + data = fluid.layers.data(name='data', shape=[3, 12, 32, 32], dtype='float32') + conv3d_transpose = fluid.layers.conv3d_transpose(input=data, num_filters=2, filter_size=3) """ l_type = "conv3d_transpose" helper = LayerHelper(l_type, **locals()) @@ -4680,8 +4679,8 @@ def mean_iou(input, label, num_classes): IOU is defined as follows: .. 
math:: - - IOU = true_positive / (true_positive + false_positive + false_negative). + + IOU = \\frac{true\_positiv}{(true\_positive + false\_positive + false\_negative)}. The predictions are accumulated in a confusion matrix and mean-IOU is then calculated from it. @@ -4689,8 +4688,9 @@ def mean_iou(input, label, num_classes): Args: input (Variable): A Tensor of prediction results for semantic labels with type int32 or int64. - label (Variable): A Tensor of ground truth labels with type int32 or int64. + label (Variable): A Tensor of ground truth labels with type int32 or int64. Its shape should be the same as input. + num_classes (int): The possible number of labels. Returns: mean_iou (Variable): A Tensor representing the mean intersection-over-union with shape [1]. From 6ace04f655be6ea7898b5cbe61dfbdb1e16b7806 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Fri, 15 Jun 2018 16:00:07 +0800 Subject: [PATCH 42/56] update --- paddle/fluid/operators/activation_op.cc | 2 +- python/paddle/fluid/layers/nn.py | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/paddle/fluid/operators/activation_op.cc b/paddle/fluid/operators/activation_op.cc index c73482eb12..8743c9500a 100644 --- a/paddle/fluid/operators/activation_op.cc +++ b/paddle/fluid/operators/activation_op.cc @@ -133,7 +133,7 @@ $out = \max(x, 0)$ __attribute__((unused)) constexpr char TanhDoc[] = R"DOC( Tanh Activation Operator. -$$out = \frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}$$ +$$out = \\frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}$$ )DOC"; diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index c6c8c7c2d1..485470f281 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -4475,6 +4475,7 @@ def image_resize(input, and the resizing only applies on the last two dimensions(hight and width). Supporting resample methods: + 'BILINEAR' : Bilinear interpolation Args: @@ -4494,8 +4495,8 @@ def image_resize(input, Default: 'BILINEAR' Returns: - out (Variable): The output is a 4-D tensor of the shape - (num_batches, channls, out_h, out_w). + Variable: The output is a 4-D tensor of the shape + (num_batches, channls, out_h, out_w). Examples: .. code-block:: python @@ -4579,7 +4580,7 @@ def image_resize_short(input, out_short_len, resample='BILINEAR'): resample (str): resample method, default: BILINEAR. Returns: - out (Variable): The output is a 4-D tensor of the shape + Variable: The output is a 4-D tensor of the shape (num_batches, channls, out_h, out_w). 
""" in_shape = input.shape From f24dec713663fd1fd5b678e90e0e7779dbd1bde0 Mon Sep 17 00:00:00 2001 From: wanghaoshuang Date: Fri, 15 Jun 2018 16:06:51 +0800 Subject: [PATCH 43/56] Change 'layers' to 'fluid.layers' --- python/paddle/fluid/layers/control_flow.py | 2 +- python/paddle/fluid/layers/nn.py | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py index 8cd389910c..b211bc8377 100644 --- a/python/paddle/fluid/layers/control_flow.py +++ b/python/paddle/fluid/layers/control_flow.py @@ -1004,7 +1004,7 @@ def array_read(array, i): tmp = fluid.layers.zeros(shape=[10], dtype='int32') i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10) - arr = layers.array_read(tmp, i=i) + arr = fluid.layers.array_read(tmp, i=i) """ helper = LayerHelper('array_read', **locals()) diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index eba22f5962..06fb3504a5 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -2916,9 +2916,9 @@ def warpctc(input, label, blank=0, norm_by_times=False): .. code-block:: python - label = layers.data(shape=[11, 8], dtype='float32', lod_level=1) - predict = layers.data(shape=[11, 1], dtype='float32') - cost = layers.warpctc(input=predict, label=label) + label = fluid.layers.data(shape=[11, 8], dtype='float32', lod_level=1) + predict = fluid.layers.data(shape=[11, 1], dtype='float32') + cost = fluid.layers.warpctc(input=predict, label=label) """ helper = LayerHelper('warpctc', **locals()) @@ -2982,7 +2982,7 @@ def sequence_reshape(input, new_dim): .. code-block:: python x = fluid.layers.data(shape=[5, 20], dtype='float32', lod_level=1) - x_reshaped = layers.sequence_reshape(input=x, new_dim=10) + x_reshaped = fluid.layers.sequence_reshape(input=x, new_dim=10) """ helper = LayerHelper('sequence_reshape', **locals()) out = helper.create_tmp_variable(helper.input_dtype()) From 8f59d79d751e3174f0a6f98783fa1dbdbc279cc2 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Fri, 15 Jun 2018 16:35:53 +0800 Subject: [PATCH 44/56] update doc for sigmoid_cross_entropy_with_logits --- .../fluid/operators/sigmoid_cross_entropy_with_logits_op.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.cc b/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.cc index 135e2a6f7f..c3b0fe3209 100644 --- a/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.cc +++ b/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.cc @@ -113,14 +113,14 @@ The logistic loss is given as follows: $$loss = -Labels * \log(\sigma(X)) - (1 - Labels) * \log(1 - \sigma(X))$$ -We know that $$\sigma(X) = (1 / (1 + \exp(-X)))$$. By substituting this we get: +We know that $$\sigma(X) = \\frac{1}{1 + \exp(-X)}$$. By substituting this we get: $$loss = X - X * Labels + \log(1 + \exp(-X))$$ For stability and to prevent overflow of $$\exp(-X)$$ when X < 0, we reformulate the loss as follows: - $$loss = \max(X, 0) - X * Labels + \log(1 + \exp(-|X|))$$ + $$loss = \max(X, 0) - X * Labels + \log(1 + \exp(-\|X\|))$$ Both the input `X` and `Labels` can carry the LoD (Level of Details) information. However the output only shares the LoD with input `X`. 
From f6daab438db0570b098963fb7f91dd01110918db Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Fri, 15 Jun 2018 17:59:04 +0800 Subject: [PATCH 45/56] fix a bug --- python/paddle/fluid/layers/tensor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/paddle/fluid/layers/tensor.py b/python/paddle/fluid/layers/tensor.py index 09c0b2fa79..973059a2ce 100644 --- a/python/paddle/fluid/layers/tensor.py +++ b/python/paddle/fluid/layers/tensor.py @@ -115,7 +115,7 @@ def create_global_var(shape, """ helper = LayerHelper("global_var", **locals()) var = helper.create_global_variable( - dtype=dtype, shape=shape, persistable=persistable) + dtype=dtype, shape=shape, persistable=persistable, name=name) helper.set_variable_initializer( var, initializer=Constant( value=float(value), force_cpu=force_cpu)) From a4ee0d0dd165cdc79beca3e0904a7adf5bf58d9c Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Sat, 16 Jun 2018 08:58:48 +0800 Subject: [PATCH 46/56] add reverse --- python/paddle/fluid/layers/tensor.py | 1 + 1 file changed, 1 insertion(+) diff --git a/python/paddle/fluid/layers/tensor.py b/python/paddle/fluid/layers/tensor.py index 4c97ca40d8..18e0fedcc4 100644 --- a/python/paddle/fluid/layers/tensor.py +++ b/python/paddle/fluid/layers/tensor.py @@ -35,6 +35,7 @@ __all__ = [ 'argmax', 'ones', 'zeros', + 'reverse', ] From 24766a170d8199bdc8b7159ad74d6911ff2fd6cb Mon Sep 17 00:00:00 2001 From: yuyang18 Date: Sun, 17 Jun 2018 11:39:31 +0800 Subject: [PATCH 47/56] Reformat nn.py --- python/paddle/fluid/layers/nn.py | 83 ++++++++++++++++++++++++++------ 1 file changed, 68 insertions(+), 15 deletions(-) diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 11af237b82..8afc65d60e 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -25,21 +25,74 @@ import utils import random __all__ = [ - 'fc', 'embedding', 'dynamic_lstm', 'dynamic_lstmp', 'dynamic_gru', - 'gru_unit', 'linear_chain_crf', 'crf_decoding', 'cos_sim', 'cross_entropy', - 'square_error_cost', 'chunk_eval', 'sequence_conv', 'conv2d', 'conv3d', - 'sequence_pool', 'sequence_softmax', 'softmax', 'pool2d', 'pool3d', - 'batch_norm', 'beam_search_decode', 'conv2d_transpose', 'conv3d_transpose', - 'sequence_expand', 'lstm_unit', 'reduce_sum', 'reduce_mean', 'reduce_max', - 'reduce_min', 'reduce_prod', 'sequence_first_step', 'sequence_last_step', - 'dropout', 'split', 'ctc_greedy_decoder', 'edit_distance', 'l2_normalize', - 'matmul', 'topk', 'warpctc', 'sequence_reshape', 'transpose', 'im2sequence', - 'nce', 'beam_search', 'row_conv', 'multiplex', 'layer_norm', - 'softmax_with_cross_entropy', 'smooth_l1', 'one_hot', - 'autoincreased_step_counter', 'reshape', 'lod_reset', 'lrn', 'pad', - 'label_smooth', 'roi_pool', 'dice_loss', 'image_resize', - 'image_resize_short', 'resize_bilinear', 'gather', 'random_crop', - 'mean_iou', 'relu', 'log' + 'fc', + 'embedding', + 'dynamic_lstm', + 'dynamic_lstmp', + 'dynamic_gru', + 'gru_unit', + 'linear_chain_crf', + 'crf_decoding', + 'cos_sim', + 'cross_entropy', + 'square_error_cost', + 'chunk_eval', + 'sequence_conv', + 'conv2d', + 'conv3d', + 'sequence_pool', + 'sequence_softmax', + 'softmax', + 'pool2d', + 'pool3d', + 'batch_norm', + 'beam_search_decode', + 'conv2d_transpose', + 'conv3d_transpose', + 'sequence_expand', + 'lstm_unit', + 'reduce_sum', + 'reduce_mean', + 'reduce_max', + 'reduce_min', + 'reduce_prod', + 'sequence_first_step', + 'sequence_last_step', + 'dropout', + 'split', + 'ctc_greedy_decoder', + 'edit_distance', + 
'l2_normalize', + 'matmul', + 'topk', + 'warpctc', + 'sequence_reshape', + 'transpose', + 'im2sequence', + 'nce', + 'beam_search', + 'row_conv', + 'multiplex', + 'layer_norm', + 'softmax_with_cross_entropy', + 'smooth_l1', + 'one_hot', + 'autoincreased_step_counter', + 'reshape', + 'lod_reset', + 'lrn', + 'pad', + 'label_smooth', + 'roi_pool', + 'dice_loss', + 'image_resize', + 'image_resize_short', + 'resize_bilinear', + 'gather', + 'random_crop', + 'mean_iou', + 'relu', + 'log', ] From 82a4cf19608c7655a6b6394c65a10933f3b64dc0 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Sun, 17 Jun 2018 11:44:25 +0800 Subject: [PATCH 48/56] update image_resize_short and shape doc --- paddle/fluid/operators/shape_op.cc | 9 ++++++--- python/paddle/fluid/layers/nn.py | 2 +- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/paddle/fluid/operators/shape_op.cc b/paddle/fluid/operators/shape_op.cc index c75fce7959..b44d5f8980 100644 --- a/paddle/fluid/operators/shape_op.cc +++ b/paddle/fluid/operators/shape_op.cc @@ -36,10 +36,13 @@ class ShapeOpMaker : public framework::OpProtoAndCheckerMaker { public: void Make() override { AddInput("Input", "(Tensor), The input tensor."); - AddOutput("Out", "(Tensor), The shape of input tensor."); + AddOutput("Out", + "(Tensor), The shape of input tensor, the data type of the shape" + " is int64_t, will be on the same device with the input Tensor."); AddComment(R"DOC( -Shape Operator. -Get the shape of input tensor. +Shape Operator + +Get the shape of input tensor. Only support CPU input Tensor now. )DOC"); } }; diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index a3b2d2b777..40e72aa488 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -4650,7 +4650,7 @@ def image_resize_short(input, out_short_len, resample='BILINEAR'): Returns: Variable: The output is a 4-D tensor of the shape - (num_batches, channls, out_h, out_w). + (num_batches, channls, out_h, out_w). """ in_shape = input.shape if len(in_shape) != 4: From 811f5cca7eda2192b0c758ba1111eafbc035c0ac Mon Sep 17 00:00:00 2001 From: yuyang18 Date: Sun, 17 Jun 2018 11:58:49 +0800 Subject: [PATCH 49/56] Hide StaticRNNMemory --- python/paddle/fluid/layers/control_flow.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py index b0534b251b..8ab433e3f0 100644 --- a/python/paddle/fluid/layers/control_flow.py +++ b/python/paddle/fluid/layers/control_flow.py @@ -27,7 +27,6 @@ __all__ = [ 'merge_lod_tensor', 'BlockGuard', 'BlockGuardWithCompletion', - 'StaticRNNMemoryLink', 'WhileGuard', 'While', 'Switch', @@ -410,16 +409,17 @@ class StaticRNNMemoryLink(object): """ StaticRNNMemoryLink class. - Args: - init: the initial variable for Memory - init: Variable - pre_mem: the memory variable in previous time step - pre_mem: Variable - mem: the memory variable in current time step - mem: Variable - StaticRNNMemoryLink class is used to create a link between two memory cells of a StaticRNN. + + + NOTE: This is a internal data structure of a very low-level API. + Please use StaticRNN instead. + + Args: + init(Variable): the initial variable for Memory. + pre_mem(Variable): the memory variable in previous time step. + mem(Variable): the memory variable in current time step. 
""" def __init__(self, init, pre_mem, mem=None): From 962711dc3fc48af12cc317f015fbe00ce801ea64 Mon Sep 17 00:00:00 2001 From: gongweibao Date: Sat, 16 Jun 2018 23:23:29 -0500 Subject: [PATCH 50/56] Add some paddleenforce. (#11516) --- paddle/fluid/memory/detail/system_allocator.cc | 8 +++++--- paddle/fluid/operators/detail/grpc_client.cc | 2 +- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/paddle/fluid/memory/detail/system_allocator.cc b/paddle/fluid/memory/detail/system_allocator.cc index d539052916..9b1ab1e228 100644 --- a/paddle/fluid/memory/detail/system_allocator.cc +++ b/paddle/fluid/memory/detail/system_allocator.cc @@ -43,14 +43,16 @@ void* CPUAllocator::Alloc(size_t* index, size_t size) { *index = 0; // unlock memory - void* p; + void* p = nullptr; #ifdef PADDLE_WITH_MKLDNN // refer to https://github.com/01org/mkl-dnn/blob/master/include/mkldnn.hpp // memory alignment - PADDLE_ENFORCE_EQ(posix_memalign(&p, 4096ul, size), 0); + PADDLE_ENFORCE_EQ(posix_memalign(&p, 4096ul, size), 0, "Alloc %ld error!", + size); #else - PADDLE_ENFORCE_EQ(posix_memalign(&p, 32ul, size), 0); + PADDLE_ENFORCE_EQ(posix_memalign(&p, 32ul, size), 0, "Alloc %ld error!", + size); #endif PADDLE_ENFORCE(p, "Fail to allocate CPU memory: size = %d .", size); diff --git a/paddle/fluid/operators/detail/grpc_client.cc b/paddle/fluid/operators/detail/grpc_client.cc index 02ffe3651e..ea004f7cd3 100644 --- a/paddle/fluid/operators/detail/grpc_client.cc +++ b/paddle/fluid/operators/detail/grpc_client.cc @@ -245,7 +245,7 @@ void GRPCClient::Proceed() { if (c->status_.ok()) { c->Process(); } else { - LOG(ERROR) << "var: " << c->var_h_.String() + LOG(FATAL) << "var: " << c->var_h_.String() << " grpc error:" << c->status_.error_message(); } delete c; From 7a56705e4ae7229b96046be113af053eefbfaf27 Mon Sep 17 00:00:00 2001 From: yuyang18 Date: Sun, 17 Jun 2018 12:53:54 +0800 Subject: [PATCH 51/56] polish doc --- paddle/fluid/operators/lstm_op.cc | 14 ++++++------- .../fluid/layers/layer_function_generator.py | 21 +++++++------------ 2 files changed, 14 insertions(+), 21 deletions(-) diff --git a/paddle/fluid/operators/lstm_op.cc b/paddle/fluid/operators/lstm_op.cc index 29cec6ae10..3225bf9bb6 100644 --- a/paddle/fluid/operators/lstm_op.cc +++ b/paddle/fluid/operators/lstm_op.cc @@ -184,19 +184,17 @@ Long-Short Term Memory (LSTM) Operator. The defalut implementation is diagonal/peephole connection (https://arxiv.org/pdf/1402.1128.pdf), the formula is as follows: -$$ -i_t = \sigma(W_{ix}x_{t} + W_{ih}h_{t-1} + W_{ic}c_{t-1} + b_i) \\ +$$ i_t = \\sigma(W_{ix}x_{t} + W_{ih}h_{t-1} + W_{ic}c_{t-1} + b_i) $$ -f_t = \sigma(W_{fx}x_{t} + W_{fh}h_{t-1} + W_{fc}c_{t-1} + b_f) \\ +$$ f_t = \\sigma(W_{fx}x_{t} + W_{fh}h_{t-1} + W_{fc}c_{t-1} + b_f) $$ -\tilde{c_t} = act_g(W_{cx}x_t + W_{ch}h_{t-1} + b_c) \\ +$$ \\tilde{c_t} = act_g(W_{cx}x_t + W_{ch}h_{t-1} + b_c) $$ -o_t = \sigma(W_{ox}x_{t} + W_{oh}h_{t-1} + W_{oc}c_t + b_o) \\ +$$ o_t = \\sigma(W_{ox}x_{t} + W_{oh}h_{t-1} + W_{oc}c_t + b_o) $$ -c_t = f_t \odot c_{t-1} + i_t \odot \tilde{c_t} \\ +$$ c_t = f_t \\odot c_{t-1} + i_t \\odot \\tilde{c_t} $$ -h_t = o_t \odot act_h(c_t) -$$ +$$ h_t = o_t \\odot act_h(c_t) $$ - W terms denote weight matrices (e.g. 
$W_{xi}$ is the matrix of weights from the input gate to the input), $W_{ic}, W_{fc}, W_{oc}$ diff --git a/python/paddle/fluid/layers/layer_function_generator.py b/python/paddle/fluid/layers/layer_function_generator.py index 7a95afa9a6..3096389101 100644 --- a/python/paddle/fluid/layers/layer_function_generator.py +++ b/python/paddle/fluid/layers/layer_function_generator.py @@ -49,6 +49,13 @@ _single_dollar_pattern_ = re.compile(r"\$([^\$]+)\$") _two_bang_pattern_ = re.compile(r"!!([^!]+)!!") +def escape_math(text): + return _two_bang_pattern_.sub( + r'$$\1$$', + _single_dollar_pattern_.sub(r':math:`\1`', + _two_dollar_pattern_.sub(r"!!\1!!", text))) + + def _generate_doc_string_(op_proto): """ Generate docstring by OpProto @@ -60,12 +67,6 @@ def _generate_doc_string_(op_proto): str: the document string """ - def escape_math(text): - return _two_bang_pattern_.sub( - r'$$\1$$', - _single_dollar_pattern_.sub( - r':math:`\1`', _two_dollar_pattern_.sub(r"!!\1!!", text))) - if not isinstance(op_proto, framework_pb2.OpProto): raise TypeError("OpProto should be `framework_pb2.OpProto`") @@ -233,9 +234,6 @@ def autodoc(comment=""): return __impl__ -_inline_math_single_dollar = re.compile(r"\$([^\$]+)\$") - - def templatedoc(op_type=None): """ Decorator of layer function. It will use the docstring from the layer @@ -253,9 +251,6 @@ def templatedoc(op_type=None): def trim_ending_dot(msg): return msg.rstrip('.') - def escape_inline_math(msg): - return _inline_math_single_dollar.sub(repl=r':math:`\1`', string=msg) - def __impl__(func): if op_type is None: op_type_name = func.__name__ @@ -269,7 +264,7 @@ def templatedoc(op_type=None): for line in comment_lines: line = line.strip() if len(line) != 0: - comment += escape_inline_math(line) + comment += escape_math(line) comment += " " elif len(comment) != 0: comment += "\n \n " From 46ae1c93c28d346d9a4c6a4bf7c9d1019216403b Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Sun, 17 Jun 2018 14:00:49 +0800 Subject: [PATCH 52/56] add doc for softmax --- python/paddle/fluid/layers/nn.py | 39 ++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 6032573393..d31d12f971 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -1258,6 +1258,45 @@ def sequence_softmax(input, param_attr=None, bias_attr=None, use_cudnn=True): def softmax(input, param_attr=None, bias_attr=None, use_cudnn=True, name=None): + """ + The input of the softmax layer is a 2-D tensor with shape N x K (N is the + batch_size, K is the dimension of input feature). The output tensor has the + same shape as the input tensor. + + For each row of the input tensor, the softmax operator squashes the + K-dimensional vector of arbitrary real values to a K-dimensional vector of real + values in the range [0, 1] that add up to 1. + + It computes the exponential of the given dimension and the sum of exponential + values of all the other dimensions in the K-dimensional vector input. + Then the ratio of the exponential of the given dimension and the sum of + exponential values of all the other dimensions is the output of the softmax + operator. + + For each row :math:`i` and each column :math:`j` in Input(X), we have: + + .. math:: + + Out[i, j] = \\frac{\exp(X[i, j])}{\sum_j(exp(X[i, j])} + + Args: + input (Variable): The input variable. 
+ bias_attr (ParamAttr): attributes for bias + param_attr (ParamAttr): attributes for parameter + use_cudnn (bool): Use cudnn kernel or not, it is valid only when the cudnn \ + library is installed. + + Returns: + Variable: output of softmax + + Examples: + + .. code-block:: python + + fc = fluid.layers.fc(input=x, size=10) + softmax = fluid.layers.softmax(input=fc) + + """ helper = LayerHelper('softmax', **locals()) dtype = helper.input_dtype() softmax_out = helper.create_tmp_variable(dtype) From d7345959789a22437c7065078cc3a7d457c7e70e Mon Sep 17 00:00:00 2001 From: Yan Chunwei Date: Mon, 18 Jun 2018 17:16:06 +0800 Subject: [PATCH 53/56] Feature/pass manager (#11440) --- .../fluid/inference/analysis/CMakeLists.txt | 39 +++--- paddle/fluid/inference/analysis/argument.cc | 15 +++ paddle/fluid/inference/analysis/argument.h | 53 ++++++++ .../inference/analysis/data_flow_graph.cc | 15 +-- .../analysis/data_flow_graph_to_fluid_pass.cc | 77 ++++++++++++ .../analysis/data_flow_graph_to_fluid_pass.h | 59 +++++++++ .../data_flow_graph_to_fluid_pass_tester.cc | 5 +- .../analysis/dfg_graphviz_draw_pass.cc | 54 ++++++++ .../analysis/dfg_graphviz_draw_pass.h | 41 ++++--- .../analysis/dfg_graphviz_draw_pass_tester.cc | 10 +- .../analysis/fluid_to_data_flow_graph_pass.cc | 22 ++-- .../analysis/fluid_to_data_flow_graph_pass.h | 11 +- .../fluid_to_data_flow_graph_pass_tester.cc | 6 +- paddle/fluid/inference/analysis/helper.h | 1 + paddle/fluid/inference/analysis/node.cc | 3 + paddle/fluid/inference/analysis/node.h | 23 ++-- paddle/fluid/inference/analysis/pass.h | 27 ++-- .../fluid/inference/analysis/pass_manager.cc | 44 +++++++ .../fluid/inference/analysis/pass_manager.h | 116 ++++++++++++++++++ .../inference/analysis/pass_manager_tester.cc | 85 +++++++++++++ .../analysis/subgraph_splitter_tester.cc | 45 +++++-- .../analysis/tensorrt_subgraph_pass.cc | 33 +++++ .../analysis/tensorrt_subgraph_pass.h | 47 +++++++ .../analysis/tensorrt_subgraph_pass_tester.cc | 71 +++++++++++ paddle/fluid/inference/analysis/ut_helper.h | 34 +++-- .../operators/tensorrt_engine_op_test.cc | 2 +- 26 files changed, 830 insertions(+), 108 deletions(-) create mode 100644 paddle/fluid/inference/analysis/argument.cc create mode 100644 paddle/fluid/inference/analysis/argument.h create mode 100644 paddle/fluid/inference/analysis/data_flow_graph_to_fluid_pass.cc create mode 100644 paddle/fluid/inference/analysis/data_flow_graph_to_fluid_pass.h create mode 100644 paddle/fluid/inference/analysis/dfg_graphviz_draw_pass.cc create mode 100644 paddle/fluid/inference/analysis/pass_manager.cc create mode 100644 paddle/fluid/inference/analysis/pass_manager.h create mode 100644 paddle/fluid/inference/analysis/pass_manager_tester.cc create mode 100644 paddle/fluid/inference/analysis/tensorrt_subgraph_pass.cc create mode 100644 paddle/fluid/inference/analysis/tensorrt_subgraph_pass.h create mode 100644 paddle/fluid/inference/analysis/tensorrt_subgraph_pass_tester.cc diff --git a/paddle/fluid/inference/analysis/CMakeLists.txt b/paddle/fluid/inference/analysis/CMakeLists.txt index 5083578444..2bb2c8135d 100644 --- a/paddle/fluid/inference/analysis/CMakeLists.txt +++ b/paddle/fluid/inference/analysis/CMakeLists.txt @@ -1,23 +1,32 @@ set(FLUID_CORE_MODULES proto_desc memory lod_tensor executor init) -cc_library(analysis SRCS dot.cc node.cc data_flow_graph.cc graph_traits.cc subgraph_splitter.cc fluid_to_data_flow_graph_pass.cc - DEPS paddle_fluid) +cc_library(analysis SRCS pass_manager.cc dot.cc node.cc data_flow_graph.cc graph_traits.cc 
subgraph_splitter.cc + fluid_to_data_flow_graph_pass.cc + data_flow_graph_to_fluid_pass.cc + tensorrt_subgraph_pass.cc + dfg_graphviz_draw_pass.cc + DEPS framework_proto) cc_test(test_node SRCS node_tester.cc DEPS analysis) cc_test(test_dot SRCS dot_tester.cc DEPS analysis) set(PYTHON_TESTS_DIR ${PADDLE_BINARY_DIR}/python/paddle/fluid/tests) -cc_test(test_data_flow_graph SRCS data_flow_graph_tester.cc DEPS analysis ${FLUID_CORE_MODULES} paddle_fluid - ARGS --inference_model_dir=${PYTHON_TESTS_DIR}/book/word2vec.inference.model) -set_tests_properties(test_data_flow_graph PROPERTIES DEPENDS test_word2vec) +function (inference_analysis_test TARGET) + set(options "") + set(oneValueArgs "") + set(multiValueArgs SRCS) + cmake_parse_arguments(analysis_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) -cc_test(test_subgraph_splitter - SRCS subgraph_splitter_tester.cc - DEPS analysis paddle_fluid tensor - ARGS --inference_model_dir=${PYTHON_TESTS_DIR}/book/word2vec.inference.model) -set_tests_properties(test_subgraph_splitter PROPERTIES DEPENDS test_word2vec) + cc_test(${TARGET} + SRCS "${analysis_test_SRCS}" + DEPS analysis + ARGS --inference_model_dir=${PYTHON_TESTS_DIR}/book/word2vec.inference.model --fraction_of_gpu_memory_to_use=0.5) + set_tests_properties(${TARGET} PROPERTIES DEPENDS test_word2vec) +endfunction(inference_analysis_test) -cc_test(test_dfg_graphviz_draw_pass - SRCS dfg_graphviz_draw_pass_tester.cc - DEPS analysis - ARGS --inference_model_dir=${PYTHON_TESTS_DIR}/book/word2vec.inference.model) -set_tests_properties(test_dfg_graphviz_draw_pass PROPERTIES DEPENDS test_word2vec) +inference_analysis_test(test_data_flow_graph SRCS data_flow_graph_tester.cc) +inference_analysis_test(test_data_flow_graph_to_fluid_pass SRCS data_flow_graph_to_fluid_pass_tester.cc) +inference_analysis_test(test_fluid_to_data_flow_graph_pass SRCS fluid_to_data_flow_graph_pass_tester.cc) +inference_analysis_test(test_subgraph_splitter SRCS subgraph_splitter_tester.cc) +inference_analysis_test(test_dfg_graphviz_draw_pass SRCS dfg_graphviz_draw_pass_tester.cc) +#inference_analysis_test(test_tensorrt_subgraph_pass SRCS tensorrt_subgraph_pass_tester.cc) +inference_analysis_test(test_pass_manager SRCS pass_manager_tester.cc) diff --git a/paddle/fluid/inference/analysis/argument.cc b/paddle/fluid/inference/analysis/argument.cc new file mode 100644 index 0000000000..cb0263d5d9 --- /dev/null +++ b/paddle/fluid/inference/analysis/argument.cc @@ -0,0 +1,15 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/inference/analysis/argument.h" diff --git a/paddle/fluid/inference/analysis/argument.h b/paddle/fluid/inference/analysis/argument.h new file mode 100644 index 0000000000..7d7131ed7a --- /dev/null +++ b/paddle/fluid/inference/analysis/argument.h @@ -0,0 +1,53 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* + * This file defines the class Argument, which is the input and output of the + * analysis module. All the fields that needed either by Passes or PassManagers + * are contained in Argument. + * + * TODO(Superjomn) Find some way better to contain the fields when it grow too + * big. + */ + +#include "paddle/fluid/framework/program_desc.h" +#include "paddle/fluid/inference/analysis/data_flow_graph.h" + +namespace paddle { +namespace inference { +namespace analysis { + +/* + * The argument definition of both Pass and PassManagers. + * + * All the fields should be registered here for clearness. + */ +struct Argument { + // The graph that process by the Passes or PassManagers. + std::unique_ptr main_dfg; + + // The original program desc. + std::unique_ptr origin_program_desc; +}; + +#define UNLIKELY(condition) __builtin_expect(static_cast(condition), 0) +#define ANALYSIS_ARGUMENT_CHECK_FIELD(field__) \ + if (!UNLIKELY(field__)) { \ + LOG(ERROR) << "field " << #field__ << " should be set."; \ + return false; \ + } + +} // namespace analysis +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/analysis/data_flow_graph.cc b/paddle/fluid/inference/analysis/data_flow_graph.cc index 4220451e3c..c30a7c26ce 100644 --- a/paddle/fluid/inference/analysis/data_flow_graph.cc +++ b/paddle/fluid/inference/analysis/data_flow_graph.cc @@ -14,6 +14,7 @@ limitations under the License. */ #include "paddle/fluid/inference/analysis/data_flow_graph.h" #include "paddle/fluid/inference/analysis/dot.h" +#include "paddle/fluid/inference/analysis/node.h" namespace paddle { namespace inference { @@ -57,19 +58,7 @@ std::string DataFlowGraph::DotString() const { // Add nodes for (size_t i = 0; i < nodes.size(); i++) { const Node &node = nodes.Get(i); - switch (node.type()) { - case Node::Type::kValue: - dot.AddNode(node.repr(), node.dot_attrs()); - break; - case Node::Type::kFunction: - dot.AddNode(node.repr(), node.dot_attrs()); - break; - case Node::Type::kFunctionBlock: - dot.AddNode(node.repr(), node.dot_attrs()); - break; - default: - PADDLE_THROW("unsupported Node type %d", static_cast(node.type())); - } + dot.AddNode(node.repr(), node.dot_attrs()); } // Add edges diff --git a/paddle/fluid/inference/analysis/data_flow_graph_to_fluid_pass.cc b/paddle/fluid/inference/analysis/data_flow_graph_to_fluid_pass.cc new file mode 100644 index 0000000000..f7d4cca213 --- /dev/null +++ b/paddle/fluid/inference/analysis/data_flow_graph_to_fluid_pass.cc @@ -0,0 +1,77 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/inference/analysis/data_flow_graph_to_fluid_pass.h" +#include "paddle/fluid/framework/proto_desc.h" + +namespace paddle { +namespace inference { +namespace analysis { + +bool DataFlowGraphToFluidPass::Initialize(Argument* argument) { + ANALYSIS_ARGUMENT_CHECK_FIELD(argument) + ANALYSIS_ARGUMENT_CHECK_FIELD(argument->origin_program_desc) + desc_ = argument->origin_program_desc.get(); + // Here some logic from program_desc.cc and will not add new interfaces into + // framework::ProgramDesc class, use some UT to assure the correctness. + auto* block = desc_->mutable_blocks()->Add(); + block->set_idx(framework::kRootBlockIndex); + block->set_parent_idx(framework::kNoneBlockIndex); + return true; +} + +bool DataFlowGraphToFluidPass::Finalize() { return true; } + +void DataFlowGraphToFluidPass::Run(DataFlowGraph* graph) { + auto traits = GraphTraits(graph); + for (auto it = traits.nodes().begin(); it != traits.nodes().end(); ++it) { + if (it->deleted()) continue; + switch (it->type()) { + case Node::Type::kFunction: + LOG(INFO) << "add function " << it->name(); + AddFluidOp(&(*it)); + break; + case Node::Type::kFunctionBlock: + AddEngineOp(&(*it)); + break; + default: + continue; + } + } +} + +void DataFlowGraphToFluidPass::AddFluidOp(Node* node) { + LOG(INFO) << "processing func " << node->name(); + auto* ori_op = static_cast(node->pb_desc()); + // currently only the main block is analyzed. + auto* main_block = desc_->mutable_blocks(framework::kRootBlockIndex); + auto* op = main_block->add_ops(); + LOG(INFO) << "to copy the op"; + *op = *ori_op; // copy the attributes, by default, these will not be changed + // by analysis phrase. + // The inputs and outputs of the existing ops are not changed by tensorrt + // subgraph pass. + // NOTE It might be changed by other passes in the long run. +} + +void DataFlowGraphToFluidPass::AddEngineOp(Node* node) { + // auto* ori_op = static_cast(node->extra_info()); + // auto* main_block = desc_->mutable_blocks(framework::kRootBlockIndex); + // auto* op = main_block->add_ops(); + // TODO(Superjomn) Here need to expose some arguments for default setting. +} + +} // namespace analysis +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/analysis/data_flow_graph_to_fluid_pass.h b/paddle/fluid/inference/analysis/data_flow_graph_to_fluid_pass.h new file mode 100644 index 0000000000..cbb05f622c --- /dev/null +++ b/paddle/fluid/inference/analysis/data_flow_graph_to_fluid_pass.h @@ -0,0 +1,59 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
*/ + +/* + * This file implements the transformation from fluid ProgramDesc to data flow + * graph. + */ + +#pragma once + +#include "paddle/fluid/framework/program_desc.h" +#include "paddle/fluid/inference/analysis/data_flow_graph.h" +#include "paddle/fluid/inference/analysis/pass.h" + +namespace paddle { +namespace inference { +namespace analysis { +class DataFlowGraphToFluidPass final : public DataFlowGraphPass { + public: + DataFlowGraphToFluidPass() = default; + + bool Initialize(Argument *argument) override; + bool Finalize() override; + + void Run(DataFlowGraph *graph) override; + + std::string repr() const override { return "DFG to fluid"; } + std::string description() const override { + return "Transform a DFG to a Fluid ProgramDesc"; + } + + Pass *CreatePrinterPass(std::ostream &os, + const std::string &banner) const override { + return nullptr; + } + + protected: + // Add a Fluid Op into the ProgramDesc. + void AddFluidOp(Node *node); + // Add a EngineOp into the ProgramDesc. + void AddEngineOp(Node *node); + + private: + framework::proto::ProgramDesc *desc_; +}; +} // namespace analysis +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/analysis/data_flow_graph_to_fluid_pass_tester.cc b/paddle/fluid/inference/analysis/data_flow_graph_to_fluid_pass_tester.cc index dcee75cee5..d8fc5e580a 100644 --- a/paddle/fluid/inference/analysis/data_flow_graph_to_fluid_pass_tester.cc +++ b/paddle/fluid/inference/analysis/data_flow_graph_to_fluid_pass_tester.cc @@ -27,13 +27,12 @@ namespace inference { namespace analysis { TEST_F(DFG_Tester, Test) { - framework::proto::ProgramDesc new_desc; DataFlowGraph graph; FluidToDataFlowGraphPass pass0; DataFlowGraphToFluidPass pass1; - pass0.Initialize(desc); - pass1.Initialize(&new_desc); + ASSERT_TRUE(pass0.Initialize(&argument)); + ASSERT_TRUE(pass1.Initialize(&argument)); pass0.Run(&graph); pass1.Run(&graph); diff --git a/paddle/fluid/inference/analysis/dfg_graphviz_draw_pass.cc b/paddle/fluid/inference/analysis/dfg_graphviz_draw_pass.cc new file mode 100644 index 0000000000..afffb3feb0 --- /dev/null +++ b/paddle/fluid/inference/analysis/dfg_graphviz_draw_pass.cc @@ -0,0 +1,54 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/fluid/inference/analysis/dfg_graphviz_draw_pass.h" + +namespace paddle { +namespace inference { +namespace analysis { + +void DFG_GraphvizDrawPass::Run(DataFlowGraph *graph) { + auto content = Draw(graph); + std::ofstream file(GenDotPath()); + file.write(content.c_str(), content.size()); + file.close(); + LOG(INFO) << "draw dot to " << GenDotPath(); +} + +std::string DFG_GraphvizDrawPass::Draw(DataFlowGraph *graph) { + Dot dot; + // Add nodes + for (size_t i = 0; i < graph->nodes.size(); i++) { + const Node &node = graph->nodes.Get(i); + if (config_.display_deleted_node || !node.deleted()) { + dot.AddNode(node.repr(), node.dot_attrs()); + } + } + // Add edges + for (size_t i = 0; i < graph->nodes.size(); i++) { + const Node &node = graph->nodes.Get(i); + if (!config_.display_deleted_node && node.deleted()) continue; + for (auto &in : node.inlinks) { + if (!config_.display_deleted_node && in->deleted()) continue; + for (auto &in : node.inlinks) { + dot.AddEdge(in->repr(), node.repr(), {}); + } + } + } + return dot.Build(); +} + +} // namespace analysis +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/analysis/dfg_graphviz_draw_pass.h b/paddle/fluid/inference/analysis/dfg_graphviz_draw_pass.h index 41d4475382..93ebff59ae 100644 --- a/paddle/fluid/inference/analysis/dfg_graphviz_draw_pass.h +++ b/paddle/fluid/inference/analysis/dfg_graphviz_draw_pass.h @@ -21,6 +21,7 @@ limitations under the License. */ #include #include +#include "paddle/fluid/inference/analysis/dot.h" #include "paddle/fluid/inference/analysis/pass.h" namespace paddle { @@ -32,35 +33,39 @@ namespace analysis { */ class DFG_GraphvizDrawPass : public DataFlowGraphPass { public: - DFG_GraphvizDrawPass(const std::string& dir, const std::string& id) - : dir_(dir), id_(id) {} - - bool Initialize() override { return Pass::Initialize(); } - void Run(DataFlowGraph* graph) override { - auto content = Draw(graph); - std::ofstream file(GenDotPath()); - file.write(content.c_str(), content.size()); - file.close(); - LOG(INFO) << "draw dot to " << GenDotPath(); - } + struct Config { + Config(const std::string &dir, const std::string &id, + bool display_deleted_node = false) + : dir(dir), id(id), display_deleted_node(display_deleted_node) {} + + // The directory to store the .dot or .png files. + const std::string dir; + // The identifier for this dot file. + const std::string id; + // Whether to display deleted nodes, default false. + const bool display_deleted_node; + }; + + DFG_GraphvizDrawPass(const Config &config) : config_(config) {} + bool Initialize(Argument *argument) override { return true; } + void Run(DataFlowGraph *graph) override; bool Finalize() override { return Pass::Finalize(); } - Pass* CreatePrinterPass(std::ostream& os, - const std::string& banner) const override { - return nullptr; + std::string repr() const override { return "DFG graphviz drawer"; } + std::string description() const override { + return "Debug a DFG by draw with graphviz"; } private: // Path of the dot file to output. 
std::string GenDotPath() const { - return dir_ + "/" + "graph_" + id_ + ".dot"; + return config_.dir + "/" + "graph_" + config_.id + ".dot"; } - std::string Draw(DataFlowGraph* graph) { return graph->DotString(); } + std::string Draw(DataFlowGraph *graph); - std::string dir_; - std::string id_; + Config config_; }; } // namespace analysis diff --git a/paddle/fluid/inference/analysis/dfg_graphviz_draw_pass_tester.cc b/paddle/fluid/inference/analysis/dfg_graphviz_draw_pass_tester.cc index 3fc1cc18b8..f4b5c5fd22 100644 --- a/paddle/fluid/inference/analysis/dfg_graphviz_draw_pass_tester.cc +++ b/paddle/fluid/inference/analysis/dfg_graphviz_draw_pass_tester.cc @@ -24,9 +24,10 @@ namespace inference { namespace analysis { TEST_F(DFG_Tester, dfg_graphviz_draw_pass_tester) { - auto dfg = ProgramDescToDFG(desc); - DFG_GraphvizDrawPass pass("./", "test"); - pass.Initialize(); + auto dfg = ProgramDescToDFG(*argument.origin_program_desc); + DFG_GraphvizDrawPass::Config config("./", "test"); + DFG_GraphvizDrawPass pass(config); + pass.Initialize(&argument); pass.Run(&dfg); // test content @@ -38,7 +39,8 @@ TEST_F(DFG_Tester, dfg_graphviz_draw_pass_tester) { while (std::getline(file, line)) { no++; } - ASSERT_EQ(no, 82); + // DFG is sensitive to ProgramDesc, be careful to change the existing models. + ASSERT_EQ(no, 112); } } // namespace analysis diff --git a/paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass.cc b/paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass.cc index 9f67c989cc..5f62eef528 100644 --- a/paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass.cc +++ b/paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass.cc @@ -21,19 +21,23 @@ namespace paddle { namespace inference { namespace analysis { -FluidToDataFlowGraphPass::FluidToDataFlowGraphPass() {} - -bool FluidToDataFlowGraphPass::Initialize() { return Pass::Initialize(); } - -bool FluidToDataFlowGraphPass::Initialize( - const framework::proto::ProgramDesc &desc) { - desc_ = &desc; +bool FluidToDataFlowGraphPass::Initialize(Argument *argument) { + ANALYSIS_ARGUMENT_CHECK_FIELD(argument); + ANALYSIS_ARGUMENT_CHECK_FIELD(argument->origin_program_desc); + PADDLE_ENFORCE(argument); + if (!argument->main_dfg) { + LOG(INFO) << "Init DFG"; + argument->main_dfg.reset(new DataFlowGraph); + } + desc_ = argument->origin_program_desc.get(); return true; } bool FluidToDataFlowGraphPass::Finalize() { return Pass::Finalize(); } void FluidToDataFlowGraphPass::Run(DataFlowGraph *graph) { + PADDLE_ENFORCE(graph); + PADDLE_ENFORCE(desc_); // insert vars std::unordered_map var2id; auto &main_block = desc_->blocks(framework::kRootBlockIndex); @@ -41,7 +45,7 @@ void FluidToDataFlowGraphPass::Run(DataFlowGraph *graph) { const auto &var = main_block.vars(i); auto *v = graph->nodes.Create(Node::Type::kValue); v->SetName(var.name()); - v->SetExtraInfo(const_cast(static_cast(&var))); + v->SetPbDesc(const_cast(static_cast(&var))); var2id[var.name()] = v->id(); } for (int i = 0; i < main_block.ops_size(); i++) { @@ -51,7 +55,7 @@ void FluidToDataFlowGraphPass::Run(DataFlowGraph *graph) { static_cast(o)->SetFuncType(op.type()); // Link to the original protobuf message's memory, make it easier to // generate from a data flow graph to fluid ProgramDesc. - o->SetExtraInfo(const_cast(static_cast(&op))); + o->SetPbDesc(const_cast(static_cast(&op))); // set inputs and outputs // TODO(Superjomn) make sure the InputNames is the real variable name. 
for (int j = 0; j < op.inputs_size(); j++) { diff --git a/paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass.h b/paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass.h index 33517e57be..176faf0220 100644 --- a/paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass.h +++ b/paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass.h @@ -34,13 +34,18 @@ namespace analysis { */ class FluidToDataFlowGraphPass final : public DataFlowGraphPass { public: - FluidToDataFlowGraphPass(); - bool Initialize() override; - bool Initialize(const framework::proto::ProgramDesc &desc) override; + FluidToDataFlowGraphPass() = default; + + bool Initialize(Argument *argument) override; bool Finalize() override; void Run(DataFlowGraph *graph) override; + std::string repr() const override { return "fluid-to-data-flow-graph"; } + std::string description() const override { + return "transform a fluid ProgramDesc to a data flow graph."; + } + Pass *CreatePrinterPass(std::ostream &os, const std::string &banner) const override; diff --git a/paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass_tester.cc b/paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass_tester.cc index 817d32c92c..cfbbc284e4 100644 --- a/paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass_tester.cc +++ b/paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass_tester.cc @@ -23,11 +23,11 @@ namespace analysis { TEST_F(DFG_Tester, Init) { FluidToDataFlowGraphPass pass; - pass.Initialize(); - pass.Initialize(desc); + pass.Initialize(&argument); DataFlowGraph graph; pass.Run(&graph); - ASSERT_GT(graph.nodes.size(), 0); + // Analysis is sensitive to ProgramDesc, careful to change the original model. + ASSERT_EQ(graph.nodes.size(), 37); pass.Finalize(); LOG(INFO) << '\n' << graph.DotString(); } diff --git a/paddle/fluid/inference/analysis/helper.h b/paddle/fluid/inference/analysis/helper.h index 58eb0e715c..f0039e1131 100644 --- a/paddle/fluid/inference/analysis/helper.h +++ b/paddle/fluid/inference/analysis/helper.h @@ -62,6 +62,7 @@ struct DataTypeNamer { SET_TYPE(int); SET_TYPE(bool); SET_TYPE(float); + SET_TYPE(void *); } std::unordered_map inlinks; // Output links. std::vector outlinks; // A helper class to maintain the status from Pass. - // TODO(superjomn) add a checker here to ensure the T is primary. struct Attr { // NOTE T should be a primary type or a struct combined by several primary // types. // NOTE the STL containers should not use here. // Some usages - // Attr attr; - // T data; - // attr.data.assign((char*)data, sizeof(data)); + // Attr attr; + // attr.Bool() = true; bool &Bool() { return As(); } float &Float() { return As(); } int32_t &Int32() { return As(); } int64_t &Int64() { return As(); } + void *&Pointer() { return As(); } private: template @@ -130,6 +131,7 @@ class Node { size_t type_hash_{std::numeric_limits::max()}; }; + // Type checks. bool IsFunction() const { return type_ == Node::Type::kFunction; } bool IsValue() const { return type_ == Node::Type::kValue; } bool IsFunctionBlock() const { return type_ == Node::Type::kFunctionBlock; } @@ -148,9 +150,6 @@ class Node { Type type_{Type::kNone}; // Mark this node is deleted by some pass. 
bool deleted_{false}; - - void *extra_info_; - mutable std::unordered_map attrs_; }; diff --git a/paddle/fluid/inference/analysis/pass.h b/paddle/fluid/inference/analysis/pass.h index aa0e8667b5..65632b7491 100644 --- a/paddle/fluid/inference/analysis/pass.h +++ b/paddle/fluid/inference/analysis/pass.h @@ -19,6 +19,7 @@ limitations under the License. */ #include #include "paddle/fluid/framework/framework.pb.h" +#include "paddle/fluid/inference/analysis/argument.h" #include "paddle/fluid/inference/analysis/data_flow_graph.h" #include "paddle/fluid/inference/analysis/helper.h" #include "paddle/fluid/inference/analysis/node.h" @@ -30,19 +31,24 @@ namespace analysis { class Pass { public: Pass() = default; - virtual ~Pass() {} + virtual ~Pass() = default; // Virtual method overridden by subclasses to do only necessary initialization // before any pass is run. - virtual bool Initialize() { return false; } + // virtual bool Initialize() { return false; } // There is some passes such as FlowToDataFlowGraphPass that needs a // ProgramDesc. Here use the native ProgramDesc ProtoBuf message, so that it // only couple with the proto file. - virtual bool Initialize(const framework::proto::ProgramDesc &desc) { - return false; - } + // virtual bool Initialize(const framework::proto::ProgramDesc &desc) { return + // false; } // There are some Passes such as DataFlowGraphToFluidPass that will output a // ProgramDesc. - virtual bool Initialize(framework::proto::ProgramDesc *desc) { return false; } + // virtual bool Initialize(framework::proto::ProgramDesc *desc) { return + // false; } + + // Mutable Pass. + virtual bool Initialize(Argument *argument) { return false; } + // Readonly Pass. + virtual bool Initialize(const Argument &argument) { return false; } // Virtual method overriden by subclasses to do any necessary clean up after // all passes have run. @@ -50,7 +56,9 @@ class Pass { // Get a Pass appropriate to print the Node this pass operates on. virtual Pass *CreatePrinterPass(std::ostream &os, - const std::string &banner) const = 0; + const std::string &banner) const { + return nullptr; + } // Run on a single Node. virtual void Run(Node *x) { LOG(FATAL) << "not valid"; } @@ -60,6 +68,11 @@ class Pass { virtual void Run(FunctionBlock *x) { LOG(FATAL) << "not valid"; } // Run on a single DataFlowGraph. virtual void Run(DataFlowGraph *x) { LOG(FATAL) << "not valid"; } + + // Human-readable short representation. + virtual std::string repr() const = 0; + // Human-readable long description. + virtual std::string description() const = 0; }; // NodePass process on any Node types. diff --git a/paddle/fluid/inference/analysis/pass_manager.cc b/paddle/fluid/inference/analysis/pass_manager.cc new file mode 100644 index 0000000000..b17c0e0d72 --- /dev/null +++ b/paddle/fluid/inference/analysis/pass_manager.cc @@ -0,0 +1,44 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/fluid/inference/analysis/pass_manager.h" +#include "paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass.h" + +namespace paddle { +namespace inference { +namespace analysis { + +void DfgPassManager::RunAll() { + PADDLE_ENFORCE(argument_); + for (auto& pass : data_) { + VLOG(4) << "Running pass [" << pass->repr() << "]"; + pass->Run(argument_->main_dfg.get()); + } +} + +void NodePassManager::RunAll() { + PADDLE_ENFORCE(argument_); + PADDLE_ENFORCE(argument_->main_dfg.get()); + auto trait = + GraphTraits(argument_->main_dfg.get()).nodes_in_DFS(); + for (auto& node : trait) { + for (auto& pass : data_) { + pass->Run(&node); + } + } +} + +} // namespace analysis +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/analysis/pass_manager.h b/paddle/fluid/inference/analysis/pass_manager.h new file mode 100644 index 0000000000..7841c4b9d0 --- /dev/null +++ b/paddle/fluid/inference/analysis/pass_manager.h @@ -0,0 +1,116 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +/* + * This file defines the logic of pass management. The analysis for inference is + * a pipeline of Passes, a PassManager is a agency that helps to manage the + * executation of the Passes. + * + * There are two modes of Passes, the first one is called NodePass and takes + * an Node as input and output; the second one is called DFGPass and takes a + * DFG(Data Flow Graph) as input and output. It is hard to put all the passes in + * the same pipeline, there are two kinds of PassManagers, both takes a DFG as + * input and output a DFG, but the Passes inside are different: + * + * 1. NodePassManager: the passes inside are all NodePasses, it can have + * different graph trivial algorithm, for example, DFS_NodePassManager will + * trigger the passes in depth first order; + * 2. DfgPassManager: the passes inside are all DfgPasses. + */ + +#pragma once + +#include +#include "paddle/fluid/framework/program_desc.h" +#include "paddle/fluid/inference/analysis/pass.h" + +namespace paddle { +namespace inference { +namespace analysis { + +/* + * PassManager is the base class for all pass managers, a pass manager has + * several Pass-es registered, and execute them in the linear order. + */ +class PassManager : public OrderedRegistry { + public: + PassManager() = default; + // Call all the passes' Initialize methods. The desc and data_flow_graph are + // globally shared, so pass them as the arguemnts for all the pass managers. + virtual bool Initialize(const Argument& argument) { return false; } + + virtual bool Initialize(Argument* argument) { + argument_ = argument; + for (auto& pass : data_) { + LOG(INFO) << "Initializing pass " << pass->repr(); + if (!pass->Initialize(argument)) { + LOG(ERROR) << "Failed to initialize pass [" << pass->repr() << "]"; + return false; + } + } + return true; + } + + // Call all the passes' Finalize methods. 
+ virtual bool Finalize() { + for (auto& pass : data_) { + if (!pass->Finalize()) { + LOG(ERROR) << "Failed to finalize pass [" << pass->repr() << "]"; + return false; + } + } + return true; + } + + // Run all the passes. + virtual void RunAll() = 0; + + // Short identifier. + virtual std::string repr() const = 0; + // Long description. + virtual std::string description() const = 0; + + virtual ~PassManager() = default; + + protected: + Argument* argument_{nullptr}; +}; + +/* + * A pass manager that process a DFG. + */ +class DfgPassManager : public PassManager { + public: + DfgPassManager() = default; + + void RunAll() override; + + virtual ~DfgPassManager() = default; +}; + +/* + * A pass manager that process a Node each time. + */ +class NodePassManager : public PassManager { + public: + NodePassManager() = default; + + void RunAll() override; + + virtual ~NodePassManager() = default; +}; + +} // namespace analysis +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/analysis/pass_manager_tester.cc b/paddle/fluid/inference/analysis/pass_manager_tester.cc new file mode 100644 index 0000000000..7af6a19951 --- /dev/null +++ b/paddle/fluid/inference/analysis/pass_manager_tester.cc @@ -0,0 +1,85 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/inference/analysis/pass_manager.h" +#include "paddle/fluid/inference/analysis/data_flow_graph_to_fluid_pass.h" +#include "paddle/fluid/inference/analysis/dfg_graphviz_draw_pass.h" +#include "paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass.h" +#include "paddle/fluid/inference/analysis/ut_helper.h" + +#include + +namespace paddle { +namespace inference { +namespace analysis { + +class TestDfgPassManager final : public DfgPassManager { + public: + TestDfgPassManager() = default; + virtual ~TestDfgPassManager() = default; + // Short identifier. + std::string repr() const override { return "test-pass-manager"; } + // Long description. 
+ std::string description() const override { return "test doc"; } +}; + +class TestNodePassManager final : public NodePassManager { + public: + virtual ~TestNodePassManager() = default; + + std::string repr() const override { return "test-node-pass-manager"; } + std::string description() const override { return "test doc"; } +}; + +class TestNodePass final : public NodePass { + public: + virtual ~TestNodePass() = default; + + bool Initialize(Argument* argument) override { return true; } + + void Run(Node* node) override { + LOG(INFO) << "- Processing node " << node->repr(); + } + + std::string repr() const override { return "test-node"; } + std::string description() const override { return "some doc"; } +}; + +TEST_F(DFG_Tester, DFG_pass_manager) { + TestDfgPassManager manager; + DFG_GraphvizDrawPass::Config config("./", "dfg.dot"); + + manager.Register("fluid-to-flow-graph", new FluidToDataFlowGraphPass); + manager.Register("graphviz", new DFG_GraphvizDrawPass(config)); + manager.Register("dfg-to-fluid", new DataFlowGraphToFluidPass); + + ASSERT_TRUE(manager.Initialize(&argument)); + manager.RunAll(); +} + +TEST_F(DFG_Tester, Node_pass_manager) { + // Pre-process: initialize the DFG with the ProgramDesc first. + FluidToDataFlowGraphPass pass0; + pass0.Initialize(&argument); + pass0.Run(argument.main_dfg.get()); + + TestNodePassManager manager; + manager.Register("test-node-pass", new TestNodePass); + ASSERT_TRUE(manager.Initialize(&argument)); + manager.RunAll(); +} + +} // namespace analysis +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/analysis/subgraph_splitter_tester.cc b/paddle/fluid/inference/analysis/subgraph_splitter_tester.cc index 0644c0db12..8134494f8b 100644 --- a/paddle/fluid/inference/analysis/subgraph_splitter_tester.cc +++ b/paddle/fluid/inference/analysis/subgraph_splitter_tester.cc @@ -19,22 +19,23 @@ namespace paddle { namespace inference { namespace analysis { +SubGraphSplitter::NodeInsideSubgraphTeller teller = [](const Node* node) { + if (node->type() != Node::Type::kFunction) return false; + const auto* func = static_cast(node); + if (func->func_type() == "elementwise_add" || func->func_type() == "relu" || + func->func_type() == "conv2d" || func->func_type() == "mul" || + func->func_type() == "sigmoid" || func->func_type() == "softmax") { + LOG(INFO) << "sub-graph marked " << node->repr(); + return true; + } + return false; +}; + TEST_F(DFG_Tester, Split) { auto desc = LoadProgramDesc(); auto dfg = ProgramDescToDFG(desc); LOG(INFO) << "spliter\n" << dfg.DotString(); - SubGraphSplitter::NodeInsideSubgraphTeller teller = [](const Node* node) { - if (node->type() != Node::Type::kFunction) return false; - const auto* func = static_cast(node); - if (func->func_type() == "elementwise_add" || func->func_type() == "relu" || - func->func_type() == "conv2d" || func->func_type() == "mul" || - func->func_type() == "sigmoid" || func->func_type() == "softmax") { - LOG(INFO) << "sub-graph marked " << node->repr(); - return true; - } - return false; - }; ASSERT_GT(dfg.nodes.size(), 5UL); auto subgraphs = SubGraphSplitter(&dfg, teller)(); @@ -62,6 +63,28 @@ TEST_F(DFG_Tester, Split) { ASSERT_EQ(subgraphs.back().size(), 6UL); } +TEST_F(DFG_Tester, Fuse) { + auto desc = LoadProgramDesc(); + auto dfg = ProgramDescToDFG(desc); + + size_t count0 = dfg.nodes.size(); + + SubGraphFuse fuse(&dfg, teller); + fuse(); + + int count1 = 0; + for (auto& node : dfg.nodes.nodes()) { + if (node->deleted()) { + LOG(INFO) << "deleted " << node->repr(); + } + 
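+    // deleted() is a bool, so accumulating it counts how many of the
+    // original nodes SubGraphFuse marked as deleted.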
count1 += node->deleted();
+  }
+
+  // At least one node should be deleted.
+  ASSERT_EQ(dfg.nodes.size(), count0 + 1);  // added a new FunctionBlock
+  ASSERT_EQ(6UL, count1);
+}
+
 }  // namespace analysis
 }  // namespace inference
 }  // namespace paddle
diff --git a/paddle/fluid/inference/analysis/tensorrt_subgraph_pass.cc b/paddle/fluid/inference/analysis/tensorrt_subgraph_pass.cc
new file mode 100644
index 0000000000..b75df33b71
--- /dev/null
+++ b/paddle/fluid/inference/analysis/tensorrt_subgraph_pass.cc
@@ -0,0 +1,33 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/inference/analysis/tensorrt_subgraph_pass.h"
+#include "paddle/fluid/inference/analysis/subgraph_splitter.h"
+
+namespace paddle {
+namespace inference {
+namespace analysis {
+
+TensorRTSubGraphPass::TensorRTSubGraphPass(
    const TensorRTSubGraphPass::NodeInsideSubgraphTeller &teller)
+    : node_inside_subgraph_teller_(teller) {}
+
+void TensorRTSubGraphPass::Run(DataFlowGraph *graph) {
+  // Invoke the fuser immediately; constructing a SubGraphFuse alone does not
+  // run the fusion.
+  SubGraphFuse(graph, node_inside_subgraph_teller_)();
+}
+
+}  // namespace analysis
+}  // namespace inference
+}  // namespace paddle
diff --git a/paddle/fluid/inference/analysis/tensorrt_subgraph_pass.h b/paddle/fluid/inference/analysis/tensorrt_subgraph_pass.h
new file mode 100644
index 0000000000..79e9e2bcc9
--- /dev/null
+++ b/paddle/fluid/inference/analysis/tensorrt_subgraph_pass.h
@@ -0,0 +1,47 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+
+#include "paddle/fluid/inference/analysis/node.h"
+#include "paddle/fluid/inference/analysis/pass.h"
+#include "paddle/fluid/inference/analysis/subgraph_splitter.h"
+
+namespace paddle {
+namespace inference {
+namespace analysis {
+
+/*
+ * Parse the graph and replace TensorRT-supported nodes with a SubGraphNode.
+ */
+class TensorRTSubGraphPass : public DataFlowGraphPass {
+ public:
+  // Tell whether to transform a sub-graph into TensorRT.
+  using NodeInsideSubgraphTeller = SubGraphFuse::NodeInsideSubgraphTeller;
+
+  TensorRTSubGraphPass(const NodeInsideSubgraphTeller& teller);
+
+  bool Initialize(Argument* argument) override { return true; }
+
+  // This pass gets a sub-graph as input and determines whether to transform
+  // this sub-graph into TensorRT.
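+  //
+  // A hypothetical usage sketch (not part of this patch; the teller below is
+  // an assumed example):
+  //
+  //   TensorRTSubGraphPass pass([](const Node* node) {
+  //     return node->type() == Node::Type::kFunction;
+  //   });
+  //   Argument argument;
+  //   pass.Initialize(&argument);
+  //   pass.Run(argument.main_dfg.get());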
+ void Run(DataFlowGraph* graph) override; + + private: + NodeInsideSubgraphTeller node_inside_subgraph_teller_; +}; + +} // namespace analysis +} // namespace inference +} // paddle diff --git a/paddle/fluid/inference/analysis/tensorrt_subgraph_pass_tester.cc b/paddle/fluid/inference/analysis/tensorrt_subgraph_pass_tester.cc new file mode 100644 index 0000000000..d12dcf0d0f --- /dev/null +++ b/paddle/fluid/inference/analysis/tensorrt_subgraph_pass_tester.cc @@ -0,0 +1,71 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/inference/analysis/tensorrt_subgraph_pass.h" + +#include +#include +#include "paddle/fluid/inference/analysis/dfg_graphviz_draw_pass.h" +#include "paddle/fluid/inference/analysis/ut_helper.h" + +namespace paddle { +namespace inference { +namespace analysis { + +DEFINE_string(model_dir, "", "inference test model dir"); + +TEST(TensorRTSubGraph, single_pass) { + auto desc = LoadProgramDesc(); + auto dfg = ProgramDescToDFG(desc); + + SubGraphSplitter::NodeInsideSubgraphTeller teller = [](const Node* node) { + if (node->type() != Node::Type::kFunction) return false; + const auto* func = static_cast(node); + if (func->func_type() == "elementwise_add" || func->func_type() == "relu" || + func->func_type() == "conv2d" || func->func_type() == "mul" || + func->func_type() == "sigmoid" || func->func_type() == "softmax") { + LOG(INFO) << "sub-graph marked " << node->repr(); + return true; + } + return false; + }; + + DFG_GraphvizDrawPass::Config config{"./", "test"}; + DFG_GraphvizDrawPass dfg_pass(config); + dfg_pass.Initialize(); + + DFG_GraphvizDrawPass dfg_pass1(config); + dfg_pass1.Initialize(); + + dfg_pass.Run(&dfg); + + TensorRTSubGraphPass trt_pass(std::move(teller)); + trt_pass.Initialize(); + + trt_pass.Run(&dfg); + + dfg_pass1.Run(&dfg); + + // Check the TRT op's block desc + for (auto node : dfg.nodes.nodes()) { + if (node->IsFunctionBlock()) { + } + } +} + +TEST(TensorRTSubGraph, pass_manager) {} + +} // namespace analysis +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/analysis/ut_helper.h b/paddle/fluid/inference/analysis/ut_helper.h index 722fa99a48..ce1191a567 100644 --- a/paddle/fluid/inference/analysis/ut_helper.h +++ b/paddle/fluid/inference/analysis/ut_helper.h @@ -15,33 +15,46 @@ limitations under the License. 
*/ #pragma once #include #include +#include #include #include "paddle/fluid/framework/executor.h" #include "paddle/fluid/inference/analysis/data_flow_graph.h" #include "paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass.h" #include "paddle/fluid/inference/analysis/ut_helper.h" -#include "paddle/fluid/inference/io.h" namespace paddle { namespace inference { + +// Read ProgramDesc from a __model__ file, defined in io.cc +extern void ReadBinaryFile(const std::string& filename, std::string* contents); + namespace analysis { DEFINE_string(inference_model_dir, "", "inference test model dir"); static framework::proto::ProgramDesc LoadProgramDesc( const std::string& model_dir = FLAGS_inference_model_dir) { - paddle::platform::CPUPlace place; - paddle::framework::Executor executor(place); - paddle::framework::Scope scope; - auto program = Load(&executor, &scope, model_dir); - return *program->Proto(); + std::string msg; + std::string net_file = FLAGS_inference_model_dir + "/__model__"; + std::ifstream fin(net_file, std::ios::in | std::ios::binary); + PADDLE_ENFORCE(static_cast(fin), "Cannot open file %s", net_file); + fin.seekg(0, std::ios::end); + msg.resize(fin.tellg()); + fin.seekg(0, std::ios::beg); + fin.read(&(msg.at(0)), msg.size()); + fin.close(); + framework::proto::ProgramDesc program_desc; + program_desc.ParseFromString(msg); + return program_desc; } static DataFlowGraph ProgramDescToDFG( const framework::proto::ProgramDesc& desc) { DataFlowGraph graph; FluidToDataFlowGraphPass pass; - pass.Initialize(desc); + Argument argument; + argument.origin_program_desc.reset(new framework::proto::ProgramDesc(desc)); + pass.Initialize(&argument); pass.Run(&graph); pass.Finalize(); return graph; @@ -49,9 +62,12 @@ static DataFlowGraph ProgramDescToDFG( class DFG_Tester : public ::testing::Test { protected: - void SetUp() override { desc = LoadProgramDesc(FLAGS_inference_model_dir); } + void SetUp() override { + auto desc = LoadProgramDesc(FLAGS_inference_model_dir); + argument.origin_program_desc.reset(new framework::proto::ProgramDesc(desc)); + } - framework::proto::ProgramDesc desc; + Argument argument; }; } // namespace analysis diff --git a/paddle/fluid/operators/tensorrt_engine_op_test.cc b/paddle/fluid/operators/tensorrt_engine_op_test.cc index 85330958cd..3a2fef4805 100644 --- a/paddle/fluid/operators/tensorrt_engine_op_test.cc +++ b/paddle/fluid/operators/tensorrt_engine_op_test.cc @@ -240,7 +240,7 @@ void Execute(int batch_size, int input_dim, int output_dim, int nlayers = 1) { } // Test with a larger FC layer. -TEST(TensorRTEngineOp, fc) { Execute(40, 256, 256); } +TEST(TensorRTEngineOp, fc) { Execute(40, 28, 28); } } // namespace operators } // namespace paddle From 792d3b240605d50dfad53a95f1f3283dd3fa7871 Mon Sep 17 00:00:00 2001 From: mozga-intel Date: Mon, 11 Jun 2018 10:11:24 +0200 Subject: [PATCH 54/56] MKLDNN layout: Support for activation operator --- .../fluid/operators/activation_mkldnn_op.cc | 316 +++++++++++------- paddle/fluid/operators/activation_op.cc | 29 +- 2 files changed, 212 insertions(+), 133 deletions(-) diff --git a/paddle/fluid/operators/activation_mkldnn_op.cc b/paddle/fluid/operators/activation_mkldnn_op.cc index 46ed99bcf2..137bca5e2b 100644 --- a/paddle/fluid/operators/activation_mkldnn_op.cc +++ b/paddle/fluid/operators/activation_mkldnn_op.cc @@ -12,16 +12,20 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -#include "mkldnn.hpp" #include "paddle/fluid/operators/activation_op.h" -#include "paddle/fluid/operators/mkldnn_activation_op.h" #include "paddle/fluid/platform/mkldnn_helper.h" namespace paddle { namespace operators { -using paddle::framework::Tensor; -using paddle::platform::MKLDNNDeviceContext; +using framework::DataLayout; +using framework::Tensor; +using mkldnn::memory; +using mkldnn::primitive; +using mkldnn::stream; +using platform::GetMKLDNNFormat; +using platform::MKLDNNDeviceContext; +using platform::to_void_cast; namespace { std::string gethash(const mkldnn::memory::dims &operand_dims, @@ -35,188 +39,260 @@ std::string gethash(const mkldnn::memory::dims &operand_dims, }; return dim2str(operand_dims) + std::to_string(algorithm); } +} // namespace + +template +class MKLDNNActivationKernel + : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext &ctx) const override { + const auto *x = ctx.Input("X"); + PADDLE_ENFORCE(x->layout() == DataLayout::kMKLDNN && + x->format() != memory::format::format_undef, + "Wrong layout/format set for Input x tensor"); + + Functor functor; + + auto attrs = functor.GetAttrs(); + for (auto &attr : attrs) { + *attr.second = ctx.Attr(attr.first); + } + functor(ctx); + } +}; -template -void eltwise_forward(const ExecContext &ctx, mkldnn::algorithm algorithm, - const T alpha = 0, const T beta = 0) { +template +class MKLDNNActivationGradKernel + : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext &ctx) const override { + const auto *diff_y = ctx.Input(framework::GradVarName("Out")); + PADDLE_ENFORCE(diff_y->layout() == DataLayout::kMKLDNN && + diff_y->format() != memory::format::format_undef, + "Wrong layout/format set for Input OutGrad tensor"); + + Functor functor; + + auto attrs = functor.GetAttrs(); + for (auto &attr : attrs) { + *attr.second = ctx.Attr(attr.first); + } + functor(ctx); + } +}; + +template +void eltwise_forward(const framework::ExecutionContext &ctx, + mkldnn::algorithm algorithm, const T alpha = 0, + const T beta = 0) { PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()), "It must use CPUPlace."); - auto &dev_ctx = ctx.template device_context(); const auto &mkldnn_engine = dev_ctx.GetEngine(); - // get buffers - const auto *src = ctx.template Input("X"); - const auto *src_data = src->template data(); + const auto *x = ctx.Input("X"); + auto *y = ctx.Output("Out"); - auto *dst = ctx.template Output("Out"); - T *dst_data = dst->template mutable_data(ctx.GetPlace()); + const T *x_data = x->data(); + T *y_data = y->mutable_data(ctx.GetPlace()); - // get memory dim - PADDLE_ENFORCE(src->dims().size() == 2 || src->dims().size() == 4, + PADDLE_ENFORCE(x->dims().size() == 2 || x->dims().size() == 4, "Input dim must be with 2 or 4"); - std::vector src_tz = framework::vectorize2int(src->dims()); + + std::vector src_tz = framework::vectorize2int(x->dims()); + + auto src_format = + src_tz.size() == 2 ? 
mkldnn::memory::format::nc : x->format(); const std::string key = gethash(src_tz, algorithm); const std::string key_src_data = key + ctx.op().Output("Out") + "@eltwise_fwd_src_data"; - const std::string key_src_mem = key + "@eltwise_fwd_src_mem"; - const std::string key_dst_mem = key + "@eltwise_fwd_dst_mem"; - const std::string key_fwd = key + "@eltwise_fwd"; + const std::string key_src_layout = + key + ctx.op().Output("Out") + "@eltwise_fwd_src_layout"; + const std::string key_with_layout = key + std::to_string(src_format); + const std::string key_src_mem = key_with_layout + "@eltwise_fwd_src_mem"; + const std::string key_dst_mem = key_with_layout + "@eltwise_fwd_dst_mem"; + const std::string key_fwd = key_with_layout + "@eltwise_fwd"; + const std::string key_fwd_pd = key_with_layout + "@eltwise_fwd_pd"; + + // save input data and layout to be referred in backward path + auto p_src_data = std::make_shared(x_data); + dev_ctx.SetBlob(key_src_data, p_src_data); + auto p_src_layout = std::make_shared(src_format); + dev_ctx.SetBlob(key_src_layout, p_src_layout); auto p_fwd = std::static_pointer_cast( dev_ctx.GetBlob(key_fwd)); - // save input data to be referred in backward path - auto p_src_data = std::make_shared(src_data); - dev_ctx.SetBlob(key_src_data, p_src_data); + std::shared_ptr dst_memory; if (p_fwd == nullptr) { - // create memory description - auto data_md = src_tz.size() == 2 - ? platform::MKLDNNMemDesc(src_tz, mkldnn::memory::f32, - mkldnn::memory::format::nc) - : platform::MKLDNNMemDesc(src_tz, mkldnn::memory::f32, - mkldnn::memory::format::nchw); - - // create memory primitives - auto p_src_mem = std::make_shared(mkldnn::memory( - {data_md, mkldnn_engine}, platform::to_void_cast(src_data))); - dev_ctx.SetBlob(key_src_mem, p_src_mem); - - auto p_dst_mem = std::make_shared(mkldnn::memory( - {data_md, mkldnn_engine}, platform::to_void_cast(dst_data))); - dev_ctx.SetBlob(key_dst_mem, p_dst_mem); - - auto fwd_desc = mkldnn::eltwise_forward::desc( - mkldnn::prop_kind::forward_training, algorithm, data_md, alpha, beta); - auto p_fwd_pd = std::make_shared( - fwd_desc, mkldnn_engine); - const std::string key_fwd_pd = key + "eltwise_fwd_pd"; - dev_ctx.SetBlob(key_fwd_pd, p_fwd_pd); - p_fwd = std::make_shared( - *p_fwd_pd, *(p_src_mem.get()), *(p_dst_mem.get())); + // create mkldnn memory for input X + auto src_md = platform::MKLDNNMemDesc( + src_tz, platform::MKLDNNGetDataType(), src_format); + auto src_memory = std::shared_ptr( + new memory({src_md, mkldnn_engine}, to_void_cast(x_data))); + // save src_memory to be referred in backward path + dev_ctx.SetBlob(key_src_mem, src_memory); + + // create primitive descriptor for activation forward and save it + auto forward_desc = mkldnn::eltwise_forward::desc( + mkldnn::prop_kind::forward_training, algorithm, + src_memory->get_primitive_desc().desc(), alpha, beta); + auto forward_pd = std::make_shared( + forward_desc, mkldnn_engine); + + // save prim desc into global device context to be referred in backward path + dev_ctx.SetBlob(key_fwd_pd, forward_pd); + + // create mkldnn memory for output y + dst_memory = + std::make_shared(forward_pd->dst_primitive_desc(), y_data); + + dev_ctx.SetBlob(key_dst_mem, dst_memory); + + // create activation primitive + p_fwd = std::make_shared(*forward_pd, *src_memory, + *dst_memory); dev_ctx.SetBlob(key_fwd, p_fwd); } else { // primitives already exist - auto p_src_mem = + auto src_memory = std::static_pointer_cast(dev_ctx.GetBlob(key_src_mem)); - PADDLE_ENFORCE(p_src_mem != nullptr, - "Fail to find eltwise 
p_src_mem in device context."); - auto p_dst_mem = + PADDLE_ENFORCE(src_memory != nullptr, + "Fail to find eltwise src_memory in device context."); + dst_memory = std::static_pointer_cast(dev_ctx.GetBlob(key_dst_mem)); - PADDLE_ENFORCE(p_dst_mem != nullptr, - "Fail to find eltwise p_src_mem in device context."); + PADDLE_ENFORCE(dst_memory != nullptr, + "Fail to find eltwise dst_memory in device context."); - p_src_mem->set_data_handle(platform::to_void_reinterpret_cast(src_data)); - p_dst_mem->set_data_handle(dst_data); + src_memory->set_data_handle(platform::to_void_cast(x_data)); + dst_memory->set_data_handle(y_data); } // push primitive to stream and wait until it's executed - std::vector pipeline = {*(p_fwd.get())}; - mkldnn::stream(mkldnn::stream::kind::eager).submit(pipeline).wait(); + std::vector pipeline; + pipeline.push_back(*p_fwd); + stream(stream::kind::eager).submit(pipeline).wait(); + + y->set_layout(DataLayout::kMKLDNN); + y->set_format(GetMKLDNNFormat(*dst_memory)); } -template -void eltwise_grad(const ExecContext &ctx, mkldnn::algorithm algorithm, - const T alpha = 0, const T beta = 0) { +template +void eltwise_grad(const framework::ExecutionContext &ctx, + mkldnn::algorithm algorithm, const T alpha = 0, + const T beta = 0) { auto &dev_ctx = ctx.template device_context(); const auto &mkldnn_engine = dev_ctx.GetEngine(); - // get buffers - const auto *out = ctx.template Input("Out"); - - auto *dout = ctx.template Input(framework::GradVarName("Out")); - const auto *diff_dst = dout->template data(); + const auto *diff_y = ctx.Input(framework::GradVarName("Out")); + auto *diff_x = ctx.Output(framework::GradVarName("X")); - auto *dx = - ctx.template Output(framework::GradVarName("X")); - const T *diff_src = dx->template mutable_data(ctx.GetPlace()); + const T *diff_y_data = diff_y->data(); + T *diff_x_data = diff_x->mutable_data(ctx.GetPlace()); - // get memory dim - std::vector src_tz = framework::vectorize2int(out->dims()); + std::vector diff_dst_tz = framework::vectorize2int(diff_y->dims()); - const std::string key = gethash(src_tz, algorithm); - const std::string key_diff_src_mem = key + "@eltwise_diff_src_mem"; - const std::string key_diff_dst_mem = key + "@eltwise_diff_dst_mem"; - const std::string key_grad = key + "@eltwise_grad"; + auto diff_y_format = + diff_dst_tz.size() == 2 ? 
mkldnn::memory::format::nc : diff_y->format(); + const std::string key = gethash(diff_dst_tz, algorithm); const std::string key_src_data = key + ctx.op().Input("Out") + "@eltwise_fwd_src_data"; + const std::string key_src_layout = + key + ctx.op().Input("Out") + "@eltwise_fwd_src_layout"; + const auto p_src_layout = + std::static_pointer_cast(dev_ctx.GetBlob(key_src_layout)); + const std::string key_src_mem = + key + std::to_string(*p_src_layout) + "@eltwise_fwd_src_mem"; + const std::string key_fwd_pd = + key + std::to_string(*p_src_layout) + "@eltwise_fwd_pd"; + const std::string key_with_layouts = + key + std::to_string(*p_src_layout) + "-" + std::to_string(diff_y_format); + const std::string key_diff_src_mem = + key_with_layouts + "@eltwise_diff_src_mem"; + const std::string key_diff_dst_mem = + key_with_layouts + "@eltwise_diff_dst_mem"; + const std::string key_grad = key_with_layouts + "@eltwise_grad"; + const auto p_src_data = std::static_pointer_cast(dev_ctx.GetBlob(key_src_data)); - const std::string key_src_mem = key + "@eltwise_fwd_src_mem"; - auto p_src_mem = + auto src_memory = std::static_pointer_cast(dev_ctx.GetBlob(key_src_mem)); - p_src_mem->set_data_handle(*p_src_data.get()); + PADDLE_ENFORCE(src_memory != nullptr, + "Fail to find src_memory in device context"); + src_memory->set_data_handle(*p_src_data.get()); + + std::shared_ptr diff_src_memory; - auto p_grad = std::static_pointer_cast( + auto p_grad = std::static_pointer_cast( dev_ctx.GetBlob(key_grad)); if (p_grad == nullptr) { - // create memory description - auto data_md = src_tz.size() == 2 - ? platform::MKLDNNMemDesc(src_tz, mkldnn::memory::f32, - mkldnn::memory::format::nc) - : platform::MKLDNNMemDesc(src_tz, mkldnn::memory::f32, - mkldnn::memory::format::nchw); - - // create memory primitives - std::shared_ptr p_diff_src_mem = - std::make_shared(mkldnn::memory( - {data_md, mkldnn_engine}, platform::to_void_cast(diff_src))); - dev_ctx.SetBlob(key_diff_src_mem, p_diff_src_mem); - std::shared_ptr p_diff_dst_mem = - std::make_shared(mkldnn::memory( - {data_md, mkldnn_engine}, platform::to_void_cast(diff_dst))); - dev_ctx.SetBlob(key_diff_dst_mem, p_diff_dst_mem); - - auto bwd_desc = mkldnn::eltwise_backward::desc(algorithm, data_md, data_md, - alpha, beta); - - const std::string key_fwd_pd = key + "eltwise_fwd_pd"; - auto *p_fwd_pd = static_cast( - dev_ctx.GetBlob(key_fwd_pd).get()); - - auto eltwise_bwd_prim_desc = mkldnn::eltwise_backward::primitive_desc( - bwd_desc, mkldnn_engine, *p_fwd_pd); - + // create mkldnn memory for input diff_y + auto diff_dst_md = platform::MKLDNNMemDesc( + diff_dst_tz, platform::MKLDNNGetDataType(), diff_y_format); + auto diff_dst_memory = std::shared_ptr( + new memory({diff_dst_md, mkldnn_engine}, to_void_cast(diff_y_data))); + dev_ctx.SetBlob(key_diff_dst_mem, diff_dst_memory); + + // retrieve eltwise primitive desc from device context + auto forward_pd = + std::static_pointer_cast( + dev_ctx.GetBlob(key_fwd_pd)); + PADDLE_ENFORCE(forward_pd != nullptr, + "Fail to find eltwise_fwd_pd in device context"); + + // ceate primitive descriptor for activation backward + auto backward_desc = mkldnn::eltwise_backward::desc( + algorithm, diff_dst_memory->get_primitive_desc().desc(), + src_memory->get_primitive_desc().desc(), alpha, beta); + auto backward_pd = mkldnn::eltwise_backward::primitive_desc( + backward_desc, mkldnn_engine, *forward_pd); + + // create mkldnn memory for output diff_src + diff_src_memory = std::make_shared( + backward_pd.diff_src_primitive_desc(), diff_x_data); + 
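+    // The diff_src memory is cached in the device context under a
+    // layout-aware key, so later iterations with the same layouts can reuse
+    // it instead of recreating it.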
dev_ctx.SetBlob(key_diff_src_mem, diff_src_memory); + + // create activation backward primitive p_grad = std::make_shared( - eltwise_bwd_prim_desc, *static_cast(p_src_mem.get()), - *(static_cast(p_diff_dst_mem.get())), - *(static_cast(p_diff_src_mem.get()))); + backward_pd, *src_memory, *diff_dst_memory, *diff_src_memory); + dev_ctx.SetBlob(key_grad, p_grad); } else { // primitives already exist - auto p_diff_src_mem = std::static_pointer_cast( + diff_src_memory = std::static_pointer_cast( dev_ctx.GetBlob(key_diff_src_mem)); - auto p_diff_dst_mem = std::static_pointer_cast( + auto diff_dst_memory = std::static_pointer_cast( dev_ctx.GetBlob(key_diff_dst_mem)); - p_diff_src_mem->set_data_handle( - platform::to_void_reinterpret_cast(diff_src)); - p_diff_dst_mem->set_data_handle( - platform::to_void_reinterpret_cast(diff_dst)); + diff_src_memory->set_data_handle( + platform::to_void_reinterpret_cast(diff_x_data)); + diff_dst_memory->set_data_handle( + platform::to_void_reinterpret_cast(diff_y_data)); } // push primitive to stream and wait until it's executed - std::vector pipeline = {*(p_grad.get())}; - mkldnn::stream(mkldnn::stream::kind::eager).submit(pipeline).wait(); + std::vector pipeline; + pipeline.push_back(*p_grad); + stream(stream::kind::eager).submit(pipeline).wait(); + + diff_x->set_layout(DataLayout::kMKLDNN); + diff_x->set_format(GetMKLDNNFormat(*diff_src_memory)); } -} // anonymous namespace template struct MKLDNNActivationFunc : public BaseActivationFunctor { - template - void operator()(const ExecContext &ctx) const { + void operator()(const framework::ExecutionContext &ctx) const { eltwise_forward(ctx, algorithm); } }; template struct MKLDNNActivationGradFunc : public BaseActivationFunctor { - template - void operator()(const ExecContext &ctx) const { + void operator()(const framework::ExecutionContext &ctx) const { eltwise_grad(ctx, algorithm); } }; diff --git a/paddle/fluid/operators/activation_op.cc b/paddle/fluid/operators/activation_op.cc index a06ca7952f..b6b498a616 100644 --- a/paddle/fluid/operators/activation_op.cc +++ b/paddle/fluid/operators/activation_op.cc @@ -19,18 +19,20 @@ limitations under the License. 
*/ namespace paddle { namespace operators { -#define REGISTER_ACTIVATION_OP_MAKER(OP_NAME, OP_COMMENT) \ - class OP_NAME##OpMaker \ - : public ::paddle::framework::OpProtoAndCheckerMaker { \ - public: \ - void Make() override { \ - AddInput("X", "Input of " #OP_NAME " operator"); \ - AddOutput("Out", "Output of " #OP_NAME " operator").Reuse("X"); \ - AddAttr("use_mkldnn", \ - "(default false) Only used in mkldnn kernel") \ - .SetDefault(false); \ - AddComment(OP_COMMENT); \ - } \ +using paddle::framework::Tensor; + +#define REGISTER_ACTIVATION_OP_MAKER(OP_NAME, OP_COMMENT) \ + class OP_NAME##OpMaker \ + : public ::paddle::framework::OpProtoAndCheckerMaker { \ + public: \ + void Make() override { \ + AddInput("X", "Input of " #OP_NAME " operator"); \ + AddOutput("Out", "Output of " #OP_NAME " operator").Reuse("X"); \ + AddAttr("use_mkldnn", \ + "(bool, default false) Only used in mkldnn kernel") \ + .SetDefault(false); \ + AddComment(#OP_COMMENT); \ + } \ } #define REGISTER_ACTIVATION_OP_GRAD_MAKER(OP_NAME, KERNEL_TYPE) \ @@ -58,7 +60,6 @@ framework::OpKernelType GetKernelType(const framework::ExecutionContext& ctx, const framework::OperatorWithKernel& oper, const std::string& name) { framework::LibraryType library{framework::LibraryType::kPlain}; - framework::DataLayout layout = framework::DataLayout::kAnyLayout; #ifdef PADDLE_WITH_MKLDNN auto it = oper.Attrs().find("use_mkldnn"); @@ -82,6 +83,7 @@ class ActivationOp : public framework::OperatorWithKernel { ctx->ShareLoD("X", /*->*/ "Out"); } + protected: framework::OpKernelType GetExpectedKernelType( const framework::ExecutionContext& ctx) const override { return GetKernelType(ctx, *this, "X"); @@ -96,6 +98,7 @@ class ActivationOpGrad : public framework::OperatorWithKernel { ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("Out")); } + protected: framework::OpKernelType GetExpectedKernelType( const framework::ExecutionContext& ctx) const override { return GetKernelType(ctx, *this, "Out"); From 4dda54aa5af6cc7f53d4ee5e5212293d9a67023b Mon Sep 17 00:00:00 2001 From: gongweibao Date: Mon, 18 Jun 2018 19:59:43 -0500 Subject: [PATCH 55/56] Fix unlikely (#11537) --- paddle/fluid/inference/analysis/argument.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/paddle/fluid/inference/analysis/argument.h b/paddle/fluid/inference/analysis/argument.h index 7d7131ed7a..f7f4e03968 100644 --- a/paddle/fluid/inference/analysis/argument.h +++ b/paddle/fluid/inference/analysis/argument.h @@ -21,6 +21,8 @@ * big. 
*/ +#pragma once + #include "paddle/fluid/framework/program_desc.h" #include "paddle/fluid/inference/analysis/data_flow_graph.h" @@ -43,7 +45,7 @@ struct Argument { #define UNLIKELY(condition) __builtin_expect(static_cast(condition), 0) #define ANALYSIS_ARGUMENT_CHECK_FIELD(field__) \ - if (!UNLIKELY(field__)) { \ + if (UNLIKELY(!(field__))) { \ LOG(ERROR) << "field " << #field__ << " should be set."; \ return false; \ } From d00a0436b1c562060b49aa2981be094d78bcbdf5 Mon Sep 17 00:00:00 2001 From: "Yang Yang(Tony)" Date: Mon, 18 Jun 2018 18:50:19 -0700 Subject: [PATCH 56/56] Remove tape (#11548) * Remove tape * remove tape in cmake * fix CI --- paddle/contrib/CMakeLists.txt | 1 - paddle/contrib/tape/CMakeLists.txt | 25 -- paddle/contrib/tape/README.md | 252 -------------------- paddle/contrib/tape/computation_graph.png | Bin 96637 -> 0 bytes paddle/contrib/tape/function.h | 131 ----------- paddle/contrib/tape/tape.cc | 265 ---------------------- paddle/contrib/tape/tape.h | 64 ------ paddle/contrib/tape/test_tape.cc | 61 ----- paddle/contrib/tape/variable.cc | 33 --- paddle/contrib/tape/variable.h | 85 ------- 10 files changed, 917 deletions(-) delete mode 100644 paddle/contrib/tape/CMakeLists.txt delete mode 100644 paddle/contrib/tape/README.md delete mode 100644 paddle/contrib/tape/computation_graph.png delete mode 100644 paddle/contrib/tape/function.h delete mode 100644 paddle/contrib/tape/tape.cc delete mode 100644 paddle/contrib/tape/tape.h delete mode 100644 paddle/contrib/tape/test_tape.cc delete mode 100644 paddle/contrib/tape/variable.cc delete mode 100644 paddle/contrib/tape/variable.h diff --git a/paddle/contrib/CMakeLists.txt b/paddle/contrib/CMakeLists.txt index 70e3a0583d..4b19256ef4 100644 --- a/paddle/contrib/CMakeLists.txt +++ b/paddle/contrib/CMakeLists.txt @@ -14,4 +14,3 @@ # add_subdirectory(inference) -add_subdirectory(tape) diff --git a/paddle/contrib/tape/CMakeLists.txt b/paddle/contrib/tape/CMakeLists.txt deleted file mode 100644 index 5450359d85..0000000000 --- a/paddle/contrib/tape/CMakeLists.txt +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -if(APPLE) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=pessimizing-move") -endif(APPLE) - -cc_library(tape_variable SRCS variable.cc DEPS ${FLUID_CORE_MODULES} device_context framework_proto proto_desc operator) -cc_library(tape SRCS tape.cc DEPS ${FLUID_CORE_MODULES} ${GLOB_OP_LIB} tape_variable) - -cc_test(test_tape - SRCS test_tape.cc - DEPS tape tape_variable) diff --git a/paddle/contrib/tape/README.md b/paddle/contrib/tape/README.md deleted file mode 100644 index 16c22a45d5..0000000000 --- a/paddle/contrib/tape/README.md +++ /dev/null @@ -1,252 +0,0 @@ -# Dynamic Graph on Fluid - -PaddlePaddle Fluid is targeting the autodiff without tape, which, however, is very -challenging and we are still way from there. DyNet and PyTorch provide a good design -idea, the *tape*, that significantly eases the challenge. 
Also, DyNet provides
-a C++ API that is as convenient as Python but with higher efficiency and can
-integrate conveniently with industrial/production systems. This package, `tape`,
-combines the best of
-
-1. tape from PyTorch and DyNet
-2. C++ API and core from DyNet
-3. rich set of operators from PaddlePaddle
-
-## Overview
-
-We can implement a DyNet-like Tape (see this [survey](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/survey/dynamic_graph.md))
-by wrapping Paddle Fluid's `Operator` and `Variable`.
-
-The user API is straightforward since
-
-1. it is imperative and uses the host language's control flow logic.
-1. it avoids extra concepts such as `Scope` and `Executor`.
-
-All of these benefits come at the cost of just adding one line, `reset_global_tape()`,
-at every iteration.
-
-## Code Structure
-
-In short, the `Tape` contains a vector of `OpHandle`s, and an `OpHandle` contains its
-`type`, the pointers to the `Variable`s, and necessary attributes.
-
-```c++
-class Variable {
-public:
-  VariableHandle Grad(); // returns its gradient variable
-private:
-  framework::VarDesc desc_; // compile time infershape, necessary for lazy execution
-  framework::Variable var_; // run time variable, holds data memory
-};
-
-using VariableHandle = shared_ptr<Variable>;
-
-struct OpHandle {
-  string type_;
-  map<string, vector<VariableHandle>> inputs_;
-  map<string, vector<VariableHandle>> outputs_;
-  AttributeMap attrs_;
-};
-
-class Tape {
-public:
-  void AddOp(OpHandle);  // add op
-  void Forward();        // execute the tape_
-  void Backward();       // execute the backward of the tape_
-private:
-  vector<OpHandle> tape_;
-};
-```
-
-We use `Function` to represent layers. A `Function` takes care of parameter
-initialization and calls `AddOp` on the Tape when it is invoked.
-
-```c++
-class Linear {
- public:
-  Linear(int in_dim, int out_dim, const std::string &act)
-      : w_(new Variable("LinearWeight")),
-        b_(new Variable("LinearBias")),
-        act_(act) {
-    Tape init_tape;
-
-    std::string initializer = "fill_constant";
-    framework::AttributeMap attrs;
-    attrs["dtype"] = paddle::framework::proto::VarType::Type::VarType_Type_FP32;
-    attrs["shape"] = std::vector<int>{in_dim, out_dim};
-    attrs["value"] = 1.0f;
-    init_tape.AddOp(initializer, {}, {{"Out", {w_}}}, attrs);
-
-    attrs["dtype"] = paddle::framework::proto::VarType::Type::VarType_Type_FP32;
-    attrs["shape"] = std::vector<int>{out_dim};
-    attrs["value"] = 1.0f;
-    init_tape.AddOp(initializer, {}, {{"Out", {b_}}}, attrs);
-
-    init_tape.Forward();
-  }
-
-  VariableHandle operator()(VariableHandle input) {
-    VariableHandle pre_bias(new Variable("linear"));
-    get_global_tape().AddOp("mul",
-                            {{"X", {input}}, {"Y", {w_}}},
-                            {{"Out", {pre_bias}}},
-                            {{"x_num_col_dims", 1}, {"y_num_col_dims", 1}});
-    VariableHandle pre_act(new Variable("linear"));
-    get_global_tape().AddOp("elementwise_add",
-                            {{"X", {pre_bias}}, {"Y", {b_}}},
-                            {{"Out", {pre_act}}},
-                            {{"axis", 1}});
-    VariableHandle post_act(new Variable("linear"));
-    get_global_tape().AddOp(act_,
-                            {{"X", {pre_act}}},
-                            {{"Out", {post_act}}},
-                            {});
-    return post_act;
-  }
-
-  std::vector<VariableHandle> Params() { return {w_, b_}; }
-
- private:
-  VariableHandle w_;
-  VariableHandle b_;
-  std::string act_;
-};
-```
-
-## User API
-
-```c++
-// Model function
-paddle::tape::Linear linear1(3, 3, "relu"); // init weight and bias
-paddle::tape::Linear linear2(3, 3, "relu"); // init weight and bias
-paddle::tape::Mean mean;
-
-// Optimizer
-paddle::tape::SGD sgd(0.001);
-
-// Data Feeder
-paddle::tape::Fill data_feeder(...);
-VariableHandle input(new paddle::tape::Variable("input"));
-VariableHandle label(new
paddle::tape::Variable("label"));
-
-for (int i = 0; i < 2; ++i) {
-  reset_global_tape();
-
-  data_feeder(input, label);
-
-  auto loss = softmax(linear2(linear1(input)), label); // compile time InferShape & InferVarType
-  LOG(INFO) << loss.value(); // Run forward up to loss
-
-  // Run backward, store gradient of w at w->Grad()
-  get_global_tape().Backward(loss);
-
-  // Update w
-  sgd(linear1.Params());
-  sgd(linear2.Params());
-}
-```
-
-
- -digraph G { - - subgraph cluster_0 { - node [shape=record,style=filled]; - style=filled; - color=lightgrey; - linear1 [label="{type: mul | {input | {X: before_mul1 | Y: weight1}} | {output | Out: before_bias1}}"]; - elementwise_add1 [label="{type: elementwise_add | {input | {X: before_bias1 | Y: bias1}} | {output | Out: before_act1}}"]; - relu1 [label="{type: relu | {input | {X: before_act1 }} | {output | Out: after_act1}}"]; - - linear1 -> elementwise_add1->relu1; - label = "forward tape"; - } - - linear1:before_mul1->before_mul1 - linear1:weight1->weight1 - linear1:before_bias1->before_bias1 - - elementwise_add1:bias1->bias1 - elementwise_add1:before_bias1->before_bias1 - elementwise_add1:before_act1->before_act1 - - relu1:before_act1->before_act1 - relu1:after_act1->after_act1 - - subgraph cluster_1 { - node [shape=record,style=filled]; - style=filled; - color=lightgrey; - linear1_grad [label="{type: mul_grad | {input | {X: before_mul1 | Y: weight1| Out_grad: before_bias1_grad}} | {output |{X_grad: before_mul1_grad | Y_grad: weight1_grad}}}"]; - - elementwise_add1_grad [label="{type: elementwise_add_grad | {input | Out_grad: before_act1_grad} | {output |{X_grad: before_bias1_grad | Y_grad: bias1_grad}}}"]; - - relu1_grad [label="{type: relu_grad | {input | Out_grad: after_act1_grad} | {ouput | {X_grad: before_act1_grad }}}"]; - - linear1_grad -> elementwise_add1_grad ->relu1_grad [dir=back]; - label = "backward tape"; - } - - relu1_grad:after_act1_grad->after_act1_grad - relu1_grad:before_act1_grad->before_act1_grad - - elementwise_add1_grad:before_act1_grad->before_act1_grad - elementwise_add1_grad:before_bias1_grad->before_bias1_grad - elementwise_add1_grad:bias1_grad->bias1_grad - - linear1_grad:before_mul1->before_mul1 - linear1_grad:weight1->weight1 - linear1_grad:before_bias1_grad->before_bias1_grad - linear1_grad:before_mul1_grad->before_mul1_grad - linear1_grad:weight1_grad->weight1_grad - - - subgraph cluster_2 { - node [shape=record]; - label = "Linear1"; - weight1 - bias1 - } - - weight1 -> weight1_grad [ label="Grad()", style="dashed" ]; - bias1 -> bias1_grad [ label="Grad()", style="dashed"]; - - - -} -
-
-![Image](https://github.com/tonyyang-svail/Paddle/blob/cpp_tap/paddle/contrib/tape/computation_graph.png)
-
-## Code Reuse
-
-We want to stay as close to Paddle Fluid as possible.
-
-### Reuse All Operators
-
-As all Ops are registered at `OpInfoMap`, the effort of adding a new `Function`
-is about 10 lines of code, similar to exposing an operator to Python.
-
-### Reuse Compile Time InferShape and InferVarType
-
-Note that all the symbolic information is stored at `tape::Variable::desc_` instead
-of `ProgramDesc.block.vars`, so we create a temporary `BlockDesc` to do `InferShape` and
-`InferVarType` every time we `AddOp` to the tape.
-
-### Reuse Operator::Run
-
-We use smart pointers, instead of `Scope`, to manage memory, so we create a temporary
-`Scope` for every `Operator::Run()`.
-
-## Possible Features
-
-### Release Memory on Backward
-
-We can release memory aggressively: during backward, we can delete an OpHandle once
-we have finished its backward. Since all the variables are managed by smart pointers,
-the memory is automatically released when its `ref_count` goes to 0.
-
-### Kernel Fusion
-
-Since a symbolic representation of the Tape is constructed before the actual
-execution, it would be possible to perform graph optimization. One use case is kernel
-fusion.
diff --git a/paddle/contrib/tape/computation_graph.png b/paddle/contrib/tape/computation_graph.png
deleted file mode 100644
index 6cf5ead735d5d18b204b079771e53d44483cf016..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 96637
zv#gpD)!6vzkxPwRE{guSLRb@w?1{FEMMn12{Fct{VK;U#a~xEj! z&frXPx6gbAS>_|Vi`7IK4{?MQyyWHWuZ@iY@oWxa7z}uzxck*=`IHI7yf5eL$Jd{m z`yg@ws@FBi;nsF`k&=Vm-;}B!93e6Tb`D5!jB;=`xmU5wfM6@jzV?~&|L|vZ9YsP2 zrISuhY^^Zgu5>g!Jg0Lj=o9W52Zvvwbd`Xx@;^iI*!+pD2)_9r#+-lsh+WGe>NXy@ zkgiw=X1P9;X3KZk_+ATu>Eg1cP#44_VwBR&T?Tn32DauV?bQ#Q( zIz1&8GA!tNN06$88PEH z$=*o4qA9hxuJJIAsj>JIt#EG{_(BMvcQ)dBF8@mfR?^A_=Plm}Z43ur2DW>5xU>Qa_6X7G1~U){6Zt@(EU;9}?7`GfQgR~_H30@A%S z;Ly4q_(Fi4{Wg{W*8;l~%@I|iS+_g~?^)cKGgWEgj#@fY(u@hr?^G=I`Z|WZXcL&JU62qrBVFWG!u{ZkL*GcNBZUvHz zL9=PTkU7^FbHS5OyCaiqMq!r3F)t{m>F|vrBPDY~6H6Ewl&utgfalV?t zgrxS(>{_ct->baiCLCDNNk5JVj3ho5Qq+rx7EoBVKh%qdNW@3-IM>U z)k$Ivd|yR?xyy7;q9}ZES3pRrJE_H2-H3w`HS*&NM)>^f&Zh8G-nKLoFT0_oQzFz^ zEm-!-P$=~ac7D9WD2AEK^o2E59L1H92#hbMvR11%i8W=#KR5%1>73+!n~reqpe+99(Xf*Voj*gm^aW1tmaANKz1c(U&Dsw~vNCIT!YPtF1@-(Q%_}WNs0GDd zGQKZUTlcdl~gbLoz=DHPJxhkUlPm;LHbP7KKW4)H$N2Hv{} zfd`E1_!%G7E%ebA*=dZq+nX=jiG)>t@)MB`TbZf||l*ArNE1@>S~O#B1VQ`7j)q`3h*}^f zUG2BGmjn$KqySewZB_2ls8fOw;83F53^(U4)2v=Qf0Lb^O&rl<&OPLfKrqX%NxV)J z@kHcl_=dhwnwevK)P*vFjjJ`990Y}AgjJ3r@3?9A3cT!|%)5%@ag>7Fe|tly%}7=<+TU(SexIZO3< z%EsZO?4a&5DkW6vbhYZKDcGt5ulAs+)6v!|>E~2D6EjOS$veAkk*;RNt&NT2qg-_o zrXZpKuyv&b0nA5z{+w;ynQ`G4S>*d6?_bioI-h%1PNRWwxgy`zj3klTHnX302>ni( z-6dqPcvqG_`O1?4x8g^L0kg#SXEQCXt>U8_=x>*O`ivW7caJWp@S-yNEci%01rOtS z6H1p38dyonEh;WvKd;2{LOix3yf}7{h@Yi=8T!MZmYA`Wv^J+VB8XvV!`d#=J*F^q zM)!2=c&~YXZ>pL;rZ3jZ0Nhl-8i!X(7r=!EklH9rGPh`JZ7_w;Ze9q>s7r-KyDr=X zL9fN|gk_C$3v8EEdy7zzJ1Zx_%XIsjfYSZ@lHY27D)NxZ3=$83Lb)N3WuamOujQXP zgmDYCt7;gMkkexu^pGDmx&7>h0#sseru5umtRm)fxVL=_ns@^pSl$0pU z#HU42RcWup8j;f_%pqj&L0qWB^xG+UaJTh57F?TGE75g(@+Y7`; zKT{!22^XLexZWLF%HpPpDtt!^#2%oH6SU%4w-^7s(5N8$u=|WIDi%`F4qHxclaSg@ zZpr*I^DV+lAr953H3{GLejb7n{N<%jYtG)>^cH3^lDY`O0Fo*u%o5dsu6&`=-k6dm z4;^?g_$;C-IT}wSB6{@V)b)vtWriFzWrD&CM zZ^`Aoyn@TSM)w{aua8&qxxFj};eScEO{GwD(cZ$3SovmT~sOXT3?(epYusIm6q$K3~rt@+>HxP@G3R3*6M^nJN)bl0dCr%U|{K#0H zdAd2TV42?bu3sys%`C}yW>tW!gXSa-ugWt)a=2}SnSnm~&z zQ~0s}IFbbm2ow21ulQM>)t?Kk;FgeA`B}8VlO8!@8C)LA7W?jYOMs+7iMHroT4YR+ z#3ssxNITRV#>(#6T65syn&3-$WX@PpQvD`o5f9C;4wZ5DyYlm8ptd5BHF_pxR>hoS z&nsag8;t(zk3b~Uy|^|inmX>trE@~x&!<10#qWBVf`@qUnOSNk%)G~!>-6eqNN_*& zMOni}Rv2s3Mi!3Wc~?mo3X;ENusP4NGq0V*(W`a;+4&jM#4*A1HMQyu`8O->{x7sw-0#mP(@Ys$?9f)hVRL^y1viJIAF^4+?Rqd2$_FvpAhLfM^U25 z#4~GF8TY*<(*1r(2b_Txr4~mA#_I>U9%7bX$FVVFo!1395!{r8ihJx(lQd5+) z+FH$RS!aohHP(P-GT!;o7vjj(X9{`0HczINw4y`g8XhDrZaL>Ree>0i&W?t|@m=K( z<~pZj$|tr@E^}oP$ZrfY6($>Qn4BRQl6YYNxZn+&_v}r|>XYX!98sKAUCp zJq(K=;cL78({EaRAvrR#DR&+M>^Ajr);MNAPS5WLNylh$(vK=7Dr@-3BZ>Q5LP2=)121wiv@zwsP+zR4b3cG`5K$2-h4EKCd)KbA`v zWSD%{B09YpLGF;~)Dx4Pq(O2cci<`E_{TpLUmFur_pTIt!%9Q85vQefaR9{k)s4?I>;kdX2B0 zBV}+x*KH1qZ??jq^FG|xhCCa*Ezqt8xt05@j36K z!3jOD(*9LL-5wI@E&eesfwc{V?lovNyOzStb*fDV4}&B3Ym>7; z{bzg1EW>9l$*j(LiVE$U3mEq1WP(kzDo1De0a8b5uuo}`#B%~nqSDi|ye8+szE`B5 z)5aIL?B-jx%bNEjL@#0bw|^1<#VQ}aD(|zU97e>px3e?b=5s}JM+Us~I3}#f)1Fk~ zt&~U={hR0bA*=7?W{!nT%?OLj%k>)_P54RUO+1!+A|B$9>$G~F&?e|mg=S@`45jM> zlXduIr7FyyA|LzxYj5_rB~-dQ8a_sjU2l+Zf?rfv*tnK#RR%uxefGV83PsO5aYX(^ zeXgmin|JJInb2{<+TH@j_?uEo6@UIWA&V_sbwE${@4wJ2q3eULV)b(PrZf5H^^CiV zjA4;0Yk2NGq*u8efs`GEOCGnUGj@Kfaq54kz=G>RTqSs4eaPmuldRF_pslslRz3PVHg5u?#wmUho=O}6{BDf3LXD?<2LWH4i?5y$M@ia-_^{5=`xGThu4`MO&^t= zU~h?nx|U|_+HdM+Y_(h59pvRd_pbuAzt<%K;rHVoNbzUjpQ{hW_?lgJo0dM2kb579 zMW6~stC!VUO;oWiIeVXXQTPJuO844pWb?b|&s5mxd`Qvq=jRJeB7mqyu>r$6YJTx& zm96!7WNW-w^rBC`Y0lO==rNMP5JGEuCnqQMbFMQ72kf{$J4O^<*N45D>Hwqx_5$62 zZXiQc6fZ>RWK6k!!MC|Jq8wT6;WUok_$B`*~6+n-R z=UVp$3aY_m14+EM3iI1;Ps)uh(Lt#|tmeLfL3b!Yqt4#-(J&uz#75hFq(qs=rHRV9 zD&Hq8dA;!R8+?>~+3^l8gm!g9u8&p3K{fE#yfwhY`J4AuWsAo}fK|zhT&(c5UksAi 
zwJ55p!U=#$J)_i;4UCpOJ{Q_e2bIJ1BNulevVcj0HXTv4mz`_2JP!Gnyo|rMh9_&yT=uN9FC=i8SYCiGQT7^?o%r)Gbc?VSQXE?}Ol!fI{`lnzopPNFE{? zU^5KIXO2D><=3#Z#FM+a37TDGUoc+E*OAETV)+Sw#cs_-`cpK>I;XlT&M~OVc3Fxl z3Q+ajva+8F)f`9}JAFIj@FtEQb@`JkzhE^zs&ZPIDAN|)Vx?zZUoZyF0kW7YEXBwjyx$X)8Y z5T)}LA`m0Eh+FmEkJ&A{ejPtxFGpyVsYg*o9LgV`@`FU{`hDdtclvHV&&!*vjU>;l zv^SPkR{PESZNcQ8KbGvh&zcuZbg>cRJWH-+TEWXycnv?8j*iY3PZ*w|vIlha7&V>r z?qP$rg40cqq9AXm0S_g^vpY$J58rV}Gexm(k<~53x7}0b zXk2^oHS5#q8Zf*nMV~^k8K3NIuqW!=tGD3a*pftVOPu7?EBZm-Ec}Sk?;N!?_?ymi z{B7*~P@!_gCqrwyj!DkabcB(4aU+sh&D7bj8J{$kPY$B*5!BQ7216pB*PhDEr6d^cPRYtiMl`NmiA7ng6}D18y!Fqrmv|#v+rR%yJ1ZTIlXZRq+$iRE(FdmCfC+ zAU_S*9}nV{voKa!o=D8mQT=w#7a3J0D{H^K1U3EOu1cS)(_V5f%wZQ)q{IkQW4QF@ znOET3{HNFNB$4ca8N5Qwq4qhL63L?7*^?{X>glWVox zJjvqf48;2~bX_)<8N)ZD1_|9(+2ip$3_%^WCq0m@JK40Dma#z+4 zV15nC`QLxMN?J@avtK+xcd2_yclq&UX158ExiP;oXOp*Hv1{Nx`9Q9w`!LzVl~*nO zi5RZuS8ZNzUFL=a{brm+G5gaq+IN^=f{h~yNri1zTIH%x$>adCETVBLi<;^; z$>8PhUmXXBcEWx1B?Q*IF_e?nZSf;3s6wAUkz664=eZ^nCd!#c${Try&2YdKdya$% z!a}7Xp;=FI-)quR4XwCp%6UGgq)L;USXfwq!(Bnl61m!`F6qAqYD!%A7=a@d@216X zB}JA_JmunOk~WT$$6kKu81?5q5igR`2_3~NTwbGBWx?H)%MSCxQy<6hln!TLilRvf zl^$E)rlwK~BhC~P93r8+dgMeSZ`<+c*|BHjr&Zi6BQ;z=XV&m3uhbAvr@e^KO{i{4 z^(^10p)YaGA6+%)G@!M2w6wHryVp;56{qt25P1>8+@3lNDWtedDVfGLRacMX8r99`76H~@P4r(e?iwV~v z(kf$^UO@Q0k9>K%l!WEa1pXS@4?`o*_T4b)@DJTL;@cn+#xY(@{ng`Z)fqQCzkYq` zT0&q~zz0}ay*QDf2kjxTN$34qkB?PI6(ir>FjSQ|eyT*x8%^p_IEsFa5n+VSNlcYx zVfA|{C9Uq0p@7Hip6rD)r_9}=ITtQ$bFTYQfrk`%b0?~p{98?tDdf2jk)<=h#^Eci zv=k8#l9MpxzEdN3*L%`M6nlR&T-GKnQ^Vb*%bt@ZZO^4x)yQZWP;wPsXM5Y_?YAJo z_&aAYDIwt9pWXD1oV6Du((BgQUMSP<0Wx$h=aOVsKVe!YA6-rsr?!Amh1 z8@6@@W)C{<{17OO&#|t11wq;Tp=9%%43o?fUX(i!DN-glOx#?`$#cpf*xCFJ{F&7+x^CgfXi>Wh+}~ zbUDg%ca$8_Yx6!lpTTIkz1(hXZPmZ{slK(fMaQaF;nU-}XYbvW$1*G`S^+{I%dqvF z*o3ccOSVa8=IYp^{cjz^GcnVo5w!K~kcbIl99zG<{new}!%HioVZk*jmz*81m(46m zJKJHRl_vFz*xuXK82nTtES1SypX#m8eHrlNZDp{ej#zE9N@A2c?H5kf^{%#mSY0hF zB`~kKEc<;cik)s?d|BfrirdS^K9s=+^T(eT#wGK%8J3xe>1%6iGfVd<8~aUUSGmEE zu^G{${HA_L(~|{^)9Xd&17=c4C-5W3p~| z;kbY^E28>DxL9dT_(`<*sT?2YZv0Qi*mUA&gFM=b0ahiRWnbfL28=rPJTv>g?2yoJQmd8WcYE;(mAU*r zl1dif?+lvI%Is&mo6r)&o0^(>Dg3UBLB9aVDQ`sYjzm3gAj3nT++k|kInjtQrNPgb zAU?8`u3<{YTYbMkhlhs;NBHS+vS$9*J%YSpxGmNFicCG=@Op0g_QJz%HcM= zRPbnkOY|^A{(4Kj{o5=7(yO;gC@^%S$g{b9-!rfxng>m1+I$3?rtcv!^)hQbM)r3A zJo1n+XQ@{07Qp4Ex*%^L4Fo_|dwgt6==OY>;=JP_$hevSv=4T934rWJaI$!OV&d%N zr1f&ESpWJr5q+Zs8K@c-|2fv$oo^K+#ZE9ONaE(?d<{WJ9pY;A5bnA^uS=T}n4XS6 zz-FRXQzN-Dpyt}_d*g|K$12U7kQX01JC3~RVv;TuxQ4BH?@sI6)0V>aX6pABJ3mld z{t7ldZ>k=Wr}OsDk{cA?W%7(Vfp)`?`&o7?l+j(#90J&m{_M{pmp`aKS0ILN0_2WAuWRDHy91oN zgKUbM4TL!j?9ZYiDGWX?efo+-a3CDv+esGAd)7tH8Uf_`D>H%>@;67}S_8R}W{r8& z=;n7~ZlsQZS2c!6Fm%&*va*9QAdvu+&*|~;-=UcP9ZCw}H$VFxQ@1a)8}(u%L>%ye zs$m)&p(MV~xEL^lmuNb5Vez#91XE&l4=HgO$k97pTZkOrG~ zBk}6{C*{M;(|s>DDelgDKa1Y-Z)<@D1z-oLzk0CWp#>gvJizf@eW4#k6XF8;5u6CK z2oSRcRqOZ;($p5q@gk7u;L41EqzoArdXE|H{u>frYxj%Qg#GqAUw9A@PZo&xHG=Hv z<>Eupr`W;mC#7~boAQ2!z{)#FKLROCOZ`jh>gwX-2^0Fen;s+izeIzD27s!|juncv z&1A(=Zi1dxyRR^!DQS%uh*m*~HW0yrap^6>jBy_+mI%nC@%W>(zlj)<|E529;%RLl z`r+aSoZH*8cE4kogeK8_BzCudlui)1e({R(V9?IDT7$U?Ort^MKWb;{Ev=s~n%LfN zSjPSz7z01 ze-$u=1#~%}C58eL?u$+w-wLoocM!J_Af62deb=AB1_+egiER_aWHNsOFP%l8Qewb< zKALgse0928f&nJK6k&ulfuXhLFsjeJPW>$P6`*WxQ1=?xSqj@L+AFZUZ+^Zgx1Mc? 
za(Rw~RE-BzX>JrO?Fu-AIk0jU03)^Fxy|zTr9}|(j*q|Rb1g7NutzV~Q>jC#5VK-R zfmlP9mSAGycEY)a8o(<;6jh!CA#A1^^Ph+QTz3{g5o-VoPy;a?{r^~Wfj980XY*eB zfQ5MQPUjv{D&9ZqF$8T_O<_q(TRTGBi1M)k;x(-U&AU4^YQ7~?sG*6V_Z0L<{yM71w`@2-kvGq#*%=)W@bRx2u7mtJzx6ky@4iN!PI{(D~o(w zY7nmh*j0Lw!$$}q-XRoWgb@f;W7l?ea})jZgF3MR@Bx_rSYLHN@C0gQIv#*B15F!9 z280L`ve{wZo&`$#EPSdVl7U!duX5ki7#fsJpkHZqG8yA$w1a{7BoXR zx@p4D&=3|0@AXN!A7UzBy#;q5osQ_>Ia0?6+nPIV8NRdYjI)t@^vV6ub&T(1Tw#4~px z4SZxIe$*@ilG@r@-~*;?0pkUN3}EGn{)#U4xO5VbblpHoSA^>XBO8J3nA`4EEJ0w?b~dHVRtAX?|>Hy_|Nf(eYlt7DWNC8iAbR}#`_{lq z0jURQ^)E*fd_kRKf1y1CF~H7qt;zs%6sNvWf)|Jq2f!$)|2eLxt{x0diG%0>eD5_P z3W3+~7Y0LAloJeaAjnw1qm+de$Crq)*@06)1)7S4ZJ!Jd4*tg~03m7k0EWJn?sqfc z=d)AUdqBa9q|)dKM^Rw}G1MznF28qq3RoQoSTEtVD%l?%;@tlA*C?-47d`ayM`fL# z0PP^I9TGYkNU?3&jFKP+8u$&`41A6T`3@5w`fsX%Z3Uocf}e_bQ*!4AP9q6z$GHSA z)OHry@BSh=nYh1n;{6U%w~>s+joQB&q}O#CT!~xVxc182uzzi4QOW+0~QY|9!xoQ7-hsSHgS@z&Fm?`@k8dq^tj z(Bgvet@qUM`Boj=Wm5W!*xrc6xVVf-8tkdWus>Q% zYpa2MM7uSlX^tw4OGn5A)%{LB%Ln2dLJHh3=XQ6?=pICYoeC`-PAWf6dyQc3u$?0h zo4_YWP)4j_r|uRAum0zmEaiWCo%9=2BYUGgWb9)m5-Bi4m)=G39gr4AdtawLsroOZ z8q}u8Vej?e$>nusfMIrP6wT&=uG=&wD8%ZNGtf!YwB!|hDV*E4`E9o77tjczA{Lmy z)UzNq?eOjay*~+R5vL-)V-2?f+ey;Qor45qgZHOE zAUe-epr@vxscLVUooMl(&?lN|KPX$yY+j{-&pkulS=@+(R2GRKmbS)m=F1pRNd6KN zJ!PV9*k{}}$P*n+yc--HmrN1 zl@%}kd#){@@=6h4P(kTsx)B_4@@n04hXq8TrL)BB(Uld%wzD&D0i z0^5AF?d5qRE1;y@23cE);dAtao_R6f8psc{_1Op=7Y%JyIJQBU$`fd0QW`1}4^D9; z>42g1$|-V*xvf_e^KEfHUZr|d_#HVH7gm^;I@*8{(MRyu22I)IBZ?Tpg^a&?jGPUF z@`x1}Sn5SvWGYfdb84!xCq-)dET4F$=gjUoER@vOHe0i1^m3CD813(_7FO5sT2+Va z6K;mnbIg=#F_Q05kf^iBvER%6+X_>8vr69(cBZU!$gMfEgF2!~1|D^2|5`wZ zpSbkr*FDg*lRn@Pnnj5o3;SD+!SS`<9LX)gNIC(`i#z#(X`v3NO`ZZkJPw07fGSc| znMtJA*_;~zN(xDu4jJQ>-jJ=#-_|qhDVBx@3zs|hBhA}>RhUsW-GV(679rXtarNyD zUOAmL-iN)FjT#)$HgQX-qQO4=^Jr!(}ynIm-cJaYa_Rf0yXxNOv z_i(l6f>cph)Na4-roWZ%={hdV`?hv$h*sZ0-(g+@CDm7KF47DhOr@RPzehwEJMxBY zfBoazSYN9DvSFl{9?=Nr(RD-8)-Dq7PRCJE-{7~u*Gipw$TWJLov`nWbs|`AJO4x; zTWY$0ri`Gc6=NFr^PS_qm69uEl6#ih!~IM7JFNQ>-yf;#!YEJdG$=x?<*a3F{0j9n zOjlqpoL>VZh+qZ~B+SyIn4R;!RmX?eqzwA+$M=AD#7ADT(!V0!er;M0gDCC}^2q*)u$I*zVFh*-k7&Xk#r_8pcoUHo{RFHI>cfYs1fO9b z{RAhlK=xJJ2zZSl&?Kh9^fxQp3<6;oq|BDe%F0Ke42NzNh{V2*7?Ibt;I(=EeSgb; z1BrVu0oYNNLR8Qg(1VGsczHvtj`KoIM830oDq+>nPfjdDLlGxECR|1xUmYubs@I-$n{J zu{9~EjkwFYT`)9S^@1$726=(rrfeN54@5Fs)ZS9q*I2-Y!8orcW zE)Q7a!@p>@z)>cFl{O2pf9UVEsRmAsDJNXMif+xiiBn-i!v7AUMisLL9+_{iw-~pvzTro8n(}Qi@9Ns*`I#RIAY(m}<7g^mOA2qSEqo=xUW}*se0&#+ zD9wy_Slg;H)9}156RkaqEwf~{p(8}7JwF(CT9zQG{7*Z(01-|E7C;~%Sep8 z(gOP$BS}~JBcV_({#n;np)Wu13O6BI98;x2xKTsKgk#nM#duMRq1c$DTqY;vrFwS1 z98%>f`1mYUBKab3`Yd%NsiiBK>!T@t{+JdY)wm}RWAYr!%t>OdaNLxGm|0IhC7^+> zeum$beAuc$gBeeeHR^|@%@AQ5^EgPQHN{8-k#K=8#`LOsK#iKxg@;Z9dfyVnq;pS7 z#pb{^rU=x~e#ZcJO+^{%L)!%;&;+L{R!)0ztR2Hor6hmU+WX61MSxRHiX|$ZB%dh~ z3VIm{Wn{ok)fMZg8d@qPTB$75UQ9nC2RV>SQYQ2x=)B9Bg3Zs(P5z)ofh%r)Hg zq9#^@4uv8K3ZGaP0)^aj(|pzA6MEWw9-f+CdjewOBj(sQmX0V8?c5U&5@xtKj|jVl zx#dVytX#im@i6y%NKkCMu8Pm00)~9-Bbw2Xs5hymJwHqDL)EzQi;F@2zrUeAJu~z9 z+SlIRzURe#gOb8R3(yt@3TlXM9Y8OSOPJZ%z&&F^a;SLf8tBp9IkL)o7K=$>d8RY4 z^^K1G)=mEAg`%GSVw~XgDw;H|rY^l=lyh%xXJ{q#GIcT(>=^pz?*i~Hb71(9makhF zXwY%K%S7cz_bfGy>|agZ_ywv55sWViUL*KB;48y<#1e6d<=%jTpr!Ktq2CopT^t-% zj^Tp+Z&aSX!wl!Ht4;bZP5o@GWS6R;wS0VDp1Fq-iO4n&VytnmR3 z;)r!n`8X_}n3(VbG*bG7Clb@W=~9Edr^z!VI@1el$HH z$398FvaiOTMiyXS7@a2y{b{{ASK zwuPJ$LJ{feY4>c|wYH|F*D;oYwAqwuHl+_A_mS&o<%|=-WNq~;{2YBrB|rWs<(Dz) z$J*oO&81*Fnl%nKfm9!a`h#i($|QGTk};RdKh5~g>Rjt5$Hzh!D>1#Id^Yy>kHMC( z_s-5L*|4V(;_#r3%_~3#-yM(2FM9u=2rRh%FLT{j4)B9!G#>7Kl+*72+9R zRTj#uJA;3Vop<%pK*Kp_!R!~HEgIX#h;IqTqf~s3Z|}hgH`iZBc5(oH;s3|T0(Xl{X 
zp6aodguiO&Z)s4e0o_0_&69RWrmx%D-6p3^UtoQTZjnhvxvN2$i^n zj=ws;1Akg0mSo(6{q`XVH;enn2vkDx?wzNkg{S@gx`hpKN=;M>jQLYiDU!7A((MxH zc9GF^cs@?1nVLMQ(y~=CPjWt)O1`r69YCen9AlRuE$l5k{cbk?AkH%tJ4mv7@F|pz zW56)mVkaE)^bBXWY**L9&r@CN%J;UamMdF=dUGgyeZG;%BEtA?$4qo)_tlsFPdt?2 z52tVLRdZEh;%jfat3mBQxtkTtC{l9$A`7${w5+X*mwPZq0;NRqx5a}zUKt5gDrP+g zov+sRX5p@N zCIFn*L1w7kx?ept`?E~Jjxw-j7Lbw!@{h&7$aEE98u|ICg)~p~i?kaXTkm&(^gs@k| z7ij`6TcFzyNby6&1KRu{ojHas_9eXnP|{t&IBtUI$YNjO zUKbJsQVQS8RRj7YReNe-o0}RO4!e$yyd3 zB2mS}bJ*F%MM)Y(H92EHcOvtYp3~Q9;j$=}e-9`j0ezsC`**kdG4%4Tf{&3kA9weB z!!AH`P`ro4k|DWeEmU!?bQ6uPKAqDxcJ6s!9Qz}__kcW0?YsJ+(x0#SUZ`eB9(`*l zveGe5H5S=ayyHkG&B$r#wC7*Rtf^u^GkkV-*Z#Sq&F}Hg#bXT;Y29(>K+{HzA5!ng zX!^{IDWQ3Jnon7KCOQ0jAgN?b&C!jSFE4^KS7N-&OsNj?;50W~slx(%JmxCZLi&l{ zAH|g2BMW~!_c59xBwySlY{IH6{-=aXNd`G4ibW~ATEUOd$k9%B&K2%@oW3vJ8ahuW zHpR5Fl?n?{ZI2)x>6ye;vBXi9OhXb+zMVW0z4n)nD7^X5sQ^7uc=|LsYxzk)K;{#t z&6F)0C?iQs}xfKK=6;@Ez2!fC z#F@jKId$!|ziaKxG(o)rv>@Ce@!CUZjV5cM_s4D)1UwPdFg$hTB{y&q;HAs5BU8I=Yl}PIj7)|tesct2uO5Nx)S4~ zIQ)2T*$-#g`srh;;Z{I zjhN4iR~xknz|$L!dWSZm*l31t$92tJub8jN zNGB@GnV!SZtH@my%AqG>$TWcXTL8{*DVW&w#)kn7(j*xby^RCuWr!Vf_6!mnbbcF3 zFr;W>O8sq!2?KmWMhT?Z>TjMyvomtDb@g(e_Wo*)7w>u1qd=z8!a^+?SW}*>i+sD1 z*c(q|qQX}Se3>j)?3J@ekfcMY&|uaV0{Ay<)^~wb_?i@CuHv+~$+w{bdsWS2QQG*r zp)nj0;VMS$h8Z0FR!0!h4BJY291FX|k;#}Mb>BIs5TBGt>x;a&pMR4mq({toGOA$H z4j@M~64i757z2_`$Odc0Cx(iVomuI2A^EHtccz{uwo@S?`G#fm8kK@!pCs_L6C)d6l~?wgqA-IL+sLdOk#jy0cS!mP99t$bN5M7lO3+Bm zy6=+!E4(Zg{4+?<``(y?#6%}`-$n-)NbL?P3Pik{A^G~?DnDto=$$jYsm3CDb;%qI z_`RyVJ>qC5YMPR=?Atl^YcO@Lh-q+5Z=jdm=?cjc)e` zM2x)Q2BAF_#R?8FqJq`aP$kTCj#-HXtKk_hziB1+=D0*xeD3*U3!Xkzc}o7b+s9j68FVS#dy`V~iF_=XuQ}X*s z4If{92*YlC#4F4tzbr_S!?<8cLMy4ro*^Q2G-&ZoRlqoS9vVIUQhAy?VZgFAn=ZTw=T$MBHqjh=&d-=%%1eLLAsl@)QYx8P853>A5Z_eAkO zc;h_?8o9m3@3pOsj6)fyKpmy&yE`|}XBdfUR%{uof*`3-=^!PcsI!fQ7D*c7q!kb5 z74>ZfDBy%16WA{6OQiX%`A=HRasIlN(hev&$bI}(x*h{7fhRRGD6ODrnk{5(W+7?l z&dS~%K$ixAo5KGt&HnhIQ^sf|F4Doz9W1JLExU?@*lhaDM5~;esgvjZO zSC>aB`;60s^J`ftOWeTXB|ys9>(pK;pxd#HkD$dxB*|+rgbdn-d}e1ch)xK z+o4D0i8?A`y?O>N6I+God`cah@!hm_b*v;@cdc{-%|WCDNOQ|Lb`S*w0($R5;3@Y+ zChF4!)=$ItXo7_70_JL^guSEvg>ZkyBn=G*qfgMS>~z?o8r1R}_A*Shrv3NnR}Jy; zJ?sqd+~NLo`S3P#rE^Tpp^9OG^D(1eK|1r33~;MN$#HF7caB3m*`)}Jn>e^x=jM8^ z2c>pRu11Qx$fcfaA9G7(PnRQ2ajTEgKM zJ1KsUlwYDJ9VV~XPciCjHAWvKM!b32Uy9!7U0*=$l&oz!b&a=I^X0m(_M7?ilkzXl zP9Fb4k9%?gy^q4SG_RoJ$=kE+SA7-LP(45#>e`mzu#V- zIcH-irR1#Zc$M>&^P_jYDaFNzGDcDpc`c{6@RbL9u+OP|gwg3*{ey@n-Fxf+Af3Kv6XZx?tA;nee#v%@uvWV9B+7GDiA2aH!6qx`V`vk&Z6?>e_p*UxzFKcFq%Yg-1#Dcu0sQy&{0Fq=3T~B+o8P|r zO9ntFVBV`eF#!zk)Z!xlx7yolt|(Hl^1-J^T=INdTkXe^iXqtku5IbmQ-7Qg*Me*= zKc=dkRM-=MAKcJeHk%SyZSU;GM8H7Su!*BvPIRPGY)Ol>2c=uY5eN+N`gcw5f7VX8 zr-K9pi)m-XPZWF0;6s5(t~Kz~+MWq&w-9{S8rU{>eaP|qikDe#L73o$3!mE;!`u~V zeHI5L8TshfZiQQ?NC8#Mpu*rnlTu^5xn6z_uUEPtF^_U}zLn$okzy@BNw{fYlLD!s z1Arp{s=EPw3RN6^CS-}YYa3C`YApEn@#D32?1|6*6vINn>df#GK9c|vH3l|b=U1RA zZy;f4mc%OBJ6@`X(*iHoEB&Xa{I(1n15i@NS`N4pM{}1i%)C5IcJTPlu|#M|-|q$0B}* z1RLDHEAeD~rlp!CjUG93RJ)iB~W`_ z(-iuQhlPa&^D=2PbZ7CGPk!rHFMtC9FOh-R5V>??qA0*4W;Z~q^`Lm}{AuR|0&2Y$D=4b(^tr~ee(#!aqUg8&0V zxJIV>=V$iSyq1iFzh~HL+L|8FXmd$+sW#7F*VhZhOZZWVb}jBKWom05AY)m|LB%K8 z2I}F%4EsM1Wt*SFRPCEduEX6$xc2kxhD^Prv&!HgbCCyLQ%3}7L$lS0LL9S-E>kUY zBO&O5gd+qnV!R^$2NOH5#Mmo^aa{C;;8n>8NtQBe*v^J!{isMU5T|wU^%B7TqH9$z z%#8NzzJ8^zM-u+Wh)3yt;Du$6kf?v17-fv~GouX`5u>cLhI+^7pLOpjva)ZMwBLn! zBq#`!unYtS_9?Jn7+PyPj7}+J&XY#8i)zO0IE2jaw7^1iNqLHx4dszkE6<~v@7v^q z?qJ40m3iRCe5Q!XJNC@(Pwgtq#~Es>@cr`|z{q;4H$6q#}8Ge?aU8nFiJ#_!p_N6}j-J8r#9P^_e)qgMU>wP%0pIdd? 
zg-OLzDgO;EbZ*qf`>QzJi#)}`=`KQ46mgeR?u_>wP=nDEM$)Mzp8KFqfP_%7@Gb+!D5{XO zznAyv5r*)qt#6)o*gx74JPWB_X+}(U-CXQ4CJDbNY#2BisG!xG=M4Bgl&D6pJ>gbA z*XDe+BCe*X#J}7 zC2RFjH1EFNB}WOli-&?ko5NCK3XU5DQVuAT?G1poCX z<=u-6uFz(dMELm}QBz&5{A1YL$;T-|$kZ_pE9O9d@bubJTYo2yS(t-I2(z7YnY629 z#9nZkLik%b!rnvV0o@hLhDPdg9CDA${G6QvJrRZ?FELsqwFZJ{x=M((P_K}jDl>1M zR6Ul{*A;RXm^S9L(Wn?)6!S2KfkNM92=_!#*PQoD&5V#}B@~fwrRHp+Ly=k#+wH%) z17DCC!KTcJrhKJ`QJQy6{(#QY%857|CjN(s>YTFg*07D#EK$)q^6eaY*unKeroYAK z6fzX~_>xDWaZx0D3AWmrU`;*lx)%=Ck!}31bxWQmZb^f}%|kTG$M}Iwr1$t3%Yj>7 zPOO$ti_?1!->T$89C0;gQ{$_d7n2Zt&&_8`-DD_mkZLRoYC!}D$LLl=CaGCWeda3v zbr5%N$K=D0#mkE_q~q5SD(8~pQUpr9lSlW>Q;Lei(R&E&+VWQVCt^pGig=i*@)t0| z6p^9omADN)4;Uok@m3xSCjYLuopX0AW*?lbg@!xO^oVNVG>auCeqJ^m%Huc<&C9nd zwYTUGkF-UmGliJ_Q_t$;JaYeMJxhh@MLDZ!nU;*~BUjCkyhkUeNila@jsPDXSaSfG zn)8sVWV~L8#LRy~)u3+s7<}4?3=0Fb=e8Y3t-9y$-=tM0Y(51f(&4 z@2*|N1}|uJ`d=p6>Zfa(`SQ$Ji&rf33ivNxA^<0Zanh)d5)&X{<;xX)vI$r&SJYUw z&_>N1n%6u-(Pt}~(|g2v!xjvjJbfJ7@@sbplE}2mHG%H?c=G)b`n(Z#{n?-JTZS}% zxo`)Y3NNNDNYhQ=WH<=#*vX;Kkyfs3UJGLMKkM(z1p<2|n8=&8TJ-;@YVAm#9Ts3L z5z6UF!5oTH0_K!QTABi69EDSXoWbFH#Y?rxr2B2V3;iWX|D zhUhWE4&qk#<;LrA2#tFWy%g2HD*8KM#KA$oMfo+j;04MTuW)ljEwzM-dD zAdwD-oZ~!3Byb!VN|5n6J_s75IsoFbJsb~R`9eb-tjQ?lE(eL{cAunve zn1y5Ih^k1k{xE=Rp?f}gZ#Fbf z0uWZ@5R#W4?oY0|e$6NVCS)9nOi#ytnA^MZ?%q(!?Mc*Sza1E-tr z)Z-A>1ffn)?I;Zm;Y z1DI#vUSLhJ0zr*~wZPp{s*)@A5kEcHysf=qSp zm@QS-5p)O#PxFVGwwO=IO}8}i9YQmg&(B0=n`fdzMf7J#z%rcy(_&VhcZU1z&EJ9y zADQhxOglGsh}|FzgDtYg9<`g2ZXCX&qGkbw;%EU}Vt1ek`7;>bc#4NLafC$7_s_eJ zH}61#4*8^`zYu(C4W&Xr8f#L9au*eURa5)5soFE}2n;vWw7GX2wy;~LT-t?LcZRy+ z(6Uh-D0NTw?we7r5S=(<(t5K}xURnAjx0H4RhIBE>MtB-0}`{n#If!53(KRpmG;$D z{Pd_JZL;yoAx`Uu0Tie!p!)=++@SuFQD!xZ<_>GW2~kx7oeU+0FqsGsf?!8AC*=Sd ztvfYBt&9Da}7Oko@knLl;4uUE3zQggN+K^W4%SO8>k=Hl5M-C;Wm zSa(W7Ys*l6lE}%$XGTAp+_YJ=N9`Mtz0IWGdM)+NmCgNrhcsEoJ$}Nq>x7F~4ihrT z)N}PgrPblkjx~2IxoOu@Pf-EprA48rXS|9uD);;JTf{`FS5YcP0yH*3^>fm43z@fV z0-F&X)o?JW!$Yu-Sc|?t*ZpHIQxs zsw(Y2(()^3>|O(FZbL4f=O9YuJ0QS7%F8i8(#jBJ^@*^lrM7C~4^|h@X^E}83D{7? zcns!A#B$FzL&=?^n=>3hp0l5yAH1uCw)gIMI`kky$*iZt+DDWJtghqXk?y_$&zpJk z$ufegJhvA!3Q$uffL@>;UeI1;eb&MMvAf(_eT#;lCsEccshfO=ZrZ&Ie=G!@_~*qR zXrt3gvQ8@myhhZ4Z?q z>2u8Xs;C00()u>{KiF$2ouMmuinZ!9e*|SxO@4)z3g;|KuEk&H&`&I+vYb+|jpv@j zqaGhG5weZ)YCnMh_#AD;q8?j3Ve!8u5QiVOeH_ z4{rb*0MY*{XmyR0t3ibogEkZMdIMrRY~FuX(7m}iY+O4gA^hwt)wFf3hXw zaQGVxAH-+R0$t@eA>z!DS>HF26o?=^pj0C)A~k%DM*H%`m}Zka*Ne>=)QOdFiq~f= zz##(Qfh!qh>_7^n84vKwI7ac!k2MtL((U+;sgz1S(4{aYF%1+AG0@FoKZEQJ0e{N}dQ*iG^BFHtuxaY}^4x zVdDfq)Dn2bcYRBHy+{GQ5PuiUB~ClHnKV5&QU|5pPkQNb{G=qQ+ms#ahiqe7mGibf zzYEx2<3poL)-hlXdWeDB7)S!XAm9j^w+q_NRUN=W*1AebG3?$^Zz!6kpFK2pfLXMT zBB|iyNeO`UF9Th-2ZY zGFhQR@;C(r8|#us;PX@}^jiY}o?tEkZGj)XPSvRN5zI`gm?%+y9f2*+%RZv7!5p5j z=6@R`pv|3eZSL%|q0*g2NVePdS|DrsVFX73q(!Wg|9-KP_RiGhjWh!zL`jfey()0| z;3mOs?F`dxIVSQ8IA1Xw*E?{PoWMEgGrd!EBfJV>6N6cBpw7i8?MG->nmc&^>(a%552a%cbDiZ^eimWb2x0KHH> zmK3N78t5<*DFtd1rJ*++B#%v8->vlpi7_z*x~MM`Vx0f0YmXZ&_+0~Mb}g^(d5ZQC z;ew%`Bnfh>cE5ql5mocoV?`EjHl4vB-C?H_Pab>&sTA3^Za5w#%wtkz`O9DQy396- zS>JiFNR)SzTN)M8?C1n+k2pGYj8HJW+V8pEs}eQfU}9htW}zS8(#TaX={u9ca2#53 zk|96qBN=ivq#;3jORd*VFC01HX%gn4kGB%#@+^SNEqsXYzc=~`e@JqSViOe!k1K32 zV7y2Cx@?sqOeFt-MuZ@K;WoOlV|ky#Bi?2p%R*Fz&?lT@b{y-h0R3sIEvB>U*Dp(> zpxL818s0&Ak$x_WNCU?vboz7Jv!%Gk0NwPt?FjkCF)wp?J#=8>$F9()G}Rn%(d9_9 zWZKGdCTI?if1Sn(>biWqC4+Q^%(c`(%wPH+3~9eUKz7BOgxQg(88S)fQ+p^B3z@9? 
z6ZrI4%eBv#;1}Q)WabaK#Q(IFp}x%63O;oIG#ZCs#)YM)Xgtpb!{1q0KIf)gGI-l9 zCGyRCWM26QEWp7X4I(azuQF=M@!TV4k>kZ_zBCs5z!oa5>Y=)bny%YHBj_|IblrQG zU*NLJF2;L0;|;NW*;kb(aASWcefckt;PdfHyc)aBK={sLr*@0j5A(bVt=c-SzIaIr z%0U@x9%&c`^M>6>bUhdd(SPa<#quEuTVDgzb(-lU?x^eIGW~C*^-!bT_h%jXJ=PhDJ`i(l(@HEK5Dk z*jfGgx2&HyJ8o)>YD^5qrCCIq#nv3DsA{sj?ABb8@nN{|wK*z-+_>lo@nOlk>)~nFxU$@|cbZ)!f%5T)ct35|u|m?Bn&yuuToaS0hwkkOS?NS?01w zrWuO=CqvPpTA>EaN(t&Zyz7=XvLH*Hxg@zUfZ{a~b&SjjNE+iD$5(>?bl`y?vi5q| z?imLTBUvh)>)PBL4{E0TpRj|-@tK5qAI_0T-l!%5`|6T1JXx%;{{uK^MK8S0xlD+7 z4j=Q0j{lSK%q%;ue6>#~Z*MRvOK!MLH=#~2`?Q)_2@EGB&yE0c(gTBoy(dZ_E~Ok2 zC`FPp0bg~Br7?nc~#8FWtT5*a0h)fG%m-GsfhDHseZkXAAlO_!C zx1XY-r}Y1%ftV(-_ebsEDDWhweJt0t2#772%S%GWf?PYs$-#ucnI^>o`nX)b{lKD~ zA*@<^d5}(uj9Z-dz{We?_0>s8R+MaH6GcguHKdFVQK9%is&!D>gMcu2dD&|`9L5&= z!j$Jlk(q}*F6S{wbS6;9Qe_YZ~%M8pN6`2=$`dN!}r6^~k7cnL|F-p{7Tk;2*H zw2LQkTXDlwCSK1Tn70@F_;QX#nertrFX$I8AWU1aN#RID%LkEkwt#bwPp)`UOD9foaDk&1({O zB}9l`0!~D<@M_XiwPHoD`v>dUIcDlLiIkK-lI@#?ML?4wYOsJnfG86zuLt9!>|ivB zi!*apuVREM@&TWG5;y7K94U0jiN2StQ6e3)@_{lH7*84nPvV$Ih!iHiSBIKa_Xv-l zV!of$Em*^viqwCt&=O%AlWFp`bzrRh zg3xH$BWMv^)`~`jekeg8E93=zB1g;}T~WE$@5_vjkbOZ`0yHuOjEFIK(|x@JB>qyU zc>=^~+%1AG$Q$1+;!Hj#V9ynE2jDD77bORsfcLFZvf8L}3AR=9I~aYg5RR4kGjt=w z-W6*j1*@;7B|Y$vDfp`O2XO+3zxHGaZF~3@U3UrJCwTvkf_?D>h3?HWN$rIGyb@*D zl8T9ugx+!i5V!ms=ToyBIj`2dKgDG771Ahda0V;~GjbnMMFs!7$hQ%&u^Kg;`lKGg zsG`?#bj^ZipVV@(xDmka@d&>CT#{{O6#S>On>KXu%;rJt?vjm*03zNfc%7xRf39tj z{Z-WgMR(r{t<8iFtl@=T-M0NR(%ysb!`EI+zQYZX;ghCNE^(vgoXHG+Cf+o|9 zwUvHKCUT*D2&oJh%bZgU%>lr%wC=}@+HcDTqgbsT57miU0vmUAw@J|GBEl~|?W}oY zK$PGh-09ZcVue{~=<`dCAdt@7bA5t`yFKqBUOtlJ7F&6cqcbX?}* z;a&KQfW9V4{w>{)Dm;0nQ#3DwbwlR&FQPEsq9u975bv5({YjJz?JD^P!KgF&;|nim zS~oN4pj1sPFt742!UMJV8NA$!JiEP9!Z{q=*9MtUQ;G>*8d@o&IchqZ`>o%^8ni*e zcXZDBmbGohC%x<*RVwPiET8haabZ^EkjN!mPQ1+!!5+l{E?1S+L#a@XDjb-^!vLEL zD#`MBA3$+oc07AEit5V|WzWWpZT#8cw+%+?&%BgMA_(Abl%E>n5qda`IC#y{20Cgg zVW1{Fv8(iNB2V{M;IdiR0zoPuL{bCi8hG@9nAWwLPl^9UOUJ0$Y7CEO?U2g`9qlcf zD7T2Z*xBTJCv%#dS)(-$DF{Rt33g7t-5KJqmMg-aUGmS$9(BFZB-K3p#er42#MzG@ zd6Do?HfMkZZ@f&a;+cOw-c;jiO41*U^J)IwU9`iJbkJ$DzEF&`@Q#uQL-SU%H|xWw_N&cSzQDPzd*r8%!TW#BPklnwDJB9w~yP zA)ubyx!TJ*cem?nR85ML28i~T?aGvER`5#H7H!Gx`R8WmtWNs>ZZ>XQ?-Pi4fgk^? 
zWW(0!4nYw`VeJ_HTq5#ADR{f0dG;ebwo?On&p>DpN{ID*p90|Hmu-PEw)< zDqDMI_}d^(XA#=@7pm*iW7Duc3}A`eO$jZDZ1(H6(b zpHrg-9zKj5?EMihW#bF@Jyqhxc(!@1ZDr1bk<|=M3fo_BIV@74J_lB& z^B&{H?2e*aK!i_-#&rK{CXTp<9|97aN~)*<`y|$O`(B|G%G|~VjHKZ_nM#Gzz3D|* z1)Pw&(1flOECJcCEI|YE-oM=_`Hhnk1lyP4!&MH+LatR!W9eS`@AW+ofK=CDJ8}`$ z!P>fMCtR0^<=}<;f0r`z7Zjw#xNu)e9b|#GYxe$1qVSCyHuw_Fr^uW|-I4EF7OwGC za9FIrEbo6)p}4Uy6busxhgWRzrQ$(>DZO|Ou^Poh9MguKDW|bAAQ{urgKL#0qUht44S!&}A3+s&|CK%!;YD~_ zaGk0&wgc18*VBC4dd2Z4IzU+RSI_RRaU*-Ytsu##)g~l-8!nZ8+JUyK4UHygT z`$F66dvoO)@76sQfspj>sJgXf{Rhyr=n_Yq&jipmw?`8H$zYnV{V?yy_Y#C{y#IZy z*K8WE^$vh58{j>$BA2gQ;SSV*)|+Ep2f*2hltAtFDw|pDMJ62dL<8Fz{C=tYZYp z@3CnuF7a*HG6>9CH}fXQTKyO~>3*_&_v}SJ4Ud*xwEU3y*W8PyqSU@+yS*800>^6L zd_Gsh?=L8)3~vDhffG4E=Chw43zBmIp#zehPtMNHJXMDiKx~^*1eXnjJAj20{E2LN zN~8F_kY>uX_x%zxzQD?g+9~+=Zzu?OrFaBny4Zj-nvdSlydmPc5l4x>Mz`y2*jyO@C+f%byGS0XT00ZoP2MXl(ATA167hWeM zDVOpCrsE_pZa|VWU^t`Br+`tVHBfxgrUVX|a_WR31)|jBZ|LbOmdi2*=8LR=2(?GB znD-PWv8)HjVam`7fDK~M4_pXx)_Q<7jQkN;I6z=D^si%2f#Cuy_zMu-RndO?b_N(U zs^I0Tey6tFsf<_>a#F;x?Kdq zC-@#eUkQJnJ#mSP0q=_da#9{>6Pzr9`j{0JZ+=p_I?n&g28dnZsx)$E&JfIH#G zw;&SAT{yzZ2Us*eUqruw+NZ$9*8WTMcE6B$*{@lhIdNp$y=H`@24Bm8GZ(-ulZ~Dz z4c5xpLe19Su20^4y!(VmMnFK2mX`Jh+RNj8Z)_=t6d+zcFAq?fLL2-Jy(xf&)qv&E zwU?E|LN-v+>}fj->$L&J39Kz4V66$3>(JaJaP0J3`y^JRw4ihjxqLO2==u@t%&DiqYhri_+l+rlER#cDb6|E0pDf=!F2;cEPS9@%$5M#3yhHV_$uFNXG1P1 z^J1<4(^x)3fdS4{?6fp0$GEqJQsLsqk8>j5E*(ptRE}6CY_&l_AKf@ccnL@^SliD7 z`%_MFawB!tcbVBHegYNzt{HmS#gkG~=YqEc{S0d~nLbEDxbnC1h6Z6;3 znR0aQ^e%S&FL~*4$`+eCp2Og05RMdU-Hp7748454ZlhNf0alOdj{ zEd`P46t;+FIC{omGtBxuMK*JnTTIVS5jhYG@Sn^Ze2BCdsZ*Lp?B$w$I!VXv^3n$hFe?P)trBZivz;KZcrp~zY z{+my??!VBMWd~Q!U)g8Ad(Z*^+~B8yTn+ID>GXq^T=|>Y!>~vuQN+Ct=7w?T{<7(@ zI$iZU+gj1~#!1^d_#NjmFLx4Fq@CP$QX8Cmhd;Rl#Qse+66QMGsS>Ea_gNg(erDKp zs#OhS8Jmb_%KkM6bV9>2>kI!kBgU7?y~GPenLtY#^J2Z26bsjUH09Xl{5dTpdfSx9 z;4EoBs)?)e1W~x|0cOEuD? 
zQbZ{UedZ`!riOwoca9u1!3k3KwehT;-UVD!yL9{RwCw>RS$(u-Z4FISYNQhreJ!*r z8EgqtA0rq{{ugc;rQcPJ)j0!tIj6Siss@?Za3J0(w}J&rG*W z{oJXRKg=@-4?0}ldTFNl&n`qb+u;!xm$1IZbbqbHkOHdo@k?cHTlKW?^9S{>Czckv z-I^)ix_dBemQF3Y+YKwUoo{v7Pzqr3irVuNxLIXS2pA5!Kd{L~qfcCIIuOxd$8u`> z`|u5)!)_@Ez<{AiOv{M{KX~B$)ktEF-kO}wq#rn>mE1;ji&OM9C4t%c$~ z{o`YCb|P_>yE^75by>G9*9H`m?Kv{c7>eVV*)JGoyG^WZ(mVi{-}bEVCz=r= zq{>L^d7lfUk0T{6K65?1enxkEy(cM9iQe!(Af!O;q*1ydab{wAM-27as8(W2(I>(| z#@Nsj29>C*^h))7YgcdmV5W$cE&F1T@93=Je-!r>k?X#++;c2!Bu5weVeH0e3y22IsFQ8s3fD))9JUt9mrBlDW%b89s2RYXHRY*Xo@k4>|3 z0VUOLLzYr(W6ySMjg&Zul|E7F*f@uaJQc%CIhJ`0^tG%aJNyR=;F6GrQf&;%Xtor+ zBN)5&a(zN5lZ2n^?N5U=rRlMJ6PHo`0f0rgc~9p3{FrD%Fuk@qHe5iDbjb9mPFt2w zP_3-YW3ty+9=0uDkzB=LNyYeH2jg=@JXW|K}p z-w75lSLGGYv?E0DIhv2z7LB2D+`||0`!DC`=llD%trqQTP8(f+`@^vTrLBiZgx$q) zu-7Fh#-H`8iQJPj6FD&b_60;7o1;JpJ5u5T*nM?2* zM;6v!v(3a>!pK@=a?P+0qR&rmpH|Bhq|82%j0~<>b3|lm8dnOZcbXfnJ6{FLJzOEyreS| zo>CSA)+Eg}!t+TSGWOl~E2<^NdBIc?I`hHuZCBZwdaq|-?Ujri>*V$L7&3A^C<17Z zRV!ZMd&gbx17ZTK`|qGprOt?M?Na5q4M-6L*sRlLm@uZlUHSt1r>6x>jN2^W3;Rgm zXmhqg+S8C8i>4@R59VIHASgRTakhF{+ic>vr5MNkV8WVF0Bh%v_&h`I#hZ%Jh_4QB z=0_#r`(%zI2qInjNK8u1)7wfAuNAhvL_;$eN>~)xe6j0}{Zs#~k@Y4fnW3GS|mcWf@t=yR!ajNOH#nQjMWQY#Bz!8C`B4lKA)%vdX?4JB|BwAFV;- zCZ$3#a8ZK6H>lSM4C`w4!Q*3bi5CUbc-dqZmY#3>0=K6PMVd|BwEho)O~Uc}3v-NG zR2?_T9$wVg{M^>Ylvpps-zE5IY4=?VAEu)l|B=x&ogTE9cjc8<&YW}{3;(fa{Tz2G zj?{$N_u4dOlu~w+>?1t?hsrVzl{@2bK^mIKTi6zY5O@^x`Army7-y=ZP@@Z0gA=x*A}y_FC$ z{{Qf-Ed5@p?w@~yb7R&2HL^C~WFvb*>gT7vH(egv=U)PTRk`>DaZL_ud9U;`he%$);NKV>0P29FBROx`%+2DN~O&vOf8vJ0^Iu!#?lmrTx@t8Wn;XvB5prh715bjUzL!hkH(}YU;moVKW>SnF zY?@TPuizoFj?Fhl=#YNrMW+>oyWpfc6`{D7n$xVEo~14M3o(k6dXt4 znw~$G({iYOH@{ChMLU1&JgA7K%bz7;;9!mr9V|KVzG?q){X$;tURj%oM9A=Nph&xx zIikb$NZFy39$JF`+aC2XM-P+Fc}?If(}UA#xdBvHU|`&>M(*t#u8v_npGw5|C3N7_ zj@#e-P%@AgA-Xr7&YiJvVKctmXp;-fK+w5L)pk0Ig+ASn>`#~M`Kq=((Mi6SXWnL? zWmWx({3={ID}w7IE(hbEYBaYyo^7)UPusDeh47k)GQ^w;b6BzPcYK!LIQqiWs6UCp zy7{#1w|H9V@VAuwqk7VBg{?P-^cy0#yEN9HBTwHy&3}EwS2^uEv*@%@dbe8N{N}sB zn=1K_xq|13h4k(xPc~<#TM1n^PoF+k!P^%kS)%Dk@XMbPPwTN|Hcz#X>f3yAc2Dx8 z>YS~Pac64OAa&}Puw`D`{o<^t-@?%Pb>%~Yw!Bj61mB2<6#rOdvTU%7IkN5FyFf0T z1x5BY$oFqAeS2vQ7Y!CJ=l8DKH^>VK(SJ1DQTgb4xtzQ+*|r}YWz-`%`x%#&eUD&UF2PR&%Dek>X?c#r@`4T zGYn#7dSYfzXZAlW=b?r4?8cnN?Ovz-GCXm@`^c=`KG&yu?`;?P=1=dQBs|MeBC7M_ zn=V;DK28shONcVR^NUZvI{Ctc{XVYxcwZ1ZT{PwHzOBw|klLU}w^dE+kCB>I^ z`j#9)YmvK=Xn(*df^()VUn;ca&WN3zot!*Z%#Z9Ya;7q{)Aiu`wxpyart1aeFOaZn zWn*&&CVmCho<&9QNJx5Q(cyn*+yXCvn#jOlC=%r%Z8Dfw_KuDuAPD(%y_5WAud}!~ zSei=k>gB@q(IO%`QBra;@7rC)QX`zEYv4WtPnpoVyQ#vAjErBmLowliRtCw?K(ddE z!?Mv1E~I`HbAR{!iOu1}`UlcN1&N)QsFLTK$1nCQ`na7HbS*9hiygoHCo35B(?1C5 zfB3xa&@Dz!$(PE?u0VdabOfw_-qQs#Pd% z3vImCcN(mMni-#&W&%5yM5;eL+|tt1mzI`FHBq5dHNzTdWaq{;HFn#HaoPO7xJ}n}O2J=z)n|=l!v%@i9mgRz1?sfMP^yDUd!f}co z+A7zRZ?9|-NCKJHTVBb#Kd(9b{`7bQodf~o+b%FlL3(k9blSFwUhzH5xz193M8MFA zsXfaag@uum!z!$Wt8%^|7v+qRbJ-F6fwvboT2=}vU6C$Ko2^e*%^r>H z+rh8TuGRC!xJ4IRM&`U4rbRs53Hwyy%qGt69aI_nOgTgn6ZjAp8|VmzNcNsTVB7FT*uDLzpr9Zif${M3gIBk1?d>fV zzQPRC#;~Yn(@!KwjY~^PBEV0KYtb-?#j0SOkjok+P1KCSk{pc4cK8Wz0q;)%Kh-JlvI21L65|jjb7!%4#G^3z-knW z8wXYMG7j21WV~d_^mKGP?wJ#${-GeRP~4^qI2^&yKjC+$G#zoL!;6df1<8SPsiQAu z>d$#(N)%JlnX%`r>8hBx3VJWp0($S>hfhltT2Mcny^M(_lA^Bsc6SVS5P69v2#czb{u^F{+#Ki3xZ?70%<(0n ze8HnE#!$>n{z)~=_g zr{WlmN&-vNARKkQPZk}YiR1ldStE~v$uLDz)iI0YLGyjn#;Qar2}2g^a5yt#mHyzs z9^E6@$+oRGct4DNKqOIe>)!UZ0F6FK*djm+`MjTg^i{joSjipv$0rQWW{;jffBt~! 
zEZKl~0f(2qBjpr9-#}2&$)h=WyrkveX!|U-qQc?1xyNacqA~?jS+!I5`<_TgzRbHv zYnamj`C_m-e8j9WWPA6zDTB*uG>K7z#MN4*!DaJnTwGjG5PShDOg4|>QXR;9r{(5; z3#L4RORM|O?d`(i)3tJE-fWfA@6`dbS>729bZ=3)vyE=j)o4;k5SAnekU3o=2It-t z7zv^4fFQm{OG5+Pl;|HOgwtbVRivcgrnS6i=rWaBTJ$i@s>^w)`9j07Bx!WT%VoAqPvEn0)W*5-C7EFfP*s@smRop|6nl zwSotDf0yzGzmG|vDh-)^ehFq6y*AO-ip6Rma&RC+vyOj`Oz3d9f~s^~98Bx+WqR5$ z2&-WBLG5}Pg<2A6esR8ekpBfT`8#YF-|1$hu5K zu&RlY>P19E3h&+xo))QR_}>TW>gpiDgom|>4)yqey!;xdDW`g~_Cln|hOO#EiW&{F z3m&D5`V9DJc|?{xdoeg0<74(LEIz8cK=9Fx_1)}wb^iYi-+HwnLtXq%f}xlx$5qc=5dFgC-{l$6||vGJJwlQ8nB9phw7`=gXZA zG-@T{i-G)Z1){$yCP91TW;S@EkQjZZOB>knX6R*5P z%y^8azwb1#EMktXpMI*j#`NJ$+@(OnD-Nbp9d!@lUV6{DrSk>)tn){6?Q6U*x!=b4 z-CFW#BI;@=(sICM9zOnJKvbkp8k9EG3Jbjr|sUgYv>n`v>k~=$;-=2Or1f8 zFo!8Vp}R%}Wy3V%!UnZchD@1RcTy!r4`sv}_%&`UeKzfpm(H^+{jjfR@MeKdMV*yh z>`1NXIp@!}952?3HAO$x;x6g!l{j#G^0S6<%)q_XY7_Zg>_xH9?4r$XB?bkx$9S(DVMl2@U?kM)gvuvnTCL9~cNfn(6qQd@n-r(eD8B3XUG1S3WW> zrbDhE*)GXc)_wlUchPFzkUvpKkiy4lz%?SVOTTHzR=3d3%!cdYCIL?hjq>GzlZ)XSz?p@8Oq{uun&ev5Y~b z)76D$fH_^t($$LDlHb^bC+tSGmUs%wKwk8y4jcz=p_?| zlzgRLv+_Fc*NfLRFyRd|828_j#=)7v%%yewS*y#d%kCl8yh$$IVXo>q539IWeZnir zcbW+uz5jc?v!zu){px)=r8wJcgJd>_^rIIa-O$a6OK^4Vc8xgi-uyr?XsFQfQiyeG zR#t%~jnK(6CWR6zOMP#I#di&H7T>2~Ol|jhK*hV8Bk67@va~+?%u5aC3XZ`q=r0U= zZgo}6JYio_*Q=ZmYHN?7SFfZHOv512MJ)E!@K|jLjwdQSYHNREQj{9&#OmeCm+4g` zg{Xc!jkDP$`|6VPIa9|qJy#X78mE?zzLqKdcY3PzIVpV5@;8wD*ZA#^3E8)s4Ne>R z@~A9zRfhla7Lm`4d+wUMi`T@|!r}#sM);$HznFxd4<>tNY);>#J?!hVt@6>s2Q<84 zUt?rk{47p5gzw^Y78B*)&Nq!UdpnSy)(P8uvNV1`XR$wVK;K*;T?>JYinE?vTGVxo z#kT*xUtcSX77-1&zB#a$^4!RSdab|qJI&T^b}n!pl09!KM#+12uHm=FeW{rhng{#D zg)755Xly39qrjjDLKV<)E33D;A3~^)k$xOF{p#MAFJBT96JeLbr>zvMJ*sm0-PB%_ zDNr6cZ~9PBent4Su3^M+XHDlp`9@gdLu&Vx>NRe|Pg^z(uR9|8+*yR$;nHqL{xAbw zziRda>H6o6aH~*xhbKHI(={BD60f|sO}%~Fl!vtGiR=~^7h%Ud2^dJT*KeOc8@+e$ zC>5`>OwyY_@u@@Q6W;0?RJ<84+=HA|sS>)r6{qp>I~VVJ(K}?qFOp(n?iVCu_HD-f zbFT|yQ!C9F2zBU>?J@lD^vZv{gwhPv5p2RZVaNl0F=wn+9v@pZSuOv;&~yQ3SBmqO zH|dW^-A)gxviNV(r*pV4C)f26&y%tAr?*;NsA<03$}D=e&gbEs7??cs$M=J=5P6(I z=SvCUhmkwP`KisG4Vy84W=?GnPfM8JD+|CK};lnw~F|PUS zgEP)1HYa2Hj5RpU-;=aRNXoG7xt_32aVy;VqHE*x=gjxV9tSIBL_M#r)(Fe;N^**b z>0q$Anf&0ybGsCa34M*o1`%1_iZLD!1sj*8DW5%AzqdON7@jy&yvv)4t5zOO<yG$Oh8(W)PhPvmd=-jUqwwI$6uAy>H3PA{PCD3|!3E&u$fGx?AAS`>Xg5xv$o z{op}CNP#iAljfJVYv=7W^VV|S4w%YN@p|Yk&#Z56c#{xf?_sI;r{PP9>+UUMRU8}~ zEE2X)-1>8xFoHBbV8?Yxe+GhD6gr^+Y{aG8q)|g*=Azl>J4h?1hFB+AQ&Tb@@h~`e%0p|-oKGm8v;SG_usmzhDT=(KA7%P4y!GX3dx;$)#Wlb zejr3skcn&mja}Ymx^dR&`*iJ+*trsB^z&YgnCvbb68@=b{>;bl@dc_!G|W^g+l#_~ znK~>*7JL*7I`%sh8pSPrR4l0{hkxLKj$qP@SS@Ro)1u;U3gFemlK;#UK6LBdl?AE! znUF-Qj1lXbb}ex(uUS}3hDNTt9Est}OB%^GB>O@*d98g91T3ZV>*bota_S!yzI82n zVOMuQ`UM@UMug&(d)J@S>^#Ntr6f_&txRV}obl(5oYq#6$`{s&N&4||(auSM{EUn= zNuOIIGh0Tzcr&89yL}eh%%5F*nc8)I=I7I{Rp+33rHr%u?+O=RGgC^$_`dD_1-|2( z*RL-uOtkC!$C_(vYaf4!(PSC7Zh48&@`UxZhHqC@QMtRr$h@km>W1dMf2{aL4Bx6{x7F+_u~m7sq+{Cow=ddhM(#xE-1P1g0u36E&A`*-3}wZD&DwJhyD z5&6QUr{`72k-5Z7DGiMrN7tm@ zZ|@d)yM8B$BVA19R2E&T(zNHt9#_i50`WfM231U3eB?9P{q(8Yp>>JjXP7$v?`7wv zUdpi1(o%#%BeRFAT&>0%3Gm@f$K_s&2gjb(Qx^5bwjU1v(ixWjfhU3rJ%AY6lR8R8 z{O9OR+BuvLam+GQNYGS%U0f8sNk*QqW9UvR=bAv-rdb&Co?ARIN&D_mlbeXBxK^e! zi#>nNz!>n@juI_OY)W6Vvap>0+z!{ddF1!PC!9I2)nR5aC(06DNuyirCwv())s+_- zN2h(&tIm~1g)#~0J+y05Jb17&=$~fFE~3(vFBjWd;i%FjYlz`0(7w#w+~l-`i^;-q?a18 z97AZaWOrpXd|E|;T?ZT^&Tl|hVp#U)7!`b*Z`s&JtT?@<=7O7>+r^8Aarxl&Z{x-! 
zifhx1%Im`=HtRrxoLyWl1_US#?RH>4Li)XZ`}WXDGdlAAucw!%`?vD?ejVJmG8SN1 z`dU_2*4)H|-i9g*Q##`-Dk{(@?eIJ!LX#b|v{m1qP|J6^R+^faTt;0z+J~$xfBSu9 zY(*KT9^~NQKu%5`v?f6 zNIRf=Q>MQJcI1aF_9KQ>!WJLr;R(i_VG39A{?N~=vNYYFTUZ!9Ez;cf`|=-veHZ3O zXYTHJj3FI-e0&7X2T7Bg*x)`Pn6!(T`Cfkhg^kqah-NF|p>BP50lIC1xeOsJx6j=@ zz6I`QiCxosY$@HLoVhT}WDGNCy0XdXdu6ASB->NR1xS7^P7xKv-!m0=Vr`ru9iLiw zu71KJ$G6aI-yX31)eop7<{%!PX1vt!9%$ETbB7D6*dhdV=o##8Z$FNb;cK+)uTm;o z&|#0RWzq+CiB1gc07dZ&lhd#OT(2h( zXcQ;3PJY5w2v+r~8%3|OtZ5Mi7Tn7DlG@tZf`YC;wdSR^agk=&m(_)iJ;p{xKaLht zmW_BSZ7vbrxahbQrl+P>KxcGzbzNIs{pg256ePKAX+1{+*Q#E)@+W9Qa<1AB($qf2 zCJ^Ke`%dZ9o78;&n1nIez`LPp8f*%8{R!q&!R~FH@4KvRWRwEh$41zUU zX=+l9C_|dxk-=neb!x-J6t)=yr`?R}z>xJ>JV8NxQr=R=soHwb< zKXZU9Q{iGoVPPTe8xC8IFBTUB0Lg!156u5?wC1k-bF=*j0j%v?Qy=((5Ev+yW2XnnYkWn;{rj-5f{b;?P zXspI;cY$@s*y9U3DBAL%&~2ai&%D~Z9tt4=@9kJ9MfnJzt4(y`z)_>A7R|{u2w}4! z3=1}qz+xcSZH?!)lg{SfyLWFaXt#w}YT)`WKY1(UnE!*Ix>v8k^sJ-mxn6NiJ(B3X z*;snNDMKGacmLNH;Qll2d*ar$!Sp|umt87H@C`5iS)Iz)NpE$nJW{hgmg(OcukGA( zR+8M##gGNmzIJJMaS<$Q1-0k6FBt4($b(p6JG#$NM3#$d%S`fCIqP!N(}tX zXyZJ(Fo!o#lj1h@&XR26c8!-OyLScLs0d2a$nBq5!f>1OQ1eNGx)nQ*A3l?3@){(?JV+dL1|1iJq{8yMKo?zphBl0PuBxla5Wu1nA4 z+dV7aF^m4ghp~4Zz`vReJDq>R=zwUsy`ScSq&sVmoM}wdLLN zzHazs+V>JhKYTmRX?5@;?Ao|8?!VH7c~00C9>7TTp`SY9&CQ4(I2om{OYK$`7T6EE zK8LCW{S%t@Z`G;Cf$NV(JWEPTWotUFJUJ6x4@OF^(8BWaH|TES>E-(*CHs1Nd%17U z9lxm~$gNZ#0FXI&>yAB&8+eeK`Nyl{bjQXEICk%bL5zus*~zW&rmt`O`*$VBr&I}= z($1YX{%o$FEH*0|y7c>p!}`{kcE`#Qe0)Dk%;ss)qM^5M-n_X~R$gBI%~Y6~X1RCH zN!wYfPJcIY183Kl&|aPQtCP0`A&YnK-sS1|cKSnR3l35j>4QTVFI+2uFe4Q-Oo{)l z`TP9JXAt8tDK;)9<}tutU_$uF3s;YTR*b)XMf@184*;A=>T;G!*U2`}6krTjDOZ{v zdn_BSf^LteHa0i+1-S;59~BrePTpNidBK=?H4j_04?Q+DOG{NXbjZDW?9`s0<;V9t z9`1lj|Hl2_rkSyEQoG~n-5GPIuDDrD8(z6`C7uouo-^Ra$;ohsn?vqghdkfFSarni z##tx@`0#x=o@bP~XnD2d|L*A5p(6bNS()G3UJ-XGJ>1;Ndc8)P9tHNDe^j(wgfF6W zHn4B)&7WoJ*Et5l&`JV~U4N>-g${S%cQ{BL9UWJ0=m@TS9qQqZ;#Lt%`u1PBQd!fp zckkA{b;WR>>QOR?%LE(Vd-H%3O0>9}8Xsq1E;9~v_aRzd6C0bOFf=(H3&N7_leffJ zda!}98lP1E`r%r7W3uT|ac24J z5J#+_pr9*E3eiCbTN5QtGrkjkTO4_RH-r6PclJxDps$U9`mMxR1d|TK_dxQq5hffD zN4`;@USoxb_Q|)i+|;>W+PEenCwFt&CwMEN&dPbL4wFgSHTUBHFwGzk%JS=1_kh~~ zG6=vC3T(9Q9MYW9UgOF+9=-uYyF?(p1_pg-#OWr@(LYJ*AdIim4okSp# zfOr5#2s_&dFhebE>+NmCM#*@MH*aSZN5tfR@gjpw%v7-3vPqTIH|UR*Z|_(+IXAu* zmA*_r*zulao7`Y~4C@E9Z^B#QDTlT@GT6|0aT7Oa7@9XDBcs&jF$+DtvhV(^wyX#C zbwf-yUwpw}S8h44anKVu6#Rb_l{VJnt~NI-=gfW;haXgnSyhI)8pSq{>y8NQz45=j z$8n^ky~g`^tv){R8;qV-vF?Y-_3`mRZ_6We5lrmtb3;?Gg!yh#V1c4%5ZXY#BDq7dw6UR zjXN2cn7W*Q!n?fdq<(>-pT_?oye5b|lMT5F6>e$KlP8y9E#dAS#`J1>6_};m-NlX{ zz2s_l@?(H3)Bqv@Lc7Dkm7q0daQpjb6O$x^p2hF^+kbpd%QKlVPWY+iz|$K4{t3t_ zs?D#c@Imqirrc|AT1IAO`O=e@ zJLTtrM`CRszy-)rkNJbQFxt@ZaO3e^P}G==cfxnqG*K)9vuIx_$q8Bj1HXo}v^1eQ zkXz%Go9{g%l}DCY=yT$x;fsviRi%=8BR*0@6rl9JYAX z1_zVY;~tSDewmt$6_@*==Z=;ZO-#G|<=;Qh;}eJwWnHudB;tB>bcKZ&9HARU%0=_b ztW2b^V3)bt%I`h{foDDbb92+w*FRNm(caz;TK*aM>J}Dk{px-faHTS_3Z#*1VSx~R z(Bo$E!X*uETmJrGPtLB!`rZdaP(wt~7RaCia&rDqI%Ss8qHgZ)s6vT{bXVdkf*ML( z!X8XV4sJu7Fki1)1(L(_?(W1cXU{`Nl$7jv=;>`3l&P|@v-_=Uf4r^sfLh!^P0b78 zj8<5c5F}Zv!By})bdpFRhWpd?#+jKFnTn@Xj(DbMrDS9TeC}lEG!>sfKx=4d$V`)W z_b$n2mp>y*94gb8wRmLn_|6!0J|362INr2M@{+;-KLdk_hh+L4i_uHrl(4^evEQHXbcJ>3(VsLn z8f>9VrPXqcEXH+GS-lAl7LLxsAwGJ#v)l8KcHS$Df}z;9?Kb)}A@LP#Jt+^bgH_A| zS{xY6XOTVzIA*~Im%hDwjn=fY?#x+os&Px*-~Be6FKPtH&J0*6aK|4%H+Pov{?9p+ zx?y`^rO5CT3rDLv)QVD5Q%6TfF~N^=+cvjqEA$c`f_%y-DmIpv8|E275l7!3;k6O+1bRc6ddKZ+Wgrg0Zm*|T&%0DopIzs3FU?w z_ZOT8Mjx|DIY$!3HOqT(o`=T&I+Jc+)yS>WtE!kbW#@}yRlirk<1CUE<1a0ds?0~@ z)GutyOrihKb1aH(Kp^J#v#<{`x@%jF4H|dwRWM(lODmpUq_vXrnsop!^hn3=vzlBQ(y9lUZ z5>4@rm5}L^|2aj`%x>*I7~RSsr0ws#&SZ5{+O(O^ST50XziWlHS&Z1%&$O}RykSf1 
zj^)?*Uq3Jv(S4-6M?&w>P9;k>vN{cPJs<3%jg>Y%qY}ASE9J)xb-T0nCqDZe=x9-D zMLaaR#8M3O9@lLLm;4e&Q=_;DUhvTrt-bZaKZG!f3NurQJ_vw4H_)!AcIJ+>G7s>L zW_EZVw`+$BnS=o{h0r@vsVPLivM=LN>)&T8e@*#iE>9S}(DmcmL+;DM*Hsw=gx-Bn zcajv={7;J4_rJ^0{PYVKnV%*ta-BR2*A9M#Q|U*NeCAnPNg|SlCAzPZuHt+6E*E?t z?NqUXhY!)^o5{1EN}dwN?B%#ou31pF-{O@ab{595Z3p2gO6uei=RTp+~^ZxJ7z`7THr zWsTK1$Pue%epO76Dd*^mz^l%wa;hoMJpwxKrT)lcsngr<%5yK){Pa?MK;_mt?D>fk zUFcf2n}b6{RP^|DwqsazV3`}&IQcLfAX-I5Me(x!Sap8g;qx1lVk}!77NzP|7?O3Cq4@0kC+w`OD~KrjW|lD4T)D)@l6VRg@pYB64a8W4q^B=SWt#n! zHuh5}4|D2wzURJUh0a2ZikBninq=|IVET&b<(-rgLf$ckMQD?LtL>vb+xdy7QLyj< zw(I`A#Fw16SD`1JfWC@s=36rEf!QuW(kaXT)nd!12HNB6Rss9>6A&tcB1 z0$K4rUTHDMi$EB7*4_@_L?2-332ST4{>k8u5cSz+2ExqK!u35r@nBcDP*-QFv$N9t z`o?eCE0;B9jqA+Bn*Lhwm{Q*>9R9Cd%_mm#&(wv3+kX1Ag*q*)h_@NfCjH$|iPOBr zt3u^cME~CUBKtZNZ<@G`{OMN$4Yqp-Pv-D=xqZL97d`9dV`|dQPosvSy;HYx%~-{) z;mkk72`?@#X4izogknA3_Qk{{$HEi*N58$ZTfv|4F}q0Q|THj`ka4j zJSD)U;~y7SD_vi>2RyZxtWso&QqwE#Y*<*`Ad-)W2aAtaSru=#z&d0y6$4Mq$;kmi zv;xFy2ZCd2^KCX&2V;|XrRA@?_U!pSUG$;u+$p$m*oQmVSvdZ*b=~_hi2O8WNsF3247Mh}O$~XZOTKYK zIa}e0haJKxyq~I|qjbpOsgePI+j}?(D|xL z>5yWYAEm~(er8KC4lvJXpSVzmq$i>w*UDA&*(lpn0FVfSa+w$z5$D3Uw5NN$e{g8% z#mxh`j~OJtbY7BlSTZ=Pm8N(lyt)SA{qGo6}ZKP z;$vg0t%`BCaWi0H5fRW(Qy&ia^-wP#w@^_$jTATh*7;bT(NQ>Qh*0YH5urt@ra*n* z`uSPo|Ni@rt@>;9qk$mAH}EjqCaZkvHaZjdi!-I94j#n4S;m~jn!-Z&-#ZkV!S{{psC{P*8?Ao3q8{&Xqs z`rX$8Xk?%9ec#Q`&C~?l*10PYnp@2F$<1ji65*YEB6dCSjO6af7;pr*cnPqghu;2O zLGvR(&G647UUOU==lLbL9c%6G?hX;$-mFf*dhB0bX2$$^qDSR>Ytv~CXwmC)?vIVH zp2vP59tKwU?R-613F(#?>GihQ@1*7=z5K@)7w>N#d-qGz#l?k+7dXJBW%qRk763x! z^eVX;u_2GwP7WcU9~`u<^O=PD+p%K@&cDC(m;Oz?{9Xiph|09`-Zs761D^`eA0oG) zpubH|Mstm?;=R}|Xg37dI41wGD^yUj$-(zgUS_r1*3f){W(5-C?7pNYN@TE}rzMgJS^by?Sc@V<@4uwM*;l!pvIZ zfdCvX{~9^9b?erM9lKpW2<+y)XCiVkpKKp|4Ky1M%=n`JlVQ;i+qKoz)e++a17V#LCq6Ft$L|hdj8gkXceU!7 z6m1*8VZ7j zqc;w)UFSZURb~D01@Qe*euFB&f5X?Tq@bGYVh@Dx6j$ptIEQ;%|%SNrR{T=qti>t-Q zedRNa3ZD1$Bx&U-aQ9}FkNUd0<0SxrML-bWzh9k-oRTtrx2!lnzZ1|Xo0;UzF#Wck z9#J0R=dPb`Ve#9yZxJ~n%oky%S?5**ta2H{Cen3Wfn^@C+39r)x)1{J^y$;)7s1Ug z2=B)w5Z;5CR%iYG)hj;6sNqsO3C1YSdim&%Pna$rMt{u#OD-^hApKmuTwsC+b6v3E zM1~8Zg1lfucD4id6AIa`%B>%W?%K+CeNP7+!GrR|&l*V9h~`d#;fiDhe$Jjfi>=J~ zuMxhKENnZznd9uXZ*R*SLRGRYs@->u(o$KoL$e?m0y~zMM^r2Y;*3!CqMf6?wbQ*?P(VOAiF{lfH|B?AwH07b z0Q2X+zt{DD;2xuqTfM`Afq|i<&*ub`Gk|*{Ogijy&$d(v_d0Q}Hc;w*KflB~cRoC{ ztc;F{@o;w!Q_0p5bdpMkA5$0Tz~ZG?IU<&ziA=&nqk{Sh#hs*lLa4S1CY>IBXo)7M zu3B2rW<_TnV@_!mg_x336Osd>DOx!qUA?_~fZ`ItYGR@fJOf}=QKhs zpC~e34A1!L)#h|L-$IZ~v^M#({(TSnWMitcZ>zdZOV#NCppBV{i530}6P=x!<~O64 zJufv}fs+W)G7)C}7r}FqvdHk2ZWtBZmhmvNwY7D37wNB7kYS0*8*&dRSh4yFURFRB@Y$#KIkV+4%i=8~}+ z4b3PrqkUu5UqotN(rD$q>YovT;*poX^zCgm&|}>T@0V_<@nL_ULDBbG;?SYgX&(gx zL_N@?t}kz=Cnl2ZcouvF0BF5Trd|ywn-_Xb&uisX9cUO^5kwAcXV}`9jVMeRXTCuJ`NpM2jI`PTg6*z zfYbTf!LGm^nXhg9>W#Z#E&idxWqVxA!zNT{;M2eeY1t%6rROfua>PEm@ZFZSEp`7% z4d4Cq#d&!!T8I?)+g)C07cG5)pp@iGjUCe+ku9GwDQT5vPn6`?E2bJBU~1DbwFnk| zq`567heK`w!Vu8k?Y?8QZ$0RbThF_T+}NubH+7JEQ8&q|)sd>A{H3YOtD<{kahj*6N8f!j zGB#e|sI>w_gc(RWb75)X{MTEeUMrTf%7^Ze*i|WiMbBDs$_yY~h>3_znf(^=Q?q9g zKLBn4wp{r3=k;<8A=V$OKz78aK2b|m{VJhiX)tP?+71_jM@%QDqF2k% z&4&HvMXH$gdZZm$^^L5oqI<-n1v7z}{mA+8l)cbrUC6#wa($}~fBaOX|Ac4p7TzTB zlf*L|8SY4E4YSd!ur!FMK3&L(G)WzVQk-x9B$ z+b0)t-G>Pu*AnTHbL$g~QJbJGCZFo9+pl=X&dTcR4dVbl+BPI5H1Y=)fBvjLmmxi# zKzvWCwz8H`$_onu(#I;SPL_0({dB%2Xe%&eHRo~VkF=x9u?>@0E0ic4Z1pWD(4yrH z+xIE&rbQlHAdc!}rxtJ9uZk=&L(7ImND3Sk7Je|>-sjJFvCv<#Lh|>7BnLp3xSdS* zQu)K^wRz}r7w-M^S8Tf%$C`I*!jz+0t2ost*)i%>urQ~0Mc9gca92)3bmD;otzsjz z=-AiesFHlBDRrMhlyzn0S0>Qqn0l<(^S?a5n@jJ%pQDLwSEdN1=5=n> zVF}ec1%-W0&hpie%3Fs$XTDBo{emNoeEvtHo#7HJ<&v?5oV;N=?kQioX9Dh|9m~y* 
zRHqFSHNv1*QXp}52%lSv;%{oG?Yo*XJvZL};!yhm&w zf+g!*;bkH1e9>QhcX$ZB`@xr|jxk@0(Gf)M0CY_t^^7)=+C-Kb;GY2F2}ei1{%X=V z%CeL%g!Npu+`V#TNJ(MLxnFRxUA{n1)P?t6PhVlmuTPv&R=uwaF%!@8gvBE((-Ovy zPp+}1vFi&aSsELMYJ;ttrPi8oO0?Q2>l)w7RL8e-w0VM-ceCFl?NXuQ^|@(Mzs?>mx_+Iu)3o^$yxy_F@s`kqw>zGp!{ZKzg~YOm zrenGN^!G(OAnH1$kDGoT7fN&eaB`1YwgG);dvl;)HLTa#i$LV*!5k8Yqy<}fxs>OEY8j|4^4m^9U@_(uw~HaG8%*p5FOyD$bB5`t4;i43=XUH!q%eGG~9z1yP$og48 z+>XgYF=}L6lyn60HD%oQQSv6~rSQIHdq`2NplW$GC(6jUt<0G&lu0l(!$Qot&$+W& zkRl={@y!vb$clXh-Jd1H=%w5$4<_a<_+$@H^whe@o$K1srDgQ$7 zQfg726Ek(XoiS!LSB-JK{>n8?J}Q0#M{v z(_g)z41zSdZmCVHUz{ID_IsvkMp-U>^^|dyNfDj2+A5VST1fzvf+zE8csP`7pNqs# zE&QJF3!_+qaAH$T>Mz`kwEjsgd zg5My(IOF~99rd;lg1l^9S26b8k?q+(&)Mtr*k>aFM(S%0R>hBI^X1_Nf_j2rpP>0% z*z}YuOFk9(nKn<$(eAeEfd>K(j5>u9X%&&8g;iC)tzpzaqc=O0{w&nKhGqLoWaALe zaG7fW6}SCvwQhAtR#s^5-jtM-$*HNGdlUigSob3#xsy|_Z@Gafn4lRsA+Yw9-z=B# za+M*~(y_#(z@tR)pR>J%{h9*;*%ptTcfjs|KLo2)+xgv7C;j#@hNzRQo5pMk+&c_~ z?MiKj7vpvbb{O4@ru zjWc3`NDiRL-0ACJ0Ss-+?CngAY=avu!PSbg4-O^^8Y;Y(wK)GKd^;<$l#>2G?AiSM zJCUG6R<7vvEKt?|f#xK+UEpwz0e%PN2mDF|unJj4qi(~y00xTe+t=bEQdSI?PfAem z3#QKe4*L83uXmmGW&J-UY>A$Ewj8~$``DPB%WsO&9=m3yWl*sOk{56?d1Tu$uQK%p znY9^+K*;3(hZc!v-aR5R(>+_7M_?b%>hdKZ5QBbBOk}*EZw_w}nC`H9b?sjsKp)70 z>*(px?cABxGf8J7xnypULt7nL+f!o&z6P=;m__NJ{Urj38Pb)!VeoH;n}UMy@L&?5 zmX7?3Yx(*3yw31__0-D41|H!vuLe`Cv!_Q}N9Qu2H(kN}dEQUxr3uN>)6!b|(YDi6 zWLN)WZ7S>&_;fVk6_^gvX?ph^oMIBl0>6Co%!*J~cMwIGM?Hqc@{DGtuhtO#ZYnB% zqP1m9OAB1^&=uBzM1cM0>P9CfCV-7>Oey`n`hNBdh=d(apMsW$uy%EAjc@?NU`I(R zRQpo8larGP+9ATVv(f^&12Y=&+?nv5jEwjg4?E>}0(3(HH(4tWgvCFACPYO(hMN|Sh1K|otB1sM3qWS2r-4tNXqaMsBYe8ZEbg8Ls#>93Fl=<)D5S2eY#2wZ=>rKY@KReXq# z&k>9-9Cd2Xr1aai$E@@Sz6crYJ z1?C+7M<;#_p(yaWb!4rs;P-}0Eyac;pm^g9xJQT{hKA5X^xY6y>QNJQHMKV@%DyiJ z#+hh2q=ka?rg#W6yKH3cfBucoDY5^Bfrm0OZ{7s!>>sKE zwo|%{uK~C!eEj?%bCy`w{RHEIpdvqwWB{@VHtGn#SEz|d@iH#3JPDY$>CdtRcRVW_ zTP^s#K)LNOvK8pgMMw-NX8q*#CAi!$Clg9wQ6L^g*hqR!wm3oBAXmK8JGFR#7Rm&L z(nbZz9RwJgZ7^NyO8Nf)W!VNd6B7YUS)aQ3wF|sLqD}em$^Sr1OifGs3l27sj|GDq zxI+^f8xXTq(bV73z<}OxlAZP%BnnBEY4qAeJhca5Ei%gk0|P=jH^--@j+>ay?^RL? zLLj{-aH$_Dd}4WN1X4=pP;T*Si~>7TG^CX$B_oppnASo}@y>rieKWg%>Ix2^Rl2V{ z*EI+EOueEZ5Qn4GasUrKHq5zX0@UuCXxp4Vz|Td7$Ligd{Q*BzsAhcYhwodW zh3@;m4a?&IcE7EP?cLkn^@8~}KWHV=(!MC@pvne>j0O%B_4QUD{qNef36X*xIRaXl zP%$nn=nomNQv|nfsc)-Px@YCcLo?9x0+y~0xf95+pWia^2jI3sgxv5|f6?MG%O3!) 
z0V-4rVmu4hD4I=RyJ`eoNzJMRIC?^wLsx*Yy5YEf(U2pXtKD^IPXG_>*8`4FI+Y*$009rTpjtpTd3k(bO#h+rk0ks)4ls5vC7`Ly|QI zFrgo(q|)bJbVrFlb;9Loedpl22e~ebi13RJm+0-Hsa@ z8#_8qUKMZP{sGZowjM?GF$N`pySe8~K_4aV=DvZ|w5A zfS4c)1f&-GH;`8Nxq!_{HCvEx+Q_?qpV1~Hg5TZv>wIow;}yU@P^HAjDti^R5%bhC zAu~f-*%}%`=LdKVz^$x|%$c$Je?w|!Ku@(~XB6d=2HWA$?5EM<$RdCBuzGCds~Lb9?* zIDj8S@-n9{4(^DKjotgX9(rS8VWBT#EE{cEt%|R`Blj{nd2%-^E1@00qEaa#yDK_s zAIw2o$r+fj4!YbrhO{7{dU~7isRzZyn=roY^z>!G%{omGT|`VV%S~9gXjcik5@I!kX~PGzAEsM-|qIqz@*$vlN33IVtEH zydoG*44Nbi=VuX;7g6J_0BfZzsy#KnxLP(^IIwi70lCcuf1kS1ckeQ=H9v?h9nG;v zO;vWFf_Nb*f1!@9#C|tph zN&1Rct;vJ=iPgxW=#`)ul9S?14GjnS!K7ghZ>v7Vb>y+SW9aNlig@gvw_ZNsodkU5qd)2($b3 z63Ha4p&MiOj9oNa-I#5!{mK5+?HRkp_*BsnON2FB5H6{gBJqW{wn;{sy}V~u^j%$a z`~8Ft@P)2wim}9~^Ifs9xT!2=>Xdm@CRRu<^(T9fylcNpjJnU$se=|FlE#I~6BG+H zcyU-TQDK`sFf;y3`BZ()^|8BAtdT?4ovY=KJoHl>&H>YcgEq=S&gS`D#+YQzYO~gC zo-qG~Zj->9!dYe|%(Zizk~qsqvRtg^6Erq`@on0+mj!u=J1DMNr*IA*`eY!N<9{IE zqaZ@#bfBA&cHX2zSJ#taRpu)d$$4@5!}ZghyYK`m;x=#s`p3p>s$>?A+$?UK5V{e` zeZr8r(WF}yxewdDlLv)43#^j3B*x30iZDqPeEK#J!KWyplhZoiL=C=FYwK?UM$-%Q zdUNlkSw^echLTMfFZ}{-A$pUz76FY{MRkTUam-f(SCp}N=N}*YZ|^yO&hAfkpmpUr z(n(EMv1FNK@!sVP6JtqLD=;iFad%Nr->W;s7j`30!BW8Jpp>)~I-D8h(ae`tS0sD% zFvKkW;9(1G%eL&UE_uYd^06%fS|SkQM^6QQPViR6J0A#OeAn3aJH4mUSF}ZA#k!di zhXSSJG?||r6FwP7uQK5rwJ90eqAZxy5<;J6T`+j=p4K1!t@aeOl{_4u5B z;GHlPFX(mrCp~idgf&Ym`RG)r94<8!Y2p==I+Rvv zxwZ!`g|_ePXZ#YMba?AyEoV`A0Xv&FUv|7xJ@F`<{rG&USLtr+2L25GolA)kqh+V{ z^1nP6IpNVKR}tkaUn{>eejn{r6raCn#G_y9lqalqCw`GVv3})GIfLBom03=mi<6sI z8a{l#EVz~Dc6!r7_+?tDbeV&Ezfr~&wEpe`avHF-D?MD>Lv9cE1@nZ6q2@qJPR?Rp zSno(eEFak?$An(7(B{(qnhUexQT|zOhYw&5a&VZr{(0A>m+x)!2S$uSbro$CIuzua z6+K%1vtUOMJme-VT&i-TnuMwG6|}<9A{VIgGf_1uS{l%t^SAZ5?|1u4TmHKqdwr6+ zJu27VG*Lt`c{JHTn3?Q$p9}@*M(^jt^gmxWyYc7_*sXS(&o{20O*ZH4vR-6H0RuI~Q=DS@5TLHXyzw$d22>+jKS72NAsu9Ffi(Czi2V_Rqo++S4L ztfD|n+yi~!IYYbOFgO=Tr9Wpy`+uL~-f#Db_ucAp4r>)LjWHfFT9Z`F{&v55tV3yU ze*pib?}Pf^QVWi>sfXUQO4GF;rQbsp|LlT+DcSSIKN(!Zf9Pz}?q(=?h+imhi-w z%w!3gZO8P&E}?yT0bc?Y#}2WyiD*aB>!s84GWRpTEiK)3di&vh&wt;mlS_MvzrCMv zmS__n_fuq>r3cY6UE4*PqrvvSx?E*5M!U&-kBe?UJ1@Uu^{IVKzuhM;g|nYBOn+gR z;wT0Qphq1V%=@LQDAPNexhc~hOQrF%Oz9~UAFxtCKOp1e%a^p}N=#7Meb0ki?JiEH z%$7Da-ZVe+rYzgqfrm8Prnzl0T+rgQ&rvzyz@j+8!cw61yNr6>=Lx0TSUG(2@EvvRH&{o_!lO+Mg$jHU7e%5&dGajW4W@0Q*7EM!&J=NO0A;ffYh~pDY0iwzTcXdsE z`DY>O2mikHC>;ZAg3`t&IKn;=p*)?AcQF)|WhYJ_B{A#WT&)#Eq>4eZ^N2pFcA*^TF%FC-O&*yq>X@1=>CACQZyRS!ieNKa@=6Bl5p# zoNvIluH`l+EG97;iI}+j{}rwN+czJTYT|k6I$xqiON1{lcdng-Ux%0afqDQah3^>s zzpvtQjW~;YngSsjTYJuq6<_-c7~|{mn!RK#aAI78C;xVCgV6Cjv+t0fu2Z!j<@Wyu zjG&0@3xK!Y+g*Rk?Y-{Q@Tz}27mYN2!ch-W=WA{#0!Z*!cPwH zbaF7qYJ9K#6qk30649FoC*6M#``>a&T{sB%Gd7t<2V@ONV zosG}3O{*^;`gZdcxYCxw)mEn8zI`KN$fbC!fZlw4Ahf0%Tv(hUHokn;4-n=F& zef=1J_h%~T&s4^TjV}^Gt35-x<#zTS{`5tp)BSBz&|geP1&4=d0sFtd z*@eG3>E!eYj!}Hjm*)q2)xynY;|zoqrf#3><{~E}go-XZ3=nlZ2w4|Ky*t3Uv7WcM zaiZ%-aAmcj**lKU-CW%q?bj%8o7tQxJ{`O{x1Ez|>5cAk^}VA{tje98RI}p7x~(!J zEQ&JO~mdjJ#m44(PlXeacV%bdqm17T~AJ!;Y%o3GLueKAdB<7Zlv z*afl91CAGyELDGWb3NN=S9mK=de?rfXn!{2}`+c;wHKUu)6W28BuWb1O}zCavN9*ZALvbkgXlw{MEZQ!uas;TY0fp8$n+OgGMsjn2cUIACfJ~ z1FPozdMjDxw|7I?^CgWBlnb6!(k3|`6OgU;lkO6U7V?%~%L)EOo?1L;afX9zYiHcs z9cLW!4)zJ&S=eV4d=aa4%aTtmHctrd&2Cg%11T2q)v_u zU8&Ufs`1CuZ!5c29Lbo(L;%i%pMAGYC|DcVZ|Ad#s6KXk_wpOgdCB_%3ns$dpXH8K zKl@TqaM7@PNVVOV?6_#CXw&`Be6EtC`%dH*)Q#Ltb!;w2v%vNP<2UV_KP?;x8yV(v z`Xx73a-&MY>lvFMMHd;2^ z>Dp~NJ7%wPQmC~UmJj&;s?UbEm&Y|r+x2Pef*yE<*9W6flWEaPNyVkEbW=(y_r z{KmjMRoT5ZksZtHDrAenxxD7`H$OiZ_((V9bs%m_$34B__g3Lo{`TY+6zoI&_u@rq zj#@o+zJzK^<-mH7O`*x=9h^AZPF5LJ<(5OVYVp zJo53r%USLCVv{j@{k}2IIgGs>dov#J;pUFD=JlI%t{q;GEsyaDx@p)s>1}d;UZF!~UoYh` 
zK#=JxZdl{}iZN;(N%>L_DTBO))LFGq)g{>h(~(M+sh%AJ4sxr09k`l9C*OvT0A;k3 zAgA>PhvTQ#q{~pt8S)__%e30AQsjL7p`hvWGN$ZLjvM4NhPMydMq4C&t?bt!%bPw! z56RN9bNj{XO$gso>eCue6BI-Uj}X$-BQ7fBGx@8I@X7Ym6Q_PSY9aIa>-@vkFLD%w zdK221JmCL3AUw!?9vOiIwPSxzDty2Zj!Hm@(|FOL8PUeIP{|Yy;V81X0xJTB%*lwsTK-c@3Vn{j(yB?vc#wexadLBOj_{2D+cFpu z0oBM1w1xrdFO4Pc2EOr?wy?8y5*M!$+^Pkse6HUg9m>Wm`aMTaExvCRqe5-(N)FN| z_~8+tqzTl}0MV%sWkZdXw#q{R4x$pNx;p7-1W0?#u{@6j|A^PonQ|0#6%=X4=$S(@ zJuU0w<{kKr5R`4q?h&<04q7TS8Esp{fKmz73WpIpjZQ+WB7=0K(fPAco$h zE5b`TM{D$>+gSDVbIb5V+A`6FYYP(D#omRZMaY)9+OBA^rys2P#YG5{e~+}qf~H8M z73M3xj1OTDp;3>1fvCyQFQ2FtPqq$)YN|ej-KE}fDT0s2-6>r#~$jlr! zK_yOw$njA5_eJz8RZCW?(nVRf^N*N zMXsN-iGG^#({t}b#PbgG*{E1mf@1ilqkv%YyDL7XOQ<>V6uPWk}qF&`S!4_zXC;^j>_h3;Awij=1jLg*hL@`%qgRLfbNtu*{81wRk zx}QK$hki&*>=?ZeGGcYI2Tfvtzd_Y_n^Q~at(=Y3>=Yj#OB!k0u0+2Imlnsg^5hrK zDRS#F!?cpEUj5rvuIb3AE9y)|o5TRc3XND?ns!}jC`7xy+E&^<**|0B>75XK>e7)Y zhQ!-bBNG^@FgfcJi$t(}{Teg$>KrbEbok?CKl)A;@% zODl6Wp?T`?nKuO>?}|^G#^+x#Q()>UF(WDWi>0xe&WIWs*Yf-L_{Bkb8%4MDz(?qhh9 zJ}ao21q4dp^}Lc^=jmD?7-u?H=bYk+g6{<%x|`$FU=Wc+kA~FQ%y{uR9 zBN}9W?nC^u&)a4rsJmT_YV_;<63;8;k9ZiD&_6x$9?*qV37tOJm9xZ32R~BlOi_yQUO= ztT^P4PLU!WGH$(J_gNGv4=&$$yb<-Zj#d9?bslhS6}mSjf&#a}T>qu#;|I zrgC|Gr#B#}0?t40sd4A|J~{*i{vDY6z|KQvLjX0k{TS1i_ZtoJPW|wVgz^Lfar7}q z$m9Dv31+;Ymf8EJSC1^kdklr-JM-7!%Vh0VxkJw_SH(Bn)+@fzO6Fa7-XP!h{wc5@ z9l25$_5L0h5!Ruo(Q~yV)==1azsMuqSmr_XaFf3nWyGOn>XW@c@onniCDrfjQSjd5 z?6@yKrvDio`AMK>!nvW1g~{@Zd%R}xkkFcu6G?Jc+{kHNmS7wqHI>j}7{EbEuTCRV{DcjDj*)v}Ff32gm=F*^VF z61!!dHu;@8W12i&?ead@r=GY8g8EG)5CY;DU$6*54=kHk6`lYM;jaws++FWoj}EGX z>K>)1_Rs96rA2Ma6GBoAscG^wP6R|Z`hgu=T%9vWqu6Vh?PZ~70(F_|nfAQ?qZGcd zXeLi8pAix}XRxVF=sTP10()L_Cr3MiAk zFCE<%vwTVYnY2HC9V;QF8ykJGJ$TZi&Kb}9;sW)H;>VEj%YV!ZqYFrtKd~?wYLNPW zs17QVfGs*`T)Y{=Q`-cG3||(gDBed2HYWRWDE1DiayZ_9(c^D#<};gJ&ArgGhu3pL@qryORm}2JZsm%Mlmq8Y#Y}3b09Wdw0K4*Xv;fQ zTPuCPJmwBIB94SA&v1d8~$$G#+1;fC}uOPl2l7Bb*OJ4_NS7A81Ns zf8NIYWqOpu5Jrsc_#MRS*4~Kh{GQP~ki$9T4avaFrx?Y&S^YDz=5FUyyM$*emv5^{ zUn{T%JP1&Vq}u3(f4(=hzj>9FZ%#u>Bu11F7`FG=;8YddS4N%Va_Rft=hIFXYP$C+lGGr8~PZe5a%?H zcK!*+`hoP1e5jhXa;3?mxtnhfz$O#K+op~Of`D*|W+2$CVC?#N95IR9 z4za9rS^_mBR@kU}|K?Ua6L{$4Kk+;54xzIcyV}dNgbEcewXge=njRT8`P(HMwH~qu`PGl zPk;Tk+jb@lkMcx1Y@p~Li ziR2YCz+y2VN3`TQf0=41RnEynw^=` zS;`{Gml?ru!Kz%7oUK~VCtDdgy*V>~&R(?Ft4F>x``}eZ>Xv*@UHIR&VrX$?=U4dm%kVA>JzqI|vP0jE>%)d(3;kW}LeNR>!FrNW4iDppr# zPA^ukV%>Prn8Y(Hx?s=S5E}3L<;Q2~C-(!TVGd~_i&Y>#4Z#(_ZEqSoZQ-={ZI+0w$Ur+vP<5mVN*a#`l3yN^^ui}RBvL!;?0y?HaI-12Y`^CCqbeaISg_j|EF(249J$Rtw$8DQs#tUXismk@SG2eK7qd`|+#vWQ)+Zj^ zOn2@teBpKhjTFls0t9gYcuic*=7BeKobOvOdn_#rjvFifS({;pGbbut^s?QHcr0w} zlD%R+DMtL@Z%$?#pg|X8v=Es%Z?CO&IxHOSWCO~69SHD^!8f(QbW3!k1HJ`3IP`2n zyf{Q)|BzpmRB3^M%LaB} z7wQcvYln}>egy^tfjA&Qg!D{S7a8pRkl=fZ`W5e{dvmbKDBZfZv}3-OZrEls>1guH zbp{nL%Bb3U@XzwUzJH${kA)Q2E8b;}j+j%<@FI8|JkVb4)8BoM=FtLzCkAM3f$zq37#_1+o!0=!oiYi!;i%TQ$`vfgln?+Y0lI8qLCe{9!+iz> z;bQ<_Pb{cuw#hW{0bZQUOTq?9_RZ2(IlhYr+mg-Vsd%MGgrHt(t$uenDANif?95-< zGu0W39r4{{E-OxD7=x=0{{f>`UP&N^h%f)D4N21LKN}vDYNxLooDeg~Z-k6#vP)m- zaOW`*$Vj)+sLmBHIpV-pL#(a9*aHg{3I-e81HhV4z5zQ$*!YaVtm~vCwoLOB{EQ&? 
zQZ$UZVj&Y4fh?tdcFqR5K4shX*>H));7gW=`oT+L?yVQrw%>zuO8_@|=-_I99uxQu z7*O}Y&jn+>z#X*`YwvDehQDVsBXS3R`GHvU78oP4r|PPkjL0EhzIdQ`=FJ8!1CjdmiC+SV zojJ|D`#CO{z9Aaate6ITgWbcyh%2UZ5yq530AO+gIHLD|ApMgQ#xJx-3D+_UWQ4$_ zI@R+U1e65<5hONyAIu`ffhQH1Ez5u5O5zOH)&eUtSgFDq3kVYYj}&srFH-Wam4+j> zZpEtugfwfd`!C{Rrr;*8)}ETKEZczD**Ao?AQewSyP-eM>`ffJZI-3@`DrAix~-E( zhaJ)6DCrC@T)p#u8R#vOb}j0!no`BEp9QY1elFs>_cqBAQPrCpuY6^K90>!4paxQDsVzg|Dk zs!=n0aEkGC#{H#Bn#Ya$Ra*bt#I9m%sY8Oh8vj@Kj!~hHxRYGX`tXZlL8N1HDCA=7 zq{=h4akp49<;!CHOg3#ALgYkl3iJ$4-tU+8{ztI>RT2`5M0{u|lr?cgTsVF>FcoxB zf%_pe*1wKEL!n$BXB^9g0@+~rdK})JlAW;$DY;>Aof_G8>>J5D0>P=Rb<+OBY3y*! zlu}hav%Ep1etJ*8@HNMzgZ{|a4})i}%1D7;nipR`;H<5N3`(k@x|t>*Wslod=klz? z{hmsoelpr@oC4R@R%swkk;#LDLLNn)Yl(zhslprh$u3@pRWN^M1Q}!#FyL4S(T~uf zw_qnC9Awc{Zap3UU;*a7KB5@eH`U=6<)7CQ6l7!)#&v+8HF zFiS%M9gtzHtN$76F@|e1-+(ZRNU35=lqKK?WbgjUU9RcCNem!w?XLER1T$E)CLoAj z0vY>i6x^jAuM*#2!d4(BpspQe*nBIs2Tq+*04*vW0jm}WQZ?QVIWCu~aG*bxa8I}X z`2WRRq(Ni+K&&63|r8OxV!vb%HQC|ZmH zv+_NvSs+W*{7__NC}cbb^9Q67KRU4KjOT~U83M28;>NE=as1ukj1|%PSU)+!KNG;JIbLn4j>28J%_aC!(cvibEU?t>C$sl*-ji8Iz%IQPg7;lrBm z77`R&YiqF~@OLR0WnEwM;So#l+q<;rzeWgBvFwy&_=z}p5Z7<*IvBKP8K05sgw;Xj zHilfE!=Ibx;q*nm0bSk6M_`g`_mj{3*KO$uEXy(EZ(kbH##i)|aY+*SvIhB-9_l(Z zqK4DCHG))t%GO`zPH@@ten0{lzQM2Kvw4$Y|0o>3VDgCT!n@c}q0O8~b|E*7%3<8J z&LH;JNYGGTe6d#3@)=0d*mLt+%8_2qUu!3koHz#$&Y-jE%3v^7>ian(5-9!%Say1a zAHbR7Zen6G{OJ)>tPLdUIRMWF;D=dVgUMruLGB3n2oyJa`4R;(1}17CPnq4H>8`pm1sKQRJ(BuB6OHh=O6f(8{af0$=MAqRrk=Ht>5 z&7TDsTY`iatr#UF!K1ae_utKj z@n;H48wy2os7Oke<;b|0u~K6;c*8n!+>l+F`V-C67bv7Wh}X!eaJxvRhO72BGV#MJ}r9HV`j zwV8&XJ@gBoj(HSyY#WH_1j9TnKe2IexWVrC0z?##GZao8c?fL#S%G^qU|7M?o}^g! zm%>2=kCW#uu#P2gBL@oIi>o0#3JTao`T6_?bUHqI06%wz$kb6-bh#iVPwh1kE3gZW>5eh`9^ z4$5fZrqYu=#x=~Srb8t9bKj{3&10s*e>4qb4Ijv?#K?_;Bk%qVA@zU9WLnrN|Z?aC8dZIuvQbMOpJ zBzI#!GE_XAnEDIJ6V0E_R__0|7@ytxyqdq&XpzUfwiaTyFaIwYq%XDLy$aE0hUahox0RkX<-8;@ zX3^%O?;`$LXsZ>Ik7tjOA$?p`zS;e0f+TZWOxyV=40p*}2KN|Xfnx924TI%>`S?75 zjqR_S#y7CkhD?!$g@^%jROIkC_vdCHD5>Xe+ek;}2E3%BBj!|i5E+3+oscI7JyZf} zR_Dcl$a57b&m{*!P@Fw1g;LnvwW4&g&(BZLsP4*V5^h%!gv#5jS_sKTSwcxTD3kNE z1T$WJ=cGK;tp2J%Y7waWQn_Z~lyn?42iVxK4`;+t)Yc7k{PK%K*wv5Bc0|!j@n9u& zOC*$@`rO)Ot$b`9#x!#I$X6MFB)W=60K%dW>`u1b_<*NcCEW}aaLGi ztxJk_zKCf%05%aguE9YKiI!`Hl58rpMC&rN(X&DV7!5wQY{uCBVOU*VTN5fg(9eKH zs}XHY!Sq+K->AoKLOw~uz?@|y6%DR@b@rB@YNvI z182VKB}hN1Yn{n?Q~;q93%{q3UV|MXXsto3l_yWM3JcyTxPMBoCy4r~=GlQ7;%zUN z#;Shj!90dX@ykL#+q`j#r;3dqm5u%QaQsg5@3KxfRm8ZyGrRhI_NfI^=njdl8}C0{ zccs0R((Z&JDWr+qUq>i@)DF@98~c?Sl!$aIyY|gv7|F;sdJhQM4}D;h1`s}r8Kr7! zQu*H%diRO9FM8fQeg_d0g~2Hgwdy~^*(V#S=XCxB?(N*7PzD_~SKjsfq14SH7WuAA z&(hVR%G|Jd{fFn_0TP=*ZP@g_axR}Sq83nnu}tL$)cAh9fYi)wGQbY<$>M*jm9-~)?iX}K8rN~D{IBg2e^oJ5@A-OU`{G_In-8h3-5R_^ z+W{JVWr07)DV-$+6bj5X10Jy3EjI0s!4`wS8h0!%tHDYr0u;Ky1lH64W3~arj&|4l zD~@SaK==L6h(q8&|I+5`sn?(Go%@EJ%f}Ic-m6lo`x9^jX7!aBv%|#16#d?h0Qf(! zO*#cxecxX9o&y;M;F)Dn!0;19n_MLr6hNxpTwhxp3*L`hXQmJPYH|G*8dUjHvu@}KAz<$>J%dOk01tG;M${E>cUgC!1EuWmpe_4Rn zM+s}PrE?;nrxQFQbN@E+%m`Y6v7YBYoy29>E-?3)iq0u@;~Pj5{tL7H0TQ_%oLXK&IK_+J**pU-i~q4~7@EF0sPCzKBXDNc1085icx~+gya> zKb5_e2~lC4JREF`CTk-j-_x|RgKw+fp*7oHCN5kPR(H&L@+_y%OzGyzYvA-N(qObB zTvIu{eS;WmOS&)yK$zcvi39WGKp-};0IW*^v1Oh`_Xw|#=J z3LDQ*78AB$xiA8x4rRFdI?=wxd_j^NXtJvS0|@# z004qMClL4a{ng{`tI3`I674a}gpdJ^3r<~P`la`sfaTrieXeU{eCC1DEa=46hO4bR zS%XxrYiz=uK!Wx2>@1`C0qYrWM#CRN`#;MCXjMA6!~nCgE`d*c-(5LDnTbbKE-mB^ z*00OU){}=P_QeeiSu*a*pO1?VglzLdy*qfmNj?AP-(pi&5#aXOh^NQGlYXDqkoI)A zLYJrP-oHMRn4AkY5W-gB1tttk5<^g8ng=RM0;&_@uk? 
zn^^1xTdPAw+?&jp4uddK^r@4e9rN{YSr0{#EufGW-`^ zUHA*vrE+CwL(_4z{q;D5oPWO8pF@AI$>zw(levT1wm8lU@&KJf3d8)a3R;y+dF4nNsLWR zR{j-SslrbXAY=$&f3vCq2)jDA?6VJW=D5h+tOJqJmfZ|u2ws~ z>DbnuStzF-NK#4qeBjODPFTos>9Q(Ui+S~6cPHt)hDYn z%)lD%^$J5l{`vt^;!~|&wS13hqt0R#>w?+g@2njuX8bbU#+CE#m)xmpcE|xJJm~dj zt=XiKj?wy!zRr{yx3{+!AaOi`|LKIxN4A<8tbe3IAKugvd(bB{ipzhVAxtlm+Yw1Y z4w-t_>bGW?u6Rj42}#&aIng{PtBHHIV0X@!!d$;y#X#^@ri7wSQ{`^*Xu-=D!{FJ| z9}$RaVi`=^oK|sKxddSHAzY zzI@kb(wM}!{tSA`P_5~3<1bcSMU(h9khVuIT}syR#=mDysJ-NQ(i5?35LO%js%rFF zo+3(SIJyArwaZrJ+oHL7#FV*ijGt)q$nwjmk#JQ$A}@bAA;{tXHQ^-Sj+f(aV|We% z=>>F&)FKtga`-e0ikA01gmjD4>ZGR3D^n>|((w1i4bM$?($7Jq)OGUdGzQ8Xwa6K#@AEXD50iUtH$r1+lg?z8a6^usR(x*CX#@^+x|)W6 z-g=G>qowJ>ULvUCH@1gO=*eF*{neU`!ZmYn&*RR>3By^^c%_cUPc(aA&*Oc4Vkg-o zJrzl;dsdlkf^5*Qx7#HQ)D$G+EBbj`WjGLZVPiAqd~n~Jrp&2p!xZ5y7EvLK__?*}NBF~9ZT|=y z=tnpkdW$C}Bq4Q1@QF0GRaOVdq0#gl-v@g>__1gw>vYlZ1QV5D#OJ=5vO?5luASli zgJj=|+F!PmK;&PeCj5OPCuPr~uk{0#I%l*}c%~UebvY^6q z%7vvh$G^ti*va*Vz$3p)_%l&rRH;GyAD+@sP3fKQ&aFyteyMul(R}ptRz+1!I_yt0 zXmpn~HWA6cM~tH@MC6=5i5Zhw?8&spWXlqNsKW>_5OE%6#`6`eNL`l{*cq8e{2d zgUrfT6#X3@i5c;eNUj$yhai5)aATNvnMGu$?tNn=w@eI6 zEVS%(4TSR_K;X!VUlN%3f(%R33>%DmnI#YQ-#XF@Mc)YEAv#Mm_SB2z z^BxHLjakU35i@d(An~B@qC@#>Qn!g8LmcYAR+&P83e9RvW1b|beV%cBgZWk}*x%U^ zN8o9+WV%#K7=bPWnA{&694HA5!c3+BSOE!uN9YdELHa7ttHAA?^3fX1ia-hj2N;lf z$VTM-5C^b}jnHft7^jY~7_E8vEY1xkVe8>hszj>XC$MXN(I5J}df1*nJh?lL=`%{c zr8r7nNK0V*!uf##aSYjo9>or;gVZggDZile!#@Xh+-MLXUECz65q_^mi;JMH+()lX z++EAf%#r`f;gyCC9G$A4XI(Dc{o!g>YvhyD?RR~iKYoi&<_T;A_oNUpl?inRX|XQc}!&7M}dEB(jj| zXQDN#Sm&nz;wv3Pg1(?6Rk9nMqzaU0QY{}2$1H=(Ov)4WX`d0INt6Xxe>MvA-@}Mw z)1bpm9?#&t@c|VlbPDW~assu^AhP1c-?dp$O0`Y|NmYa4c-M^Jf;JteHvMXSTp3z- zBv$87TR+UBx;8s)jZ%rBRM5q<#OmUU$!57!t<8#7;^=^+9pe^4=h<=^M_Y}5;L&-C zMcQG_jG`&U`*ZAdftw*M`DFCr5ZW3IeSL-$?MzX*;(%qd4DC}WdRwXhkLo&sbr*x5 zhadM~rAn3jS1m1FlERSfj0%7yQ}3A>#*Tkd=grz5KF>7f7VL~Qt7?Qn67lqOV}mg{a&q_54_+0*8V6wxoM&0Bt1rL6PJXbCVYd1X%je7k^X30L-s zix5~X6jZik0PG|A`5P+bWP>0CQ9eQ7Z-d7$sB?HYUZMP@<@M02lV;*Wn|aj zVRju>x|cx#`^GdRm~pBVxSeihFQb;$9r^E$yi_wK(IIMA-cvfRC^?l2evuRXP`!5= zuFNBn5ymfLN__HXtSPca%W8hhC-{H;qRS9YP%?M8n5xW}hqueg_0_)Z7E|V!^%0ke z&r$Tu*@Z7xrQ0Y-G^0?jU8mY{Vb+!|5pY{LtV*;+k03sxL?uFCdHrckL7`O@zs=!{%y)wkMli*Ws9#DQ?OLQ`dl;C>c~@Rhqo`S` ze!78zmz0Pl+*j`u=9APM;3F|n5OuCd#E|!gj_+|e`-S{%zwyRqyI`(5f~n}z*@|l?%0wQ zG<-D5%4c0CCP&rxYEoZBQf~d(^}jC_z?=*txjR9cFYrVcHmIjfQomTP_tt%VpSqH* z_BS5g>Ju(mtGsUA^xK_PTnCb`1;*kmc&GFPTSMj}N)7s{u_>VR&&dfy!8`y7?7M9> zia@$0 zQopzR{xq6j`2DP}ra}(&w4B|opwb9GBW1}=>f0M1yY**$;iK&sxVUfMnmLNw8mNGr zwIdcYkY55A+iPdwTc&nGQLIlk?|kqe%+Dk{^9+f%h+jUMw`-{h=Bu69;uD_$o^{o4xO$EIsXfUC z-5zqRNeY!b&~P3fmAmjuI)6j1jVqh)Al?O#y$>y*RsbA5q2oL)nEP0$lTXKqPt7>a zV?i09YpnrtsSGbDB{Q-?ip;mGedd%ZpBpaxkrA^MYM{^jMIY9#iFEb^PxdfnFQ zaKq|X68xg(0!u3?VsiwJ=1Vp(i|1+ncinaWuWg@D89ZJI+aOPqc?@aV~;YQug3jOfTP0Wty01t z@`j|Txs{JaCVLrGVfOa?Lrngi%)Q6$lmYcPWm7;KT{va+#g7FnOz^)0sy=W=S8JaE zVxkvJ%VA4B50nUHVNh`fgS^Z@2mtWOf|#WR@H8Vz(_@3=>{(4*14>cp>5Fs~%prA} z)M#|k`3s--^_r$_pR&w3O_>O+LIij@oF~_h!uB-7lGrVCc@oh4g`FF~fZX$c_^1Kwx~>#?Au( zkAnf7#qP6>38aonxb7WQZi+`4c zeBdFLb7y3S)WNN#rguRz%uZM)*in-0t0%3^G*kp8{~X1`mJGE6#jsInQCmdiQ|J_a zz>w89|3=>g-qf0j0b%!|>zp0*0)vrj?`CnnT-bm)urj}S^9YukUa&wi?-dhR$82Y~ z;R^Ga;kPa~&!Wj<=i$Hu>r5m5zDDf?5)VOmrcn9*@+`l+^qjwW1WA~}-Q>MhQ(v@4 z0(szA#kqMrctz!t$$LqD^f9~ zO`+SesmQX9nZ81upSRG61@!Uf*iceg9Y1r_0X+Nop>=&iyD&MFGEl&7 z78dmrT_L0L)Pqj}t|e1DGn4$#A9)J&i9@d|`^Z3~6d*`l_clz*)qkft?2j2!Ur^;I9 zxg%Nga{{vV2ruvOZ@BGjtS$?!`2IfAYhqFV{@`p~=4ct6IPqOHlHFcA3s#~GxrmmR zZIISJUv3OFoS%=Z&#J*fR}GYp@oM6|3PlqSd_<*{m3SBW-L2=b$ela86Z!1>Rqtiz zCD?k(vS%J29*W+sS_z6Ajlqh(K=*)&hQ<|8IbdZlpfdw>R6BOW<}muDNV&B4^uO;F 
z>=~=0aBR$8P_Oc4RIY$W?>92ZD!y^=Kn#(nnBXP*_C9=kX4(Fo2PvfApl!v7<9%t@ zG@-=pCqc${#_dsBEgFsp`RerUkLa@yLjPDdURe=lH*g&$L3ImN%%7{!N)_1PM?RXm z9G=&D;ZkzirkAKQ@Df!jrWt;6X%)dIQ%Zw}pbEpD?4-#ypBrL{@cm4Z0EWq+SZxxBBY z3&&?cKO|g;A9cNVF3~ijK7bRvSyRl{NUkA#UF*FbdC095irR0aXN%TS99_Rqtt9&U z%MWty?^tYbVHgtEp5DP3jg0fmh@tmh$g*$icgdt z4Vk7@boao;9(@dgWE?{V>cFm>)XdFCB?TMYr*O3Kh!7$Y($+DJ0`_NdDrnY$U!;Ru zTJBA3d^zDI=`Ua5xvid;?^<+zaHlVQRge9p+nS$Rm-!t?3PaYtv)T)Y)j@XSwsrKm zW+msQyKzPM=*`7L)(i61h!B*SGCqVKqKmM*Xp~b@evW{plcqn!$Ghr)^Q1sTNQN3c zrTVvGvw>Z601x+{V4mu{^Mg4BaiYMuC(u7}s!85CR~)pEOv5Ce69wdDYfsKOZui|Y z#1a{Cr`B$X<$RO?GYSbg$vZD8I)$*=(I)8apmPP2O|tdk=xV&_dsxv9;7_XZFML3F z+&RdD6)KOM{4QE8!|pka(mHUNOV-P1)^ zZg2o4ER(k+zB7g^RFg=pq$s-lkkYJ|o=p+Vo_XN(<5%btNMa(*qtv#Cc8gK#(3*uH z1@4%Q3O@>hZbz(=f~m)Xd$AB9KZ-9un7b2qJ^GKXQP_7>7Q}adzB+%2n+MKVM9H-D z^!W8tDMmiNwx%Yxr5c?*cIz+PfjwYh0;^!TQlX$f*I-x+GBjWd z(Wn>y;2_YE2?`PZ!(CJQqaGV|t$`F5m)13GP|W-LxUk!p9gud_vkUlW-ekU|#n)O|uO&8|DSNdSUcJd)shM7B zCe>0vMl`p9Cw0I40zzSzwy+ z`YWE?jgO`_yRWL}D}IEuIZoY5*kitJ;ru6pGipe~>kL{&_t-78K_U(ZXas%nf(io2 z*d16mEiWytIk)%yJK_M6Gl<=+USTtQ&$tOvIwo5H%`02va0X&r{({T>cq;oR7=}K8 zn4xsVa!^?T!V2sFTFeths#{Yb-I;#G$M`}40x@hfu@c+n9-iR0XuN-37vnyiz` zJ5}mNq{I>Czu5HN>X&Qar=9Rpio-peIQP(0KAOvKP_RC~6zN1BXFw|w6KLCu7tITV zE4J|LGzLR&$BtUmL=ZN~f*jBu;!AJ`* zE}(Jh!-prH;zL}Qpw}0K62nL;fJ7VU>vw?j2E^2Y=;5Crg$rbDzKf3jy-@gY=q0?d zzU~A1D?to0xTeuPml#Xp4Xxh$AKb_>OVup{hbWw{-+orDW7YmOeXqp*p2diw${cX*vt!t7Kt>yO-49Uil&F$Rb z_kZpz1f@AY`dI1Um5+5YIpNODA#N=0TEpo_USv1KG@nd%Gbn&)v0b^ljr`Y2uC48> zBbmOB@@0YMoA*T12Ogn5f**m^>GWJro^G<-c-^`RTiI~-kDSYAw0788$)4#1^gQ4` zQS87#;|Ze%CQOjR!|(#_-xa8&0Ue{h!ug<14CKiGfU(Gd6ap#Fpj!wR7Z*f{e*^_G zF(sD&`RLdzx@a#w0lbn(z^${B)A^MtV8I}2klPG=6d<1!oUX7!a8NpS*?CM!E<#{> z(be*U!j?w`!~=bxyZigmRH}?0AK7pPQAoH)t#ajPET$t+yb#xps9_f8 zvu=<9!hv0lQhU=Wx=rM}sa5;$nQ8Xg8U2SC`?TSPQjn673LjR&-WYthPzwpuGL>(XPi$I*#Z-I>`?g9rEE#)c ziqkX@K9nR|W@l+>mo#)XGYR*MRpANTbP6}OsBf#+%`|C5k9&|5KEa|lX_L>T+0ZXx z-_}e?HN~AiO2J=wLK!>2xK>5Jw`Nso-0kDS_*W=@2JH5V)P927F3e8euHpZcYY5z& zL1cQ2RlS^{c^&X;1adnq*U zn=m`CL)v3zQu0xpB)vzzxzWDfa1~i+4insT(J~lAkbcY(8_}Siaic}}Nwa}GDDyfxb!S%a>ih$hFcgALj+*v2fi%gX4W zlarJFI0EvR5=O_S&~iYpfW--5{6I?Nr2yL&c^2sU61AB~3&zv$BLf|#l<%~FJ$j|e zL=c@vvzUho*)Oq8xc_B<{z<_^zY_xAOH0r^20k!+3G6j#tVt>A z7izVQ*-`p0e|ir8MwXV-CzOzbAPHuE!nv-JK(fq3@EHDGT7JfTBq*;-TrU;jy`gd> z$MTIqfgL)x`nNgcWh0s3hyTD}K4lnh;>w;ZkWTWKjDf_|G^$g^Y=qgDVDgiRB4f{w z2^Ohh&P^Taw^C7Q_$PEodkye6v`N)UBL}Y%uEc)BE3GVbzB-2DdS*z%mUQ$hHOjU1 zJkz^8!c{95P8BhaWV6k6nJ}iW>t9@WFR#72(f@8v^}*mPV&Og<4HM)4V(+cPs%*b* z(M3uqg3?{mB`qz|4bm;$AYCFLh=7DhNK1Ej35t@^rG%1Ff`lL-vFGylefv85+NZAb z_gVk=>bur@o^?NWj5+6+W8k9&w^h1e*-{ZmUPk&-=@{{UlB=tD&v#7!9Z9~wEyo__ zKfmj|F6jCUE9TCMh+)f+?5E^8{l2sYil_mKGYScz&mtLsn+;b;LA9M;hExx7+qgtL^8iAIdEcPYMIJ4?K_K=eB{?Lqb0vv zY4ONq4*#?$UX~U$E1YHv-PJ7O{84qGo%6F0O+$-S$AaHzgA!xnN@pX1>PG_S52l-X zdJ3<#lDHzw^zw~eQ?^vMUDMKH8n$(wGja9bA)QsVag27C37dH&m}p=e6sK;c-?ui9 zmB9Cl)Im%_?xB9Sp564ILUQHwwu&lc!nW=C`OlcN>mgNt14{2&`K!o98|8k_p>_*S zo-xc9^lxS)Ax&Fa-;Ux#pLuHN|EwWX+5P?cvF|7H2Y8knN4Hto;_=a4CrLh03P|Tv z=jNjJ8s~zRh1hLtz*ERYw8diks%ma+sVExz`LS;x zrIPO3DHaxsx9O)tP4_9eqk39EesisIytu6S^!csK%8%w6hZPPD->{{W5>q2*9)SIL zU&=#D1R`{RMMHzqrM5L+Mvs>64#w%NTg~Ir-FOp~RzmJg2xWE_c{YXHb(?*^j%{Zv=$KSGUe1p4-nZ~YHp%;aj*)%qCKqQO&arrFi)f?Bn z5s0^gI8!=dN;D^rlge1Kt47qVeo9gW1b|r!Z1T-zQY^pQy+m|sl*vyV%_?69C~41b z-QO*sJsERee|NnhbQzIU!T!Bw(Bdw!TZL@6|25IYq4v%>TIrS9H}(zghAMcK+6xyH zsrc-KiI6rgb8Qe@i!(73NnqAwy;DeEm!!G+^0%=2^g0?M_dC%zHWy;B_UGtK#40i} zHMh{AZ+IIJd?$|ap48t!A@2L%Qo5b?v2gPtCqS=BkNSG(Jqs(8p;eL#q7?A$Z(kEd zP1PK|DCbp7SUkMTfp{d8mX^C_sQ70yay1lH62a*UwFviKzETeK~GF0>wT*So>=IM1@j<--#tnq9Z 
zv&#z^T=S)rSj2l|)jYGZe=0sD67vj!*t<(8`$6@F#k1sfSl4AgB|-;LQYL8Ln1=29 zl5TXMvbc`ki^7*)iHImP2wgr-E4(X0PTBOBdo3E>yfr4>&lR!_0O*>YkcOT4T%MnT z1VK6In{PY~DR^6h;9<)v!dA-YtMN?}iy!;bvy`(C6swUz8fCZE;Be?w-lLsxAN5N# zM`MmeOm_~Up-Hu~kISKdQb=ckQu+Yebm`si4qlpJqaR2AevhJS7Vwbv5JKwHLR+b{ zJ4=`gvHr%7<2XLu9zz0cdW?6b+eW^2mKd%0HhNnoU%FF|DQy2k~z$Vm~4ecQ8wk55Qy8BYUm z*%h&GbgLm06%~a~QPWI8a zaLZFP&%k5GMR2S<^54CV?iT#G>HD#n=lx;1m`S~NkClJf8@y@dlx51+uB!~NB&Hpo$ptj0l!HYojOk@^*bSL|6d6zW6<`?-7=a}9# z)EzfplrH57@f7C%;lej2M!&swv$|_|PSmY($F`3kJc8pbC!QhytNZw)M(+{w6~SK) zi+#KWjMv?{FZP|+<=ee}q#b|EpuF1^k35n*e2`w*EPwA__W*yux9VW&kqW-o&2P=) z{apM8$3N*^R=wYQH`VAmBia~U(#fs$y%!BZyDpn4V(z~^w9_Ve&o_CmY3Av|{L)&L z0?|;Mm~{n*kl|LG^zxSVC0dX8HT4Dont9v1otEC3otG==-X1uVCIF_=6#2fI&y9I^ z%-4)Wy=jCs>Hz#L{4z4kCA=!qzt}taB!bzBql7QM)0Z_s#iWcjWU{|^9{c3i)%Uu` z2N@97Lqb9xqHYb-rk~6a6xO*D*D5d;ao!A)0*W(UXyr^Y-vuefp^F zvZtN%CqWnQqXScA=#vh_byw2wT$$K^vlL%!mbnxC%dT|RrS#tHZs+w^p)wBBv>hL6 zw11bR9SA9_{&nK7d9ht`H9JMk8RR>b``3znar=+*cMeuu%9JJM@63xsXJhJN`4v4o zIyQqL&s4uR^0^492JF7+RN)!@^L71j2h@7OgE$>BeqBUIUZ9|Jz4lyK`gJc@&$^m^ zJ+q;CdVV)$(4@g(N=j7acD};tP@&Kur(pV8<{zUUSFH+%Ujk7vTT*&@j^FLP5IH7w zOq)}qx@Ug3^JKZkhv4@O@;L92g(JplR{mA&=-Li2ShE>@6!RYiXHhT19j5kYN(sI=G4^ z{00~P5BOf6PDSV4ycC(k^FM)qD!_m07gz!mS61TfJR;a4Aa(QtxemBPu=sUy`bm3L zO7pP$%LA0Q{fHRBQhuG?pm&eMilwR^Vz~sNp)?zFWz68vg^dm|8P`j9%dy^Ia(X0> zMHeP*@%g%Cr>crc>e%p!y+=QB9a@BRE&=$H5bfT=Cx512^wm7)LL$IV(_o>3SVQ)~ zawVUOrghY7f$8^u*yc`N@}H*G&KWWVa6I~{)9rT=1F3mulN;HKmcM#2dylY}&pTfc@W4XkbD|=e~^zw@HU=l7Sud zTG#_6_sOrz_w_6&%BmmW=g=Z^!ZE`8!j?PSIP-p|`rvCYK~7)%PZJK8ZLVb5aap zeh~iLvxPk&e^2|1L%0SH^d94{hdscGF>2RtkaKyM?ui+0)$s<=WMGzA%tbSvTjEk? zYU@OQGFDzx5_v4cejW1`X`5!zgppzl-Gn8t;ry)&JONT25%vg1?Cf;RR2${{=4$~} zbxK8i0ayvEx4MMSY4Ipiyf%`;iIprTt$*K6nyq*)U6%e|7;cP32A$`a?b&;C8^RtF z2LeQJT_;~{eM@oQ{P3Fl`Ssfs9IV5%C|J}tpPz1sTXIj3k?ZrO?Tx7w;aV9D5G{IX zZ#E|C)0&w{>A%#$Q>s%|P8Be$i(_mlPd|>t^;#GMKH!l^9-0D53L<+#Xc-qJ4)IbK z3u6;>F#LdL*lmI#ZO8bEH->59-8XG34bnsLJ39FTwA4zt33J6$hO8Zm>~z5mB&^za zTq)NOfXJFZJGj1WIW0ZPAHzcgDd>{np#JUGd2Zq!sfAE!(A8OfsFSpgg$SD}=)9%rJ`pAYL$)=dQhaVnxmv+k6(2V{U zC0tq*k@OysFRv|5+Ol&>xuIY^j@7cLeM6BZrSoIaWi}RNha&{!mIiY<~C1quQ( zQy{#;1${{LeEeG`_YCuNCqB|8-YI$DuQ9M)EvnBZ8vo&i%v}tdhzVTnKN0;_2Bk01 zUkMT*q`r1&V835`ZEhQgSZqvl3bOCKjeE0$CxH&%|4gTkn&^mq3KB?B5-HtU3!n%QGnblY1Rh9i|F}ej*($q8`~G50n=Hu z!d<4W+LjkVh|q~EFEq<4Ha^jFW3Jwr)qOM?MAyXXqn60?$6v0UFy#Gqbb{j1+S*#4 z2UnP!D!_6`v;#87is1=~9|>oYc-ogYgB__Ny-^`3_v${XWt~Wimb3fQ`;iUXxYI(c zWdlj52wQ_^0;A5o&&{o0?FLWyAkGlB=t`OvYK=ymHm?=S_@_2riWuNyGi$Of8I}vU z6lft#+on(w0_{5^at5{8q9^T)x#@#T9BPe!OKe3~evUA$n*ue6^#>%OA zp>2o587}AiNR=N&%6?F%HYh#h4b!#ToWy^IoKT~}$m1fy=QhX=g%`R~G@q!8c99u8 zN714M>}|!u$QC}lY;IkwRL=Jsi_B6u_F^#=HT&nEsH6F0--Oou)6(CrGGWD zyyEj0an6#Fr zkKHTz+2v)cl=pE15>GmnL}yjuXN4bD zgDjybm0GC+{<8i$x%#DVPt5C*C<-W&5>PW^S*J;CNF(id)95e4vISjgm?~3V@r>RM z4J?tC6a5_?9@IIqjiF>e%Fc?Ys++Z{y?g21R`WUIb;ZWj%T2-}f6I3!Qnw_YFO^0e zenLQEWWXuUUjFs>5#7zGj6kKd3QDK6C-3x`c(lyj-QZC5b z=J^YGg~V_z)krOyoR8x_tt6H_61TTmyh?NJa3NEvlYe{6sy_H8WoK`>P;^7oK7Z|# z0eqUr?jo5Gk1&|rknVQhnH9i`V7L|e!30ZsX=Z9_%~Qm+Wey0HV9tV6`PF1$h0fNw zKcID@t)~~!7EkwB&y~YvV63do)?rU%PiSbv= zhN^s4&8L=_9MrPdSi7U^+BAchZF^>lP6B~LXIZ4O8uWB2cG;vP5}uHM!$~l#7#LWsSStku-lDN!lHOf?sxq?U5R~Xrp270F4auzj|G7YOH zR*hO1|E0>TW!IGYeK)ROFUXNA13oZ_SUYcN2j=3(a0=6iNZVasj`WDSaSu{)zX=j8 zI_SQX%rn1)x0XQSGnIGp?k}tu@|S;MGWI0Gzk**gK?NEeEEYS|uBZFp5-p7tejU$J zo<+gGLxPQs4Vh8v5o%lDdZqBe1Ow*|@@&J-z9F-s4Vdn|dGk?_rHZn0^vpI@T++?Z zLi@BrYhHt9TyS840?v*8H|{_2WmpurQp##-?!tHRi$PY|pus^03O{nv(xF-<*L{fS zrswAa0h@!1+S!rjJX2SIL@Ha-)^Xf304EqFxw*N}qt)0bI3G^Se;vL9+Xs8& 
zIfylZ_|F|CrdXNoB-5&OH(|Pd96SB$DFA@@IA~>4{-+1l2q+J^_T4nq(`y1Z8_*5< zLnq1GdgnhY^1~uHPX*8#PEAhwc9>FgTfI(p1qG;)EODgXKi|{Ka}BE7H>e;}3?q{_ zw0(rMkqnth;Wn!*DZ%c4Bzdy<43K(LV816b< zp!G{`oc*?U1x#RlBu*C5qH;}vVFnHuJo(nS=8Lt#SakdZo?c#%*>}?mdH!+x2%!1# zH+OjZB~Z5hR+CpEJ!GH!F@Xn}G6*5X;~IU+Endy5LbXhAoL5d*l@c{CL;5`PmqH0m z0CJjLq{qR4)nE5}4%!_7@fR#qcR<(yN|U!EWsspGxOPI&f;AKU{2EN(`+&^@p>rIM zT8wm88nO|e;Wy5R>R|{&wP%hKAIs(26{yqziDWYbI$f#7VBl^dHK{J34IQ6~N}TOe z?xSwIaE*A#ox2HohmE=c9Vf`voEa+@e$1bjhv2tSTvk?RSRs+E&Xy!`y88O+H2Vtt z%05HGFa{dj1KhnoFjNXvHf`VX4c3D64Kx-H{`CHvVix2FF$A>FvvYDfLjerj40MMAtU>!Wp-E+Jn)aY1Pd7VT*AkQa#0@#uuvI;4}yL zz|~h-_WgTU`#9ggJkl<$=Vf>gYM``RAh@iZz~xSX`IT9?3D?02{{W_~@PdNDFt85h zr*hse-VViz;Q+DoF&sL6La4e)`tH45aDnE1P;r6!61Y|3W1+*D4z+`&O1$vv$a~ZS z<6-b2gqT9watsut)WE6{I5;~wSF7GHOm+RM9fPvp1Anic?;yXx)f+mS& zWtusUAsu0ReSN)KN-#e817w91d^eC(KKP37frmp-Hnkba>IghNfHxs+HFn4>!kGk- z>%FR`2Dt8Wva;cA_+c8po}QjaXHEDVXo5<(ul9jWj3mq$S+E8+dv_kmoTdLR@DzOO zx=k3!osnB*7Wh+^O&J&%EODXJ@hgEa&0E`>0Z>Is z2iIWsha z5fo-qI1COBv#$cja%JJ?0snT1i&&Ky`M^y7J7&M>Lx-h`1AWQj1f{@o1`pK`x^-AU z>5@+`b7iA-p^Rq-ACD#m2X5Afg<)-`{ECn2rCEG|{s4-QiU36IELwf{m%-Q6@ZSrS z8)$DZeG#-wOg8XQ7~UJ`CEyhZz2&jUc|zYFzmrA5-4Z_aqy$Xu-|R&G2pk$7v11 zr1w!?c9gfs&+E0tat^>*s+196fQY zBhW@hBBCrYvJ-OO_dF_#3t+F_)7$%yFbP^y>|&vy3fuou1zAoc3UWgB4Gf&^xIUc= z7=(g&x6bvn)$^dIpnn83??ce&$`tWpi!T5JXVizO@^W&+*|0v&Phl4}3k*Dm%JA~a z3RsbMn+QnXCPr@N1YnCha&v=J3mB0iL+yfM%s(^g?!ZZU@TT)+f@D+t8 z^1R6@Y;^SB76JujMnyQln!ZrHaGC!0?OWjMEZg6}6hQ6Yd?in<*vthFzFmMJ%!LMq z)CItqDPybMr10dwS;3ffTz&cWO)Y^jt{3+Yc#X8WuMKQY6dC>7jnp!P z#}1D`2^0|w7as<)79HJv4&tk2l1?m{V%XP)kVkenLzo=896SG4fFuAyp+N+40-)4W ziPVdz*Z~p@u9H1*U<_Ou;|%I-WaZ>;p}$|*M`9G%6f-2BzVq4I;@j47MM`P(w?z2FsJH}AlO>%e;uL|2Nv8U9QyI`@q5trxxT*63{OnI8bK0r->&w( zT{sg-S|RWle#6-<#LrJD5qJV-&f>10e$#_^c%?BJmN@KyyA#9$0E@o$dbSv&#XR~o z@sWs%YKZ4^3(TG@@j;%?a2vfmDsaQl*n%7eBO?^^o|k0M6$wuli?ITt0&sqo|`&dL86GE8=62Z2=Q52UPfhQ1C)xN1Ee5uuK!@fCvC^k1-I6WbYX`$3jhg{Jq(g2 zWdNiQT|g!81XzJoz-8Ah$FgEgV0R z0De9};*Cr}6R!L*a*#k8?L7s5S}?5hQ^<4&LP8E;WHb2J8yn9btPSc_7=q_iGPfD@ zXZHL!)SW0&Cy55X@8&-}u+KgP{uva`paTuO$P5=XYXQOm{C-xZSJi`&6R`_(4bI?u zk4@B*x9nel5%G=R-^HPClu(vaA57pwF<`u@(yy8zg5r3oYqt#Wbq*d6%r;*DZZNq( zeAg0u-z=1r1^}#en;7oFnQ_{OG)L=W2$9SJa0`lBy+9B)H8Z<{29?y_m5cLr$&uk< zT^7Qn3J(>e*(9ooZwG_+$cth))-(uZH75s>*gZTvNQ@(HXkz8W!=veGg5l8TNCp7q z2B8hdpagtBSXTX_4)|FEv*LMMJ;7ZtABX?M44nkf08Za+$YAOGhY*s)1zTRW?{Z`> zb%4`>n7{Yt_??tN+3)#zd1wfbw~0l5Y&IK7B||^BEvtF*79njX150=XY03?d3QB>< zVFPq}4qye&ls_Orn@=9b50jKR3%-~+3pO4@VCn+m0->t3>};gJJCgs5yyIrv>#2h& z+}R>tQH7fl_t|*%Xb=EZsa|NngTPxYf8|Itd}+%RB>?DwDQj=&wGu4+XtqV9MZe zWc04Nvqs$lQPmutJ^Imxh=<`QT%LRq9iZz5MjTbe#aKRccBKD_phWw+)n9L9pV+lK zIyj7PdIBTS6zEi-S_SBZbT_Ie7X}GwA%b|}{sUX8fr>}W&60^69z_|Q@DK-S^#EphIkKaPG86Yxb z&k@oZ&}Bh{(nZV@F3ZBnd4Ys?(97Z2FuS_CN*7cOn-@eT;JYo3m>)J-h9_Q`o+0K3 zLxGC8j}L3HBmFI^S+Az@+?S9F-4%jH!x(u;ZJ@~ssdSeF)&*&4tsoEh-l!Ti2e}7;i)IMtv(J&Uc>rC%W33&YB9O-zTquO_ z2qHjCQ;3fMpdqp2h9QBpoeFZgR)R_(SxnCj3c>cjyKP^zywZa}hKMC;07#g3>%5@o=5cR1Pu`j47#vtTfRTt*djt{hx20KqyX&X$7_#6)kPQl_*K6TOKh(;l z*Ai5J!<8^SJAR`@k$}Lypdv?j9>~?lupGb(LJZ{I;78#(F=8$M5JwxqrVqm>$ANvS z97tuzMTyKYWE~y2#A&>HWMVik#XOhF{PJ~5YpAGaM4c8ViM37`AybcyH32J@`YB4I}Zh9^H%o(R5*y*q2lt zgE=zodlg4SNtJ|Nl`O^TaW8<2j%4v8m4n~}4iLnYE4Q__4zJ;XF^Pi;TMRZ*cNqdx z_E!qhL95f5>9ylpG9Q|SHD_C+dejiXFv?!`j|p;b#4LykY^~Yb8-Nw?K_cVnk6F;2 zb=%P%oLDtzF*ry}EFa$`Fe1>@>~`LAs94BIXBrt88cZsBu`J_vlnZH`$rMbvPy_F+ z!*Q8hFJgqAtcziqn2?YV`yEzQ=az2^UpC9N2Ony|x4~;B8n<9_auN>9hKQq3uzqYQn)_wg) zVvlM%n=yhaX{&vfzr)&gO)c8`ArAVC9LA_&3wSkc4P+(}B_)rSHxH}5c!Byt$)HH> zc4%26ftFyRi$dfJ#Kun*tzY%py1Kg@Nj{0CM==NlRkGJX`&s_D;ovYF0RuPeb0{6B 
z9x%O*QFu)!jio9$rw(SbIC1@TaV5A8QvK1^p4U3je)6bt3cA`mh%*eh=v z8qnU><>l2ijBTPKUL?5crx325QgAk8=GNy+(JCn^N7*?$wckA#=i+2%mt`C%`;vi4 zYuH4`UTv1agCk-nig??t)mkTTM?I7O{L!Os0xGHt03~r7tPuGpbk*qEsn9Cc0(UY@ zot_XoCl7hOk!Q~`xuSzS3U24-(i?8Kd8ivFi|A$lO=MBaP69_A5!bGW8?JOPtwpMe zT84**yOPs=(2&z$i0CL&Q3e;Xteb~{&>3cE?R#BzekKjV zsqt~%56qi*!cU&5+U`8BH;%S@C|hD$^*!E8{L@Ek9sX8{s`>cInunQLj8$$JKI4pj zhKU~QY9t6L`JG{Zg~6mh6p0z>YX{?4UiQe(Ww8HDH7%{nPIyq+(&7jBgW(Br7=GGy z9>biwOGx>QQ2`HMiBg%Np0ItGSkv@ejt`wTu&)0tmK=6`WCCT3jb&^toh_NAY|AG6 zF79;+`?(m(JlQPVUB`fEJd{dZvKkKj$1U>^>xcbkEkIqIpGgIxv+d~$IA}wqbD$#t zdrUL@pq<0^;(2-b@NjG_9)1a@=oEc$Y|p^BmNNUCVcB=%8W$7O=|WYa77Cfi1&XW( zVz2Nyl85|H%RUtQd!>n~kz!O-#2aa{#UT)O6`U2-!kI@g9CHSxrZX&r{WB$NAMQIS z^ec^DJuNkBpQ{U3IG-B!e=;H0U9e|4}DOYjt}(jDYGW zT}WRhea(+3sbX7jHFB9A0Ct%43v&}93N|gJLutNR@oKR{v#YZiD3(NhhYRlSk zIaW+sd3ia+CeS&;F&`eL5h0t|_R!RT?Iyp!mxywm)}+R)LqF9k1H$L)@T5QqDqe$y zKUHO^5@P`(e6Avo59M#}86Y)l|2J&&-`QdE2aV9o1L)Ms$_nfgu3bpT3kgoSD)i5N z{jQQlV4r@)?l7U2WBUDe9=(iWZt;uh8n?$BgOJcV2BZvGbzC1#xZ%gF1yDBY;ds2t-5Nl8`i64-#jOknHC6Tmy$= zfL{QpVB&m-#v#a>sY{$%HAh393mcM`2))yn7o zeuyE#b27+MFrC}D8Gm={!!MX_G|=x1{fv?jEkWOP3Z%DBkXBM>TkHi|_ngTkszaf; zg6`UEcm7HV;xX;289f~xcmO+4yXZur6&HU3C?1MFEf8~mFj*|lwfBbzh33icSD!3U z5GC4Y5E|WMX1-=y20yd9E8JzYdHwM8cz40E1`MsD=XPSFqd~VOhzXgaJC(_---Or? zS&D#o11wAOz)pclM8uFQ1)=~!=wg5p6*BF94d6cYlC;nY0hi}9CJC+-r2i5orX7t$ zpfG;j!NI|K&=oShSFZmUW=sV#_>3ZgIfd<0bH5b=nuiY`K6vnK!Nt|i&d$lH9a7a% zMC{0T2f`vqoVssm|D2b{%DD>(rZf#?IDj-~`9xv{?@WMhM12kz)r!(moOX76EPv*T z6Yxsb6@3A~l^@`MkPr>zRFLrmQVJfDO>f@3foPeQf#E^fRHb3V7E%k*Clv~PXOQA% zDugF42|pF-h=j<1^#+bukV$ujgm>VHn}IDjXi< zP4AFPG-x*zBtc{yGTz*f0EE7|u4oBMGN=WdLD&YVTgbYi4{sd#qq%AlHb71q zqC$lNtyc;%nv+ljlG_%AggA>8te^fDgKLHP$k*?$K}#RP;TQRdQTuR1F~i27(b4>w z`UMZnwJdxl;%LPL$gA&ziJC%!m5Rz|2-1Nr{IV@%j#CI?1WU+JLjD{ZQsq!p zWe+TcKmKzq<#7?UX0xo#UcWx;{4ckuJq}go=k{=9@ttcJ;zdpN1D~)=a zxO|`b!*vDc3lP=y4dpD7;s&NxH+@)F$o|a@7AN!s$|UKJ*gj+^=y%wmk6?+*vK^|S za7*rcBHKSznxmy|BeR_N(yv47jEyYP(Z`|+wqNRayH?XMOB)QXhT$&I!U3_@jtA66 zkJk}p?D>kDw^Orxq6VFv+`U7T3KHX898+^Xe__Ogb+m87dCa}#0KQ3_C^YTH%D`T(=ui4x4a7uSR2&H3icb#D zxQ%RC#*#LY!u&d>#q@8JvTE8yw=BB6ZE>ISFL)uE$N+s1Hp@pg3|acGxQm|i;0)Tc zOo}8lV4Boa%4BC_NhEjQL7?;M*w9zkKU-qwPwT>BOJ<)URq_1(o16Tts4Rdtssa>!}-7wH!J!kx7OHc3lkfOg_&Rx>X6UK_|!^ zm=(eSCMDWT1lWuYsozbzf z*6PpSQ$lfW*ylTHzw};#_J9N5b}4_->F+LV)05zq6<2U#Y#t%OABh-orhX(viF)oP zo_A^>=@uCj3wNljM-d}P-fSG(yM$Ir1I@67bcXLc7uYnAH6yqR1FyJI>4dh?hEqwy zeHJ~u1UKP@4tL6$S_S5|Ocu|-^3ADI^r}{>0$$%HwW&u1(<8X2Ui_UKbDa9Itsev8 z3+UA(22P$MMjuI2FCJMyCJ4DCmlgRYS{$Okei)4;!$A8;6*qAsuq61}Sm3QL)qqf% zW%1t()!o}n`H9;}(=nJ8J<`G1>~j`<$-e^xxY{OqM+lBKPyVa(8S%b)I0`e&5?LZv z#iR^HlZY#t$wv>9p<_;eDH5SS=E{CExM|pUBJ0g-2Fyi8BHrB@re~?|vI1JKlqn7; zfQ9RMCBI?a#c+JGrOu=O51|7^8ZVhqj)G^k_CLXnWKzJSC&Yh@339N23f8 z*Kb&`a<+m(b6)Jo%e7Vn}*CK(*R#jY|D_xi`qQEU?b=uZ!brGa1u*DyQ8y z4Jxfp1_M$0XI)0F?=*FRY45zXA-wR@zRl8f;K?C_{BAD+C>Lx)BY5Z8RMqpp?;+ax zoNLR>FR3FB)MODDJ*S&$vTed3kJA2zc&O0HwuGN1=L@m=ncUS&F;0L z|6;|UNoYm>XR_#;mJzL{)kyZdxa+8GJsc#mg>`@Uxk}r6W`PCL&QEzCQQhK&dBmNd zjt0B1)%=u>>NsU;XXQs}HZQc}`WUKwXhH@tkQG{~hX8DuEk^Yh@y&^-HxH}*OA#3dvbbsrM>j*0C|K3rFsa8S_nH1!V z<_)EMpsM!N6`wx5(Kv@{$KT~PE9_Mj~n$!bYu{fVc%|cVsUm z|EzC2L8{+BZjGup^ps&>Se{|m0hb7hYmmqu1U`otGt@T82$@d^v>}wNG&w3f& z!Wpl7AiD&|ZIACOyTBGGyHA!d@N(dQI>P_FkxR1xABx$&q+RUu&6F{e*S984uA3^U zOkiN&ns`PVBjf+7MUh>UB$b1BkhVJ^G#H(v-VCZ^EhM58L*;_6=bnP1X21OPW^pU# z3`;-1qlnIDo7PMU{)_ybv*C`PiwJF*hZ^jdTv*0alq~O9U^CJ--v0i2f%bSanHST! znCtdURjZGUg+f{Dsx%QuWHZWzMI=oZv7RM6_gUZi{bTi9^%)jxk2FamuE?dK<9W4? 
zH$yLCEWEhXd|Fx?W(!xwOol~tXO-CnhqBQ2)Ynkm>f`pltoP{>`HfjoX*;+)bSF-d+K9mnIU@h;F5& zaSY;CD1dbfPUczQeM27k+t}j{ZyF!i3F~mI<>u=Qk_@n`@erU7)~2rD-f^$v+edin zvL|S}gej@A;(adGSuJ%iE8XI2&rwKd>@i+1YiVHb5aFk8qs`c|Ij*^^UolPo zalK4&BTc~mUHOC7M7mZjzVLI4N9_ex1+8&f%bpxB*4OzoC4cM5D@I}ScM-T5;yFz@ zvTk~YTa_J5K3o_bJHQyqN%Rm+2IWwj9#+q-q*4nJalO>kN{+Y5k)qm;lv6=-~04~<=SV9K*QOn^USFPMhd(e8mN`l zzy4gB{)hA=f9b~idUQ(n$}Gf}tDwY&I{_@6J~kwl>nD&r)V#}|q+EQ1lO@^q1HwhQ z-{!!tl*=k?U)$pTH{&zuyoVM>50fsxf6|sIX?k5}y+6ue@wt}NjUcE?3Y&ZHUDi~s zomR&6fuop;A|>W6CCkHK7RF=s`tLI4`~=91*nSXdO%6PklqyTS~+cv|`#N}aHL*ZBm(of1g)III~E*c!xjV`RL z{rko&_#=dGD^g>H&D73NfSP`^!NB9pRJ2!+MtFoaQQL37nIR}7LC2)sFfZtT+d|^n zKy0`4vn}Y?d6QW;q0~Tws8D*hJGz=9Det2*khD{;o0*HqG!!w|{ZqE*`CdFlofx-Y z>Pk#eP3^PndjaJZV{s1lwPG5CVL^WnL1n`S?E5f{|MqGsA%d}LzT7j)&ydf#V*)g`j&Tr~D!gAw2xPD23WVAgvM~`&iyz`W5Y?isNopVe-h{?$G#* zL0J@94x3zbM6jC}Mh|7M%L9`~OZlYa8ilmFdb*g`4N!u4k|O`OD*EA3Hb!7;>&{#^ zTh)5{HWHJeeZtt@m3k%2SBwK|UpnX{eroY7WHV}*zT~5Bk~A;Hx=)pj-p=!Wx$dPP zF1)bN_Br0^#yAOGp3FYRzbLUz+l<{t z`(|U`XqMU8#H|FX&F#K7t$pbX>Sa2Ir;y&C*~iXP?+mYADPMSd_P*BXZ3^nmwgjAr zv`-7m42YYe#BZ;K;rTj-BBW*}O9i^=GHy;Aii4bwNb(wDB!J+cJ^0_&ryPuPAE(fn zFPJ$Y+R?~+gajw^aFOf1e&WP7JC){jshQ$*d{xPY=CY>`^Ac0-qZ>*|5q3IhUe1SC zk5Xg~`O6Z9w(iQNViTA*G;;mg7oi~;QVQR=bZNBKzZiLeAGfbm_szYVZaGa^RbJsE zrcGn4;OnH*Q^QB|H(kW5$Syb*Zd%K?dS4I-)4jl#-Y{?zC4T0SbIGkhBPFRTCc0Fj zjjz&ixyqC3xD^vK=DRgMtufg3szUsr_we*h-jM3L?V(4saU-v!fUg&UW4aSRY0xq) z5$fWXzZCu3X{yoa49|Yx`|z;sEq*x;>Aik!=4NOYn$ckpOF~%PDII-mcLuxpCo=uVg;5DPK#$6R{^G{TJ?bICuF z6H*MN*^<&!RMgaZP*NA)2YmI{X;Om1QDJ2Ffm+#=SBkOdhcUOQW`- zNVnnjnC5b0Iwwh&b+SNU=9Fa|{);;wJ{I>K4ck2Vdp!}hICAw*_Z}qIPp^o3eHp3A zBlgYl&!@^v zGr80B>XK0W%75cGr_RO1naQB>ScAHQP|1lTPA3r-qRU|EbLXbsYFv^!g|l2xb$a0^ zwGQ$Q{Kd_iLylAITO>>r(WYXey!p?O6-p?uySuyh%HLD%j{KhC<6>N5I^{aAZ(w@) zdv+N|l$|G#ExXw>ZTeE7!E%AlUa#qtl6@wt&6;8uhu@zbedhxY#mk2?B08MkB69e_=F}P4EO3TvBcEn z@0GX#A|10zp^8^#G1&{nW(K3_+kX&P;uPc*(+i01t(%?4+fSQ?8-pISoNJy&m2wM_ zHw9sJkck`~_Db~Dh$`y=nosF$JtawB=m=Op-idjo_rOy0WgEeGTSGaA@slrAxHE?5 zuFLtSGVd)vY-A>Ft$i;Uyb;~r^VKY7!z(?)b9mZwJx9_~E z7!-Zy7~#E=mXskKtySCUHe{`gQ8P-=5Z?W3;O-=nzr#T+2^xwQroIQO{m=@b6!9)5 zc<>8>G?riQk&W#U!)hh5Y(FV~-p5bhqXl(O&OdvjdYoPNeKMl6^<-J^;Cnaazx3YkRx*Mq90jWSC>>iE_=lMXGO5m>*V1=~mgPiM7qmNKotjiz58qI$+v?6a3rssBuY|Z*VL)?po00z%W=DvL4 zxxP|d_ND*q%pbU5KV)-B27x917s?E_W4y>D*jdS6VA5Pc_W+d_QQM#uue=GWf62+k z$m*IJa<}{a@DJEq`_@wY_1K#>a)mT=Oq^ur&YRJ}(oTxMb#`Pk|A|5T{wo0+Z~HH9 zgNO0|^bc8dU)7{kQv?l7B89T7OvMCY+%_!tPae^>iT6n>RsWBssZ!omj@slPv|uqb)lr_VBNB zn^E@Q6skdq7*&X6X8D_(&+pP_U0uZZpW4wg$jN6Pn$UdUd;k-N>KYQ~?~g7u_7;wn zq}qx7EVLoVBZ&c%BYw&p7n8(Q<2Sxqo0=Y~9ZTp;Xb89RS6=W~i^_5os1(iAiauB0PMkNgngPQ)i*y=ljkELNC z#RP3px@|tqjdHeHj_r>6QX9FL0ToR_TJoEo6|qB<&sj(Ol-J zdth(#RS->rZTNH4qbqMqhdKEc>h1lKujev|@;fbBjz`Ay>gr{TX|F!=cwYIuJyaC6hamSu>?Q2`fO>gPi{eY#-8UhQOx+{grqGArO1gI+N|smnWk;F66d#s zls0^797HEc%o6vPzx&j0Hach-_y}hS>kb-jhugB>BAYy|vAJ9Hg>o&};HA2<$88h@ zVxv5n@vEC%@GUpafX+XE{u7*vS}2D*MG|V?Q$sN>OHi}EQ%aP3+rn5g>GEF4V@;>& z-}Qmf-yoT?-&`0aWzr9bfY)tJs)Xvs%y+}sE#yzGW23Y zRNl*0TAvyXWl>pm?`Y(Tu^IHZaMf~3a_5uF)fx)Jl4{Dz9$0(Qu%%7QR`?;Qb*#SG z)B8Ca=5>3wh|7)pjO@0#=*D?(`UavSi%En38^34k{WpHU|G(mQ%nvB|OPj;LS^{hJ z6)?}%72Vw}xNs^B2G+J78L-MJ2z5aG7FzB^*W!4 z^G}h74+M+K&&SG1y>3*zn@D#bLBy!{_#$isr;o+TCLQ=q=x+V2BF2#o4QrU4v-Xe{ zqb3gXrITE`hF{3Lf9aRF-t`C%;Yk5;C-bM#2{geFD+4owzOab-FY;$8qxrl@J+Y9+4iu9)Z~xSJ- zeAp>x(^)``J#==8;2Eq9F~t#6;n5cUdCrMIoM3fz4`16r$w4!%W<-oa-+L%jC;Oi7*) zZB$^3NoJxZWM26r!ZBY?<^7lW{V)*|y^|NX@Py>bY2) zKAinAY_LoBt@ekW&^%Ng4U%UAsf!XJ}j_B)&PI=iEdqEyKxqju* zPwyU{3i=e}4V;#p+zC7$^2Unf&5fcH5nY0D=vIYqo4VRmOfz$XAsm`g+vg<~8fgs(^PoEtK1OoNar- 
z9QilfZZa}#Pu{v+-e_J|ahp!&+T*)FZ&@HwvtBPg zHt$YEi2c^{&+Y!a&%bn;;YrSeE9Tu-Ps^`9efIYEx)*m1^VjAUEHppYpL-`@-TghE z-*3;?dwom&)~pZy^8ZfT|7*Xz;jrq#18Dt-Y4*TNNIzEm0nSD?^GACxt)KcNyN4^e z=0!~cun(8}vfKCdxwrP_v(wrc|5tVJ#c~@n`33Lav>@*OmKAlI?)lVU=3egB%<_1499 zVp5r}ukZWw?~nbzZ-;IKm;ZhEZfCts_*{A_@LxLQqT6+z$K~w z_pORuf8XH$dI8RUOE=%JuzeeE?%iF!U%v8dO+rgt59{04(^pq5URWQL-S=nP2mAlo z9Sp|V;a8?cOYi;9uH3kw;Mtc6Z0&vv1^>NSIs3ZabnW?diT8xheUk!)W5)-#A6w)9 zA3nPAtMsX-{ini1YIk1T@zwdo*$Xr0Uv{@Yh@&%cIG@eIp#pfbyPI3v-wB60L0f9~_X3ybSmgn)Kfj;# z)=bUl1aP7M56~5}z_n6wVaX!Gk=kp+zwcO;{{9|tX*zK6=EskP{kQL4iJdz?vh%F? zvERyZ>q1X=3Z1$3v@`j`y_??WOtmdr#g?Z0Pd^6jPq424_PF8_*8ar5g2zT>z>IqS z*v3Cq)|;wn z|2_?)QxOB{R8%;A{sEqbexa^1CpzK`uVq?j%Joe*uc-;HTN)Zx)0y&-xizl$<6bONsW5D5l`%K_w zzdyjuX;~7-0>w9}e}8}fKX9SEPu!!q%PxHW1X@pGQTQlwOAn|zsk0n$~^WJfGLP3*2)v5j(AF?%Yd83_#%N>gTe~DWM4f0AGpO diff --git a/paddle/contrib/tape/function.h b/paddle/contrib/tape/function.h deleted file mode 100644 index 8c9694d9a2..0000000000 --- a/paddle/contrib/tape/function.h +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#pragma once - -#include - -#include "paddle/contrib/tape/tape.h" -#include "paddle/contrib/tape/variable.h" -#include "paddle/fluid/framework/type_defs.h" - -namespace paddle { -namespace tape { - -class Function {}; - -class Fill { - public: - Fill(const std::string &initializer, const framework::AttributeMap &attrs) - : initializer_(initializer), attrs_(attrs) {} - - void operator()(VariableHandle var) { - get_global_tape().AddOp(initializer_, {}, {{"Out", {var}}}, attrs_); - } - - private: - const std::string initializer_; - const framework::AttributeMap attrs_; -}; - -class Mean { - public: - VariableHandle operator()(VariableHandle var) { - VariableHandle out(new Variable("mean")); - get_global_tape().AddOp("mean", {{"X", {var}}}, {{"Out", {out}}}, {}); - return out; - } -}; - -class Linear { - public: - Linear(int in_dim, int out_dim, const std::string &act) - : w_(new Variable("LinearWeight")), - b_(new Variable("LinearBias")), - act_(act) { - Tape init_tape; - - std::string initializer = "fill_constant"; - framework::AttributeMap attrs; - attrs["dtype"] = paddle::framework::proto::VarType::Type::VarType_Type_FP32; - attrs["shape"] = std::vector{in_dim, out_dim}; - attrs["value"] = 1.0f; - init_tape.AddOp(initializer, {}, {{"Out", {w_}}}, attrs); - - attrs["dtype"] = paddle::framework::proto::VarType::Type::VarType_Type_FP32; - attrs["shape"] = std::vector{out_dim}; - attrs["value"] = 1.0f; - init_tape.AddOp(initializer, {}, {{"Out", {b_}}}, attrs); - - init_tape.Forward(); - } - - VariableHandle operator()(VariableHandle input) { - VariableHandle pre_bias(new Variable("linear")); - get_global_tape().AddOp("mul", - {{"X", {input}}, {"Y", {w_}}}, - {{"Out", {pre_bias}}}, - {{"x_num_col_dims", 1}, {"y_num_col_dims", 1}}); - VariableHandle pre_act(new Variable("linear")); - get_global_tape().AddOp("elementwise_add", - {{"X", {pre_bias}}, {"Y", 
-                            {{"Out", {pre_act}}},
-                            {{"axis", 1}});
-    VariableHandle post_act(new Variable("linear"));
-    get_global_tape().AddOp(
-        act_, {{"X", {pre_act}}}, {{"Out", {post_act}}}, {});
-    return post_act;
-  }
-
-  std::vector<VariableHandle> Params() { return {w_, b_}; }
-
- private:
-  VariableHandle w_;
-  VariableHandle b_;
-  std::string act_;
-};
-
-class SGD {
- public:
-  SGD(float learning_rate) : learning_rate_(new Variable("sgd")) {
-    Tape init_tape;
-
-    std::string initializer = "fill_constant";
-    framework::AttributeMap attrs;
-    attrs["dtype"] = paddle::framework::proto::VarType::Type::VarType_Type_FP32;
-    attrs["shape"] = std::vector<int>{1};
-    attrs["value"] = learning_rate;
-    init_tape.AddOp(initializer, {}, {{"Out", {learning_rate_}}}, attrs);
-
-    init_tape.Forward();
-  }
-
-  void operator()(VariableHandle input) {
-    PADDLE_ENFORCE(get_global_tape().HasBeenBackwarded(),
-                   "optimization must happen after the backward");
-    Tape temp_tape;
-    temp_tape.AddOp("sgd",
-                    {{"Param", {input}},
-                     {"LearningRate", {learning_rate_}},
-                     {"Grad", {input->Grad()}}},
-                    {{"ParamOut", {input}}},
-                    {});
-    temp_tape.Forward();
-  }
-
- private:
-  VariableHandle learning_rate_;
-};
-}
-}
diff --git a/paddle/contrib/tape/tape.cc b/paddle/contrib/tape/tape.cc
deleted file mode 100644
index 531499b6fe..0000000000
--- a/paddle/contrib/tape/tape.cc
+++ /dev/null
@@ -1,265 +0,0 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
- -#include "paddle/contrib/tape/tape.h" - -#include -#include -#include -#include -#include - -#include "paddle/fluid/framework/data_type.h" -#include "paddle/fluid/framework/dim.h" -#include "paddle/fluid/framework/op_registry.h" -#include "paddle/fluid/framework/operator.h" -#include "paddle/fluid/framework/scope.h" -#include "paddle/fluid/platform/place.h" -#include "paddle/fluid/pybind/pybind.h" - -namespace paddle { -namespace tape { - -// borrowed from -// https://stackoverflow.com/questions/874134/find-if-string-ends-with-another-string-in-c -inline bool ends_with(std::string const &value, std::string const &ending) { - if (ending.size() > value.size()) return false; - return std::equal(ending.rbegin(), ending.rend(), value.rbegin()); -} - -std::ostream &operator<<(std::ostream &os, const framework::VarDesc &var_desc) { - os << var_desc.Name(); - os << "[" << var_desc.GetType() << "]"; - os << "[" << var_desc.GetDataType() << "]"; - os << "{"; - for (auto &i : var_desc.GetShape()) { - os << i << ","; - } - os << "}"; - return os; -} - -std::string to_string(const std::string &type, - const VariableHandleMap &in_vars, - const VariableHandleMap &out_vars, - const framework::AttributeMap &attrs) { - std::stringstream ss; - ss << type << " "; - for (auto ¶m_name : in_vars) { - for (auto &var : param_name.second) { - ss << param_name.first << ":(" << var->Desc() << ") "; - } - } - for (auto ¶m_name : out_vars) { - for (auto &var : param_name.second) { - ss << param_name.first << ":(" << var->Desc() << ") "; - } - } - return ss.str(); -} - -framework::OpDesc CreateOpDesc(const std::string &type, - const VariableHandleMap &in_vars, - const VariableHandleMap &out_vars, - const framework::AttributeMap &attrs) { - framework::VariableNameMap inputs; - for (auto ¶m_name : in_vars) { - for (auto &var : param_name.second) { - inputs[param_name.first].emplace_back(var->Name()); - } - } - framework::VariableNameMap outputs; - for (auto ¶m_name : out_vars) { - for (auto &var : param_name.second) { - outputs[param_name.first].emplace_back(var->Name()); - } - } - return framework::OpDesc(type, inputs, outputs, attrs); -} - -void InferShapeAndVarType(const std::string &type, - const VariableHandleMap &in_vars, - VariableHandleMap *out_vars, - const framework::AttributeMap &attrs) { - framework::OpDesc op_desc = CreateOpDesc(type, in_vars, *out_vars, attrs); - - // Create a temporary block for compile-time - framework::ProgramDesc program_desc; - framework::BlockDesc *block_desc = program_desc.MutableBlock(0); - PADDLE_ENFORCE(block_desc); - - for (auto ¶m_name : in_vars) { - for (auto &var : param_name.second) { - *block_desc->Var(var->Name())->Proto() = *var->MutableDesc()->Proto(); - } - } - for (auto ¶m_name : *out_vars) { - for (auto &var : param_name.second) { - *block_desc->Var(var->Name())->Proto() = *var->MutableDesc()->Proto(); - } - } - - LOG(INFO) << "- " << to_string(type, in_vars, *out_vars, attrs); - op_desc.InferShape(*block_desc); - op_desc.InferVarType(block_desc); - for (auto ¶m_name : *out_vars) { - for (auto &var : param_name.second) { - *var->MutableDesc()->Proto() = *block_desc->Var(var->Name())->Proto(); - } - } - LOG(INFO) << "+ " << to_string(type, in_vars, *out_vars, attrs); -} - -void Tape::AddOp(const std::string &type, - const VariableHandleMap &in_vars, - VariableHandleMap out_vars, - const framework::AttributeMap &attrs) { - InferShapeAndVarType(type, in_vars, &out_vars, attrs); - tape_.emplace_back(type, in_vars, out_vars, attrs); -} - -// Temporary Scope for 
-class ScopeWrapper : public framework::Scope {
- public:
-  ScopeWrapper(const VariableHandleMap &in_vars,
-               const VariableHandleMap &out_vars) {
-    for (auto &v : in_vars) {
-      for (auto &vv : v.second) {
-        if (!vars_.count(vv->Name())) {
-          vars_[vv->Name()].reset(vv->Var());
-        }
-      }
-    }
-    for (auto &v : out_vars) {
-      for (auto &vv : v.second) {
-        if (!vars_.count(vv->Name())) {
-          vars_[vv->Name()].reset(vv->Var());
-        }
-      }
-    }
-  }
-
-  ~ScopeWrapper() {
-    for (auto &pair : vars_) {
-      pair.second.release();
-    }
-  }
-};
-
-void Tape::Forward() {
-  LOG(INFO) << "Starting forward -------------------------";
-  PADDLE_ENFORCE(!has_been_backwarded_);
-  while (current_position_ < tape_.size()) {
-    OpHandle &op = tape_[current_position_];
-
-    // Create Output Tensor, this is only necessary for OpWithKernel
-    for (auto &param2var : op.outputs_) {
-      for (auto &var : param2var.second) {
-        var->InitializeVariable();
-      }
-    }
-
-    framework::OpDesc op_desc =
-        CreateOpDesc(op.type_, op.inputs_, op.outputs_, op.attrs_);
-    ScopeWrapper scope(op.inputs_, op.outputs_);
-    framework::OpRegistry::CreateOp(op_desc)->Run(scope, platform::CPUPlace());
-    current_position_++;
-  }
-
-  LOG(INFO) << "Finishing forward -------------------------";
-}
-
-void Tape::Backward(VariableHandle target) {
-  PADDLE_ENFORCE(!has_been_backwarded_);
-
-  Forward();
-
-  // TODO(tonyyang-svail): check output of last op is target
-  backward_tape_.reset(new Tape());
-
-  framework::AttributeMap attrs;
-
-  // FIXME(tonyyang-svail): Need to infer_data_type
-  attrs["dtype"] = framework::proto::VarType::Type::VarType_Type_FP32;
-  attrs["shape"] = std::vector<int>{1};
-  attrs["value"] = 1.0f;
-  backward_tape_->AddOp(
-      "fill_constant", {}, {{"Out", {target->Grad()}}}, attrs);
-
-  for (auto it = tape_.rbegin(); it != tape_.rend(); ++it) {
-    framework::OpDesc op_desc =
-        CreateOpDesc(it->type_, it->inputs_, it->outputs_, it->attrs_);
-    std::unordered_map<std::string, std::string> grad_to_var;
-    std::vector<std::unique_ptr<framework::OpDesc>> grad_op_descs =
-        framework::OpInfoMap::Instance()
-            .Get(op_desc.Type())
-            .GradOpMaker()(op_desc, {}, &grad_to_var, {});
-
-    for (auto &op_desc : grad_op_descs) {
-      std::unordered_map<std::string, VariableHandle> name2var;
-      for (auto &param2vars : it->inputs_) {
-        for (auto &a : param2vars.second) {
-          name2var[a->Name()] = a;
-        }
-      }
-      for (auto &param2vars : it->outputs_) {
-        for (auto &a : param2vars.second) {
-          name2var[a->Name()] = a;
-        }
-      }
-
-      VariableHandleMap in_vars;
-      VariableHandleMap out_vars;
-      std::map<const framework::VariableNameMap *, VariableHandleMap *>
-          loop_over{{&op_desc->Inputs(), &in_vars},
-                    {&op_desc->Outputs(), &out_vars}};
-      for (auto &each : loop_over) {
-        auto &vmp = *each.first;
-        auto &vhm = *each.second;
-        for (auto &p2a : vmp) {
-          for (auto &argu : p2a.second) {
-            if (name2var.count(argu)) {
-              vhm[p2a.first].push_back(name2var[argu]);
-            } else {
-              PADDLE_ENFORCE(ends_with(argu, framework::kGradVarSuffix),
-                             argu.c_str());
-              std::string name = argu.substr(
-                  0, argu.size() - std::strlen(framework::kGradVarSuffix));
-              PADDLE_ENFORCE(name2var.count(name), name.c_str());
-              vhm[p2a.first].push_back(name2var[name]->Grad());
-            }
-          }
-        }
-      }
-
-      backward_tape_->AddOp(
-          op_desc->Type(), in_vars, out_vars, op_desc->GetAttrMap());
-    }
-
-    // TODO(tonyyang-svail): how to fill empty grad?
-    // TODO(tonyyang-svail): Sum var grad is necessary
-  }
-
-  backward_tape_->Forward();
-  has_been_backwarded_ = true;
-}
-
-Tape &get_global_tape() {
-  static Tape T;
-  return T;
-}
-
-void reset_global_tape() { get_global_tape() = Tape(); }
-}
-}
diff --git a/paddle/contrib/tape/tape.h b/paddle/contrib/tape/tape.h
deleted file mode 100644
index ed79de17a7..0000000000
--- a/paddle/contrib/tape/tape.h
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-#pragma once
-
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "paddle/contrib/tape/variable.h"
-
-namespace paddle {
-namespace tape {
-
-using VariableHandleMap = std::map<std::string, std::vector<VariableHandle>>;
-
-struct OpHandle {
-  OpHandle(const std::string &type,
-           const VariableHandleMap &in_vars,
-           const VariableHandleMap &out_vars,
-           const framework::AttributeMap &attrs)
-      : type_(type), inputs_(in_vars), outputs_(out_vars), attrs_(attrs) {}
-
-  std::string type_;
-  VariableHandleMap inputs_;
-  VariableHandleMap outputs_;
-  framework::AttributeMap attrs_;
-};
-
-class Tape {
- public:
-  void AddOp(const std::string &type,
-             const VariableHandleMap &in_vars,
-             VariableHandleMap out_vars,
-             const framework::AttributeMap &attrs);
-  void Forward();
-  void Backward(VariableHandle target);
-
-  bool HasBeenBackwarded() { return has_been_backwarded_; }
-
- private:
-  bool has_been_backwarded_ = false;
-  size_t current_position_ = 0;
-
-  std::vector<OpHandle> tape_;
-  std::shared_ptr<Tape> backward_tape_;
-};
-
-Tape &get_global_tape();
-
-void reset_global_tape();
-}
-}
diff --git a/paddle/contrib/tape/test_tape.cc b/paddle/contrib/tape/test_tape.cc
deleted file mode 100644
index e9bfd21a71..0000000000
--- a/paddle/contrib/tape/test_tape.cc
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
- -#include "gtest/gtest.h" -#include "paddle/contrib/tape/function.h" - -using namespace paddle::tape; - -TEST(Tape, TestMLP) { - LOG(INFO) << "TestMLP"; - Linear linear1(3, 3, "relu"); - Linear linear2(3, 3, "relu"); - Mean mean; - - SGD sgd(0.001); - - std::string initializer = "fill_constant"; - paddle::framework::AttributeMap attrs; - attrs["dtype"] = paddle::framework::proto::VarType::Type::VarType_Type_FP32; - attrs["shape"] = std::vector{3, 3}; - attrs["value"] = 1.0f; - Fill filler(initializer, attrs); - - for (int i = 0; i < 2; ++i) { - reset_global_tape(); - - VariableHandle input(new Variable("input")); - filler(input); - - auto loss = mean(linear2(linear1(input))); - - get_global_tape().Backward(loss); - - for (auto w : linear1.Params()) { - sgd(w); - } - for (auto w : linear2.Params()) { - sgd(w); - } - } -} - -int main(int argc, char** argv) { - std::vector places; - places.emplace_back(paddle::platform::CPUPlace()); - paddle::platform::DeviceContextPool::Init(places); - - testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/paddle/contrib/tape/variable.cc b/paddle/contrib/tape/variable.cc deleted file mode 100644 index 5ec1612909..0000000000 --- a/paddle/contrib/tape/variable.cc +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "paddle/contrib/tape/variable.h" - -namespace paddle { -namespace tape { - -void Variable::InitializeVariable() { - LOG(INFO) << "Initialzing " << desc_.Name() << " as " << desc_.GetType(); - framework::proto::VarType::Type var_type = desc_.GetType(); - if (var_type == framework::proto::VarType::LOD_TENSOR) { - var_.GetMutable(); - } else if (var_type == framework::proto::VarType::SELECTED_ROWS) { - var_.GetMutable(); - } else { - PADDLE_THROW("Variable type %d is not in [LOD_TENSOR, SELECTED_ROWS]", - var_type); - } -} -} -} diff --git a/paddle/contrib/tape/variable.h b/paddle/contrib/tape/variable.h deleted file mode 100644 index 35c328e69c..0000000000 --- a/paddle/contrib/tape/variable.h +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-#pragma once
-
-#include <memory>
-
-#include "paddle/fluid/framework/operator.h"  // framework::kGradVarSuffix
-#include "paddle/fluid/framework/program_desc.h"
-#include "paddle/fluid/framework/variable.h"
-
-namespace paddle {
-namespace tape {
-
-class Variable;
-using VariableHandle = std::shared_ptr<Variable>;
-
-/*
- * Combination of
- *     framework::VarDesc desc_;
- *     framework::Variable var_;
- */
-class Variable {
- public:
-  Variable(const std::string pre_fix)
-      : desc_(pre_fix + std::to_string(count())) {}
-
-  Variable(const std::string pre_fix, bool is_grad)
-      : desc_(pre_fix + (is_grad ? framework::kGradVarSuffix
-                                 : std::to_string(count()))) {}
-
-  ~Variable() { LOG(INFO) << "Deleting " << Name(); }
-
-  // Instantiate LoDTensor/SelectedRows
-  void InitializeVariable();
-
-  VariableHandle Grad() {
-    if (grad_.expired()) {
-      VariableHandle new_grad(new Variable(desc_.Name(), true));
-      grad_ = new_grad;
-      return new_grad;
-    } else {
-      return VariableHandle(grad_);
-    }
-  }
-
-  // Stochastic Gradient Descent with Momentum
-  //  VariableHandle Momentum ();
-
-  //  void init(const std::string& initializer,
-  //            const framework::AttributeMap& attrs);
-
-  //  void value() {};
-
-  const framework::VarDesc& Desc() const { return desc_; }
-  framework::VarDesc* MutableDesc() { return &desc_; }
-
-  // TODO(tonyyang-svail): No need to expose name
-  std::string Name() const { return desc_.Name(); }
-
-  framework::Variable* Var() { return &var_; }
-
- private:
-  int count() {
-    static int counter = 0;
-    return counter++;
-  }
-
-  framework::VarDesc desc_;
-  framework::Variable var_;
-
-  std::weak_ptr<Variable> grad_;
-};
-}
-}