From 6326c40d2709da9839edbf3fb2a280ca92804a23 Mon Sep 17 00:00:00 2001
From: chengduoZH <zhaochengduo@163.com>
Date: Thu, 28 Sep 2017 09:49:00 +0800
Subject: [PATCH 01/82] Add max pool with index
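
The operator computes, for every pooling window, both the max value (Out) and
the flattened position of that max inside its input feature-map plane (Mask),
so that the backward pass can route gradients directly through the recorded
indices instead of recomputing the argmax. A minimal NumPy sketch of the 2-D
forward semantics (illustrative only, not part of the patch; the function name
is made up):

    import numpy as np

    def max_pool2d_with_index(x, ksize, strides, paddings):
        """x: (N, C, H, W) -> (out, mask); mask holds flat H*W offsets."""
        N, C, H, W = x.shape
        H_out = (H - ksize[0] + 2 * paddings[0]) // strides[0] + 1
        W_out = (W - ksize[1] + 2 * paddings[1]) // strides[1] + 1
        out = np.zeros((N, C, H_out, W_out), dtype=x.dtype)
        mask = np.zeros((N, C, H_out, W_out), dtype=np.int64)
        for i in range(H_out):
            hs = max(i * strides[0] - paddings[0], 0)
            he = min(i * strides[0] - paddings[0] + ksize[0], H)
            for j in range(W_out):
                ws = max(j * strides[1] - paddings[1], 0)
                we = min(j * strides[1] - paddings[1] + ksize[1], W)
                win = x[:, :, hs:he, ws:we].reshape(N, C, -1)
                arg = np.argmax(win, axis=2)
                out[:, :, i, j] = np.max(win, axis=2)
                # convert the window-local argmax to a flat offset in the
                # H*W plane, matching what the Mask output stores
                mask[:, :, i, j] = (hs + arg // (we - ws)) * W + (ws + arg % (we - ws))
        return out, mask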

---
 paddle/operators/CMakeLists.txt               |   6 +
 paddle/operators/math/CMakeLists.txt          |   4 +-
 paddle/operators/math/pooling.cc              | 255 ++++++++++++
 paddle/operators/math/pooling.cu              | 387 ++++++++++++++++++
 paddle/operators/math/pooling.h               |  68 +++
 paddle/operators/pool_with_index_op.cc        | 198 +++++++++
 paddle/operators/pool_with_index_op.cu        |  31 ++
 paddle/operators/pool_with_index_op.h         |  99 +++++
 .../v2/framework/tests/test_pool_max_op.py    | 125 ++++++
 9 files changed, 1171 insertions(+), 2 deletions(-)
 create mode 100644 paddle/operators/math/pooling.cc
 create mode 100644 paddle/operators/math/pooling.cu
 create mode 100644 paddle/operators/math/pooling.h
 create mode 100644 paddle/operators/pool_with_index_op.cc
 create mode 100644 paddle/operators/pool_with_index_op.cu
 create mode 100644 paddle/operators/pool_with_index_op.h
 create mode 100644 python/paddle/v2/framework/tests/test_pool_max_op.py

diff --git a/paddle/operators/CMakeLists.txt b/paddle/operators/CMakeLists.txt
index e56895c63a..0feb969c62 100644
--- a/paddle/operators/CMakeLists.txt
+++ b/paddle/operators/CMakeLists.txt
@@ -62,6 +62,12 @@ function(op_library TARGET)
         file(APPEND ${pybind_file} "USE_OP(sigmoid);\n")
     endif()
 
+    if ("${TARGET}" STREQUAL "pool_with_index_op")
+        set(pybind_flag 1)
+        # It is enough to add just one operator to pybind.
+        file(APPEND ${pybind_file} "USE_OP(maxPool2dWithIndex);\n")
+    endif()
+
     # pybind USE_NO_KERNEL_OP
     file(READ ${TARGET}.cc TARGET_CONTENT)
     string(REGEX MATCH "OperatorWithKernel" regex_result "${TARGET_CONTENT}")
diff --git a/paddle/operators/math/CMakeLists.txt b/paddle/operators/math/CMakeLists.txt
index 91ae3d49f1..811deb4c2c 100644
--- a/paddle/operators/math/CMakeLists.txt
+++ b/paddle/operators/math/CMakeLists.txt
@@ -1,12 +1,12 @@
 if(WITH_GPU)
     nv_library(math_function SRCS math_function.cc math_function.cu im2col.cc
-      im2col.cu DEPS cblas device_context operator)
+      im2col.cu pooling.cc pooling.cu DEPS cblas device_context operator)
     nv_library(softmax_function SRCS softmax.cc softmax.cu
       DEPS operator)
     nv_library(cross_entropy_function SRCS cross_entropy.cc cross_entropy.cu
       DEPS operator)
 else()
-    cc_library(math_function SRCS math_function.cc im2col.cc
+    cc_library(math_function SRCS math_function.cc im2col.cc pooling.cc
       DEPS cblas device_context operator)
     cc_library(softmax_function SRCS softmax.cc DEPS operator)
     cc_library(cross_entropy_function SRCS cross_entropy.cc DEPS operator)
diff --git a/paddle/operators/math/pooling.cc b/paddle/operators/math/pooling.cc
new file mode 100644
index 0000000000..0e4d9007a6
--- /dev/null
+++ b/paddle/operators/math/pooling.cc
@@ -0,0 +1,255 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/operators/math/pooling.h"
+
+namespace paddle {
+namespace operators {
+namespace math {
+
+template <typename T>
+class MaxPool2dWithIndexFunctor<platform::CPUPlace, T> {
+ public:
+  void operator()(const platform::DeviceContext& context,
+                  const framework::Tensor& input, framework::Tensor& output,
+                  framework::Tensor& mask, std::vector<int>& ksize,
+                  std::vector<int>& strides, std::vector<int>& paddings) {
+    const int batch_size = input.dims()[0];
+
+    const int input_height = input.dims()[2];
+    const int input_width = input.dims()[3];
+    const int output_channels = output.dims()[1];
+    const int output_height = output.dims()[2];
+    const int output_width = output.dims()[3];
+    const int ksize_height = ksize[0];
+    const int ksize_width = ksize[1];
+    const int stride_height = strides[0];
+    const int stride_width = strides[1];
+    const int padding_height = paddings[0];
+    const int padding_width = paddings[1];
+
+    const int input_stride = input_height * input_width;
+    const int output_stride = output_height * output_width;
+
+    const T* input_data = input.data<T>();
+    T* output_data = output.mutable_data<T>(context.GetPlace());
+
+    T* mask_data = mask.mutable_data<T>(context.GetPlace());
+
+    for (int i = 0; i < batch_size; i++) {
+      for (int c = 0; c < output_channels; ++c) {
+        for (int ph = 0; ph < output_height; ++ph) {
+          int hstart = ph * stride_height - padding_height;
+          int hend = std::min(hstart + ksize_height, input_height);
+          hstart = std::max(hstart, 0);
+          for (int pw = 0; pw < output_width; ++pw) {
+            int wstart = pw * stride_width - padding_width;
+            int wend = std::min(wstart + ksize_width, input_width);
+            wstart = std::max(wstart, 0);
+
+            T ele = static_cast<T>(-FLT_MAX);
+            int index = -1;
+            for (int h = hstart; h < hend; ++h) {
+              for (int w = wstart; w < wend; ++w) {
+                if (ele < input_data[h * input_width + w]) {
+                  ele = input_data[h * input_width + w];
+                  index = h * input_width + w;
+                }
+              }
+            }
+            output_data[ph * output_width + pw] = ele;
+            mask_data[ph * output_width + pw] = index;
+          }
+        }
+        // offset
+        input_data += input_stride;
+        output_data += output_stride;
+        mask_data += output_stride;
+      }
+    }
+  }
+};
+
+template <typename T>
+class MaxPool2dWithIndexGradFunctor<platform::CPUPlace, T> {
+ public:
+  void operator()(const platform::DeviceContext& context,
+                  framework::Tensor& input_grad,
+                  const framework::Tensor& output_grad,
+                  const framework::Tensor& mask, std::vector<int>& ksize,
+                  std::vector<int>& strides, std::vector<int>& paddings) {
+    const int batch_size = input_grad.dims()[0];
+    const int input_height = input_grad.dims()[2];
+    const int input_width = input_grad.dims()[3];
+    const int output_channels = output_grad.dims()[1];
+    const int output_height = output_grad.dims()[2];
+    const int output_width = output_grad.dims()[3];
+    const int input_stride = input_height * input_width;
+    const int output_stride = output_height * output_width;
+
+    const T* mask_data = mask.data<T>();
+    const T* output_grad_data = output_grad.data<T>();
+    T* input_grad_data = input_grad.mutable_data<T>(context.GetPlace());
+
+    for (size_t n = 0; n < batch_size; ++n) {
+      for (size_t c = 0; c < output_channels; ++c) {
+        for (size_t ph = 0; ph < output_height; ++ph) {
+          for (size_t pw = 0; pw < output_width; ++pw) {
+            const size_t output_idx = ph * output_width + pw;
+            const size_t input_idx = static_cast<size_t>(mask_data[output_idx]);
+
+            input_grad_data[input_idx] += output_grad_data[output_idx];
+          }
+        }
+        // offset
+        input_grad_data += input_stride;
+        output_grad_data += output_stride;
+        mask_data += output_stride;
+      }
+    }
+  }
+};
+
+template class MaxPool2dWithIndexFunctor<platform::CPUPlace, float>;
+template class MaxPool2dWithIndexGradFunctor<platform::CPUPlace, float>;
+template class MaxPool2dWithIndexFunctor<platform::CPUPlace, double>;
+template class MaxPool2dWithIndexGradFunctor<platform::CPUPlace, double>;
+
+template <typename T>
+class MaxPool3dWithIndexFunctor<platform::CPUPlace, T> {
+ public:
+  void operator()(const platform::DeviceContext& context,
+                  const framework::Tensor& input, framework::Tensor& output,
+                  framework::Tensor& mask, std::vector<int>& ksize,
+                  std::vector<int>& strides, std::vector<int>& paddings) {
+    const int batch_size = input.dims()[0];
+    const int input_depth = input.dims()[2];
+    const int input_height = input.dims()[3];
+    const int input_width = input.dims()[4];
+    const int output_channels = output.dims()[1];
+    const int output_depth = output.dims()[2];
+    const int output_height = output.dims()[3];
+    const int output_width = output.dims()[4];
+    const int ksize_depth = ksize[0];
+    const int ksize_height = ksize[1];
+    const int ksize_width = ksize[2];
+    const int stride_depth = strides[0];
+    const int stride_height = strides[1];
+    const int stride_width = strides[2];
+    const int padding_depth = paddings[0];
+    const int padding_height = paddings[1];
+    const int padding_width = paddings[2];
+    const int input_stride = input_depth * input_height * input_width;
+    const int output_stride = output_depth * output_height * output_width;
+    const T* input_data = input.data<T>();
+    T* output_data = output.mutable_data<T>(context.GetPlace());
+    T* mask_data = mask.mutable_data<T>(context.GetPlace());
+
+    for (int i = 0; i < batch_size; i++) {
+      for (int c = 0; c < output_channels; ++c) {
+        for (int pd = 0; pd < output_depth; ++pd) {
+          int dstart = pd * stride_depth - padding_depth;
+          int dend = std::min(dstart + ksize_depth, input_depth);
+          dstart = std::max(dstart, 0);
+          for (int ph = 0; ph < output_height; ++ph) {
+            int hstart = ph * stride_height - padding_height;
+            int hend = std::min(hstart + ksize_height, input_height);
+            hstart = std::max(hstart, 0);
+            for (int pw = 0; pw < output_width; ++pw) {
+              int wstart = pw * stride_width - padding_width;
+              int wend = std::min(wstart + ksize_width, input_width);
+              wstart = std::max(wstart, 0);
+              int output_idx = (pd * output_height + ph) * output_width + pw;
+              T ele = static_cast<T>(-FLT_MAX);
+              int index = -1;
+              for (int d = dstart; d < dend; ++d) {
+                for (int h = hstart; h < hend; ++h) {
+                  for (int w = wstart; w < wend; ++w) {
+                    if (ele <
+                        input_data[(d * input_height + h) * input_width + w]) {
+                      index = (d * input_height + h) * input_width + w;
+                      ele =
+                          input_data[(d * input_height + h) * input_width + w];
+                    }
+                  }
+                }
+              }
+              output_data[output_idx] = ele;
+              mask_data[output_idx] = index;
+            }
+          }
+        }
+        // offset
+        input_data += input_stride;
+        output_data += output_stride;
+        mask_data += output_stride;
+      }
+    }
+  }
+};
+
+template <typename T>
+class MaxPool3dWithIndexGradFunctor<platform::CPUPlace, T> {
+ public:
+  void operator()(const platform::DeviceContext& context,
+                  framework::Tensor& input_grad,
+                  const framework::Tensor& output_grad,
+                  const framework::Tensor& mask, std::vector<int>& ksize,
+                  std::vector<int>& strides, std::vector<int>& paddings) {
+    const int batch_size = input_grad.dims()[0];
+    const int input_depth = input_grad.dims()[2];
+    const int input_height = input_grad.dims()[3];
+    const int input_width = input_grad.dims()[4];
+    const int output_channels = output_grad.dims()[1];
+    const int output_depth = output_grad.dims()[2];
+    const int output_height = output_grad.dims()[3];
+    const int output_width = output_grad.dims()[4];
+    const int input_stride = input_depth * input_height * input_width;
+    const int output_stride = output_depth * output_height * output_width;
+
+    const T* mask_data = mask.data<T>();
+    const T* output_grad_data = output_grad.data<T>();
+    T* input_grad_data = input_grad.mutable_data<T>(context.GetPlace());
+
+    for (size_t n = 0; n < batch_size; ++n) {
+      for (size_t c = 0; c < output_channels; ++c) {
+        for (size_t pd = 0; pd < output_depth; ++pd) {
+          for (size_t ph = 0; ph < output_height; ++ph) {
+            for (size_t pw = 0; pw < output_width; ++pw) {
+              const size_t output_idx =
+                  (pd * output_height + ph) * output_width + pw;
+              const size_t input_idx =
+                  static_cast<size_t>(mask_data[output_idx]);
+
+              input_grad_data[input_idx] += output_grad_data[output_idx];
+            }
+          }
+        }
+        // offset
+        input_grad_data += input_stride;
+        output_grad_data += output_stride;
+        mask_data += output_stride;
+      }
+    }
+  }
+};
+
+template class MaxPool3dWithIndexFunctor<platform::CPUPlace, float>;
+template class MaxPool3dWithIndexGradFunctor<platform::CPUPlace, float>;
+template class MaxPool3dWithIndexFunctor<platform::CPUPlace, double>;
+template class MaxPool3dWithIndexGradFunctor<platform::CPUPlace, double>;
+
+}  // namespace math
+}  // namespace operators
+}  // namespace paddle
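
For reference, the two Grad functors above reduce to a scatter-add: each output
gradient is routed to the input position recorded in the mask, and positions
that win in several overlapping windows accumulate. A NumPy sketch of that
computation, assuming the mask stores flat offsets within each (n, c) plane
(illustrative only, not part of the patch):

    import numpy as np

    def max_pool_with_index_grad(input_shape, out_grad, mask):
        N, C = input_shape[0], input_shape[1]
        in_grad = np.zeros(input_shape, dtype=out_grad.dtype).reshape(N, C, -1)
        og = out_grad.reshape(N, C, -1)
        m = mask.reshape(N, C, -1).astype(np.int64)
        for n in range(N):
            for c in range(C):
                # scatter-add; np.add.at handles repeated indices correctly
                np.add.at(in_grad[n, c], m[n, c], og[n, c])
        return in_grad.reshape(input_shape)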
diff --git a/paddle/operators/math/pooling.cu b/paddle/operators/math/pooling.cu
new file mode 100644
index 0000000000..f32e6a26d0
--- /dev/null
+++ b/paddle/operators/math/pooling.cu
@@ -0,0 +1,387 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/operators/math/pooling.h"
+#include "paddle/platform/cuda_helper.h"
+
+namespace paddle {
+namespace operators {
+namespace math {
+
+template <typename T>
+__global__ void KernelMaxPool2dWithIdxForward(
+    const int nthreads, const T* input_data, T* output_data, T* mask_data,
+    const int channels, const int input_height, const int input_width,
+    const int output_height, const int output_width, const int ksize_height,
+    const int ksize_width, const int stride_height, const int stride_width,
+    const int padding_height, const int padding_width) {
+  int index = blockIdx.x * blockDim.x + threadIdx.x;
+  if (index < nthreads) {
+    int pw = index % output_width;
+    int ph = (index / output_width) % output_height;
+    int c = (index / output_width / output_height) % channels;
+    int batch_idx = index / output_width / output_height / channels;
+
+    int hstart = ph * stride_height - padding_height;
+    int hend = min(hstart + ksize_height, input_height);
+    hstart = max(hstart, 0);
+
+    int wstart = pw * stride_width - padding_width;
+    int wend = min(wstart + ksize_width, input_width);
+    wstart = max(wstart, 0);
+
+    input_data += (batch_idx * channels + c) * input_height * input_width;
+    T ele = -FLT_MAX;
+    int max_index = -1;
+    for (int h = hstart; h < hend; ++h) {
+      for (int w = wstart; w < wend; ++w) {
+        if (ele < input_data[h * input_width + w]) {
+          max_index = h * input_width + w;
+          ele = input_data[h * input_width + w];
+        }
+      }
+    }
+    output_data[index] = ele;
+    mask_data[index] = max_index;
+  }
+}
+
+template <typename T>
+__global__ void KernelMaxPool2DWithIdxBackward(
+    const int nthreads, T* input_grad, const T* output_grad, const T* mask_data,
+    const int channels, const int input_height, const int input_width,
+    const int output_height, const int output_width, const int ksize_height,
+    const int ksize_width, const int stride_height, const int stride_width,
+    const int padding_height, const int padding_width) {
+  int index = blockIdx.x * blockDim.x + threadIdx.x;
+  if (index < nthreads) {
+    int offsetW = index % input_width + padding_width;
+    int offsetH = (index / input_width) % input_height + padding_height;
+    int offsetC = (index / input_width / input_height) % channels;
+    int batch_idx = index / input_width / input_height / channels;
+
+    int phstart = (offsetH < ksize_height)
+                      ? 0
+                      : (offsetH - ksize_height) / stride_height + 1;
+    int pwstart = (offsetW < ksize_width)
+                      ? 0
+                      : (offsetW - ksize_width) / stride_width + 1;
+    int phend = min(offsetH / stride_height + 1, output_height);
+    int pwend = min(offsetW / stride_width + 1, output_width);
+    T gradient = 0;
+    int output_idx =
+        (batch_idx * channels + offsetC) * output_height * output_width;
+    mask_data += output_idx;
+    output_grad += output_idx;
+    for (int ph = phstart; ph < phend; ++ph) {
+      for (int pw = pwstart; pw < pwend; ++pw) {
+        if ((offsetH * input_width + offsetW) ==
+            mask_data[ph * output_width + pw])
+          gradient += output_grad[ph * output_width + pw];
+      }
+    }
+    input_grad[index] = gradient;
+  }
+}
+
+template <typename T>
+class MaxPool2dWithIndexFunctor<platform::GPUPlace, T> {
+ public:
+  void operator()(const platform::DeviceContext& context,
+                  const framework::Tensor& input, framework::Tensor& output,
+                  framework::Tensor& mask, std::vector<int>& ksize,
+                  std::vector<int>& strides, std::vector<int>& paddings) {
+    const int batch_size = input.dims()[0];
+    const int input_channels = input.dims()[1];
+    const int input_height = input.dims()[2];
+    const int input_width = input.dims()[3];
+    const int output_channels = output.dims()[1];
+    const int output_height = output.dims()[2];
+    const int output_width = output.dims()[3];
+    const int ksize_height = ksize[0];
+    const int ksize_width = ksize[1];
+    const int stride_height = strides[0];
+    const int stride_width = strides[1];
+    const int padding_height = paddings[0];
+    const int padding_width = paddings[1];
+
+    const T* input_data = input.data<T>();
+    T* output_data = output.mutable_data<T>(context.GetPlace());
+    T* mask_data = mask.mutable_data<T>(context.GetPlace());
+
+    int nthreads = batch_size * output_channels * output_height * output_width;
+    int blocks = (nthreads + 1024 - 1) / 1024;
+    dim3 threads(1024, 1);
+    dim3 grid(blocks, 1);
+
+    KernelMaxPool2dWithIdxForward<
+        T><<<grid, threads, 0,
+             reinterpret_cast<const platform::CUDADeviceContext&>(context)
+                 .stream()>>>(nthreads, input_data, output_data, mask_data,
+                              input_channels, input_height, input_width,
+                              output_height, output_width, ksize_height,
+                              ksize_width, stride_height, stride_width,
+                              padding_height, padding_width);
+  }
+};
+
+template <typename T>
+class MaxPool2dWithIndexGradFunctor<platform::GPUPlace, T> {
+ public:
+  void operator()(const platform::DeviceContext& context,
+                  framework::Tensor& input_grad,
+                  const framework::Tensor& output_grad,
+                  const framework::Tensor& mask, std::vector<int>& ksize,
+                  std::vector<int>& strides, std::vector<int>& paddings) {
+    const int batch_size = input_grad.dims()[0];
+    const int input_channels = input_grad.dims()[1];
+    const int input_height = input_grad.dims()[2];
+    const int input_width = input_grad.dims()[3];
+    const int output_channels = output_grad.dims()[1];
+    const int output_height = output_grad.dims()[2];
+    const int output_width = output_grad.dims()[3];
+    const int ksize_height = ksize[0];
+    const int ksize_width = ksize[1];
+    const int stride_height = strides[0];
+    const int stride_width = strides[1];
+    const int padding_height = paddings[0];
+    const int padding_width = paddings[1];
+
+    const T* mask_data = mask.data<T>();
+    const T* output_grad_data = output_grad.data<T>();
+    T* input_grad_data = input_grad.mutable_data<T>(context.GetPlace());
+
+    int nthreads = batch_size * input_channels * input_height * input_width;
+    int blocks = (nthreads + 1024 - 1) / 1024;
+    dim3 threads(1024, 1);
+    dim3 grid(blocks, 1);
+
+    KernelMaxPool2DWithIdxBackward<
+        T><<<grid, threads, 0,
+             reinterpret_cast<const platform::CUDADeviceContext&>(context)
+                 .stream()>>>(nthreads, input_grad_data, output_grad_data,
+                              mask_data, input_channels, input_height,
+                              input_width, output_height, output_width,
+                              ksize_height, ksize_width, stride_height,
+                              stride_width, padding_height, padding_width);
+  }
+};
+
+template class MaxPool2dWithIndexFunctor<platform::GPUPlace, float>;
+template class MaxPool2dWithIndexGradFunctor<platform::GPUPlace, float>;
+template class MaxPool2dWithIndexFunctor<platform::GPUPlace, double>;
+template class MaxPool2dWithIndexGradFunctor<platform::GPUPlace, double>;
+
+template <typename T>
+__global__ void KernelMaxPool3DWithIdxForward(
+    const int nthreads, const T* input_data, T* output_data, T* mask_data,
+    const int channels, const int input_depth, const int input_height,
+    const int input_width, const int output_depth, const int output_height,
+    const int output_width, const int ksize_depth, const int ksize_height,
+    const int ksize_width, const int stride_depth, const int stride_height,
+    const int stride_width, const int padding_depth, const int padding_height,
+    const int padding_width) {
+  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (nthreads);
+       index += blockDim.x * gridDim.x) {
+    int pw = index % output_width;
+    int ph = (index / output_width) % output_height;
+    int pd = (index / output_width / output_height) % output_depth;
+    int c = (index / output_width / output_height / output_depth) % channels;
+    int batch_idx =
+        index / output_width / output_height / output_depth / channels;
+    int dstart = pd * stride_depth - padding_depth;
+    int hstart = ph * stride_height - padding_height;
+    int wstart = pw * stride_width - padding_width;
+    int dend = min(dstart + ksize_depth, input_depth);
+    int hend = min(hstart + ksize_height, input_height);
+    int wend = min(wstart + ksize_width, input_width);
+    dstart = max(dstart, 0);
+    hstart = max(hstart, 0);
+    wstart = max(wstart, 0);
+    T ele = -FLT_MAX;
+    int max_index = -1;
+    input_data +=
+        (batch_idx * channels + c) * input_depth * input_height * input_width;
+
+    for (int d = dstart; d < dend; ++d) {
+      for (int h = hstart; h < hend; ++h) {
+        for (int w = wstart; w < wend; ++w) {
+          if (ele < input_data[(d * input_height + h) * input_width + w]) {
+            max_index = (d * input_height + h) * input_width + w;
+            ele = input_data[(d * input_height + h) * input_width + w];
+          }
+        }
+      }
+    }
+    output_data[index] = ele;
+    mask_data[index] = max_index;
+  }
+}
+
+template <typename T>
+__global__ void KernelMaxPool3DWithIdxBackward(
+    const int nthreads, T* input_grad, const T* output_grad, const T* mask,
+    const int channels, const int input_depth, const int input_height,
+    const int input_width, const int output_depth, const int output_height,
+    const int output_width, const int ksize_depth, const int ksize_height,
+    const int ksize_width, const int stride_depth, const int stride_height,
+    const int stride_width, const int padding_depth, const int padding_height,
+    const int padding_width) {
+  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (nthreads);
+       index += blockDim.x * gridDim.x) {
+    int offsetW = index % input_width + padding_width;
+    int offsetH = (index / input_width) % input_height + padding_height;
+    int offsetD =
+        (index / input_width / input_height) % input_depth + padding_depth;
+    int offsetC = (index / input_width / input_height / input_depth) % channels;
+    int batch_idx = index / input_width / input_height / input_depth / channels;
+
+    int pdstart = (offsetD < ksize_depth)
+                      ? 0
+                      : (offsetD - ksize_depth) / stride_depth + 1;
+    int phstart = (offsetH < ksize_height)
+                      ? 0
+                      : (offsetH - ksize_height) / stride_height + 1;
+    int pwstart = (offsetW < ksize_width)
+                      ? 0
+                      : (offsetW - ksize_width) / stride_width + 1;
+    int pdend = min((offsetD) / stride_depth + 1, output_depth);
+    int phend = min((offsetH) / stride_height + 1, output_height);
+    int pwend = min((offsetW) / stride_width + 1, output_width);
+
+    T gradient = 0;
+    int output_idx = (batch_idx * channels + offsetC) * output_depth *
+                     output_height * output_width;
+    mask += output_idx;
+    output_grad += output_idx;
+
+    for (int pd = pdstart; pd < pdend; ++pd) {
+      for (int ph = phstart; ph < phend; ++ph) {
+        for (int pw = pwstart; pw < pwend; ++pw) {
+          if (((offsetD * input_height + offsetH) * input_width + offsetW) ==
+              mask[(pd * output_height + ph) * output_width + pw])
+            gradient +=
+                output_grad[(pd * output_height + ph) * output_width + pw];
+        }
+      }
+    }
+    input_grad[index] = gradient;
+  }
+}
+
+template <typename T>
+class MaxPool3dWithIndexFunctor<platform::GPUPlace, T> {
+ public:
+  void operator()(const platform::DeviceContext& context,
+                  const framework::Tensor& input, framework::Tensor& output,
+                  framework::Tensor& mask, std::vector<int>& ksize,
+                  std::vector<int>& strides, std::vector<int>& paddings) {
+    const int batch_size = input.dims()[0];
+    const int input_channels = input.dims()[1];
+    const int input_depth = input.dims()[2];
+    const int input_height = input.dims()[3];
+    const int input_width = input.dims()[4];
+    const int output_channels = output.dims()[1];
+    const int output_depth = output.dims()[2];
+    const int output_height = output.dims()[3];
+    const int output_width = output.dims()[4];
+    const int ksize_depth = ksize[0];
+    const int ksize_height = ksize[1];
+    const int ksize_width = ksize[2];
+    const int stride_depth = strides[0];
+    const int stride_height = strides[1];
+    const int stride_width = strides[2];
+    const int padding_depth = paddings[0];
+    const int padding_height = paddings[1];
+    const int padding_width = paddings[2];
+
+    const T* input_data = input.data<T>();
+    T* output_data = output.mutable_data<T>(context.GetPlace());
+    T* mask_data = mask.mutable_data<T>(context.GetPlace());
+
+    int nthreads = batch_size * output_channels * output_depth * output_height *
+                   output_width;
+    int blocks = (nthreads + 1024 - 1) / 1024;
+    dim3 threads(1024, 1);
+    dim3 grid(blocks, 1);
+
+    KernelMaxPool3DWithIdxForward<
+        T><<<grid, threads, 0,
+             reinterpret_cast<const platform::CUDADeviceContext&>(context)
+                 .stream()>>>(
+        nthreads, input_data, output_data, mask_data, input_channels,
+        input_depth, input_height, input_width, output_depth, output_height,
+        output_width, ksize_depth, ksize_height, ksize_width, stride_depth,
+        stride_height, stride_width, padding_depth, padding_height,
+        padding_width);
+  }
+};
+
+template <typename T>
+class MaxPool3dWithIndexGradFunctor<platform::GPUPlace, T> {
+ public:
+  void operator()(const platform::DeviceContext& context,
+                  framework::Tensor& input_grad,
+                  const framework::Tensor& output_grad,
+                  const framework::Tensor& mask, std::vector<int>& ksize,
+                  std::vector<int>& strides, std::vector<int>& paddings) {
+    const int batch_size = input_grad.dims()[0];
+    const int input_channels = input_grad.dims()[1];
+    const int input_depth = input_grad.dims()[2];
+    const int input_height = input_grad.dims()[3];
+    const int input_width = input_grad.dims()[4];
+    const int output_channels = output_grad.dims()[1];
+    const int output_depth = output_grad.dims()[2];
+    const int output_height = output_grad.dims()[3];
+    const int output_width = output_grad.dims()[4];
+    const int ksize_depth = ksize[0];
+    const int ksize_height = ksize[1];
+    const int ksize_width = ksize[2];
+    const int stride_depth = strides[0];
+    const int stride_height = strides[1];
+    const int stride_width = strides[2];
+    const int padding_depth = paddings[0];
+    const int padding_height = paddings[1];
+    const int padding_width = paddings[2];
+
+    const T* output_grad_data = output_grad.data<T>();
+    const T* mask_data = mask.data<T>();
+    T* input_grad_data = input_grad.mutable_data<T>(context.GetPlace());
+
+    int nthreads =
+        batch_size * input_channels * input_depth * input_height * input_width;
+    int blocks = (nthreads + 1024 - 1) / 1024;
+    dim3 threads(1024, 1);
+    dim3 grid(blocks, 1);
+
+    KernelMaxPool3DWithIdxBackward<
+        T><<<grid, threads, 0,
+             reinterpret_cast<const platform::CUDADeviceContext&>(context)
+                 .stream()>>>(
+        nthreads, input_grad_data, output_grad_data, mask_data, input_channels,
+        input_depth, input_height, input_width, output_depth, output_height,
+        output_width, ksize_depth, ksize_height, ksize_width, stride_depth,
+        stride_height, stride_width, padding_depth, padding_height,
+        padding_width);
+  }
+};
+
+template class MaxPool3dWithIndexFunctor<platform::GPUPlace, float>;
+template class MaxPool3dWithIndexGradFunctor<platform::GPUPlace, float>;
+template class MaxPool3dWithIndexFunctor<platform::GPUPlace, double>;
+template class MaxPool3dWithIndexGradFunctor<platform::GPUPlace, double>;
+
+}  // namespace math
+}  // namespace operators
+}  // namespace paddle
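
The CUDA kernels above flatten (batch, channel, row, column) into a single
thread index and recover the coordinates with div/mod; the 3-D kernels wrap
this in a grid-stride loop so one launch covers any nthreads. A sketch of the
decomposition in Python (illustrative only, not part of the patch):

    def decompose(index, channels, out_h, out_w):
        # inverse of index = ((n * channels + c) * out_h + ph) * out_w + pw
        pw = index % out_w
        ph = (index // out_w) % out_h
        c = (index // (out_w * out_h)) % channels
        n = index // (out_w * out_h * channels)
        return n, c, ph, pw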
diff --git a/paddle/operators/math/pooling.h b/paddle/operators/math/pooling.h
new file mode 100644
index 0000000000..3a05cd98fe
--- /dev/null
+++ b/paddle/operators/math/pooling.h
@@ -0,0 +1,68 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+#include "paddle/framework/eigen.h"
+#include "paddle/framework/tensor.h"
+#include "paddle/platform/device_context.h"
+#include "paddle/platform/hostdevice.h"
+
+namespace paddle {
+namespace operators {
+namespace math {
+// TODO: replace this hard-coded definition with a portable FLT_MAX
+// (e.g. from <cfloat>) once the build allows it.
+#define FLT_MAX __FLT_MAX__
+
+template <typename Place, typename T>
+class MaxPool2dWithIndexFunctor {
+ public:
+  void operator()(const platform::DeviceContext& context,
+                  const framework::Tensor& input, framework::Tensor& output,
+                  framework::Tensor& mask, std::vector<int>& ksize,
+                  std::vector<int>& strides, std::vector<int>& paddings);
+};
+
+template <typename Place, typename T>
+class MaxPool2dWithIndexGradFunctor {
+ public:
+  void operator()(const platform::DeviceContext& context,
+                  framework::Tensor& input_grad,
+                  const framework::Tensor& output_grad,
+                  const framework::Tensor& mask, std::vector<int>& ksize,
+                  std::vector<int>& strides, std::vector<int>& paddings);
+};
+
+template <typename Place, typename T>
+class MaxPool3dWithIndexFunctor {
+ public:
+  void operator()(const platform::DeviceContext& context,
+                  const framework::Tensor& input, framework::Tensor& output,
+                  framework::Tensor& mask, std::vector<int>& ksize,
+                  std::vector<int>& strides, std::vector<int>& paddings);
+};
+
+template <typename Place, typename T>
+class MaxPool3dWithIndexGradFunctor {
+ public:
+  void operator()(const platform::DeviceContext& context,
+                  framework::Tensor& input_grad,
+                  const framework::Tensor& output_grad,
+                  const framework::Tensor& mask, std::vector<int>& ksize,
+                  std::vector<int>& strides, std::vector<int>& paddings);
+};
+
+}  // namespace math
+}  // namespace operators
+}  // namespace paddle
diff --git a/paddle/operators/pool_with_index_op.cc b/paddle/operators/pool_with_index_op.cc
new file mode 100644
index 0000000000..d7a07a403d
--- /dev/null
+++ b/paddle/operators/pool_with_index_op.cc
@@ -0,0 +1,198 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/operators/pool_with_index_op.h"
+
+namespace paddle {
+namespace operators {
+
+int OutputSizeMaxPool(int input_size, int filter_size, int padding,
+                      int stride) {
+  int output_size = (input_size - filter_size + 2 * padding) / stride + 1;
+  return output_size;
+}
+
+class MaxPoolWithIndexOp : public framework::OperatorWithKernel {
+ public:
+  using framework::OperatorWithKernel::OperatorWithKernel;
+
+ protected:
+  void InferShape(framework::InferShapeContextBase *ctx) const override {
+    PADDLE_ENFORCE(ctx->HasInput("X"),
+                   "X(Input) of Pooling should not be null.");
+    PADDLE_ENFORCE(ctx->HasOutput("Out"),
+                   "Out(Output) of Pooling should not be null.");
+    PADDLE_ENFORCE(ctx->HasOutput("Mask"),
+                   "Out(Output) of Pooling should not be null.");
+
+    auto in_x_dims = ctx->GetInputDim("X");
+
+    std::vector<int> ksize = ctx->Attrs().Get<std::vector<int>>("ksize");
+    std::vector<int> strides = ctx->Attrs().Get<std::vector<int>>("strides");
+    std::vector<int> paddings = ctx->Attrs().Get<std::vector<int>>("paddings");
+
+    PADDLE_ENFORCE(in_x_dims.size() == 4 || in_x_dims.size() == 5,
+                   "Pooling input should be 4-D or 5-D.");
+
+    if (ctx->Attrs().Get<bool>("globalPooling")) {
+      ksize.resize(static_cast<size_t>(in_x_dims.size()) - 2);
+      for (size_t i = 0; i < ksize.size(); ++i)
+        ksize[i] = static_cast<int>(in_x_dims[i + 2]);
+    }
+
+    PADDLE_ENFORCE(in_x_dims.size() - ksize.size() == 2U,
+                   "Pooling input size and pooling size should be consistent.");
+    PADDLE_ENFORCE(ksize.size() == 2 || ksize.size() == 3,
+                   "Pooling size should be 2 or 3 elements.");
+    PADDLE_ENFORCE_EQ(ksize.size(), strides.size(),
+                      "strides size and pooling size should be the same.");
+    PADDLE_ENFORCE_EQ(ksize.size(), paddings.size(),
+                      "paddings size and pooling size should be the same.");
+
+    std::vector<int64_t> output_shape({in_x_dims[0], in_x_dims[1]});
+    for (size_t i = 0; i < ksize.size(); ++i) {
+      output_shape.push_back(OutputSizeMaxPool(in_x_dims[i + 2], ksize[i],
+                                               paddings[i], strides[i]));
+    }
+    ctx->SetOutputDim("Out", framework::make_ddim(output_shape));
+    ctx->SetOutputDim("Mask", framework::make_ddim(output_shape));
+  }
+};
+
+class MaxPoolWithIndexOpGrad : public framework::OperatorWithKernel {
+ public:
+  using framework::OperatorWithKernel::OperatorWithKernel;
+
+ protected:
+  void InferShape(framework::InferShapeContextBase *ctx) const override {
+    PADDLE_ENFORCE(ctx->HasInput("X"),
+                   "X(Input) of MaxPoolWithIndexOpGrad should not be null.");
+    PADDLE_ENFORCE(
+        ctx->HasOutput(framework::GradVarName("X")),
+        "X@GRAD(Input@GRAD) of MaxPoolWithIndexOpGrad should not be null.");
+    ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
+  }
+};
+
+class MaxPool2dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+  MaxPool2dWithIndexOpMaker(framework::OpProto *proto,
+                            framework::OpAttrChecker *op_checker)
+      : OpProtoAndCheckerMaker(proto, op_checker) {
+    AddInput(
+        "X",
+        "The input tensor of the pooling operator. The format of the input "
+        "tensor is NCHW, where N is the batch size, C is the number of "
+        "channels, and H and W are the height and width of the image.");
+    AddOutput("Out",
+              "The output tensor of pooling operator."
+              "The format of output tensor is also NCHW.");
+    AddOutput("Mask",
+              "The Mask tensor of pooling operator."
+              "The format of output tensor is also NCHW.");
+
+    AddAttr<std::vector<int>>(
+        "ksize", "Pooling size (height, width) of the pooling operator.");
+    AddAttr<bool>(
+        "globalPooling",
+        "Whether to use global pooling. "
+        "A boolean constant, false by default. "
+        "If globalPooling = true, ksize is ignored and need not be "
+        "specified.")
+        .SetDefault(false);
+    AddAttr<std::vector<int>>("strides",
+                              "Strides (height, width) of the pooling "
+                              "operator. Default: {1, 1}.")
+        .SetDefault({1, 1});
+    AddAttr<std::vector<int>>("paddings",
+                              "Paddings (height, width) of the pooling "
+                              "operator. Default: {0, 0}.")
+        .SetDefault({0, 0});
+
+    AddComment(R"DOC(
+The maxPool2dWithIndex operation computes the pooled output and the mask of
+max-element indices from the input and the ksize, strides, paddings parameters.
+)DOC");
+  }
+};
+
+class MaxPool3dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+  MaxPool3dWithIndexOpMaker(framework::OpProto *proto,
+                            framework::OpAttrChecker *op_checker)
+      : OpProtoAndCheckerMaker(proto, op_checker) {
+    AddInput(
+        "X",
+        "The input tensor of the pooling operator. The format of the input "
+        "tensor is NCDHW, where N is the batch size, C is the number of "
+        "channels, and D, H and W are the depth, height and width of the "
+        "image.");
+    AddOutput("Out",
+              "The output tensor of pooling operator."
+              "The format of output tensor is also NCDHW.");
+    AddOutput("Mask",
+              "The Mask tensor of pooling operator."
+              "The format of output tensor is also NCDHW.");
+
+    AddAttr<std::vector<int>>(
+        "ksize", "Pooling size (depth, height, width) of the pooling operator.");
+    AddAttr<bool>(
+        "globalPooling",
+        "Whether to use global pooling. "
+        "A boolean constant, false by default. "
+        "If globalPooling = true, ksize is ignored and need not be "
+        "specified.")
+        .SetDefault(false);
+    AddAttr<std::vector<int>>(
+        "strides",
+        "Strides (depth, height, width) of the pooling operator. "
+        "Default: {1, 1, 1}.")
+        .SetDefault({1, 1, 1});
+    AddAttr<std::vector<int>>(
+        "paddings",
+        "Paddings (depth, height, width) of the pooling operator. "
+        "Default: {0, 0, 0}.")
+        .SetDefault({0, 0, 0});
+    AddComment(R"DOC(
+The maxPool3dWithIndex operation computes the pooled output and the mask of
+max-element indices from the input and the ksize, strides, paddings parameters.
+)DOC");
+  }
+};
+}  // namespace operators
+}  // namespace paddle
+
+namespace ops = paddle::operators;
+
+REGISTER_OP(maxPool2dWithIndex, ops::MaxPoolWithIndexOp,
+            ops::MaxPool2dWithIndexOpMaker, maxPool2dWithIndex_grad,
+            ops::MaxPoolWithIndexOpGrad);
+
+REGISTER_OP_CPU_KERNEL(
+    maxPool2dWithIndex,
+    ops::MaxPoolWithIndexKernel<paddle::platform::CPUPlace, float>);
+REGISTER_OP_CPU_KERNEL(
+    maxPool2dWithIndex_grad,
+    ops::MaxPoolWithIndexGradKernel<paddle::platform::CPUPlace, float>);
+
+REGISTER_OP(maxPool3dWithIndex, ops::MaxPoolWithIndexOp,
+            ops::MaxPool3dWithIndexOpMaker, maxPool3dWithIndex_grad,
+            ops::MaxPoolWithIndexOpGrad);
+
+REGISTER_OP_CPU_KERNEL(
+    maxPool3dWithIndex,
+    ops::MaxPoolWithIndexKernel<paddle::platform::CPUPlace, float>);
+REGISTER_OP_CPU_KERNEL(
+    maxPool3dWithIndex_grad,
+    ops::MaxPoolWithIndexGradKernel<paddle::platform::CPUPlace, float>);
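
As a worked example of OutputSizeMaxPool: with input_size = 7, filter_size = 3,
padding = 1, and stride = 1 (the configuration used by the tests below),
(7 - 3 + 2 * 1) / 1 + 1 = 7, so the spatial size is preserved; with padding = 0
it shrinks to (7 - 3 + 0) / 1 + 1 = 5.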
diff --git a/paddle/operators/pool_with_index_op.cu b/paddle/operators/pool_with_index_op.cu
new file mode 100644
index 0000000000..8007fc7ccf
--- /dev/null
+++ b/paddle/operators/pool_with_index_op.cu
@@ -0,0 +1,31 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/operators/pool_with_index_op.h"
+
+namespace ops = paddle::operators;
+
+REGISTER_OP_GPU_KERNEL(
+    maxPool2dWithIndex,
+    ops::MaxPoolWithIndexKernel<paddle::platform::GPUPlace, float>);
+REGISTER_OP_GPU_KERNEL(
+    maxPool2dWithIndex_grad,
+    ops::MaxPoolWithIndexGradKernel<paddle::platform::GPUPlace, float>);
+
+REGISTER_OP_GPU_KERNEL(
+    maxPool3dWithIndex,
+    ops::MaxPoolWithIndexKernel<paddle::platform::GPUPlace, float>);
+REGISTER_OP_GPU_KERNEL(
+    maxPool3dWithIndex_grad,
+    ops::MaxPoolWithIndexGradKernel<paddle::platform::GPUPlace, float>);
diff --git a/paddle/operators/pool_with_index_op.h b/paddle/operators/pool_with_index_op.h
new file mode 100644
index 0000000000..91abeed016
--- /dev/null
+++ b/paddle/operators/pool_with_index_op.h
@@ -0,0 +1,99 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+
+#include "paddle/framework/eigen.h"
+#include "paddle/framework/op_registry.h"
+#include "paddle/operators/math/math_function.h"
+#include "paddle/operators/math/pooling.h"
+
+namespace paddle {
+namespace operators {
+
+using Tensor = framework::Tensor;
+
+template <typename Place, typename T>
+class MaxPoolWithIndexKernel : public framework::OpKernel {
+ public:
+  void Compute(const framework::ExecutionContext& context) const override {
+    const Tensor* in_x = context.Input<Tensor>("X");
+    Tensor* out = context.Output<Tensor>("Out");
+    Tensor* mask = context.Output<Tensor>("Mask");
+
+    bool global_pooling = context.Attr<bool>("globalPooling");
+    std::vector<int> ksize = context.Attr<std::vector<int>>("ksize");
+    std::vector<int> strides = context.Attr<std::vector<int>>("strides");
+    std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");
+    if (global_pooling) {
+      for (size_t i = 0; i < ksize.size(); ++i) {
+        ksize[i] = static_cast<int>(in_x->dims()[i + 2]);
+      }
+    }
+
+    switch (ksize.size()) {
+      case 2: {
+        paddle::operators::math::MaxPool2dWithIndexFunctor<Place, T>
+            pool2d_forward;
+        pool2d_forward(context.device_context(), *in_x, *out, *mask, ksize,
+                       strides, paddings);
+      } break;
+      case 3: {
+        paddle::operators::math::MaxPool3dWithIndexFunctor<Place, T>
+            pool3d_forward;
+        pool3d_forward(context.device_context(), *in_x, *out, *mask, ksize,
+                       strides, paddings);
+      } break;
+    }
+  }
+};
+
+template <typename Place, typename T>
+class MaxPoolWithIndexGradKernel : public framework::OpKernel {
+ public:
+  void Compute(const framework::ExecutionContext& context) const override {
+    const Tensor* mask = context.Input<Tensor>("Mask");
+    const Tensor* out_grad =
+        context.Input<Tensor>(framework::GradVarName("Out"));
+    Tensor* in_x_grad = context.Output<Tensor>(framework::GradVarName("X"));
+
+    std::vector<int> ksize = context.Attr<std::vector<int>>("ksize");
+    std::vector<int> strides = context.Attr<std::vector<int>>("strides");
+    std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");
+
+    if (in_x_grad) {
+      in_x_grad->mutable_data<T>(context.GetPlace());
+      auto temp = framework::EigenVector<T>::Flatten(*in_x_grad);
+      temp.device(context.GetEigenDevice<Place>()) =
+          temp.constant(static_cast<T>(0));
+
+      switch (ksize.size()) {
+        case 2: {
+          paddle::operators::math::MaxPool2dWithIndexGradFunctor<Place, T>
+              pool2d_backward;
+          pool2d_backward(context.device_context(), *in_x_grad, *out_grad,
+                          *mask, ksize, strides, paddings);
+        } break;
+        case 3: {
+          paddle::operators::math::MaxPool3dWithIndexGradFunctor<Place, T>
+              pool3d_backward;
+          pool3d_backward(context.device_context(), *in_x_grad, *out_grad,
+                          *mask, ksize, strides, paddings);
+        } break;
+      }
+    }
+  }
+};
+}  // namespace operators
+}  // namespace paddle
diff --git a/python/paddle/v2/framework/tests/test_pool_max_op.py b/python/paddle/v2/framework/tests/test_pool_max_op.py
new file mode 100644
index 0000000000..2945c8b7a4
--- /dev/null
+++ b/python/paddle/v2/framework/tests/test_pool_max_op.py
@@ -0,0 +1,125 @@
+import unittest
+import numpy as np
+from op_test import OpTest
+
+
+def max_pool3D_forward_naive(x, ksize, strides, paddings=[0, 0, 0],
+                             global_pool=0):
+
+    N, C, D, H, W = x.shape
+    if global_pool == 1:
+        ksize = [D, H, W]
+    D_out = (D - ksize[0] + 2 * paddings[0]) / strides[0] + 1
+    H_out = (H - ksize[1] + 2 * paddings[1]) / strides[1] + 1
+    W_out = (W - ksize[2] + 2 * paddings[2]) / strides[2] + 1
+    out = np.zeros((N, C, D_out, H_out, W_out))
+    mask = np.zeros((N, C, D_out, H_out, W_out))
+    for k in xrange(D_out):
+        d_start = np.max((k * strides[0] - paddings[0], 0))
+        d_end = np.min((k * strides[0] + ksize[0] - paddings[0], D))
+        for i in xrange(H_out):
+            h_start = np.max((i * strides[1] - paddings[1], 0))
+            h_end = np.min((i * strides[1] + ksize[1] - paddings[1], H))
+            for j in xrange(W_out):
+                w_start = np.max((j * strides[2] - paddings[2], 0))
+                w_end = np.min((j * strides[2] + ksize[2] - paddings[2], W))
+                x_masked = x[:, :, d_start:d_end, h_start:h_end, w_start:w_end]
+
+                out[:, :, k, i, j] = np.max(x_masked, axis=(2, 3, 4))
+                # mask[:,:, k, i, j] = np.argmax(x_masked, axis=(2, 3, 4))
+    return out
+
+
+def max_pool2D_forward_naive(x, ksize, strides, paddings=[0, 0], global_pool=0):
+
+    N, C, H, W = x.shape
+    if global_pool == 1:
+        ksize = [H, W]
+    H_out = (H - ksize[0] + 2 * paddings[0]) / strides[0] + 1
+    W_out = (W - ksize[1] + 2 * paddings[1]) / strides[1] + 1
+    out = np.zeros((N, C, H_out, W_out))
+    mask = np.zeros((N, C, H_out, W_out))
+    for i in xrange(H_out):
+        for j in xrange(W_out):
+            r_start = np.max((i * strides[0] - paddings[0], 0))
+            r_end = np.min((i * strides[0] + ksize[0] - paddings[0], H))
+            c_start = np.max((j * strides[1] - paddings[1], 0))
+            c_end = np.min((j * strides[1] + ksize[1] - paddings[1], W))
+            x_masked = x[:, :, r_start:r_end, c_start:c_end]
+
+            out[:, :, i, j] = np.max(x_masked, axis=(2, 3))
+            # mask[:,:, i, j] = np.argmax(x_masked, axis=(2, 3))
+
+    return out
+
+
+class TestMaxPoolWithIndex_Op(OpTest):
+    def setUp(self):
+        self.op_type = "maxPool3dWithIndex"
+        self.initTestCase()
+        input = np.random.random(self.shape).astype("float32")
+        output = self.pool_forward_naive(input, self.ksize, self.strides,
+                                         self.paddings, self.global_pool)
+        # mask = np.zeros(output.shape)
+
+        self.attrs = {
+            'strides': self.strides,
+            'paddings': self.paddings,
+            'ksize': self.ksize,
+            'globalPooling': self.global_pool,
+        }
+
+        self.inputs = {'X': input}
+        self.outputs = {'Out': output}
+
+    def test_check_output(self):
+        self.check_output()
+
+    # def test_check_grad(self):
+    #     self.check_grad(set(['X']), ['Out'], max_relative_error=0.07)
+
+    def initTestCase(self):
+        self.global_pool = 0
+        self.pool_forward_naive = max_pool3D_forward_naive
+        self.shape = [2, 3, 7, 7, 7]
+        self.ksize = [3, 3, 3]
+        self.strides = [1, 1, 1]
+        self.paddings = [1, 1, 1]
+
+
+""""
+class TestCase1(TestMaxPoolWithIndex_Op):
+    def initTestCase(self):
+        self.global_pool = 1
+        self.op_type = "maxPool3dWithIndex"
+        self.pool_forward_naive = max_pool3D_forward_naive
+        self.shape = [2, 3, 5, 5, 5]
+        self.ksize = [3, 3, 3]
+        self.strides = [1, 1, 1]
+        self.paddings = [0, 0, 0]
+
+
+class TestCase2(TestMaxPoolWithIndex_Op):
+    def initTestCase(self):
+        self.global_pool = 0
+        self.op_type = "maxPool2dWithIndex"
+        self.pool_forward_naive = max_pool2D_forward_naive
+        self.shape = [2, 3, 7, 7]
+        self.ksize = [3, 3]
+        self.strides = [1, 1]
+        self.paddings = [1, 1]
+
+
+class TestCase3(TestMaxPoolWithIndex_Op):
+    def initTestCase(self):
+        self.global_pool = 1
+        self.op_type = "maxPool2dWithIndex"
+        self.pool_forward_naive = max_pool2D_forward_naive
+        self.shape = [2, 3, 5, 5]
+        self.ksize = [3, 3]
+        self.strides = [1, 1]
+        self.paddings = [0, 0]
+
+
+if __name__ == '__main__':
+    unittest.main()
+"""

From 884e31a59b72856ea1a807561f01a623c1138053 Mon Sep 17 00:00:00 2001
From: Luo Tao <luotao02@baidu.com>
Date: Fri, 29 Sep 2017 15:28:25 +0800
Subject: [PATCH 02/82] add interpolation op
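
The interp op is composed from three existing elementwise ops rather than a new
kernel; numerically it computes Out = (X - Y) * W + Y = X * W + Y * (1 - W),
with the 1-D weight W broadcast across the columns of each row. A NumPy sketch
mirroring the reference computation in the new test (illustrative only):

    import numpy as np

    x = np.random.random((2, 3)).astype("float32")
    y = np.random.random((2, 3)).astype("float32")
    w = np.random.random(2).astype("float32")

    minus_out = x - y                      # elementwise_sub
    mul_out = minus_out * w.reshape(2, 1)  # elementwise_mul with axis=0
    out = mul_out + y                      # elementwise_add

    assert np.allclose(out, x * w[:, None] + y * (1 - w[:, None]), atol=1e-6)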

---
 paddle/operators/interp_op.cc                 | 107 ++++++++++++++++++
 .../v2/framework/tests/test_interp_op.py      |  28 +++++
 2 files changed, 135 insertions(+)
 create mode 100644 paddle/operators/interp_op.cc
 create mode 100644 python/paddle/v2/framework/tests/test_interp_op.py

diff --git a/paddle/operators/interp_op.cc b/paddle/operators/interp_op.cc
new file mode 100644
index 0000000000..04bcb9ade8
--- /dev/null
+++ b/paddle/operators/interp_op.cc
@@ -0,0 +1,107 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/framework/op_registry.h"
+#include "paddle/operators/net_op.h"
+
+namespace paddle {
+namespace operators {
+
+class InterpOp : public NetOp {
+ public:
+  InterpOp(const std::string &type, const framework::VariableNameMap &inputs,
+           const framework::VariableNameMap &outputs,
+           const framework::AttributeMap &attrs)
+      : NetOp(type, inputs, outputs, attrs) {
+    PADDLE_ENFORCE_NE(Input("X"), framework::kEmptyVarName,
+                      "Input(X) of InterpOp should not be null.");
+    PADDLE_ENFORCE_NE(Input("Y"), framework::kEmptyVarName,
+                      "Input(Y) of InterpOp should not be null.");
+    PADDLE_ENFORCE_NE(Input("W"), framework::kEmptyVarName,
+                      "Input(W) of InterpOp should not be null.");
+    PADDLE_ENFORCE_NE(Output("MinusOut"), framework::kEmptyVarName,
+                      "Output(MinusOut) of InterpOp should not be null.");
+    PADDLE_ENFORCE_NE(Output("MulOut"), framework::kEmptyVarName,
+                      "Output(MulOut) of InterpOp should not be null.");
+    PADDLE_ENFORCE_NE(Output("Out"), framework::kEmptyVarName,
+                      "Output(Out) of InterpOp should not be null.");
+
+    // MinusOut = X - Y
+    auto x = Input("X");
+    auto y = Input("Y");
+    auto minus_out = Output("MinusOut");
+    AppendOp(framework::OpRegistry::CreateOp("elementwise_sub",
+                                             {{"X", {x}}, {"Y", {y}}},
+                                             {{"Out", {minus_out}}}, {}));
+
+    // MulOut = MinusOut * W = (X - Y) * W
+    auto w = Input("W");
+    auto mul_out = Output("MulOut");
+    AppendOp(framework::OpRegistry::CreateOp(
+        "elementwise_mul", {{"X", {minus_out}}, {"Y", {w}}},
+        {{"Out", {mul_out}}}, {{"axis", 0}}));
+
+    // Out = MulOut + Y = (X - Y) * W + Y = X * W + Y * (1 - W)
+    AppendOp(framework::OpRegistry::CreateOp("elementwise_add",
+                                             {{"X", {mul_out}}, {"Y", {y}}},
+                                             {{"Out", {Output("Out")}}}, {}));
+
+    CompleteAddOp(false);
+    LOG(INFO) << DebugString();
+  }
+};
+
+class InterpOpMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+  InterpOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
+      : OpProtoAndCheckerMaker(proto, op_checker) {
+    AddInput("X", "A 2-D Tensor, the first input of interp_op");
+    AddInput("Y", "A 2-D Tensor, the second input of interp_op");
+    AddInput("W", "A 1-D Tensor, the interpolated values");
+    AddOutput("MinusOut",
+              "A 2-D Tensor, the intermediate outputs, saving X - Y.")
+        .AsIntermediate();
+    AddOutput("MulOut",
+              "A 2-D Tensor, the intermediate outputs,"
+              "saving the mul mul of (X - Y) and W")
+        .AsIntermediate();
+    AddOutput("Out",
+              "A 2-D Tensor, the output of interp_op, same shape with X");
+    AddComment(R"DOC(
+    Linear interpolation of two inputs, as used in the Neural Turing Machine.
+
+    Equation:
+      Out.row[i] = X.row[i] * W[i] + Y.row[i] * (1 - W[i])
+                 = (X.row[i] - Y.row[i]) * W[i] + Y.row[i]
+
+    Example:
+      X = [[1,2],[3,4]],
+      Y = [[2,1],[4,3]],
+      W = [0.3, 0.4]
+
+      Then, Out = [[1.7,1.3],[3.6,3.4]]
+
+      where 1.7 = 1*0.3+2*(1-0.3),
+            1.3 = 2*0.3+1*(1-0.3),
+            3.6 = 3*0.4+4*(1-0.4),
+            3.4 = 4*0.4+3*(1-0.4)
+)DOC");
+  }
+};
+
+}  // namespace operators
+}  // namespace paddle
+
+namespace ops = paddle::operators;
+REGISTER_OP_WITHOUT_GRADIENT(interp, ops::InterpOp, ops::InterpOpMaker);
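
For readers tracing the three appended ops, the following NumPy sketch (illustrative only, not part of the patch) mirrors the MinusOut/MulOut/Out pipeline and reproduces the worked example from the DOC string above:

    import numpy as np

    # Inputs from the DOC example: each row of X/Y is blended by one weight W[i].
    x = np.array([[1., 2.], [3., 4.]])
    y = np.array([[2., 1.], [4., 3.]])
    w = np.array([0.3, 0.4])

    minus_out = x - y                 # elementwise_sub: MinusOut = X - Y
    mul_out = minus_out * w[:, None]  # elementwise_mul with axis=0: scale row i by w[i]
    out = mul_out + y                 # elementwise_add: Out = (X - Y) * W + Y

    assert np.allclose(out, [[1.7, 1.3], [3.6, 3.4]])
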
diff --git a/python/paddle/v2/framework/tests/test_interp_op.py b/python/paddle/v2/framework/tests/test_interp_op.py
new file mode 100644
index 0000000000..f82dcc7f50
--- /dev/null
+++ b/python/paddle/v2/framework/tests/test_interp_op.py
@@ -0,0 +1,28 @@
+import unittest
+import numpy as np
+from op_test import OpTest
+
+
+class TestInterpOp(OpTest):
+    def setUp(self):
+        self.op_type = "interp"
+        x = np.random.random((2, 3)).astype("float32")
+        y = np.random.random((2, 3)).astype("float32")
+        w = np.random.random(2).astype("float32")
+
+        minus_out = x - y
+        mul_out = minus_out * w.reshape(2, 1)
+        out = mul_out + y
+
+        self.inputs = {'X': x, 'Y': y, 'W': w}
+        self.outputs = {'Out': out, 'MinusOut': minus_out, 'MulOut': mul_out}
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad_normal(self):
+        self.check_grad(['X', 'Y'], 'Out')
+
+
+if __name__ == "__main__":
+    unittest.main()

From a815d6abcf49d4778d0a49c852c45264bd8a684a Mon Sep 17 00:00:00 2001
From: zhouxiao-coder <zhouxiaocoder@gmail.com>
Date: Fri, 29 Sep 2017 17:29:52 +0800
Subject: [PATCH 03/82] elu: Optimize gradient calculation; add more comments

---
 paddle/operators/activation_op.cc             | 25 ++++++++++++
 paddle/operators/activation_op.cu             |  4 ++
 paddle/operators/activation_op.h              | 40 +++++++++++++++++++
 .../v2/framework/tests/test_activation_op.py  | 20 ++++++++++
 4 files changed, 89 insertions(+)

diff --git a/paddle/operators/activation_op.cc b/paddle/operators/activation_op.cc
index 1e1d3cf7f7..e83666c9f9 100644
--- a/paddle/operators/activation_op.cc
+++ b/paddle/operators/activation_op.cc
@@ -174,6 +174,25 @@ class SoftReluOpMaker : public framework::OpProtoAndCheckerMaker {
   }
 };
 
+template <typename AttrType>
+class ELUOpMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+  ELUOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
+      : OpProtoAndCheckerMaker(proto, op_checker) {
+    AddInput("X",
+             "Input of ELU operator, it shouldn't be empty. Input is flattened "
+             "and treated as a 1D array.");
+    AddOutput("Y", "Output of ELU operator, has same shape as the input.");
+    AddComment(
+        "ELU activation operator. It applies this element-wise computation on "
+        "the input: f(x) = max(0, x) + min(0, alpha * (exp(x) - 1))."
+        "Check .. _Link: https://arxiv.org/abs/1511.07289 for more details");
+    AddAttr<AttrType>("alpha",
+                      "alpha value in the elu formulation, default to 1.")
+        .SetDefault(static_cast<AttrType>(1.));
+  }
+};
+
 template <typename AttrType>
 class PowOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
@@ -311,6 +330,12 @@ REGISTER_OP_CPU_KERNEL(soft_relu,
 REGISTER_OP_CPU_KERNEL(
     soft_relu_grad, ops::SoftReluGradKernel<paddle::platform::CPUPlace, float>);
 
+REGISTER_OP(elu, ops::ActivationOp, ops::ELUOpMaker<float>, elu_grad,
+            ops::ActivationOpGrad);
+REGISTER_OP_CPU_KERNEL(elu, ops::ELUKernel<paddle::platform::CPUPlace, float>);
+REGISTER_OP_CPU_KERNEL(elu_grad,
+                       ops::ELUGradKernel<paddle::platform::CPUPlace, float>);
+
 REGISTER_OP(pow, ops::ActivationOp, ops::PowOpMaker<float>, pow_grad,
             ops::ActivationOpGrad);
 REGISTER_OP_CPU_KERNEL(pow, ops::PowKernel<paddle::platform::CPUPlace, float>);
diff --git a/paddle/operators/activation_op.cu b/paddle/operators/activation_op.cu
index 56886d8b1b..48800b11ec 100644
--- a/paddle/operators/activation_op.cu
+++ b/paddle/operators/activation_op.cu
@@ -97,6 +97,10 @@ REGISTER_OP_GPU_KERNEL(soft_relu,
 REGISTER_OP_GPU_KERNEL(
     soft_relu_grad, ops::SoftReluGradKernel<paddle::platform::GPUPlace, float>);
 
+REGISTER_OP_GPU_KERNEL(elu, ops::ELUKernel<paddle::platform::GPUPlace, float>);
+REGISTER_OP_GPU_KERNEL(elu_grad,
+                       ops::ELUGradKernel<paddle::platform::GPUPlace, float>);
+
 REGISTER_OP_GPU_KERNEL(pow, ops::PowKernel<paddle::platform::GPUPlace, float>);
 REGISTER_OP_GPU_KERNEL(pow_grad,
                        ops::PowGradKernel<paddle::platform::GPUPlace, float>);
diff --git a/paddle/operators/activation_op.h b/paddle/operators/activation_op.h
index b9f52e1af3..3428aca817 100644
--- a/paddle/operators/activation_op.h
+++ b/paddle/operators/activation_op.h
@@ -296,6 +296,46 @@ class SoftReluGradKernel : public framework::OpKernel<T> {
   }
 };
 
+template <typename Place, typename T, typename AttrType = T>
+class ELUKernel : public framework::OpKernel<T> {
+ public:
+  void Compute(const framework::ExecutionContext& context) const override {
+    auto* X = context.Input<framework::Tensor>("X");
+    auto* Y = context.Output<framework::Tensor>("Y");
+    auto alpha = static_cast<T>(context.Attr<AttrType>("alpha"));
+    Y->mutable_data<T>(context.GetPlace());
+
+    auto x = framework::EigenVector<T>::Flatten(*X);
+    auto y = framework::EigenVector<T>::Flatten(*Y);
+    auto place = context.GetEigenDevice<Place>();
+    y.device(place) =
+        x.cwiseMax(static_cast<T>(0)) +
+        (alpha * (x.exp() - static_cast<T>(1))).cwiseMin(static_cast<T>(0));
+  }
+};
+
+template <typename Place, typename T, typename AttrType = T>
+class ELUGradKernel : public framework::OpKernel<T> {
+ public:
+  void Compute(const framework::ExecutionContext& context) const override {
+    auto* X = context.Input<framework::Tensor>("X");
+    auto* Y = context.Input<framework::Tensor>("Y");
+    auto* dY = context.Input<framework::Tensor>(framework::GradVarName("Y"));
+    auto* dX = context.Output<framework::Tensor>(framework::GradVarName("X"));
+    auto alpha = static_cast<T>(context.Attr<AttrType>("alpha"));
+    dX->mutable_data<T>(context.GetPlace());
+
+    auto x = framework::EigenVector<T>::Flatten(*X);
+    auto y = framework::EigenVector<T>::Flatten(*Y);
+    auto dy = framework::EigenVector<T>::Flatten(*dY);
+    auto dx = framework::EigenVector<T>::Flatten(*dX);
+    auto place = context.GetEigenDevice<Place>();
+    dx.device(place) =
+        dy * (x > static_cast<T>(0)).template cast<T>() +
+        dy * (y + alpha) * (x < static_cast<T>(0)).template cast<T>();
+  }
+};
+
 template <typename Place, typename T, typename AttrType = T>
 class PowKernel : public framework::OpKernel<T> {
  public:
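
As a sanity check on the ELU kernels above, here is a small NumPy sketch (an illustration under the same formulas, not the operator's API). It exercises the identity the optimized backward pass relies on: for x < 0, y = alpha * (exp(x) - 1), hence dy/dx = alpha * exp(x) = y + alpha, so the gradient can be computed from the saved forward output without re-evaluating exp(x):

    import numpy as np

    def elu(x, alpha=1.0):
        # f(x) = max(0, x) + min(0, alpha * (exp(x) - 1))
        return np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x) - 1))

    def elu_grad(x, y, dy, alpha=1.0):
        # dx = dy * 1[x > 0] + dy * (y + alpha) * 1[x < 0]
        return dy * (x > 0) + dy * (y + alpha) * (x < 0)

    x = np.linspace(-3.0, 3.0, 12)  # avoids x == 0, where both indicators are 0
    y = elu(x)
    numeric = (elu(x + 1e-6) - elu(x - 1e-6)) / 2e-6
    analytic = elu_grad(x, y, np.ones_like(x))
    assert np.allclose(numeric, analytic, atol=1e-4)
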
diff --git a/python/paddle/v2/framework/tests/test_activation_op.py b/python/paddle/v2/framework/tests/test_activation_op.py
index c44eb84906..9ea01d43c5 100644
--- a/python/paddle/v2/framework/tests/test_activation_op.py
+++ b/python/paddle/v2/framework/tests/test_activation_op.py
@@ -144,6 +144,26 @@ class TestSoftRelu(OpTest):
         self.check_grad(['X'], 'Y', max_relative_error=0.02)
 
 
+class TestELU(OpTest):
+    def setUp(self):
+        self.op_type = "elu"
+        x = np.random.uniform(-3, 3, [4, 4]).astype("float32")
+        alpha = 1.
+        # Note: unlike other ReLU extensions, the standard ELU function (alpha = 1)
+        # is differentiable at 0, so we can skip modifications like x[np.abs(x) < 0.005] = 0.02 here.
+        self.inputs = {'X': x}
+        self.attrs = {'alpha': alpha}
+        self.outputs = {
+            'Y': np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x) - 1))
+        }
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Y', max_relative_error=0.02)
+
+
 class TestReciprocal(OpTest):
     def setUp(self):
         self.op_type = "reciprocal"

From 4436ba0c56d105b0c1305a739158fdc08258f7a9 Mon Sep 17 00:00:00 2001
From: zhouxiao-coder <zhouxiaocoder@gmail.com>
Date: Fri, 29 Sep 2017 17:52:18 +0800
Subject: [PATCH 04/82] elu: Optimize gradient calculation; add more comments

---
 paddle/operators/activation_op.cc | 20 +++++++++++---------
 1 file changed, 11 insertions(+), 9 deletions(-)

diff --git a/paddle/operators/activation_op.cc b/paddle/operators/activation_op.cc
index e83666c9f9..7d086ac5df 100644
--- a/paddle/operators/activation_op.cc
+++ b/paddle/operators/activation_op.cc
@@ -180,16 +180,18 @@ class ELUOpMaker : public framework::OpProtoAndCheckerMaker {
   ELUOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
     AddInput("X",
-             "Input of ELU operator, it shouldn't be empty. Input is flattened "
-             "and treated as a 1D array.");
-    AddOutput("Y", "Output of ELU operator, has same shape as the input.");
-    AddComment(
-        "ELU activation operator. It applies this element-wise computation on "
-        "the input: f(x) = max(0, x) + min(0, alpha * (exp(x) - 1))."
-        "Check .. _Link: https://arxiv.org/abs/1511.07289 for more details");
-    AddAttr<AttrType>("alpha",
-                      "alpha value in the elu formulation, default to 1.")
+             "(Tensor) The input of ELU operator, it shouldn't be empty. Input "
+             "is flattened and treated as a 1D array.");
+    AddOutput("Y",
+              "(Tensor) The output of ELU operator. It has the same shape as "
+              "the input.");
+    AddAttr<AttrType>(
+        "alpha", "(float, default 1.0) Alpha value in the elu formulation.")
         .SetDefault(static_cast<AttrType>(1.));
+    AddComment(R"DOC(
+        ELU activation operator. It applies this element-wise computation on
+        the input: f(x) = max(0, x) + min(0, alpha * (exp(x) - 1)).
+        See https://arxiv.org/abs/1511.07289 for more details.)DOC");
   }
 };
 

From 3c66b307f7b6173a69cd4ccc9cf9f7541de964d2 Mon Sep 17 00:00:00 2001
From: hedaoyuan <hedaoyuan@github.com>
Date: Fri, 29 Sep 2017 19:57:02 +0800
Subject: [PATCH 05/82] Remove the pserver, trainer, evaluators and some
 useless gradientmachines when compiling the mobile inference library.

---
 CMakeLists.txt                                |  8 +++
 cmake/util.cmake                              | 57 ++++++++++++-------
 paddle/CMakeLists.txt                         | 35 +++++++-----
 paddle/capi/CMakeLists.txt                    |  8 +--
 paddle/gserver/CMakeLists.txt                 | 22 +++++++
 .../gradientmachines/GradientMachine.cpp      | 13 ++++-
 .../gradientmachines/GradientMachine.h        |  7 ++-
 .../gradientmachines/NeuralNetwork.cpp        | 18 ++++--
 .../gserver/gradientmachines/NeuralNetwork.h  |  3 +
 paddle/gserver/layers/Layer.cpp               |  2 +
 10 files changed, 128 insertions(+), 45 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 4921226ec1..ec4e6e2e86 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -86,6 +86,14 @@ if(ANDROID OR IOS)
         "Disable MKLDNN when cross-compiling for Android and iOS" FORCE)
     set(WITH_MKLML OFF CACHE STRING
         "Disable MKLML package when cross-compiling for Android and iOS" FORCE)
+
+    if(WITH_C_API)
+      # Compile PaddlePaddle mobile inference library
+      set(MOBILE_INFERENCE ON)
+      add_definitions(-DPADDLE_MOBILE_INFERENCE)
+    endif()
+    set(WITH_TESTING OFF CACHE STRING
+        "Disable TESTING when cross-compiling for Android and iOS" FORCE)
 endif()
 
 set(THIRD_PARTY_PATH "${CMAKE_BINARY_DIR}/third_party" CACHE STRING
diff --git a/cmake/util.cmake b/cmake/util.cmake
index d1aee3e170..5ebfc0945f 100644
--- a/cmake/util.cmake
+++ b/cmake/util.cmake
@@ -73,25 +73,44 @@ function(link_paddle_exe TARGET_NAME)
         generate_rdma_links()
     endif()
 
-    target_circle_link_libraries(${TARGET_NAME}
-        ARCHIVE_START
-        paddle_gserver
-        paddle_function
-        ARCHIVE_END
-        paddle_pserver
-        paddle_trainer_lib
-        paddle_network
-        paddle_math
-        paddle_utils
-        paddle_parameter
-        paddle_proto
-        paddle_cuda
-        paddle_optimizer
-        ${EXTERNAL_LIBS}
-        ${CMAKE_THREAD_LIBS_INIT}
-        ${CMAKE_DL_LIBS}
-        ${RDMA_LD_FLAGS}
-        ${RDMA_LIBS})
+    if(MOBILE_INFERENCE)
+        target_circle_link_libraries(${TARGET_NAME}
+            ARCHIVE_START
+            paddle_gserver
+            paddle_function
+            ARCHIVE_END
+            paddle_math
+            paddle_utils
+            paddle_parameter
+            paddle_proto
+            paddle_cuda
+            paddle_optimizer
+            ${EXTERNAL_LIBS}
+            ${CMAKE_THREAD_LIBS_INIT}
+            ${CMAKE_DL_LIBS}
+            ${RDMA_LD_FLAGS}
+            ${RDMA_LIBS})
+    else()
+        target_circle_link_libraries(${TARGET_NAME}
+            ARCHIVE_START
+            paddle_gserver
+            paddle_function
+            ARCHIVE_END
+            paddle_pserver
+            paddle_trainer_lib
+            paddle_network
+            paddle_math
+            paddle_utils
+            paddle_parameter
+            paddle_proto
+            paddle_cuda
+            paddle_optimizer
+            ${EXTERNAL_LIBS}
+            ${CMAKE_THREAD_LIBS_INIT}
+            ${CMAKE_DL_LIBS}
+            ${RDMA_LD_FLAGS}
+            ${RDMA_LIBS})
+    endif()
 
     if(ANDROID)
         target_link_libraries(${TARGET_NAME} log)
diff --git a/paddle/CMakeLists.txt b/paddle/CMakeLists.txt
index b435de80a2..3eb494ae47 100644
--- a/paddle/CMakeLists.txt
+++ b/paddle/CMakeLists.txt
@@ -3,25 +3,30 @@ add_subdirectory(function)
 add_subdirectory(utils)
 add_subdirectory(testing)
 add_subdirectory(math)
-add_subdirectory(parameter)
 add_subdirectory(gserver)
-add_subdirectory(pserver)
-add_subdirectory(trainer)
 add_subdirectory(scripts)
 add_subdirectory(string)
+add_subdirectory(parameter)
 
-if(Boost_FOUND)
-  add_subdirectory(memory)
-  add_subdirectory(platform)
-  add_subdirectory(framework)
-  add_subdirectory(operators)
-  add_subdirectory(pybind)
-endif()
-
-if(WITH_C_API)
+if(MOBILE_INFERENCE)
   add_subdirectory(capi)
-endif()
+else()
+  add_subdirectory(pserver)
+  add_subdirectory(trainer)
+
+  if(WITH_C_API)
+    add_subdirectory(capi)
+  endif()
+
+  if(Boost_FOUND)
+    add_subdirectory(memory)
+    add_subdirectory(platform)
+    add_subdirectory(framework)
+    add_subdirectory(operators)
+    add_subdirectory(pybind)
+  endif()
 
-if(WITH_SWIG_PY)
-  add_subdirectory(api)
+  if(WITH_SWIG_PY)
+    add_subdirectory(api)
+  endif()
 endif()
diff --git a/paddle/capi/CMakeLists.txt b/paddle/capi/CMakeLists.txt
index b9bbe58951..a19a19d719 100644
--- a/paddle/capi/CMakeLists.txt
+++ b/paddle/capi/CMakeLists.txt
@@ -37,9 +37,7 @@ set(PADDLE_CAPI_INFER_LIBS
     paddle_cuda
     paddle_function
     paddle_gserver
-    paddle_proto
-    paddle_pserver
-    paddle_network)
+    paddle_proto)
 
 cc_library(paddle_capi_whole DEPS paddle_capi ${PADDLE_CAPI_INFER_LIBS})
 
@@ -50,7 +48,9 @@ if(NOT IOS)
   add_library(paddle_capi_shared SHARED ${CAPI_SOURCES})
   set_target_properties(paddle_capi_shared	PROPERTIES LINK_FLAGS "${LINK_FLAGS}")
   target_include_directories(paddle_capi_shared PUBLIC ${CMAKE_CURRENT_BINARY_DIR})
-  link_paddle_exe(paddle_capi_shared)
+
+link_paddle_exe(paddle_capi_shared)
+
 endif()
 
 # install library & headers.
diff --git a/paddle/gserver/CMakeLists.txt b/paddle/gserver/CMakeLists.txt
index 62cff9361c..cd469875df 100644
--- a/paddle/gserver/CMakeLists.txt
+++ b/paddle/gserver/CMakeLists.txt
@@ -60,6 +60,28 @@ if(NOT WITH_PYTHON)
             dataproviders/PyDataProvider.h)
 endif()
 
+if(MOBILE_INFERENCE)
+    # Remove evaluators
+    list(REMOVE_ITEM GSERVER_SOURCES
+         layers/ValidationLayer.cpp
+         evaluators/Evaluator.cpp
+         evaluators/DetectionMAPEvaluator.cpp
+         evaluators/CTCErrorEvaluator.cpp
+         evaluators/ChunkEvaluator.cpp)
+
+    # Remove useless gradientmachines
+    list(REMOVE_ITEM GSERVER_SOURCES
+         gradientmachines/MultiNetwork.cpp
+         gradientmachines/RecurrentGradientMachine.cpp
+         gradientmachines/ParallelNeuralNetwork.cpp
+         gradientmachines/GradientMachineMode.cpp
+         gradientmachines/MultiGradientMachine.cpp)
+
+    # Remove useless layers
+    list(REMOVE_ITEM GSERVER_SOURCES
+         layers/RecurrentLayerGroup.cpp)
+endif()
+
 if(WITH_GPU)
     cuda_add_library(paddle_gserver ${GSERVER_SOURCES})
 else()
diff --git a/paddle/gserver/gradientmachines/GradientMachine.cpp b/paddle/gserver/gradientmachines/GradientMachine.cpp
index b44e4dc202..de5faf5e1e 100644
--- a/paddle/gserver/gradientmachines/GradientMachine.cpp
+++ b/paddle/gserver/gradientmachines/GradientMachine.cpp
@@ -17,12 +17,15 @@ limitations under the License. */
 #include <fstream>
 #include "paddle/utils/Logging.h"
 
+#include "NeuralNetwork.h"
+#include "hl_gpu.h"
+
+#ifndef PADDLE_MOBILE_INFERENCE
 #include "GradientMachineMode.h"
 #include "MultiGradientMachine.h"
 #include "MultiNetwork.h"
-#include "NeuralNetwork.h"
 #include "ParallelNeuralNetwork.h"
-#include "hl_gpu.h"
+#endif
 
 namespace paddle {
 
@@ -30,13 +33,16 @@ GradientMachine* GradientMachine::create(
     const ModelConfig& config,
     int mode,
     const std::vector<ParameterType>& parameterTypes) {
+#ifndef PADDLE_MOBILE_INFERENCE
   if (auto gm = IGradientMachineMode::tryCreateGradientMachine(mode, config)) {
     return gm;
   }
   if (FLAGS_trainer_count > 1) {
     return new MultiGradientMachine(config, FLAGS_use_gpu);
   }
+#endif
   if (FLAGS_trainer_count == 1) {  // single
+#ifndef PADDLE_MOBILE_INFERENCE
     NeuralNetwork* nn;
     if (config.type() == "multi_nn") {
       /* multi submodel calculate, thread(s) will be initialized inside */
@@ -48,6 +54,9 @@ GradientMachine* GradientMachine::create(
       /* single thread calculate */
       nn = NeuralNetwork::create(config);
     }
+#else
+    NeuralNetwork* nn = NeuralNetwork::create(config);
+#endif
     ParamInitCallback testParamInitCb = [](int paramId, Parameter* para) {
       para->enableType(PARAMETER_VALUE);
     };
diff --git a/paddle/gserver/gradientmachines/GradientMachine.h b/paddle/gserver/gradientmachines/GradientMachine.h
index f9c82a2bef..ebfe0573cf 100644
--- a/paddle/gserver/gradientmachines/GradientMachine.h
+++ b/paddle/gserver/gradientmachines/GradientMachine.h
@@ -20,13 +20,16 @@ limitations under the License. */
 #include "ModelConfig.pb.h"
 #include "TrainerConfig.pb.h"
 #include "paddle/gserver/dataproviders/DataProvider.h"
-#include "paddle/gserver/evaluators/Evaluator.h"
 #include "paddle/gserver/layers/Layer.h"
 #include "paddle/math/Matrix.h"
 #include "paddle/parameter/Parameter.h"
 #include "paddle/parameter/ParameterUpdaterBase.h"
 #include "paddle/utils/Thread.h"
 
+#ifndef PADDLE_MOBILE_INFERENCE
+#include "paddle/gserver/evaluators/Evaluator.h"
+#endif
+
 namespace paddle {
 /**
  * @brief A gradient machine is capable of calculating some outputs given
@@ -147,6 +150,7 @@ public:
 
   virtual void onPassEnd() = 0;
 
+#ifndef PADDLE_MOBILE_INFERENCE
   /**
    * Create an evaluator which can be used for eval()
    */
@@ -156,6 +160,7 @@ public:
    * evaluate using the given evaluator
    */
   virtual void eval(Evaluator* evaluator) const = 0;
+#endif
 
   std::vector<ParameterPtr>& getParameters() { return parameters_; }
 
diff --git a/paddle/gserver/gradientmachines/NeuralNetwork.cpp b/paddle/gserver/gradientmachines/NeuralNetwork.cpp
index 26cff3e677..dcf0acb5a2 100644
--- a/paddle/gserver/gradientmachines/NeuralNetwork.cpp
+++ b/paddle/gserver/gradientmachines/NeuralNetwork.cpp
@@ -14,15 +14,17 @@ limitations under the License. */
 
 #include "paddle/utils/Util.h"
 
+#include "NeuralNetwork.h"
+#include "hl_gpu.h"
+#include "paddle/gserver/layers/AgentLayer.h"
 #include "paddle/utils/CustomStackTrace.h"
 #include "paddle/utils/Logging.h"
+#include "paddle/utils/Stat.h"
 
+#ifndef PADDLE_MOBILE_INFERENCE
 #include "MultiNetwork.h"
-#include "NeuralNetwork.h"
 #include "RecurrentGradientMachine.h"
-#include "hl_gpu.h"
-#include "paddle/gserver/layers/AgentLayer.h"
-#include "paddle/utils/Stat.h"
+#endif
 
 namespace paddle {
 void parameterInitNN(int paramId,
@@ -54,6 +56,7 @@ void parameterInitNN(int paramId,
 }
 
 NeuralNetwork* NeuralNetwork::create(const ModelConfig& config) {
+#ifndef PADDLE_MOBILE_INFERENCE
   if (config.type() == "recurrent_nn") {
     return newNeuralNetwork("root");
   } else if (config.type() == "multi_nn") {
@@ -61,6 +64,9 @@ NeuralNetwork* NeuralNetwork::create(const ModelConfig& config) {
   } else {
     return newNeuralNetwork();
   }
+#else
+  return new NeuralNetwork();
+#endif
 }
 
 std::map<std::string, bool> NeuralNetwork::dllInitMap;
@@ -304,6 +310,8 @@ void NeuralNetwork::onPassEnd() {
   }
 }
 
+#ifndef PADDLE_MOBILE_INFERENCE
+
 class CombinedEvaluator : public Evaluator {
 public:
   void addEvaluator(std::unique_ptr<Evaluator>&& evaluator) {
@@ -466,6 +474,8 @@ Evaluator* NeuralNetwork::makeEvaluator() const {
 
 void NeuralNetwork::eval(Evaluator* evaluator) const { evaluator->eval(*this); }
 
+#endif
+
 void NeuralNetwork::setOutputGrad(const std::vector<Argument>& args) {
   CHECK_GE(outputLayers_.size(), args.size());
   for (size_t i = 0; i < args.size(); ++i) {
diff --git a/paddle/gserver/gradientmachines/NeuralNetwork.h b/paddle/gserver/gradientmachines/NeuralNetwork.h
index 12810f6425..56a1ec7846 100644
--- a/paddle/gserver/gradientmachines/NeuralNetwork.h
+++ b/paddle/gserver/gradientmachines/NeuralNetwork.h
@@ -97,9 +97,12 @@ public:
 
   virtual void onPassEnd();
 
+#ifndef PADDLE_MOBILE_INFERENCE
   virtual Evaluator* makeEvaluator() const;
 
   virtual void eval(Evaluator* evaluator) const;
+#endif
+
   virtual void resetState();
   virtual void setOutputGrad(const std::vector<Argument>& args);
 
diff --git a/paddle/gserver/layers/Layer.cpp b/paddle/gserver/layers/Layer.cpp
index e95f42c863..075e8166ef 100644
--- a/paddle/gserver/layers/Layer.cpp
+++ b/paddle/gserver/layers/Layer.cpp
@@ -103,10 +103,12 @@ LayerPtr Layer::create(const LayerConfig& config) {
     return LayerPtr(new MultiClassCrossEntropy(config));
   else if (type == "rank-cost")
     return LayerPtr(new RankingCost(config));
+#ifndef PADDLE_MOBILE_INFERENCE
   else if (type == "auc-validation")
     return LayerPtr(new AucValidation(config));
   else if (type == "pnpair-validation")
     return LayerPtr(new PnpairValidation(config));
+#endif
 
   return LayerPtr(registrar_.createByType(config.type(), config));
 }

From bb07120b64528ba37de75c01ec2d1d71a2e9cb03 Mon Sep 17 00:00:00 2001
From: hedaoyuan <hedaoyuan@github.com>
Date: Fri, 29 Sep 2017 20:16:04 +0800
Subject: [PATCH 06/82] Remove dataproviders.

---
 paddle/gserver/CMakeLists.txt | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/paddle/gserver/CMakeLists.txt b/paddle/gserver/CMakeLists.txt
index cd469875df..5f39167afc 100644
--- a/paddle/gserver/CMakeLists.txt
+++ b/paddle/gserver/CMakeLists.txt
@@ -69,6 +69,14 @@ if(MOBILE_INFERENCE)
          evaluators/CTCErrorEvaluator.cpp
          evaluators/ChunkEvaluator.cpp)
 
+    # Remove dataproviders
+    list(REMOVE_ITEM GSERVER_SOURCES
+         dataproviders/DataProvider.cpp
+         dataproviders/MultiDataProvider.cpp
+         dataproviders/ProtoDataProvider.cpp
+         dataproviders/PyDataProvider2.cpp
+         dataproviders/PyDataProvider.cpp)
+
     # Remove useless gradientmachines
     list(REMOVE_ITEM GSERVER_SOURCES
          gradientmachines/MultiNetwork.cpp

From 33299ef972302c310cc2b117f4cb58377daa6bd1 Mon Sep 17 00:00:00 2001
From: hedaoyuan <hedaoyuan@github.com>
Date: Fri, 29 Sep 2017 20:39:36 +0800
Subject: [PATCH 07/82] Remove cuda.

---
 cmake/util.cmake | 1 -
 1 file changed, 1 deletion(-)

diff --git a/cmake/util.cmake b/cmake/util.cmake
index 5ebfc0945f..45a8d66120 100644
--- a/cmake/util.cmake
+++ b/cmake/util.cmake
@@ -83,7 +83,6 @@ function(link_paddle_exe TARGET_NAME)
             paddle_utils
             paddle_parameter
             paddle_proto
-            paddle_cuda
             paddle_optimizer
             ${EXTERNAL_LIBS}
             ${CMAKE_THREAD_LIBS_INIT}

From ea4672bea0bdef1e73f18da8802cd8a467739299 Mon Sep 17 00:00:00 2001
From: hedaoyuan <hedaoyuan@github.com>
Date: Fri, 29 Sep 2017 20:47:51 +0800
Subject: [PATCH 08/82] Remove optimizer.

---
 CMakeLists.txt   | 8 +++++---
 cmake/util.cmake | 1 -
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index ec4e6e2e86..70b35154aa 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -168,9 +168,11 @@ endif(USE_NNPACK)
 
 add_subdirectory(proto)
 
-# "add_subdirectory(go)" should be placed after the following loine,
-# because it depends on paddle/optimizer.
-add_subdirectory(paddle/optimizer)
+if(NOT MOBILE_INFERENCE)
+    # "add_subdirectory(go)" should be placed after the following loine,
+    # because it depends on paddle/optimizer.
+    add_subdirectory(paddle/optimizer)
+endif()
 
 # "add_subdirectory(paddle)" and "add_subdirectory(python)" should be
 # placed after this block, because they depends on it.
diff --git a/cmake/util.cmake b/cmake/util.cmake
index 45a8d66120..2ab1e8c8e4 100644
--- a/cmake/util.cmake
+++ b/cmake/util.cmake
@@ -83,7 +83,6 @@ function(link_paddle_exe TARGET_NAME)
             paddle_utils
             paddle_parameter
             paddle_proto
-            paddle_optimizer
             ${EXTERNAL_LIBS}
             ${CMAKE_THREAD_LIBS_INIT}
             ${CMAKE_DL_LIBS}

From d94dd75e3b932fb7e792d6d1b56701ac2d76bd16 Mon Sep 17 00:00:00 2001
From: hedaoyuan <hedaoyuan@github.com>
Date: Fri, 29 Sep 2017 21:19:26 +0800
Subject: [PATCH 09/82] Remove string, scripts and retain cuda.

---
 cmake/util.cmake      | 1 +
 paddle/CMakeLists.txt | 6 +++---
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/cmake/util.cmake b/cmake/util.cmake
index 2ab1e8c8e4..117ab7f49c 100644
--- a/cmake/util.cmake
+++ b/cmake/util.cmake
@@ -83,6 +83,7 @@ function(link_paddle_exe TARGET_NAME)
             paddle_utils
             paddle_parameter
             paddle_proto
+            paddle_cuda
             ${EXTERNAL_LIBS}
             ${CMAKE_THREAD_LIBS_INIT}
             ${CMAKE_DL_LIBS}
diff --git a/paddle/CMakeLists.txt b/paddle/CMakeLists.txt
index 3eb494ae47..7d2becbdd7 100644
--- a/paddle/CMakeLists.txt
+++ b/paddle/CMakeLists.txt
@@ -1,18 +1,18 @@
 add_subdirectory(cuda)
 add_subdirectory(function)
 add_subdirectory(utils)
-add_subdirectory(testing)
 add_subdirectory(math)
 add_subdirectory(gserver)
-add_subdirectory(scripts)
-add_subdirectory(string)
 add_subdirectory(parameter)
+add_subdirectory(testing)
 
 if(MOBILE_INFERENCE)
   add_subdirectory(capi)
 else()
   add_subdirectory(pserver)
   add_subdirectory(trainer)
+  add_subdirectory(string)
+  add_subdirectory(scripts)
 
   if(WITH_C_API)
     add_subdirectory(capi)

From bee95fc8917e09f61ba46586a94d2b9003cddf13 Mon Sep 17 00:00:00 2001
From: chengduoZH <zhaochengduo@163.com>
Date: Fri, 29 Sep 2017 11:45:04 +0800
Subject: [PATCH 10/82] fix code format and some bugs

---
 paddle/operators/math/pooling.cc              |  20 +--
 paddle/operators/math/pooling.cu              | 147 ++++++++++--------
 paddle/operators/math/pooling.h               |   1 -
 paddle/operators/pool_with_index_op.cc        |  71 +++++----
 paddle/operators/pool_with_index_op.h         |  10 +-
 .../v2/framework/tests/test_pool_max_op.py    |  52 +++++--
 6 files changed, 180 insertions(+), 121 deletions(-)

diff --git a/paddle/operators/math/pooling.cc b/paddle/operators/math/pooling.cc
index 0e4d9007a6..da0e8ff3d2 100644
--- a/paddle/operators/math/pooling.cc
+++ b/paddle/operators/math/pooling.cc
@@ -26,7 +26,6 @@ class MaxPool2dWithIndexFunctor<platform::CPUPlace, T> {
                   framework::Tensor& mask, std::vector<int>& ksize,
                   std::vector<int>& strides, std::vector<int>& paddings) {
     const int batch_size = input.dims()[0];
-
     const int input_height = input.dims()[2];
     const int input_width = input.dims()[3];
     const int output_channels = output.dims()[1];
@@ -112,11 +111,11 @@ class MaxPool2dWithIndexGradFunctor<platform::CPUPlace, T> {
             input_grad_data[input_idx] += output_grad_data[output_idx];
           }
         }
+        // offset
+        input_grad_data += input_stride;
+        output_grad_data += output_stride;
+        mask_data += output_stride;
       }
-      // offset
-      input_grad_data += input_stride;
-      output_grad_data += output_stride;
-      mask_data += output_stride;
     }
   }
 };
@@ -152,6 +151,7 @@ class MaxPool3dWithIndexFunctor<platform::CPUPlace, T> {
     const int padding_width = paddings[2];
     const int input_stride = input_depth * input_height * input_width;
     const int output_stride = output_depth * output_height * output_width;
+
     const T* input_data = input.data<T>();
     T* output_data = output.mutable_data<T>(context.GetPlace());
     T* mask_data = mask.mutable_data<T>(context.GetPlace());
@@ -170,17 +170,17 @@ class MaxPool3dWithIndexFunctor<platform::CPUPlace, T> {
               int wstart = pw * stride_width - padding_width;
               int wend = std::min(wstart + ksize_width, input_width);
               wstart = std::max(wstart, 0);
+
               int output_idx = (pd * output_height + ph) * output_width + pw;
               T ele = static_cast<T>(-FLT_MAX);
               int index = -1;
               for (int d = dstart; d < dend; ++d) {
                 for (int h = hstart; h < hend; ++h) {
                   for (int w = wstart; w < wend; ++w) {
-                    if (ele <
-                        input_data[(d * input_height + h) * input_width + w]) {
-                      index = (d * input_height + h) * input_width + w;
-                      ele =
-                          input_data[(d * input_height + h) * input_width + w];
+                    int input_idx = (d * input_height + h) * input_width + w;
+                    if (ele < input_data[input_idx]) {
+                      index = input_idx;
+                      ele = input_data[input_idx];
                     }
                   }
                 }
diff --git a/paddle/operators/math/pooling.cu b/paddle/operators/math/pooling.cu
index f32e6a26d0..5321ed2163 100644
--- a/paddle/operators/math/pooling.cu
+++ b/paddle/operators/math/pooling.cu
@@ -20,14 +20,14 @@ namespace operators {
 namespace math {
 
 template <typename T>
-__global__ void KernelMaxPool2dWithIdxForward(
+__global__ void KernelMaxPool2dWithIdx(
     const int nthreads, const T* input_data, T* output_data, T* mask_data,
     const int channels, const int input_height, const int input_width,
     const int output_height, const int output_width, const int ksize_height,
     const int ksize_width, const int stride_height, const int stride_width,
     const int padding_height, const int padding_width) {
-  int index = blockIdx.x * blockDim.x + threadIdx.x;
-  if (index < nthreads) {
+  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (nthreads);
+       index += blockDim.x * gridDim.x) {
     int pw = index % output_width;
     int ph = (index / output_width) % output_height;
     int c = (index / output_width / output_height) % channels;
@@ -43,51 +43,58 @@ __global__ void KernelMaxPool2dWithIdxForward(
 
     input_data += (batch_idx * channels + c) * input_height * input_width;
     T ele = -FLT_MAX;
-    int index = -1;
+    int max_index = -1;
     for (int h = hstart; h < hend; ++h) {
       for (int w = wstart; w < wend; ++w) {
-        if (ele < input_data[h * input_width + w]) {
-          index = h * input_width + w;
-          ele = input_data[h * input_width + w];
+        int input_index = h * input_width + w;
+        if (ele < input_data[input_index]) {
+          max_index = input_index;
+          ele = input_data[input_index];
         }
       }
     }
     output_data[index] = ele;
-    mask_data[index] = index;
+    mask_data[index] = max_index;
   }
 }
 
 template <typename T>
-__global__ void KernelMaxPool2DWithIdxBackward(
+__global__ void KernelMaxPool2DWithIdxGrad(
     const int nthreads, T* input_grad, const T* output_grad, const T* mask_data,
     const int channels, const int input_height, const int input_width,
     const int output_height, const int output_width, const int ksize_height,
     const int ksize_width, const int stride_height, const int stride_width,
     const int padding_height, const int padding_width) {
-  int index = blockIdx.x * blockDim.x + threadIdx.x;
-  if (index < nthreads) {
-    int offsetW = index % input_width + padding_width;
-    int offsetH = (index / input_width) % input_height + padding_height;
-    int offsetC = (index / input_width / input_height) % channels;
+  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (nthreads);
+       index += blockDim.x * gridDim.x) {
+    int w_offset = index % input_width;
+    int h_offset = (index / input_width) % input_height;
+    int c_offset = (index / input_width / input_height) % channels;
     int batch_idx = index / input_width / input_height / channels;
 
-    int phstart = (offsetH < ksize_height)
-                      ? 0
-                      : (offsetH - ksize_height) / stride_height + 1;
-    int pwstart = (offsetW < ksize_width)
-                      ? 0
-                      : (offsetW - ksize_width) / stride_width + 1;
-    int phend = min(offsetH / stride_height + 1, output_height);
-    int pwend = min(offsetW / stride_width + 1, output_width);
+    int ph_start =
+        (h_offset + padding_height < ksize_height)
+            ? 0
+            : (h_offset + padding_height - ksize_height) / stride_height + 1;
+    int pw_start =
+        (w_offset + padding_width < ksize_width)
+            ? 0
+            : (w_offset + padding_width - ksize_width) / stride_width + 1;
+    int ph_end =
+        min((h_offset + padding_height) / stride_height + 1, output_height);
+    int pw_end =
+        min((w_offset + padding_width) / stride_width + 1, output_width);
+
     T gradient = 0;
+    int input_current_featuremap_idx = h_offset * input_width + w_offset;
     int output_idx =
-        (batch_idx * channels + offsetC) * output_height * output_width;
+        (batch_idx * channels + c_offset) * output_height * output_width;
+
     mask_data += output_idx;
     output_grad += output_idx;
-    for (int ph = phstart; ph < phend; ++ph) {
-      for (int pw = pwstart; pw < pwend; ++pw) {
-        if ((offsetH * input_width + offsetW) ==
-            mask_data[ph * output_width + pw])
+    for (int ph = ph_start; ph < ph_end; ++ph) {
+      for (int pw = pw_start; pw < pw_end; ++pw) {
+        if (mask_data[ph * output_width + pw] == input_current_featuremap_idx)
           gradient += output_grad[ph * output_width + pw];
       }
     }
@@ -125,7 +132,7 @@ class MaxPool2dWithIndexFunctor<platform::GPUPlace, T> {
     dim3 threads(1024, 1);
     dim3 grid(blocks, 1);
 
-    KernelMaxPool2dWithIdxForward<
+    KernelMaxPool2dWithIdx<
         T><<<grid, threads, 0,
              reinterpret_cast<const platform::CUDADeviceContext&>(context)
                  .stream()>>>(nthreads, input_data, output_data, mask_data,
@@ -167,7 +174,7 @@ class MaxPool2dWithIndexGradFunctor<platform::GPUPlace, T> {
     dim3 threads(1024, 1);
     dim3 grid(blocks, 1);
 
-    KernelMaxPool2DWithIdxBackward<
+    KernelMaxPool2DWithIdxGrad<
         T><<<grid, threads, 0,
              reinterpret_cast<const platform::CUDADeviceContext&>(context)
                  .stream()>>>(nthreads, input_grad_data, output_grad_data,
@@ -184,7 +191,7 @@ template class MaxPool2dWithIndexFunctor<platform::GPUPlace, double>;
 template class MaxPool2dWithIndexGradFunctor<platform::GPUPlace, double>;
 
 template <typename T>
-__global__ void KernelMaxPool3DWithIdxForward(
+__global__ void KernelMaxPool3DWithIdx(
     const int nthreads, const T* input_data, T* output_data, T* mask_data,
     const int channels, const int input_depth, const int input_height,
     const int input_width, const int output_depth, const int output_height,
@@ -200,6 +207,7 @@ __global__ void KernelMaxPool3DWithIdxForward(
     int c = (index / output_width / output_height / output_depth) % channels;
     int batch_idx =
         index / output_width / output_height / output_depth / channels;
+
     int dstart = pd * stride_depth - padding_depth;
     int hstart = ph * stride_height - padding_height;
     int wstart = pw * stride_width - padding_width;
@@ -209,8 +217,9 @@ __global__ void KernelMaxPool3DWithIdxForward(
     dstart = max(dstart, 0);
     hstart = max(hstart, 0);
     wstart = max(wstart, 0);
+
     T ele = -FLT_MAX;
-    int index = -1;
+    int max_index = -1;
     input_data +=
         (batch_idx * channels + c) * input_depth * input_height * input_width;
 
@@ -218,19 +227,19 @@ __global__ void KernelMaxPool3DWithIdxForward(
       for (int h = hstart; h < hend; ++h) {
         for (int w = wstart; w < wend; ++w) {
           if (ele < input_data[(d * input_height + h) * input_width + w]) {
-            index = (d * input_height + h) * input_width + w;
-            ele = input_data[(d * input_height + h) * input_width + w];
+            max_index = (d * input_height + h) * input_width + w;
+            ele = input_data[max_index];
           }
         }
       }
     }
     output_data[index] = ele;
-    mask_data[index] = index;
+    mask_data[index] = max_index;
   }
 }
 
 template <typename T>
-__global__ void KernelMaxPool3DWithIdxBackward(
+__global__ void KernelMaxPool3DWithIdxGrad(
     const int nthreads, T* input_grad, const T* output_grad, const T* mask,
     const int channels, const int input_depth, const int input_height,
     const int input_width, const int output_depth, const int output_height,
@@ -240,37 +249,45 @@ __global__ void KernelMaxPool3DWithIdxBackward(
     const int padding_width) {
   for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (nthreads);
        index += blockDim.x * gridDim.x) {
-    int offsetW = index % input_width + padding_width;
-    int offsetH = (index / input_width) % input_height + padding_height;
-    int offsetD =
-        (index / input_width / input_height) % input_depth + padding_depth;
-    int offsetC = (index / input_width / input_height / input_depth) % channels;
+    int w_offset = index % input_width;
+    int h_offset = (index / input_width) % input_height;
+    int d_offset = (index / input_width / input_height) % input_depth;
+    int c_offset =
+        (index / input_width / input_height / input_depth) % channels;
     int batch_idx = index / input_width / input_height / input_depth / channels;
 
-    int pdstart = (offsetD < ksize_depth)
-                      ? 0
-                      : (offsetD - ksize_depth) / stride_depth + 1;
-    int phstart = (offsetH < ksize_height)
-                      ? 0
-                      : (offsetH - ksize_height) / stride_height + 1;
-    int pwstart = (offsetW < ksize_width)
-                      ? 0
-                      : (offsetW - ksize_width) / stride_width + 1;
-    int pdend = min((offsetD) / stride_depth + 1, output_depth);
-    int phend = min((offsetH) / stride_height + 1, output_height);
-    int pwend = min((offsetW) / stride_width + 1, output_width);
+    int pd_start =
+        (d_offset + padding_depth < ksize_depth)
+            ? 0
+            : (d_offset + padding_depth - ksize_depth) / stride_depth + 1;
+    int ph_start =
+        (h_offset + padding_height < ksize_height)
+            ? 0
+            : (h_offset + padding_height - ksize_height) / stride_height + 1;
+    int pw_start =
+        (w_offset + padding_width < ksize_width)
+            ? 0
+            : (w_offset + padding_width - ksize_width) / stride_width + 1;
+    int pd_end =
+        min((d_offset + padding_depth) / stride_depth + 1, output_depth);
+    int ph_end =
+        min((h_offset + padding_height) / stride_height + 1, output_height);
+    int pw_end =
+        min((w_offset + padding_width) / stride_width + 1, output_width);
 
     T gradient = 0;
-    int output_idx = (batch_idx * channels + offsetC) * output_depth *
+    int input_current_feature_map_idx =
+        (d_offset * input_height + h_offset) * input_width + w_offset;
+    int output_idx = (batch_idx * channels + c_offset) * output_depth *
                      output_height * output_width;
     mask += output_idx;
     output_grad += output_idx;
 
-    for (int pd = pdstart; pd < pdend; ++pd) {
-      for (int ph = phstart; ph < phend; ++ph) {
-        for (int pw = pwstart; pw < pwend; ++pw) {
-          if (((offsetD * input_height + offsetH) * input_width + offsetW) ==
-              mask[(pd * output_height + ph) * output_width + pw])
+    for (int pd = pd_start; pd < pd_end; ++pd) {
+      for (int ph = ph_start; ph < ph_end; ++ph) {
+        for (int pw = pw_start; pw < pw_end; ++pw) {
+          if (mask[(pd * output_height + ph) * output_width + pw] ==
+              input_current_feature_map_idx)
             gradient +=
                 output_grad[(pd * output_height + ph) * output_width + pw];
         }
@@ -308,7 +325,7 @@ class MaxPool3dWithIndexFunctor<platform::GPUPlace, T> {
 
     const T* input_data = input.data<T>();
     T* output_data = output.mutable_data<T>(context.GetPlace());
-    T* mask_data = output.mutable_data<T>(context.GetPlace());
+    T* mask_data = mask.mutable_data<T>(context.GetPlace());
 
     int nthreads = batch_size * output_channels * output_depth * output_height *
                    output_width;
@@ -316,7 +333,7 @@ class MaxPool3dWithIndexFunctor<platform::GPUPlace, T> {
     dim3 threads(1024, 1);
     dim3 grid(blocks, 1);
 
-    KernelMaxPool3DWithIdxForward<
+    KernelMaxPool3DWithIdx<
         T><<<grid, threads, 0,
              reinterpret_cast<const platform::CUDADeviceContext&>(context)
                  .stream()>>>(
@@ -341,10 +358,10 @@ class MaxPool3dWithIndexGradFunctor<platform::GPUPlace, T> {
     const int input_depth = input_grad.dims()[2];
     const int input_height = input_grad.dims()[3];
     const int input_width = input_grad.dims()[4];
-    const int output_channels = input_grad.dims()[1];
-    const int output_depth = input_grad.dims()[2];
-    const int output_height = input_grad.dims()[3];
-    const int output_width = input_grad.dims()[4];
+    const int output_channels = output_grad.dims()[1];
+    const int output_depth = output_grad.dims()[2];
+    const int output_height = output_grad.dims()[3];
+    const int output_width = output_grad.dims()[4];
     const int ksize_depth = ksize[0];
     const int ksize_height = ksize[1];
     const int ksize_width = ksize[2];
@@ -365,7 +382,7 @@ class MaxPool3dWithIndexGradFunctor<platform::GPUPlace, T> {
     dim3 threads(1024, 1);
     dim3 grid(blocks, 1);
 
-    KernelMaxPool3DWithIdxBackward<
+    KernelMaxPool3DWithIdxGrad<
         T><<<grid, threads, 0,
              reinterpret_cast<const platform::CUDADeviceContext&>(context)
                  .stream()>>>(
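
Two things changed throughout this file: the kernels were renamed and now use grid-stride loops (index += blockDim.x * gridDim.x) so a fixed-size grid can cover any number of elements, and the backward kernels compute, in closed form, the range of output windows that may contain a given input position (cf. ph_start/ph_end above). The bounds can be checked against a brute-force scan with a short Python sketch (hypothetical helper names, illustration only):

    def window_range(offset, ksize, stride, padding, out_size):
        # Closed-form bounds from the kernel: output position p covers input
        # `offset` iff p * stride - padding <= offset < p * stride - padding + ksize.
        if offset + padding < ksize:
            start = 0
        else:
            start = (offset + padding - ksize) // stride + 1
        end = min((offset + padding) // stride + 1, out_size)
        return start, end

    def brute_force(offset, ksize, stride, padding, out_size):
        return [p for p in range(out_size)
                if p * stride - padding <= offset < p * stride - padding + ksize]

    for ksize, stride, padding, out_size in [(3, 2, 1, 4), (3, 1, 0, 5), (2, 2, 0, 3)]:
        for offset in range(8):
            bounds = window_range(offset, ksize, stride, padding, out_size)
            assert list(range(*bounds)) == brute_force(offset, ksize, stride,
                                                       padding, out_size)
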
diff --git a/paddle/operators/math/pooling.h b/paddle/operators/math/pooling.h
index 3a05cd98fe..308a9341b6 100644
--- a/paddle/operators/math/pooling.h
+++ b/paddle/operators/math/pooling.h
@@ -23,7 +23,6 @@ namespace operators {
 namespace math {
 //////////////////////
 #define FLT_MAX __FLT_MAX__
-/////////////////////
 
 template <typename Place, typename T>
 class MaxPool2dWithIndexFunctor {
diff --git a/paddle/operators/pool_with_index_op.cc b/paddle/operators/pool_with_index_op.cc
index d7a07a403d..c51145b923 100644
--- a/paddle/operators/pool_with_index_op.cc
+++ b/paddle/operators/pool_with_index_op.cc
@@ -76,8 +76,8 @@ class MaxPoolWithIndexOpGrad : public framework::OperatorWithKernel {
 
  protected:
   void InferShape(framework::InferShapeContextBase *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("X")),
-                   "X(Input) of MaxPoolWithIndexOpGrad should not be null.");
+    PADDLE_ENFORCE(ctx->HasInput("X"),
+                   "X(Input) of Pooling should not be null.");
     PADDLE_ENFORCE(
         ctx->HasOutput(framework::GradVarName("X")),
         "X@GRAD(Input@GRAD) of MaxPoolWithIndexOpGrad should not be null.");
@@ -97,28 +97,37 @@ class MaxPool2dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker {
         "number of channels, H and W is the height and width of image.");
     AddOutput("Out",
               "The output tensor of pooling operator."
-              "The format of output tensor is also NCHW.");
+              "The format of output tensor is also NCHW."
+              "Where N is batch size, C is "
+              "the number of channels, H and W is the height and "
+              "width of image.");
     AddOutput("Mask",
               "The Mask tensor of pooling operator."
-              "The format of output tensor is also NCHW.");
+              "The format of output tensor is also NCHW."
+              "Where N is batch size, C is the number of channels, H and W "
+              "is the height and width of image."
+              "The value in it is the index in current feature map");
 
     AddAttr<std::vector<int>>(
-        "ksize", "pooling size(height, width) of pooling operator.");
+        "ksize",
+        "Pooling size(height, width) of pooling operator."
+        "If globalPooling = true, ksize is ignored and need not be "
+        "specified.");  // TODO(Add checker)
     AddAttr<bool>(
         "globalPooling",
-        "whether to use the globalPooling."
-        "int constant equal to false or true"
-        "default false"
+        "Whether to use the globalPooling."
+        "Bool constant equal to false or true."
+        "Default false."
         "If globalPooling = true, ksize is ignored and need not be specified.")
         .SetDefault(false);
     AddAttr<std::vector<int>>("strides",
-                              "strides(height, width) of pooling operator."
-                              "default {1,1}")
-        .SetDefault({1, 1});
+                              "Strides(height, width) of pooling operator."
+                              "Default {1,1}.")
+        .SetDefault({1, 1});  // TODO: add an attribute checker.
     AddAttr<std::vector<int>>("paddings",
-                              "paddings(height, width) of pooling operator."
-                              "default {0,0}")
-        .SetDefault({0, 0});
+                              "Paddings(height, width) of pooling operator."
+                              "Default {0,0}.")
+        .SetDefault({0, 0});  // TODO: add an attribute checker.
 
     AddComment(R"DOC(
 The maxPooling2d with index operation calculates the output and the mask based on
@@ -140,30 +149,40 @@ class MaxPool3dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker {
         "image.");
     AddOutput("Out",
               "The output tensor of pooling operator."
-              "The format of output tensor is also NCDHW.");
+              "The format of output tensor is also NCDHW."
+              "Where N is batch size, C is "
+              "the number of channels, D, H and W is the depth, height and "
+              "width of image.");
     AddOutput("Mask",
               "The Mask tensor of pooling operator."
-              "The format of output tensor is also NCDHW.");
+              "The format of output tensor is also NCDHW."
+              "Where N is batch size, C is the number of channels, D, H and W "
+              "is the depth, height and width of image."
+              "The value in it is the index in current feature map");
 
     AddAttr<std::vector<int>>(
-        "ksize", "pooling size(depth, height, width) of pooling operator.");
+        "ksize",
+        "Pooling size(depth, height, width) of pooling operator."
+        "If globalPooling = true, ksize is ignored and need not be "
+        "specified.");  // TODO(Add checker)
     AddAttr<bool>(
         "globalPooling",
-        "whether to use the globalPooling."
-        "int constant equal to false or true"
-        "default false"
+        "Whether to use the globalPooling."
+        "Bool constant equal to false or true."
+        "Default false."
         "If globalPooling = true, ksize is ignored and need not be specified.")
         .SetDefault(false);
     AddAttr<std::vector<int>>(
         "strides",
-        "strides(depth, height, width) of pooling operator."
-        "default {1,1,1}")
-        .SetDefault({1, 1, 1});
+        "Strides(depth, height, width) of pooling operator."
+        "Default {1,1,1}.")
+        .SetDefault({1, 1, 1});  // TODO: add an attribute checker.
     AddAttr<std::vector<int>>(
         "paddings",
-        "paddings(depth, height, width) of pooling operator."
-        "default {0,0,0}")
-        .SetDefault({0, 0, 0});
+        "Paddings(depth, height, width) of pooling operator."
+        "Default {0,0,0}.")
+        .SetDefault({0, 0, 0});  // TODO: add an attribute checker.
+
     AddComment(R"DOC(
 The maxpooling3d with index operation calculates the output and the mask based on
 the input and ksize, strides, paddings parameters.
diff --git a/paddle/operators/pool_with_index_op.h b/paddle/operators/pool_with_index_op.h
index 91abeed016..5fe2f5df93 100644
--- a/paddle/operators/pool_with_index_op.h
+++ b/paddle/operators/pool_with_index_op.h
@@ -32,11 +32,10 @@ class MaxPoolWithIndexKernel : public framework::OpKernel {
     Tensor* out = context.Output<Tensor>("Out");
     Tensor* mask = context.Output<Tensor>("Mask");
 
-    bool global_pooling = context.Attr<bool>("globalPooling");
     std::vector<int> ksize = context.Attr<std::vector<int>>("ksize");
     std::vector<int> strides = context.Attr<std::vector<int>>("strides");
     std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");
-    if (global_pooling) {
+    if (context.Attr<bool>("globalPooling")) {
       for (size_t i = 0; i < ksize.size(); ++i) {
         ksize[i] = static_cast<int>(in_x->dims()[i + 2]);
       }
@@ -63,7 +62,7 @@ template <typename Place, typename T>
 class MaxPoolWithIndexGradKernel : public framework::OpKernel {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    const Tensor* mask = context.Input<Tensor>("Maks");
+    const Tensor* mask = context.Input<Tensor>("Mask");
     const Tensor* out_grad =
         context.Input<Tensor>(framework::GradVarName("Out"));
     Tensor* in_x_grad = context.Output<Tensor>(framework::GradVarName("X"));
@@ -71,6 +70,11 @@ class MaxPoolWithIndexGradKernel : public framework::OpKernel {
     std::vector<int> ksize = context.Attr<std::vector<int>>("ksize");
     std::vector<int> strides = context.Attr<std::vector<int>>("strides");
     std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");
+    if (context.Attr<bool>("globalPooling")) {
+      for (size_t i = 0; i < ksize.size(); ++i) {
+        ksize[i] = static_cast<int>(in_x_grad->dims()[i + 2]);
+      }
+    }
 
     if (in_x_grad) {
       in_x_grad->mutable_data<T>(context.GetPlace());
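
The new branch brings the gradient kernel in line with the forward kernel: under globalPooling, ksize is overwritten with the input's spatial dimensions, so a single window spans each feature map. In Python pseudocode (illustrative, not the C++ API):

    # dims = [N, C, D, H, W] for 3-D pooling (or [N, C, H, W] for 2-D).
    dims = [2, 3, 5, 5, 5]
    ksize = [3, 3, 3]

    # Mirrors: ksize[i] = static_cast<int>(in_x_grad->dims()[i + 2])
    ksize = [dims[i + 2] for i in range(len(ksize))]
    assert ksize == [5, 5, 5]
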
diff --git a/python/paddle/v2/framework/tests/test_pool_max_op.py b/python/paddle/v2/framework/tests/test_pool_max_op.py
index 2945c8b7a4..ffc345198d 100644
--- a/python/paddle/v2/framework/tests/test_pool_max_op.py
+++ b/python/paddle/v2/framework/tests/test_pool_max_op.py
@@ -3,7 +3,11 @@ import numpy as np
 from op_test import OpTest
 
 
-def max_pool3D_forward_naive(x, ksize, strides, paddings=[0, 0], global_pool=0):
+def max_pool3D_forward_naive(x,
+                             ksize,
+                             strides,
+                             paddings=[0, 0, 0],
+                             global_pool=0):
 
     N, C, D, H, W = x.shape
     if global_pool == 1:
@@ -25,8 +29,19 @@ def max_pool3D_forward_naive(x, ksize, strides, paddings=[0, 0], global_pool=0):
                 x_masked = x[:, :, d_start:d_end, h_start:h_end, w_start:w_end]
 
                 out[:, :, k, i, j] = np.max(x_masked, axis=(2, 3, 4))
-                # mask[:,:, k, i, j] = np.argmax(x_masked, axis=(2, 3, 4))
-    return out
+
+                for n in xrange(N):
+                    for c in xrange(C):
+                        arr = x_masked[n, c, :, :, :]
+                        index = np.where(arr == np.max(arr))
+                        sub_deep = index[0][0]
+                        sub_row = index[1][0]
+                        sub_col = index[2][0]
+                        index = ((d_start + sub_deep) * H +
+                                 (h_start + sub_row)) * W + w_start + sub_col
+                        mask[n, c, k, i, j] = index
+
+    return out, mask
 
 
 def max_pool2D_forward_naive(x, ksize, strides, paddings=[0, 0], global_pool=0):
@@ -47,19 +62,25 @@ def max_pool2D_forward_naive(x, ksize, strides, paddings=[0, 0], global_pool=0):
             x_masked = x[:, :, r_start:r_end, c_start:c_end]
 
             out[:, :, i, j] = np.max(x_masked, axis=(2, 3))
-            # mask[:,:, i, j] = np.argmax(x_masked, axis=(2, 3))
 
-    return out
+            for n in xrange(N):
+                for c in xrange(C):
+                    arr = x_masked[n, c, :, :]
+                    index = np.where(arr == np.max(arr))
+                    sub_row = index[0][0]
+                    sub_col = index[1][0]
+                    index = (r_start + sub_row) * W + c_start + sub_col
+                    mask[n, c, i, j] = index
+
+    return out, mask
 
 
 class TestMaxPoolWithIndex_Op(OpTest):
     def setUp(self):
         self.initTestCase()
-        self.op_type = "maxPool3dWithIndex"
         input = np.random.random(self.shape).astype("float32")
-        output = self.pool_forward_naive(input, self.ksize, self.strides,
-                                         self.paddings, self.global_pool)
-        # mask = np.zeros(output.shape)
+        output, mask = self.pool_forward_naive(input, self.ksize, self.strides,
+                                               self.paddings, self.global_pool)
 
         self.attrs = {
             'strides': self.strides,
@@ -69,7 +90,7 @@ class TestMaxPoolWithIndex_Op(OpTest):
         }
 
         self.inputs = {'X': input}
-        self.outputs = {'Out': output}
+        self.outputs = {'Out': output, "Mask": mask}
 
     def test_check_output(self):
         self.check_output()
@@ -78,7 +99,8 @@ class TestMaxPoolWithIndex_Op(OpTest):
     #     self.check_grad(set(['X']), ['Out'], max_relative_error=0.07)
 
     def initTestCase(self):
-        self.global_pool = 0
+        self.global_pool = False
+        self.op_type = "maxPool3dWithIndex"
         self.pool_forward_naive = max_pool3D_forward_naive
         self.shape = [2, 3, 7, 7, 7]
         self.ksize = [3, 3, 3]
@@ -86,10 +108,9 @@ class TestMaxPoolWithIndex_Op(OpTest):
         self.paddings = [1, 1, 1]
 
 
-""""
 class TestCase1(TestMaxPoolWithIndex_Op):
     def initTestCase(self):
-        self.global_pool = 1
+        self.global_pool = True
         self.op_type = "maxPool3dWithIndex"
         self.pool_forward_naive = max_pool3D_forward_naive
         self.shape = [2, 3, 5, 5, 5]
@@ -100,7 +121,7 @@ class TestCase1(TestMaxPoolWithIndex_Op):
 
 class TestCase2(TestMaxPoolWithIndex_Op):
     def initTestCase(self):
-        self.global_pool = 0
+        self.global_pool = False
         self.op_type = "maxPool2dWithIndex"
         self.pool_forward_naive = max_pool2D_forward_naive
         self.shape = [2, 3, 7, 7]
@@ -111,7 +132,7 @@ class TestCase2(TestMaxPoolWithIndex_Op):
 
 class TestCase3(TestMaxPoolWithIndex_Op):
     def initTestCase(self):
-        self.global_pool = 1
+        self.global_pool = True
         self.op_type = "maxPool2dWithIndex"
         self.pool_forward_naive = max_pool2D_forward_naive
         self.shape = [2, 3, 5, 5]
@@ -122,4 +143,3 @@ class TestCase3(TestMaxPoolWithIndex_Op):
 
 if __name__ == '__main__':
     unittest.main()
-"""

From 6fc44800ed800e78822c6af5750e202c041d9173 Mon Sep 17 00:00:00 2001
From: chengduoZH <zhaochengduo@163.com>
Date: Sat, 30 Sep 2017 15:20:44 +0800
Subject: [PATCH 11/82] fix unit test

---
 .../v2/framework/tests/test_pool_max_op.py    | 72 ++++++++++++++++++-
 1 file changed, 69 insertions(+), 3 deletions(-)

diff --git a/python/paddle/v2/framework/tests/test_pool_max_op.py b/python/paddle/v2/framework/tests/test_pool_max_op.py
index ffc345198d..17028c3bf6 100644
--- a/python/paddle/v2/framework/tests/test_pool_max_op.py
+++ b/python/paddle/v2/framework/tests/test_pool_max_op.py
@@ -98,6 +98,28 @@ class TestMaxPoolWithIndex_Op(OpTest):
     # def test_check_grad(self):
     #     self.check_grad(set(['X']), ['Out'], max_relative_error=0.07)
 
+    def initTestCase(self):
+        self.global_pool = True
+        self.op_type = "maxPool3dWithIndex"
+        self.pool_forward_naive = max_pool3D_forward_naive
+        self.shape = [2, 3, 5, 5, 5]
+        self.ksize = [3, 3, 3]
+        self.strides = [1, 1, 1]
+        self.paddings = [1, 1, 1]
+
+
+class TestCase1(TestMaxPoolWithIndex_Op):
+    def initTestCase(self):
+        self.global_pool = True
+        self.op_type = "maxPool3dWithIndex"
+        self.pool_forward_naive = max_pool3D_forward_naive
+        self.shape = [2, 3, 5, 5, 5]
+        self.ksize = [3, 3, 3]
+        self.strides = [1, 1, 1]
+        self.paddings = [1, 1, 1]
+
+
+class TestCase2(TestMaxPoolWithIndex_Op):
     def initTestCase(self):
         self.global_pool = False
         self.op_type = "maxPool3dWithIndex"
@@ -108,7 +130,18 @@ class TestMaxPoolWithIndex_Op(OpTest):
         self.paddings = [1, 1, 1]
 
 
-class TestCase1(TestMaxPoolWithIndex_Op):
+class TestCase3(TestMaxPoolWithIndex_Op):
+    def initTestCase(self):
+        self.global_pool = False
+        self.op_type = "maxPool3dWithIndex"
+        self.pool_forward_naive = max_pool3D_forward_naive
+        self.shape = [2, 3, 7, 7, 7]
+        self.ksize = [3, 3, 3]
+        self.strides = [2, 2, 2]
+        self.paddings = [0, 0, 0]
+
+
+class TestCase4(TestMaxPoolWithIndex_Op):
     def initTestCase(self):
         self.global_pool = True
         self.op_type = "maxPool3dWithIndex"
@@ -116,10 +149,21 @@ class TestCase1(TestMaxPoolWithIndex_Op):
         self.shape = [2, 3, 5, 5, 5]
         self.ksize = [3, 3, 3]
         self.strides = [1, 1, 1]
+        self.paddings = [1, 1, 1]
+
+
+class TestCase5(TestMaxPoolWithIndex_Op):
+    def initTestCase(self):
+        self.global_pool = True
+        self.op_type = "maxPool3dWithIndex"
+        self.pool_forward_naive = max_pool3D_forward_naive
+        self.shape = [2, 3, 5, 5, 5]
+        self.ksize = [3, 3, 3]
+        self.strides = [2, 2, 2]
         self.paddings = [0, 0, 0]
 
 
-class TestCase2(TestMaxPoolWithIndex_Op):
+class TestCase6(TestMaxPoolWithIndex_Op):
     def initTestCase(self):
         self.global_pool = False
         self.op_type = "maxPool2dWithIndex"
@@ -130,7 +174,18 @@ class TestCase2(TestMaxPoolWithIndex_Op):
         self.paddings = [1, 1]
 
 
-class TestCase3(TestMaxPoolWithIndex_Op):
+class TestCase7(TestMaxPoolWithIndex_Op):
+    def initTestCase(self):
+        self.global_pool = False
+        self.op_type = "maxPool2dWithIndex"
+        self.pool_forward_naive = max_pool2D_forward_naive
+        self.shape = [2, 3, 7, 7]
+        self.ksize = [3, 3]
+        self.strides = [2, 2]
+        self.paddings = [0, 0]
+
+
+class TestCase8(TestMaxPoolWithIndex_Op):
     def initTestCase(self):
         self.global_pool = True
         self.op_type = "maxPool2dWithIndex"
@@ -138,6 +193,17 @@ class TestCase3(TestMaxPoolWithIndex_Op):
         self.shape = [2, 3, 5, 5]
         self.ksize = [3, 3]
         self.strides = [1, 1]
+        self.paddings = [1, 1]
+
+
+class TestCase9(TestMaxPoolWithIndex_Op):
+    def initTestCase(self):
+        self.global_pool = True
+        self.op_type = "maxPool2dWithIndex"
+        self.pool_forward_naive = max_pool2D_forward_naive
+        self.shape = [2, 3, 5, 5]
+        self.ksize = [3, 3]
+        self.strides = [2, 2]
         self.paddings = [0, 0]
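
One convention the global cases (e.g. TestCase4/TestCase5 above) appear to rely on — stated here as an assumption about the operator's global-pooling attribute: when `global_pool` is true, the kernel is stretched to the whole spatial extent and padding is ignored, so the listed `ksize`/`paddings` are effectively overridden. A one-function illustration:

```python
def effective_pool_args(input_spatial_shape, ksize, paddings, global_pool):
    # with global pooling, ksize covers the whole input and padding drops out
    if global_pool:
        return list(input_spatial_shape), [0] * len(paddings)
    return ksize, paddings

# e.g. TestCase4: shape [2, 3, 5, 5, 5] -> ksize [5, 5, 5], paddings [0, 0, 0]
print(effective_pool_args([5, 5, 5], [3, 3, 3], [1, 1, 1], True))
```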
 
 

From 6627801a586bf93f1d872c643c121e19d5c2f1bf Mon Sep 17 00:00:00 2001
From: hedaoyuan <hedaoyuan@github.com>
Date: Sat, 30 Sep 2017 15:32:29 +0800
Subject: [PATCH 12/82] Follow comments.

---
 CMakeLists.txt             | 12 ++++++++----
 paddle/capi/CMakeLists.txt |  4 +---
 2 files changed, 9 insertions(+), 7 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 70b35154aa..7d549b864b 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -87,11 +87,15 @@ if(ANDROID OR IOS)
     set(WITH_MKLML OFF CACHE STRING
         "Disable MKLML package when cross-compiling for Android and iOS" FORCE)
 
-    if(WITH_C_API)
-      # Compile PaddlePaddle mobile inference library
-      set(MOBILE_INFERENCE ON)
-      add_definitions(-DPADDLE_MOBILE_INFERENCE)
+    # Compile PaddlePaddle mobile inference library
+    if (NOT WITH_C_API)
+        set(WITH_C_API ON CACHE STRING
+            "Always compile the C_API when cross-compiling for Android and iOS" FORCE)
     endif()
+    set(MOBILE_INFERENCE ON)
+    add_definitions(-DPADDLE_MOBILE_INFERENCE)
+
+    # TODO: Re-enable WITH_TESTING.
     set(WITH_TESTING OFF CACHE STRING "Disable TESTING when cross-compiling
                     for Android and iOS" FORCE)
 endif()
diff --git a/paddle/capi/CMakeLists.txt b/paddle/capi/CMakeLists.txt
index a19a19d719..2c458a78c5 100644
--- a/paddle/capi/CMakeLists.txt
+++ b/paddle/capi/CMakeLists.txt
@@ -48,9 +48,7 @@ if(NOT IOS)
   add_library(paddle_capi_shared SHARED ${CAPI_SOURCES})
   set_target_properties(paddle_capi_shared	PROPERTIES LINK_FLAGS "${LINK_FLAGS}")
   target_include_directories(paddle_capi_shared PUBLIC ${CMAKE_CURRENT_BINARY_DIR})
-
-link_paddle_exe(paddle_capi_shared)
-
+  link_paddle_exe(paddle_capi_shared)
 endif()
 
 # install library & headers.

From bb33c2b3a5f2f2aa50e9ceaf1f2120edfe76455b Mon Sep 17 00:00:00 2001
From: chengduoZH <zhaochengduo@163.com>
Date: Sat, 30 Sep 2017 15:44:02 +0800
Subject: [PATCH 13/82] fix kernel func

---
 paddle/operators/math/pooling.cc      | 227 ++++++++++++++++++++++++++
 paddle/operators/math/pooling.cu      |  10 +-
 paddle/operators/math/pooling.h       |  37 +++++
 paddle/operators/pool_with_index_op.h |   4 +-
 4 files changed, 270 insertions(+), 8 deletions(-)

diff --git a/paddle/operators/math/pooling.cc b/paddle/operators/math/pooling.cc
index 3b706529d8..5accde8b07 100644
--- a/paddle/operators/math/pooling.cc
+++ b/paddle/operators/math/pooling.cc
@@ -458,6 +458,233 @@ template class Pool3dGradFunctor<
     platform::CPUPlace, paddle::operators::math::MaxPoolGrad<double>, double>;
 template class Pool3dGradFunctor<
     platform::CPUPlace, paddle::operators::math::AvgPoolGrad<double>, double>;
+
+template <typename T>
+class MaxPool2dWithIndexFunctor<platform::CPUPlace, T> {
+ public:
+  void operator()(const platform::DeviceContext& context,
+                  const framework::Tensor& input, framework::Tensor& output,
+                  framework::Tensor& mask, std::vector<int>& ksize,
+                  std::vector<int>& strides, std::vector<int>& paddings) {
+    const int batch_size = input.dims()[0];
+    const int input_height = input.dims()[2];
+    const int input_width = input.dims()[3];
+    const int output_channels = output.dims()[1];
+    const int output_height = output.dims()[2];
+    const int output_width = output.dims()[3];
+    const int ksize_height = ksize[0];
+    const int ksize_width = ksize[1];
+    const int stride_height = strides[0];
+    const int stride_width = strides[1];
+    const int padding_height = paddings[0];
+    const int padding_width = paddings[1];
+    const int input_stride = input_height * input_width;
+    const int output_stride = output_height * output_width;
+
+    const T* input_data = input.data<T>();
+    T* output_data = output.mutable_data<T>(context.GetPlace());
+    T* mask_data = mask.mutable_data<T>(context.GetPlace());
+
+    for (int i = 0; i < batch_size; i++) {
+      for (int c = 0; c < output_channels; ++c) {
+        for (int ph = 0; ph < output_height; ++ph) {
+          int hstart = ph * stride_height - padding_height;
+          int hend = std::min(hstart + ksize_height, input_height);
+          hstart = std::max(hstart, 0);
+          for (int pw = 0; pw < output_width; ++pw) {
+            int wstart = pw * stride_width - padding_width;
+            int wend = std::min(wstart + ksize_width, input_width);
+            wstart = std::max(wstart, 0);
+
+            T ele = static_cast<T>(-FLT_MAX);
+            int index = -1;
+            for (int h = hstart; h < hend; ++h) {
+              for (int w = wstart; w < wend; ++w) {
+                if (ele < input_data[h * input_width + w]) {
+                  ele = input_data[h * input_width + w];
+                  index = h * input_width + w;
+                }
+              }
+            }
+            output_data[ph * output_width + pw] = ele;
+            mask_data[ph * output_width + pw] = index;
+          }
+        }
+        // offset
+        input_data += input_stride;
+        output_data += output_stride;
+        mask_data += output_stride;
+      }
+    }
+  }
+};
+
+template <typename T>
+class MaxPool2dWithIndexGradFunctor<platform::CPUPlace, T> {
+ public:
+  void operator()(const platform::DeviceContext& context,
+                  framework::Tensor& input_grad,
+                  const framework::Tensor& output_grad,
+                  const framework::Tensor& mask, std::vector<int>& ksize,
+                  std::vector<int>& strides, std::vector<int>& paddings) {
+    const int batch_size = input_grad.dims()[0];
+    const int input_height = input_grad.dims()[2];
+    const int input_width = input_grad.dims()[3];
+    const int output_channels = output_grad.dims()[1];
+    const int output_height = output_grad.dims()[2];
+    const int output_width = output_grad.dims()[3];
+    const int input_stride = input_height * input_width;
+    const int output_stride = output_height * output_width;
+
+    const T* mask_data = mask.data<T>();
+    const T* output_grad_data = output_grad.data<T>();
+    T* input_grad_data = input_grad.mutable_data<T>(context.GetPlace());
+
+    for (int n = 0; n < batch_size; ++n) {
+      for (int c = 0; c < output_channels; ++c) {
+        for (int ph = 0; ph < output_height; ++ph) {
+          for (int pw = 0; pw < output_width; ++pw) {
+            const int output_idx = ph * output_width + pw;
+            const int input_idx = static_cast<int>(mask_data[output_idx]);
+            input_grad_data[input_idx] += output_grad_data[output_idx];
+          }
+        }
+        // offset
+        input_grad_data += input_stride;
+        output_grad_data += output_stride;
+        mask_data += output_stride;
+      }
+    }
+  }
+};
+
+template class MaxPool2dWithIndexFunctor<platform::CPUPlace, float>;
+template class MaxPool2dWithIndexGradFunctor<platform::CPUPlace, float>;
+template class MaxPool2dWithIndexFunctor<platform::CPUPlace, double>;
+template class MaxPool2dWithIndexGradFunctor<platform::CPUPlace, double>;
+
+template <typename T>
+class MaxPool3dWithIndexFunctor<platform::CPUPlace, T> {
+ public:
+  void operator()(const platform::DeviceContext& context,
+                  const framework::Tensor& input, framework::Tensor& output,
+                  framework::Tensor& mask, std::vector<int>& ksize,
+                  std::vector<int>& strides, std::vector<int>& paddings) {
+    const int batch_size = input.dims()[0];
+    const int input_depth = input.dims()[2];
+    const int input_height = input.dims()[3];
+    const int input_width = input.dims()[4];
+    const int output_channels = output.dims()[1];
+    const int output_depth = output.dims()[2];
+    const int output_height = output.dims()[3];
+    const int output_width = output.dims()[4];
+    const int ksize_depth = ksize[0];
+    const int ksize_height = ksize[1];
+    const int ksize_width = ksize[2];
+    const int stride_depth = strides[0];
+    const int stride_height = strides[1];
+    const int stride_width = strides[2];
+    const int padding_depth = paddings[0];
+    const int padding_height = paddings[1];
+    const int padding_width = paddings[2];
+    const int input_stride = input_depth * input_height * input_width;
+    const int output_stride = output_depth * output_height * output_width;
+
+    const T* input_data = input.data<T>();
+    T* output_data = output.mutable_data<T>(context.GetPlace());
+    T* mask_data = mask.mutable_data<T>(context.GetPlace());
+
+    for (int i = 0; i < batch_size; i++) {
+      for (int c = 0; c < output_channels; ++c) {
+        for (int pd = 0; pd < output_depth; ++pd) {
+          int dstart = pd * stride_depth - padding_depth;
+          int dend = std::min(dstart + ksize_depth, input_depth);
+          dstart = std::max(dstart, 0);
+          for (int ph = 0; ph < output_height; ++ph) {
+            int hstart = ph * stride_height - padding_height;
+            int hend = std::min(hstart + ksize_height, input_height);
+            hstart = std::max(hstart, 0);
+            for (int pw = 0; pw < output_width; ++pw) {
+              int wstart = pw * stride_width - padding_width;
+              int wend = std::min(wstart + ksize_width, input_width);
+              wstart = std::max(wstart, 0);
+
+              int output_idx = (pd * output_height + ph) * output_width + pw;
+              T ele = static_cast<T>(-FLT_MAX);
+              int index = -1;
+              for (int d = dstart; d < dend; ++d) {
+                for (int h = hstart; h < hend; ++h) {
+                  for (int w = wstart; w < wend; ++w) {
+                    int input_idx = (d * input_height + h) * input_width + w;
+                    if (ele < input_data[input_idx]) {
+                      index = input_idx;
+                      ele = input_data[input_idx];
+                    }
+                  }
+                }
+              }
+              output_data[output_idx] = ele;
+              mask_data[output_idx] = index;
+            }
+          }
+        }
+        // offset
+        input_data += input_stride;
+        output_data += output_stride;
+        mask_data += output_stride;
+      }
+    }
+  }
+};
+
+template <typename T>
+class MaxPool3dWithIndexGradFunctor<platform::CPUPlace, T> {
+ public:
+  void operator()(const platform::DeviceContext& context,
+                  framework::Tensor& input_grad,
+                  const framework::Tensor& output_grad,
+                  const framework::Tensor& mask, std::vector<int>& ksize,
+                  std::vector<int>& strides, std::vector<int>& paddings) {
+    const int batch_size = input_grad.dims()[0];
+    const int input_depth = input_grad.dims()[2];
+    const int input_height = input_grad.dims()[3];
+    const int input_width = input_grad.dims()[4];
+    const int output_channels = output_grad.dims()[1];
+    const int output_depth = output_grad.dims()[2];
+    const int output_height = output_grad.dims()[3];
+    const int output_width = output_grad.dims()[4];
+    const int input_stride = input_depth * input_height * input_width;
+    const int output_stride = output_depth * output_height * output_width;
+
+    const T* mask_data = mask.data<T>();
+    const T* output_grad_data = output_grad.data<T>();
+    T* input_grad_data = input_grad.mutable_data<T>(context.GetPlace());
+
+    for (int n = 0; n < batch_size; ++n) {
+      for (int c = 0; c < output_channels; ++c) {
+        for (int pd = 0; pd < output_depth; ++pd) {
+          for (int ph = 0; ph < output_height; ++ph) {
+            for (int pw = 0; pw < output_width; ++pw) {
+              const int output_idx =
+                  (pd * output_height + ph) * output_width + pw;
+              const int input_idx = static_cast<int>(mask_data[output_idx]);
+              input_grad_data[input_idx] += output_grad_data[output_idx];
+            }
+          }
+        }
+        // offset
+        input_grad_data += input_stride;
+        output_grad_data += output_stride;
+        mask_data += output_stride;
+      }
+    }
+  }
+};
+
+template class MaxPool3dWithIndexFunctor<platform::CPUPlace, float>;
+template class MaxPool3dWithIndexGradFunctor<platform::CPUPlace, float>;
+template class MaxPool3dWithIndexFunctor<platform::CPUPlace, double>;
+template class MaxPool3dWithIndexGradFunctor<platform::CPUPlace, double>;
 }  // namespace math
 }  // namespace operators
 }  // namespace paddle
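
The gradient functors above reduce to a scatter-add driven by the mask: each output gradient flows back to the single input element that won the max. A small numpy sketch of the same computation (illustrative only; NCHW shapes assumed):

```python
import numpy as np

def max_pool2d_grad_from_mask(out_grad, mask, in_h, in_w):
    N, C = out_grad.shape[:2]
    in_grad = np.zeros((N, C, in_h * in_w), out_grad.dtype)
    og = out_grad.reshape(N, C, -1)
    idx = mask.reshape(N, C, -1).astype(np.int64)
    for n in range(N):
        for c in range(C):
            # accumulate, since overlapping windows can pick the same input element
            np.add.at(in_grad[n, c], idx[n, c], og[n, c])
    return in_grad.reshape(N, C, in_h, in_w)
```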
diff --git a/paddle/operators/math/pooling.cu b/paddle/operators/math/pooling.cu
index 6aafedf912..06263737a9 100644
--- a/paddle/operators/math/pooling.cu
+++ b/paddle/operators/math/pooling.cu
@@ -637,7 +637,7 @@ __global__ void KernelMaxPool2dWithIdx(
     const int output_height, const int output_width, const int ksize_height,
     const int ksize_width, const int stride_height, const int stride_width,
     const int padding_height, const int padding_width) {
-  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (nthreads);
+  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
        index += blockDim.x * gridDim.x) {
     int pw = index % output_width;
     int ph = (index / output_width) % output_height;
@@ -676,7 +676,7 @@ __global__ void KernelMaxPool2DWithIdxGrad(
     const int output_height, const int output_width, const int ksize_height,
     const int ksize_width, const int stride_height, const int stride_width,
     const int padding_height, const int padding_width) {
-  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (nthreads);
+  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
        index += blockDim.x * gridDim.x) {
     int w_offset = index % input_width;
     int h_offset = (index / input_width) % input_height;
@@ -766,7 +766,6 @@ class MaxPool2dWithIndexGradFunctor<platform::GPUPlace, T> {
     const int input_channels = input_grad.dims()[1];
     const int input_height = input_grad.dims()[2];
     const int input_width = input_grad.dims()[3];
-    const int output_channels = output_grad.dims()[1];
     const int output_height = output_grad.dims()[2];
     const int output_width = output_grad.dims()[3];
     const int ksize_height = ksize[0];
@@ -810,7 +809,7 @@ __global__ void KernelMaxPool3DWithIdx(
     const int ksize_width, const int stride_depth, const int stride_height,
     const int stride_width, const int padding_depth, const int padding_height,
     const int padding_width) {
-  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (nthreads);
+  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
        index += blockDim.x * gridDim.x) {
     int pw = index % output_width;
     int ph = (index / output_width) % output_height;
@@ -858,7 +857,7 @@ __global__ void KernelMaxPool3DWithIdxGrad(
     const int ksize_width, const int stride_depth, const int stride_height,
     const int stride_width, const int padding_depth, const int padding_height,
     const int padding_width) {
-  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (nthreads);
+  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
        index += blockDim.x * gridDim.x) {
     int w_offset = index % input_width;
     int h_offset = (index / input_width) % input_height;
@@ -969,7 +968,6 @@ class MaxPool3dWithIndexGradFunctor<platform::GPUPlace, T> {
     const int input_depth = input_grad.dims()[2];
     const int input_height = input_grad.dims()[3];
     const int input_width = input_grad.dims()[4];
-    const int output_channels = output_grad.dims()[1];
     const int output_depth = output_grad.dims()[2];
     const int output_height = output_grad.dims()[3];
     const int output_width = output_grad.dims()[4];
diff --git a/paddle/operators/math/pooling.h b/paddle/operators/math/pooling.h
index d214c68923..d819e5986e 100644
--- a/paddle/operators/math/pooling.h
+++ b/paddle/operators/math/pooling.h
@@ -117,6 +117,43 @@ class MaxPool3dGradFunctor {
                   std::vector<int>& strides, std::vector<int>& paddings);
 };
 
+template <typename Place, typename T>
+class MaxPool2dWithIndexFunctor {
+ public:
+  void operator()(const platform::DeviceContext& context,
+                  const framework::Tensor& input, framework::Tensor& output,
+                  framework::Tensor& mask, std::vector<int>& ksize,
+                  std::vector<int>& strides, std::vector<int>& paddings);
+};
+
+template <typename Place, typename T>
+class MaxPool2dWithIndexGradFunctor {
+ public:
+  void operator()(const platform::DeviceContext& context,
+                  framework::Tensor& input_grad,
+                  const framework::Tensor& output_grad,
+                  const framework::Tensor& mask, std::vector<int>& ksize,
+                  std::vector<int>& strides, std::vector<int>& paddings);
+};
+
+template <typename Place, typename T>
+class MaxPool3dWithIndexFunctor {
+ public:
+  void operator()(const platform::DeviceContext& context,
+                  const framework::Tensor& input, framework::Tensor& output,
+                  framework::Tensor& mask, std::vector<int>& ksize,
+                  std::vector<int>& strides, std::vector<int>& paddings);
+};
+
+template <typename Place, typename T>
+class MaxPool3dWithIndexGradFunctor {
+ public:
+  void operator()(const platform::DeviceContext& context,
+                  framework::Tensor& input_grad,
+                  const framework::Tensor& output_grad,
+                  const framework::Tensor& mask, std::vector<int>& ksize,
+                  std::vector<int>& strides, std::vector<int>& paddings);
+};
 }  // namespace math
 }  // namespace operators
 }  // namespace paddle
diff --git a/paddle/operators/pool_with_index_op.h b/paddle/operators/pool_with_index_op.h
index 5fe2f5df93..01b961ca82 100644
--- a/paddle/operators/pool_with_index_op.h
+++ b/paddle/operators/pool_with_index_op.h
@@ -25,7 +25,7 @@ namespace operators {
 using Tensor = framework::Tensor;
 
 template <typename Place, typename T>
-class MaxPoolWithIndexKernel : public framework::OpKernel {
+class MaxPoolWithIndexKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
     const Tensor* in_x = context.Input<Tensor>("X");
@@ -59,7 +59,7 @@ class MaxPoolWithIndexKernel : public framework::OpKernel {
 };
 
 template <typename Place, typename T>
-class MaxPoolWithIndexGradKernel : public framework::OpKernel {
+class MaxPoolWithIndexGradKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
     const Tensor* mask = context.Input<Tensor>("Mask");

From 71dff503ce6934fd78508879545debdbf8776c51 Mon Sep 17 00:00:00 2001
From: zchen0211 <chenzhuoyuan07@gmail.com>
Date: Tue, 3 Oct 2017 15:28:44 -0700
Subject: [PATCH 14/82] API of GAN

---
 doc/design/gan_api.md | 134 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 134 insertions(+)
 create mode 100644 doc/design/gan_api.md

diff --git a/doc/design/gan_api.md b/doc/design/gan_api.md
new file mode 100644
index 0000000000..65ca49410a
--- /dev/null
+++ b/doc/design/gan_api.md
@@ -0,0 +1,134 @@
+'''
+GAN implementation, just a demo.
+'''
+# pd for short, to keep the code concise.
+import paddle.v2 as pd
+import numpy as np
+import logging
+
+X = pd.data(pd.float_vector(784))
+
+# Conditional-GAN should be a class. 
+### Class member function: the initializer.
+class DCGAN(object):
+  def __init__(self, y_dim=None, z_dim=100):
+  
+    # hyperparameters
+    self.y_dim = y_dim # conditional gan or not
+    self.batch_size = 100
+    self.z_dim = z_dim # input noise dimension
+
+    # define parameters of discriminators
+    self.D_W1 = pd.Variable(shape=[784, 128], data=pd.gaussian_normal_randomizer())
+    self.D_b1 = pd.Variable(np.zeros(128)) # a Variable also supports initialization from numpy data
+    self.D_W2 = pd.Variable(np.random.rand(128, 1))
+    self.D_b2 = pd.Variable(np.zeros(128))
+    self.theta_D = [D_W1, D_b1, D_W2, D_b2]
+
+    # define parameters of generators
+    self.G_W1 = pd.Variable(shape=[784, 128], data=pd.gaussian_normal_randomizer())
+    self.G_b1 = pd.Variable(np.zeros(128)) # a Variable also supports initialization from numpy data
+    self.G_W2 = pd.Variable(np.random.rand(128, 1))
+    self.G_b2 = pd.Variable(np.zeros(128))
+    self.theta_G = [D_W1, D_b1, D_W2, D_b2]
+    
+    self.build_model()
+
+### Class member function: Generator Net
+def generator(self, z, y = None):
+
+    # Generator Net
+    if self.y_dim:
+      z = pd.concat(1, [z, y])
+      
+    G_h0 = pd.fc(z, self.G_W0, self.G_b0)
+    G_h0_bn = pd.batch_norm(G_h0)
+    G_h0_relu = pd.relu(G_h0_bn)
+    
+    G_h1 = pd.fc(G_h0_relu, self.G_W1, self.G_b1)
+    G_h1_bn = pd.batch_norm(G_h1)
+    G_h1_relu = pd.relu(G_h1_bn)
+    
+    G_h2 = pd.deconv(G_h1_relu, self.G_W2, self.G_b2)
+    G_im = pd.tanh(G_h2)
+    return G_im
+    
+### Class member function: Discriminator Net
+def discriminator(self, image):
+
+    # Discriminator Net
+    D_h0 = pd.conv2d(image, self.D_W0, self.D_b0)
+    D_h0_bn = pd.batch_norm(D_h0)
+    D_h0_relu = pd.lrelu(D_h0_bn)
+    
+    D_h1 = pd.conv2d(D_h0_relu, self.D_W1, self.D_b1)
+    D_h1_bn = pd.batch_norm(D_h1)
+    D_h1_relu = pd.lrelu(D_h1_bn)
+    
+    D_h2 = pd.fc(D_h1_relu, self.D_W2, self.D_b2)
+    return D_h2
+
+### Class member function: Build the model
+def build_model(self):
+
+    # input data
+    if self.y_dim:
+        self.y = pd.data(pd.float32, [self.batch_size, self.y_dim])
+    self.images = pd.data(pd.float32, [self.batch_size, self.im_size, self.im_size])
+    self.faked_images = pd.data(pd.float32, [self.batch_size, self.im_size, self.im_size])
+    self.z = pd.data(pd.float32, [None, self.z_dim])
+    
+    # if conditional GAN
+    if self.y_dim:
+      self.G = self.generator(self.z, self.y)
+      self.D_t = self.discriminator(self.images)
+      # generated fake images
+      self.sampled = self.sampler(self.z, self.y)
+      self.D_f = self.discriminator(self.G)
+    else: # original version of GAN
+      self.G = self.generator(self.z)
+      self.D_t = self.discriminator(self.images)
+      # generate fake images
+      self.sampled = self.sampler(self.z)
+      self.D_f = self.discriminator(self.G)
+    
+    self.d_loss_real = pd.reduce_mean(pd.cross_entropy(self.D_t, np.ones(self.batch_size)))
+    self.d_loss_fake = pd.reduce_mean(pd.cross_entropy(self.D_f, np.zeros(self.batch_size)))
+    self.d_loss = self.d_loss_real + self.d_loss_fake
+    
+    self.g_loss = pd.reduce_mean(pd.cross_entropy(self.D_f, np.ones(self.batch_size)))
+
+# Main function for the demo:
+if __name__ == "__main__":
+
+    # dcgan
+    dcgan = DCGAN()
+    dcgan.build_model()
+
+    # load mnist data
+    data_X, data_y = load_mnist()
+    
+    # Two subgraphs required!!!
+    d_optim = pd.train.Adam(lr = .001, beta= .1).minimize(dcgan.d_loss)
+    g_optim = pd.train.Adam(lr = .001, beta= .1).minimize(dcgan.g_loss)
+
+    # executor
+    sess = pd.executor()
+    
+    # training
+    for epoch in xrange(10000):
+      for batch_id in range(N / batch_size):
+        idx = ...
+        # sample a batch
+        batch_im, batch_label = data_X[idx:idx+batch_size], data_y[idx:idx+batch_size]
+        # sample z
+        batch_z = np.random.uniform(-1., 1., [batch_size, z_dim])
+
+        if batch_id % 2 == 0:
+          sess.run(d_optim, 
+                   feed_dict = {dcgan.images: batch_im,
+                                dcgan.y: batch_label,
+                                dcgan.z: batch_z})
+        else:
+          sess.run(g_optim,
+                   feed_dict = {dcgan.z: batch_z})
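
For readers decoding the loss definitions: `pd.cross_entropy` against all-ones/all-zeros labels reads as the usual sigmoid cross-entropy on discriminator outputs. A self-contained numpy reading of the three losses (the `pd` API in this doc is a design placeholder, so this is an interpretation, not the implementation):

```python
import numpy as np

def sigmoid_cross_entropy(logits, labels):
    # numerically stable form of -labels*log(sigmoid) - (1-labels)*log(1-sigmoid)
    return np.maximum(logits, 0) - logits * labels + np.log1p(np.exp(-np.abs(logits)))

batch = 4
D_t = np.random.randn(batch)  # discriminator logits on real images
D_f = np.random.randn(batch)  # discriminator logits on generated images

d_loss = sigmoid_cross_entropy(D_t, np.ones(batch)).mean() \
       + sigmoid_cross_entropy(D_f, np.zeros(batch)).mean()
g_loss = sigmoid_cross_entropy(D_f, np.ones(batch)).mean()
print(d_loss, g_loss)
```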

From e21dcc5bdaacbd9dbab5be134b71ba8c57eda717 Mon Sep 17 00:00:00 2001
From: zchen0211 <chenzhuoyuan07@gmail.com>
Date: Tue, 3 Oct 2017 15:59:25 -0700
Subject: [PATCH 15/82] gan api

---
 doc/design/gan_api.md | 31 ++++++++++++++++++++++---------
 1 file changed, 22 insertions(+), 9 deletions(-)

diff --git a/doc/design/gan_api.md b/doc/design/gan_api.md
index 65ca49410a..b5f37051c6 100644
--- a/doc/design/gan_api.md
+++ b/doc/design/gan_api.md
@@ -1,15 +1,17 @@
 '''
 GAN implementation, just a demo.
 '''
+```python
 # pd for short, to keep the code concise.
 import paddle.v2 as pd
 import numpy as np
 import logging
 
 X = pd.data(pd.float_vector(784))
-
+```
 # Conditional-GAN should be a class. 
 ### Class member function: the initializer.
+```python
 class DCGAN(object):
   def __init__(self, y_dim=None, z_dim=100):
   
@@ -19,22 +21,26 @@ class DCGAN(object):
     self.z_dim = z_dim # input noise dimension
 
     # define parameters of discriminators
+    self.D_W0 = pd.Variable(shape=[784, 128], data=pd.gaussian_normal_randomizer())
+    self.D_b0 = pd.Variable(np.zeros(128)) # a Variable also supports initialization from numpy data
     self.D_W1 = pd.Variable(shape=[784, 128], data=pd.gaussian_normal_randomizer())
     self.D_b1 = pd.Variable(np.zeros(128)) # a Variable also supports initialization from numpy data
     self.D_W2 = pd.Variable(np.random.rand(128, 1))
     self.D_b2 = pd.Variable(np.zeros(128))
-    self.theta_D = [D_W1, D_b1, D_W2, D_b2]
+    self.theta_D = [self.D_W0, self.D_b0, self.D_W1, self.D_b1, self.D_W2, self.D_b2]
 
     # define parameters of generators
+    self.G_W0 = pd.Variable(shape=[784, 128], data=pd.gaussian_normal_randomizer())
+    self.G_b0 = pd.Variable(np.zeros(128)) # a Variable also supports initialization from numpy data
     self.G_W1 = pd.Variable(shape=[784, 128], data=pd.gaussian_normal_randomizer())
     self.G_b1 = pd.Variable(np.zeros(128)) # a Variable also supports initialization from numpy data
     self.G_W2 = pd.Variable(np.random.rand(128, 1))
     self.G_b2 = pd.Variable(np.zeros(128))
-    self.theta_G = [D_W1, D_b1, D_W2, D_b2]
-    
-    self.build_model()
+    self.theta_G = [self.G_W0, self.G_b0, self.G_W1, self.G_b1, self.G_W2, self.G_b2]
+```
 
 ### Class member function: Generator Net
+```python
 def generator(self, z, y = None):
 
     # Generator Net
@@ -52,8 +58,10 @@ def generator(self, z, y = None):
     G_h2 = pd.deconv(G_h1_relu, self.G_W2, self.G_b2)
     G_im = pd.tanh(G_h2)
     return G_im
-    
+```
+
 ### Class member function: Discriminator Net
+```python
 def discriminator(self, image):
 
     # Discriminator Net
@@ -67,8 +75,10 @@ def discriminator(self, image):
     
     D_h2 = pd.fc(D_h1_relu, self.D_W2, self.D_b2)
     return D_h2
+```
 
 ### Class member function: Build the model
+```python
 def build_model(self):
 
     # input data
@@ -97,8 +107,10 @@ def build_model(self):
     self.d_loss = self.d_loss_real + self.d_loss_fake
     
     self.g_loss = pd.reduce_mean(pd.cross_entropy(self.D_f, np.ones(self.batch_size)))
+```
 
 # Main function for the demo:
+```python
 if __name__ == "__main__":
 
     # dcgan
@@ -109,7 +121,7 @@ if __name__ == "__main__":
     data_X, data_y = load_mnist()
     
     # Two subgraphs required!!!
-    d_optim = pd.train.Adam(lr = .001, beta= .1).minimize(dcgan.d_loss)
+    d_optim = pd.train.Adam(lr = .001, beta= .1).minimize(dcgan.d_loss, )
     g_optim = pd.train.Adam(lr = .001, beta= .1).minimize(dcgan.g_loss)
 
     # executor
@@ -125,10 +137,11 @@ if __name__ == "__main__":
         batch_z = np.random.uniform(-1., 1., [batch_size, z_dim])
 
         if batch_id % 2 == 0:
-          sess.run(d_optim, 
+          sess.eval(d_optim, 
                    feed_dict = {dcgan.images: batch_im,
                                 dcgan.y: batch_label,
                                 dcgan.z: batch_z})
         else:
-          sess.run(g_optim,
+          sess.eval(g_optim,
                    feed_dict = {dcgan.z: batch_z})
+```
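
The training loop's key structural decision is the two-subgraph alternation: even batches step the discriminator, odd batches step the generator, each optimizer updating only its own parameter list. A stripped-down sketch of that schedule (the step functions are stand-ins for `sess.eval` on `d_optim`/`g_optim`):

```python
def train(num_batches, step_discriminator, step_generator):
    for batch_id in range(num_batches):
        if batch_id % 2 == 0:
            step_discriminator(batch_id)  # update theta_D; theta_G held fixed
        else:
            step_generator(batch_id)      # update theta_G; theta_D held fixed

train(6, lambda i: print("D step", i), lambda i: print("G step", i))
```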

From 2a7f59e73ef0350c92ec3174ed1aa97efe266f52 Mon Sep 17 00:00:00 2001
From: zchen0211 <chenzhuoyuan07@gmail.com>
Date: Tue, 3 Oct 2017 16:41:35 -0700
Subject: [PATCH 16/82] more gan

---
 doc/design/dcgan.png  | Bin 0 -> 57995 bytes
 doc/design/gan_api.md |  12 ++++++++----
 2 files changed, 8 insertions(+), 4 deletions(-)
 create mode 100644 doc/design/dcgan.png

diff --git a/doc/design/dcgan.png b/doc/design/dcgan.png
new file mode 100644
index 0000000000000000000000000000000000000000..15e8e290a111ff43900934341365cb4360d87d28
GIT binary patch
literal 57995
[base85-encoded PNG data for doc/design/dcgan.png omitted]
ze(^E6%l(U^QBNby@|qu?<qol2ny2QZliFuIORm(uz7HWC4wr|^5sFFu2}wzhX?FMa
z2Jn&#EVPCy`SM~%%=!5EIH0kFPa)aO&F@w}cUD?^c)S%?EG;b+%(B~j1@L5Zu9=aM
zSO;MvP2w{z8*}r+D{mxHE`_52=qO-N94M5Fhv&r?V<!eQ$dbpx>JeCggd2nI32pEg
z5XgoKflGg&ivzVbMXo~iE}$~O-rMB>n06`Zzl#%n+s;~fp|EheSHD@(nvc(Qcl6KQ
zTs59zvE9z;Qe#T19q^v<FN8-r9P$Gs!Mg}~aM?{IFwRxCk}f~XE3L+&;e6W6Le*01
zJ4kQa&#m9kNzGun4IUFuYvX=F(%>u$fzcy7+bKPlvH7e98{d4N1O5B+<HQ&5-2Sd%
z?<D4+%>nNR2y%(}=%NeH7U*Bs6DKBts=A-|<A%TK-y<c5;2j4OTr4=%Pde8c!03Rk
zCu5f^4D?jkK0kaX<szs*{O^+B&}ni%l6t@XzpDdhgv)0&k|)ZT*QAedEuAKNn@U_U
zMo*%D)(!#z?-`(spDDJPbAfK7Zg$U^Pm;DB6q=SokA+aKypP;(50^XW`S|#wC}Yub
zR<1qh3SUv;;8xdgkoTC^M94{2k?zh}Va8~4o2^q&wOm)0S9PT7yHm#Ls2KZHhT|~q
zUoG?aHC;<SoZeboc_bIE8@t)K+YE-uvrVh5ClSNNKk7EE9sc{s+I)Mw&3_;e4L8;A
zm`GYYX|g|@9s3^SdG}{<{@{LVT3v128~t%dqt2HvUl8!U=_<*R@u(2uboJ*ds^Y3~
zNnSK5!}gx?UTG3`yyfwIJ@{dSA2zA3uc!+7EFi-3|J-b3&wcBig}-AYnv8IpaHmmu
zZsq{6F}w?OM_{99@ww4?JvqR$2!%`m*ijk*4v+FlGAPEmivCaNFqci;EDEGpWm`jD
zf4sd5L2ZDB1Nr5kLx^s7$4--iDvS<^;o8eoNL1I$K{91YWq%_v6X>I@+nrH$^S<np
zX&J<k@fGx?9H|TscBqao!AJgSJ-d`o&2YP0o+uW&Jn@hdjZqoLeri=`Gk@0|JYDBa
z3&6}yYDUs}69z5UGTZ0DBS^$1?#s`r<$a9$!{z<Y2WWn=eXp)26uoSFOa+pWvA)E3
zBB?Q@DxX3i@F((gpL8?Qdja@IBrZZ4{aiVx!<m7wZ>b0cvg^v<;{e)3H#f5%@(KaC
zqF-mM&0q~Wufb**W(2sgZorLwO?U&K><Rq$pYg!IB$|N6NnYOc!l79&8dW_bBjf#+
ziESO(*4+8Ug&7QxELGFY{f@vo8%z_m%FvqPKvc;rgXnZfA2I*C<;?Z*X+Vsjm!^W!
zyLY6}Q|=6V`Q&7m{z0tD7`!*t_}ey-hShTMob{TTI!1VCh8%$ZRQ`|39k)p6k}IsL
zsX5ym$tzX~gG-b5fFy1bRy{y#D;LM-GQ6OO`>LFeDp8oG!3p*bhOii^{FNgWf#5i}
z=h{@1jW;chgQyu7DTQ2Qn=s&SnhiCE4)V2Kd$u!Jn>P=3tJfln=AE`;n@?EjR0=i@
z(^@rVkE5~uhIKT&<Fgq$S#Az%&cszGbxiu+tE0)jzR1CKEwY+i04?4|e&cGYbH`nO
z*4>ra!7Nv^z?RMI{BObQIMu15khk#lAqO2c*mod+PaDW(pp_t*F*44?g|Gg*^)7Au
z?U0T{qKQCDy~WnpfM#>jtP=J%>Jv;N(IkL67~d;)`G=MfNF>T-&`b<zd4eVai<A%>
zp+HjFvcaSfgyIeiPOn(vg3T|V2TSRS7LH*LP=HSdN`}`$9rmXs3aW2nrQ8KFRhWpv
zyRL6;!o$LhHEk^{o}8Uet&I5lqvYR;A<P6lqMu-fr8b!NF0-Uvr^@}LlWjsPcQ1G!
zdqs%Za=Cg?)Qgt2CjHuM>vX%nxrjMfQdO1ccvMUgcp?0SsZ(Tg=dFRR$?F%D4oFS^
ze=`Cu_Sbm_Eo*?y4s0ojCXypLU2G8}Td)iqh~#xF5<Oh0LW6@iGDOtBr6nq_CO$Z$
z(wuRi@)5<}k%EF<jUuzmD70%BCYSx;&(1;9&7I58<Az45J@+T+W?)CbOo_f<kIm{#
ze!}{&PQv<VJ#Yf^XysrsRmg7xVR2b+cZ?>kTV*hkXqMTjfz4SdYD!FkTl<6{nx{@I
zZzL9W+7a;gZOcg#iS0~XVF8aY8;|#iy3+?8GgD$(B3}?Q53ZS+)TE@I%I{-aVvWhk
zvfZ=Vd#?N*AG&?w1)Zf{`c37GBrqwi|CI_Je3r@*AcCK?Vaba%Ad@!|fv_j<Vq0Y*
zq<MCFO+>V5@j+f0)@upXaZscoO)vo!>=%Bmhr0tmWkT_>>H$;jddHU^$0{_KQ)EZE
z0KqP<NYbJBg2A${y-TvzcHf52kS_~kmWow4pz**=Xs|qmKj$xn`P9+ukndT|dmkrL
z+5S<s9!@C)Xy+M3LKQ#;sge>t6yxUlnv|5Z)^2g|#QQ(0gZg>6_?@6yPb~Dk5}^cM
z$ATN4XqX{G@Cfb<29c(Oe77{mv(VF;>iiY<061fC&YPK)?dKRJ2~`@Fwt5vCXAd87
zpll7##;aIg0@~#ZZHg={N<hwgyh3$>)4)EIctym86~m0~E>}h?zqkTn7hBgG&K|#w
z`4W7@FRG{LLme^M_1ilnS?kYQ_BNl%TRjbeby7oq!7;(%-rRyBr^t7)Cj#eEMJVSO
zTx9C9rLB?Tz=CIA(Q^hH>U4;W4Yoe44aN70tiT|@G<fea4}71j=Q)rA_2mX(3OgC4
z=O)cx0>)UW?8cj=wm?90ONzrlrESg4pD_qz1pwPcG7X(bg|4Ei%Z;AKi-$*BFU&2D
ze#d$C;kOYRb+LBCKT1q?O#e^E0maI@JvenwA;zk;<;=redyYVgrY<gy?g>wk<$m*q
z!rVd`UVJh&@^vVB8P-)9p=MS+JMgw%qVZ8pCB=G_Oxy*_K6yC*V@Ng=R+cQBHdYKC
z&^h%&@Sx(Nu8KS<tm{&Pd1^81{OVF{rSdCx)=+R}*y+Q?k<bnH8~y7)AL0?p`D;0Q
zWO_4)zW6+^wLC69PA0~|R676Om+nlqT=|Qo+G?H@qbrApR+N!a;JR-Y7XslE;OhC}
zqatJfob~TF+^;?4znAxx<UFXodP7jw?nV>-G?2C*!SKz9J$ThSBZI<4lI69Vug6;m
z9>l<f55V6qmhG7nj5NEAIbI>+u|ie?Ao<bwuCQQD7UI9k9C&|DDc(knIi-JR5X@3X
zaEy7oHe&Z20-_g3k^>6F6JVYKym#ZG*YVqa?9-aR5fZs0vW6N}(|<T37{UZw<rU>*
zqB)>8?Aj1W%jYb4lhLK`YOg*j1~9lcr?}jZVuPKF=JTTk6h#4+Mv<kTT}CvRw3N{|
z&5uTGv{ORkeiiQ<)5SgM8s+1=$u2cM+GsTx&YZjBBi-SXC+8%MBo6PgticKp?yF?(
zC-1?=`7r<l)eB7?p<=Xnn3s3=!qmg#<6uexAeq=;-p9T4poX8euASZEPQyNH7o8ty
zdmE6y$rzqQK7A|qqdZBTPILaS!k<~mKeNa8s{C=wk;OY1CPsaIb@z4w?Zh=dB5GZp
z>^G!l%=#!0JaobB6MEWZ^IufwIIuk&X}oQ>JryExyNcv=+r(8i@*Zi&_-=E&4p?c_
z$o8MIxS4knji2^)FTmpXI=N$EwP#yYM@)zZM~4iPLaWtwY=*3%eiaUbf+^)k9jw0h
zWRf@6EUJemxTPSRDSID4jyHa%*FFyyPF<os(Lyd>4~zO9?uyf8F0Cyml6Jl~jryaV
zX)X3UA6wnmn;R`1%UYVd4la1Oxsk{Q>uC<7>TLAdW0{BOqRQyoAZ9^5VPhxF#MvnF
z4`MG24<OQ`W41cMaY0r_n$8BF^Ud?-{m)6P<o194g79`l;a{n6z<$Ug;TXmD!cZ+1
zH3XcXSBxRKcR<o)qmnu8R?y?CJg{%rlShMg0wD!~bTDT9S-W5?yfMZLd|mhyf)^@b
z^xm5zt`}gr3}JMj=c^2!*+7B(@qHK`Mf*#lgNHVT$MRCGgA`i=GpCK8I%7X>InbL!
zo450E;9gG@@sR#ku0~L)9QR&FkV@F~vi_co#R`r?lSa)(z(X}mdZH-8FV1-5Du51p
zAoY!msw(_K9uIBwl_cX|=#xjydab4dq<@u~f$jqox4x~d-@jX*ZIAOt+0E9n;D-W~
zS`04B!5nNTHReYsAD&+X76uYIqo;1h$~2gG27UUW2YvBjGOzjLWc-FlREyuAo;w5I
zBX@%tAjMY!%<8&x)asY_iEyHhPMYgKpU~6c!AZMH>#KI<^YwK!cLgl1N1T&`eT~Mg
z8*uTodD|0e3bAzfN#Cn>N_noO0~^aQj9WBuO-ILa%^#Tbn6VouXt3a{i5D(GDcEXh
zZI=CvX<`-D+2xa(I$>+WaK$E6@5*o2-`0gLZ;*F>-nYMcB~@A;g#3KAzS-UGhv2fJ
zBG+VVd{4@)CjVTGuKM}hjtP03lw7Dkf_%My0TM1wS;iN~o^8I5cWXJkz?N(nfrg=4
z*=jm%H1&RRR@{J|5e^R30+~7pZ-9JGvu&_2KIh>+I1gbph}6_M5?*Bgn@S@36AVUC
z*Y_FUIV0wG3M)&obgb`HUUQ3^&*9OI-n+Ixu9+mW=FVr$%IOco>Mve~k8ypM=)jS0
z200V#jw@b-|DKEB$@P2{V0%|M>D0kFB!vLsY*!36U5sI*;$aiEWJm6KK*y(5y6w2*
ztkZM<j^LnPpY!XY>EDr2;hsjn+myndydlQcBPabVxB1Stmn8E&+rnN>nwrloy$(;u
z<h`2pKz-D3vkq#LjSYTs9LyX~BsMqrt%XkTOkmvvl(|T`+hhsrQvvzW>IF;j-Bn{Q
zBm+m@xCm4f(nc*AnZH>qlZXPD-La$)f4Nb|uRH&X;qF4;*J|-w-Ma<%tB<(qtR!;{
z{ls+qR|K^E&sv8T=P6E4!;6j8-@hvvi!@AD#z%t~_Ak8q^k&)MH)4$`jcYGaJH+)w
znW)}kBiK~_K380s1`Q${e#H^D!VcwpoBmBX4S__8a->9vEA~}hE#05zNty?blYW$F
z)U~VGvf++gVCysUH=J6#db(ZPI_Yw&p(nncT=X~d@!&Lln$zqfCy4v{k|asD*DP+x
z5)Ts{h}efC{|(CC@2%GkBWJGv_SMtRf8Rlx-5uge(mqjitK=kfPLwOl(SdvxzFEi;
zIyN4>ee=ow%|<vdZvvL^`gVI6oEr49$!s7}e7@1G1lizyb?gK(iB?u2nu(1K4cj|A
zWTd3vZ1KAu%i2D>wix>c23)KHuFvDB+HyXoT{y1;g}Ug@;O33f2ASl!P$ldMo~X93
zVCT}!kl@<`|BxwVO{v#@$7ejBIPIF7;qr|KT|$Cqf#qpg-uJ01W60N|1*vdp-<#P*
z3<$vO0PS;se;*ti3;^yqKxZ*Pz_9@IzA@bYq+X>6`gr~aUD++hrwM^Q8`0|yk8r&m
z{#<By5fW+->v72F{r={;3td&!&mc~4Oo7H!+wlb?8`LX-^X!zg!mHSDvsTyfyUl}!
zfdjoV6Ze@}&osH9>zkG3n2}|!Mkh!5gt|#e)9RV@(NNGYM5xO2?00(#$J7&%y!54!
zQR*ne#j({8^6C1(F~*|2(knc&JH%4I6MU4i3o;EzKQ_#Myh`8c+t5jhkmwM!`+Kls
z`#t(#{+-7Hco%J%33ov`Sj`XR_L6W*kF%YQvdI8@p^6&z1s@5;Ct*BdT6RTRXlmSs
z3!;j^sFMooi$`-L|CWU#oi0;6BHwxC(W~P6oP3OYR2%)q8<LaO+d@|6@}|Rtbe@%?
zpScWx!R5u(Hb$obvH#ORD45IeO6~hkw2!J)KWTq>UF?IIOIO}4pgIO>O`vZ2rcE}|
zQ6o+Y4II!@cyqRt_>ng#eT3^Ws1OP2o(UUYvi4mb8?GYZFJVo~BM<5vPJ`8H>IS@-
z+Rc~u+g%W0%(h+r32Uv^nwpCB)D=!bgUz#RfHQ(}jj&ua3w;|~UtRrTkpIBFSufFK
z-uB||VsXfUeCPx&jnjy$z?>Fk%NH(_-t`g!(L@-v2{Yq;#Xx1CCwq!<ZtIRsFBPdb
z^uzi`9sS3E-j!ofsh5F1KKGfFvGNJ}JV{0{am7EcFFI3GQbkN&g4_7-V@vnqc<}@!
zV~c6u<%8m{Cq|>`J7jKqa}P7u&+9Vx6B$1+QNCoWbUjIX=AX{%6gA%+DYLlC^@F^}
zF}<qD-9GtwU@~*qo@8{80I<reeOGJlN>x^~?H_s!&?L7GQ2e<~P`JpGwrsSCcfl@=
zVIrcK`O4&as)Q(=jA8KX&VD9Z#&E_bO70Rwfe0*tb0K|X-Td`&otknXE>Js9OiW0@
za~Z-G55Qb7o)lRU)iG~p&z|Ye|1*mLkq|1R(ssIsVAE@MZjK!5i&~iuFa`~n-mK9J
zZMimr)R;*KQxIscw2qH&<hZt3h{MNhDrZOPR0x9>{(Vb7{y;lha3rhXPfB^ny*rOi
zo$NWrLUF_G+<j86&y@iYj*y6BELtAS!SFqNtqAA^4UMnIBVoBf=LcBTzli#lmivMv
zJz8me@ex??vNEG4f}_)5tm+xuqMAP<aI{Y=&tji`5>C6UL7iPUEnri6SIXi|%U<-F
z`E|Zhhi@^WB6(^?7EhgMF+;wS0)$;Upk(yyV9d;-`uM<tFdFAKo!`pH<45asrDR0K
z5PUY&kUoo;=y<FZP05()J>YJ&%1H=hJn2q0W0~`h^|Xb2Vzj>@EjeH}hglDCTHX|2
ztA&oLvgXfb3lj%fnw5OlFcxb`0}a7*hCq<HMPZ1q5|4bEdBss7LB_`&XXSYCoH~t{
z($+$b8pL0o(KqwL9%V&TXtKl)uTnvN;e00D<n89~!(tgU(#~&wfl4j`5!QCAd$(9j
zF?T7tym+}Gug`1ietu=Anes=Tn>d_;&l$*c4+VfMqJ8-=o>BeR+rV#IU{~CmZ%i_o
zE|5vm+g~?!2(`Zjk<~rXJa68B#|}7n;Hv}SxY>*U6lX>wQ8fua`3uvMPCOsr6&dBr
zsH@|lj3l+;t}e}jhknvfdv_V}83Ln-s!Y|4e0+|AiM{sgsnCPdM(E8`UF*H>je_=_
zZ*q)H*;h1TI2TJ32S8#5w4cQ)NU*AagQ=jPYj$?bDVM!9S4paXFPiET;O4--NGh3I
zDvT)|Cc66x@+;^$r_a0jo5i^p&SD)aHdF{-6s1Fg+%S}$+RbQD&ukjrd}b-*v6>XN
z@}j?Ta%XRQd#2UTZ$)K19FDnPDV=a)3qURUuT@QFY~-w|O7nVNKevWantP$IBS>IJ
zSCFWSH{3eE;buXD2D<`!br%UPW42dlVYeay%^Mfxeu!Gw0AJD91EKR8mxrs#^sQDd
zoov{(3991=b<`vBjX$P?))LWiP^Wn$e{P#_=Y}%j%c;c;icO&kySl0$uy-Pgsx&E0
zMOI|sfwCno2=bWCH}I$pW#m7*$@FmDIdk)#&vARLtJQoST6mChM8q_5aXNGMRQ+!L
zF!rE;-1kOb$gB3;?BUw3{!=o776>&!J*k=aA4gT(-0ZX39SJ;m{H=7uN%)|(Pxuho
zt~0puAq0smO4Fu|P3APifn=AK4j8b_0Sw8s3S_gKydb1&H<b6%p5h;N%6(S!8Nel_
zQB_kA2(*x=es1BsHY#`(eLX?5eWe~%)ZDRwPHo7X??<`$%-;jS>r&i0UyaE;-g2*{
zGNS(Pb5jqatGrQKD5<jFz-}7dGe!nok552>PzN(su31=F@H;XPg&U&T{+QlF|MJ#k
zJujSP`uNza?C069ZXU;1bXM3B(4Hp<c(EbJ5-j)`Ts)KUZKgN|z={E(NJ>U_b8`dm
zs&UH#1}5E8ErGN2G&HGP*trM6@>gc*y`Jb;UfW9kKXQ{E8^$V{!NNpwp(mU|UTbp6
zn>%~cS94X9rS<Zk*hHb6Q%)*~P80}CRYfat$}xxpZf|mJ`~uKS91+v&+>LYFYfRFF
zkNIZ0{#wW-R~236mMy3IRQf#(Q9R5|Z5`M={~Gty)Gn2GW1`3RCU9K;X4>D*+w9(j
zHMVl)yVhwBt2gS4<e~dRo)D`0ZI`jEHr0i9tDu_s`I-0ZoyrPD^zBMC%cE2MmWSz9
z5eVdW>-~MJ??G7v_LB@jX#{yOv2|11#>x1^M44W*=S;Olk{63+#7jc)P>H-pcXxna
z0<&WD8N<NT2a7Kqz*w2JLq`@V@s3hnUtgaZPdFOvcIIpby}i;BFTiu0+TD_U92B#D
zTH8)5FUgO__Tcz~yv$h`bKtdGF_sn%72259{`Gk@tH}(B=vPCSrHd>G?M>B0BKvX~
zLUWnJ3=f3)>K)xr8go!^%{X>!Tr+y}mrqFpQ9RV~L}ldmPt$CtA125=5|T<k+#y9c
zQg(sUb8pYa%IXy>YZm(_49n3Pz%GY$07@Uw`{142xZxl2m5a|*BP`fE6am<IE;2%`
zaD?o;#}!6ebMv|V)B&fK&BWZ?iOfw=CEAmAM4Z>ZLOr$!SIOB5t+hl&vE!kO#f*#L
zEW6fXtgj>U{0ntSgCmw@$TfEig>1eJgxgzwv#i~TAwkDIT`Cg|i_#Hc)JlWvtSp_$
zv7H3=zU9F;mW+_kg6048S(_KecT^o_GSUh^30>b(ulk-&*ZKbOgR_b`ocGJXA|&$O
zdog8UE8vm`P<sn5j^s%m{9KI!RQXYs>A*^a0^v*i&}@lFfuV!NR(*W5uw2{LyYsm@
zol>>5`}m<z8la5<9`)MV+R{?&4z@%S3LHtkO_PECx_|4BAWga%Xws$%^3*}3&20$%
zrLFCFqA#4J1V_oO<vg?BuXPNor?p-EFj0!MDt9?5kEnyt#B<QiA4|K$>wEbyQHG;G
zdRAUna{n{=gD81cr9s7nc|=ISh(XNyPtZP6PT!X0MpT@S_n`R;8-NCNZ*Nav2-s=t
zdFei;s%U8e;n|im&zyb7+ktPt9hqSfUr-IB>4ztKHl4?kSVHhpb!_wIbJ$b=yyj_0
z3cS0kR+Cgxk^Iz*_4yr-TRH(QW*8H)=^uc#FXnmP+4m6m-E}V%MkM1s(iyoEYSw=r
zxpStU!@Ge!pQvov|NNTG_^ph^^wtx8OwDGOT;F7FZ@j`2nQp~6UBVMs!PXW72!e$C
zM-hMk3ui#BYKH8FVOYAsFwwy5091d72wp)!0f2xFlL4_Ns`~c+-_Gl#aDIYUI*Rcc
z%>OQK9%EntNiJNv;bOsyCg{jrfQRQ|#;Sb%KgHEEG>6@(3fa|VOnRy5h!OJ|IwwD~
z9$_~UxfrH}kmYIC*|+aMah*?4g}}HyMn;d+;QrfJ*xX)&ozh6;hg|9>v;&op&tmWy
zO*a*MG);UH=`i%$`N%WE491t}kgp*DK9OhsWhZZq^%xb1#0AHqSo$4#$<MF6fsYIT
zjywzQpXB7^I8uNt43xNRq><!3iW`^c3dWkMgz4}vD%+QjXI}_X1+P}CSyl7*nGTI?
zVlSzbrB9e+hLL*zY{zU*Pun=y-xU}EqU?1Y8e2`>y+IQFpZl3!er%t_*<dKVHiNxm
zJ5xvcZXPdby?%qSd2zkYi#`__SvK<W(TfKMo0}Rnmg695b7+=^K)eDFo(Uhe&aQ#X
zT~!tIRRp233&I>J@h=(iL!H5l4Je>P;E2DYqqLZXs^Lq#dI}1{|49M2wzhiNs;8}s
z0$o0R%5(G#e-~^Bz(5d6<u0(rtW64TwlPOef|+|Yw1bd{2n7P<e?W{DYzRUeI@l^S
zz84nKuMz=&gPvX{5(&CrpycfMQC%Gm?*&{dNRD9#>Fjg1bx0)0YU>@dNJbMOBqm0{
zp96^b<_1V6R8@Dt={`G469kO!tgPjgm55c|Yn7R=xeU_3Rnn=TiiFbtaTVZ91b4oY
znE=fYgusF8>C+K--e&370BSgX?ytfXcA5l2d}+-@XH!myFg}{R3}+BTSQg(M@r^M!
z0j*7w&!6%F+VAkyK%Z4)tl0jiZU494@Wcq7I%aH48tD;>?qn9B#ndEv8QLr<tX?qz
zas-;g6rK$SaTQ6@w&Z@luS>S^M0#v6Sd{0x(_@p9g-9lRs!Xscj;8nbg6lb585_Qe
z>c=jlP;)pj^s<3(b@{7TdndwT98LP4sw|yF>iE|0Wr0;ge}bUij2#=h9rL0jF-0>D
z!wp)qIN44bxz1o`xyGU&>+5xs6!TZNRg;T12C<DxN-d|)bEJxcFL-P{oSdlJ`@^=G
zvV6wo-H3liD15Bpapji@OKEEp`0}tM)M~}1dy~R%3lr!ae7OIv+I+t6MM99*kb<Oj
zqnbOOIbrn@WsLl^!jg3@yB_pR^~n&BN?SvRhlhWd4x~PH_zi)CiWvcYawW5T3I`Am
z0s*O)*Qbg<zHgeS(E>$YX2Q1P^W})TFHTR5PfOYW`wdGpd7m?^@Ln3Bunq`8x8RT<
zcc2-%xVXq$5PWYVuDH9sjgUW}ZLefDLc#eaTi{Rn7j)ae7!37yj9P^SD-wdZaI+~v
zA<&t55goaw!==R)!9rU-x?BQIW<37RjT7uQvaOF8GAg{Uz>f?jM*%TEX`}=#&@9h_
zC$hMxuclTDgj>)KuzEpKDkv}745%?_OU1u4d;o_jGqDUD|HyrA=d9a^J-R25w)<V=
z+5FiVlS0R%M`+>|7>~NxHerzjXt_z?^I(SfBh3dA%T`?;?Uj~|h9=3_U58<aAkbAO
zxegZ9>s-dxzN}8N^h1JTs2SUZEwC}-oI3Y0=OeKPpAgu55l?coNqUNacNwvZ4bQds
z{5Kh_$Ds(-6NI&Z%$mZn3}wv-B+zdLz@@`-(L^@wY~;Fg@j9F(;@*^JyCaFl8;p3P
z__U*WqjSFsP(=rsD6XxBW*6eVNk|eC0a{5ye!29|ct09A54bHbi{U{Uzjof;^gkyM
zhKA!nclB=C(;Fw*W{%z;s5J_?-@CQW*xyXHHZTt%4@WW{?pE`Jb{^KLS_;FdZf47~
zu14*s2CBZj(p4eCH+gF3qL^~_yKUj0Ud?U<EAh_EN#nt7<#mo7Yq4{N`$p)Z-@|0I
z;Mq<^OYV1h*EWXNuk|X8dz15-LBawIN*s=U#ltWd@87WST2qaS8O#fg=wstZ=XJPj
zVzVp}2s1QUKkRF+WX|j6=)eJY+O!cj!zo`G1n+8<x*o3?7WMsRD=qabQ&-dx@YaoB
z?gs)?E-`i?8(x4Cfu_dqaybM{;iS@9E@Ak<ks`MIOPKnH>)wnQEsZontz=+mD2va9
z$@4-q2X6%js8glj2%!8CFD@<ywudNcOuObYH33SVw1J{<MG=uUpqCiF^@wAdD<F_g
z6|CN(Kp67&OG^8FHMD59fc6kdgKV*EbN`JS?NptY1g}#hevP9A<O7L47u6KcV4jI4
z^r@;USb#uzVpNgL5XRh(D#E~$(ar{iWH;T;Z5sfWF&Gx|MHJX5f#dYZ-Fv#y1gI*|
zM3U3@Eo73|`R1kgn*hsA{Z)YVNdT?1sasSo!@$jjXJdnkYL`?1HKSHZmjkLW80@U^
zQEoclUY|8@T;dT*bC+d*@0TgfdXtv@*HU<iP5pS^w=JxfK8-Z9mZy2#SemA*%oi(d
z_`f&4x-H-tl%y$JX_us<x)@P<IElV5Eves{UcB9p)4z8<DR~IpNR+g^&>)^O*_m1}
zPS-@3eZ17-5j@IDL}}AySB?x92J@Ijs~<WZblvi6b-Sl~!|l>|{P^Dcp62vR*I&o!
zx7@__OGD!u_I*wEa0Oy3>;zKJdS4S>0ClRyM{ztc6O)%wt3^&)iAk=ZC^X-Tig-cB
zPZR||utG{IsxT$sqtU>io&uCvLnPs1))g_i3}<I&;5Z@g0ZBcYiSp^Z{;27Y&p<zy
z&pv1CCN2F0i;H%eDcF$Gm$zzT%c{IN`2LslF|%$-oGhv8`0TEsyncVqBf^h?N7F~f
zI-*i^Z0VS|?hpuq;rz`T1XQ=);AmY>ow(z@s%qwjM<(tUQE;~NMh&IEC3u&dmKKgW
zN+>>V<I6)&zrkG4C9W9Jvr68x%&VuaUI+9tDoh3Amf_*y8-G&bzBw7dy3K1C*uEi=
zaLh`E6+&^P_?Qp1<CLLSccM&;G6?urn15RO>ni@oqMOXG&#LPzpc1G05aA!KLX=M)
zfvzg2h@z|VJU~?5i|q5Se}m6A<~T4#Y(2zj@u%zV3sMu@*czM^v|7xqXtAumJt5Qc
zUZIL2vrV?BNWtz7NSP!-eW}V><#|)l+0L(4|9!VC*oSJ@qVcTdhF;#DYDQ;rmqDSk
zbJp*6xy*dLP|(FG*J+XS!KSbN8RX02X(84A;aX|S1*O2m)##!hWk~c~Z4qsA1u6YN
zv)^cM>&<qwh<eILN!&Pr1~wqcIYP(G$ptLl(CQHo6i6d51Ax;EuN}xJKNKK(jpgNW
zJ(!AtT!JwS0cV6k1C&=rFrghlNd*z6#)~oWIK&GYHs*MSMuUk;>xE4@Zq5blNQ&_w
zsK^lXEHg->3XjJD`u|@G09&yJ-?QaR8i_HoMkM}RAZV3$)cU=8tRe9BKM#&Ic?*M@
zas?JLV7=v%66RfHija6Ajp)z=Q_dOCpJROyQ~dK2xWd6t@sXRsX7)gY);Z9-J?{7c
zwuTP{3SXMQt}!sM>>c!<?Ql6!+jeBj`S&`=00Y1A4n{~q5=~e{Mo|eb4hyKY16gBY
zqKPkQQe-7w6r2AocYcJa=$0V{M?^@73$EK(eSv+GLiA>E-oI?`!Ch&i?&a7r2wd1d
z%~BsY>k399E13a@_Mf)`s;aJ@Ht<h@!5Jj|KwJFBwp0kk@q$2n2U-YTwCPZ0>DX42
z-T~-PIH5qTLNAxj5Vq15_?WXOo$KRJ3TFuQR{-dNE8?w;jLg%gPsJ7In!VgGtO8bl
zZjBWZpt0|&f*ix2^S086TASHKb`W+`D4UYY(3kT*q<#4ZHD)fuwsQk07t;nXAivml
z(*q1EndyOjo^}LW3k!m5Y<M^!t)~U74&)$QzzxL40lK5}Leh?(4_^jB7#*aIL-`dw
z(3_U-xas%Fmq}Tc?+qBDzqfe@jZLfhBntCtw7g^7vCS$Jpn-8~n;z0(CKTf0nYG-Q
zpWo2CK^qhFp~7Di29u|o{0{EV%4PLE_`R;?IW}8t2i*KDy>|-ET0PF53>vu-s=n4d
zS&BavXzUl^Xh_ecO-y9Zo_S@b<+v)KBTaz{R!a9{oPA53+q530Lakqad$A7$7ax5l
z|L)}_Gdm#kU@09+@HybBil^nK`=*~nZMe{n4vD<nuDjd2C||fvSk$NzoK}T`OaNQA
z>jS_5fOF;%gdt3nUwr1l+}b)u&gCPGG(OO-R)f*A|CtECZS4VA0}TZtysr-Ar#E+j
z`Z%J=ram6c3(lGlUo4<S?Ku*oWp_7|{Lk7XRoDVxw!LBMSO2*b9|eO7mU&=51dldJ
zynJ$ui$xkSx%2S>j)&&2_8*NMv~17&F~9#OmI$7=p#C+{h${7kjAFbnb<DlA6j|zk
z>6$M(sz3DonhXs+@4$7t19=Bq<_2(9rw#nqjiktKB47Foop)RDM&YJ8QV@sb{;rJp
z8yy??N*dquQ?s5Ou=P{p6)j$&bmk<lsEWHL$e}T~JDq-oW1@*4g$Cs^potflu);Fr
z%qrYxW*nL5!@ZfN<_9#(9l0;2MDfM7O5S8zQb{YQ&>}R0IVh>6m4G$b&8-gDd%mVh
z<|&E*DKA^v!1D;%0BZM4T#P_y2M$p=4o0BVq^CK)zUuh>y|1_;-j(AaQ04;D2`sp`
zQovbG8WHnfFv{TxW`J-o<a17Nu?p66915^zSC4*I0&^Vb9Lrkoub&Xmr4z8oDiUUH
zm}pAS{{h(y`@jvpaRoBr0oDlsQJ}b}4h2=^*cPZFL7xFWGuV3ITNBy<zAgn%<QW$D
zoFmHy&RxufX4QOQhJdaaSW}M>Ni#*{veegSP9z!&$RW(6vgvaTA?*IGf3d+}sR{!H
z#c^XUESgi7+5^9{^=Y?xpYtwkk|!`Z`Se!Hm8Jx5<|&VrDgQdX%lU_^V_bdj*MS`0
zhU&2Ix1~h62s=)3z&Q$;1MV+K%(2%eZ~Ij$zlIDzKzIxM`WfbqJ{-ubs%h=<uny3u
zmE3Xx$CKQ?@MqJ1fFPS7@RKNMq-kF~<5wJcME;05G^B&P2bd8pYp81YHV<~d#F~_q
zV2dJD2&_5NJpS_I426JtdL8gE-CqraTEaraZTHQU!t!+P72bma*f1H1e2hb`0mcq?
zGgvx+oi|7!(Ij&twx0-!K@chi$6w|xtE^N?T(4X7ZLFzzlOhZJjNx4%Mv5undZMny
zgbfDt^0cvFY{;6UMWjTS31hHfb%TK)m9(Nsp=DnK8JST|cH8*~n0$aHqO^J|mC^O3
z1BeK?xPnA!Ai^G3Y)lWpE~B&qJ$(dW=l#I*3`lj#mSxb`pjDANKYAVJq(s?Z!Pqiy
zK|>7GVutVzMdJTFp&4>@eh<EZZA(CE`Y<Dd01<c?7#LdG+KpfcucxO*;@t{Ks}!Et
ze=%H)%HR*CjXiW10E-@ou0aLP8&$4dR|bG;5H7Lj+Bh%haco>Q9npRQO3g%l1&6yi
zMZHt&?BA8l(v=L|FAPIT#Qf#OY5C%wK&TJB{j9=~MZM-h0%OY`6jdQ8y!^-0ut%bw
z4Iw?$zZ_qcG(?X-3GG}z>m>|5;Pu3-QYT!4nGXsWUe7&=lfh|q8as-z50+1K*bz?8
zmr98~0s5JuP$|r0NWqv=#7UbZRhrr^1j0~>MlCp{pql?}K>mKYT$}3hY|%{hC+jFi
zIw-5iG!ozJoMqM$@BOvJq`6L^x<&h+ODxt5GN_fz6<&cFwF;a7H8nLrpPcd5$rL#C
zUgGPO9{_EtSb}DM66+9%sNUMTx><aP%1r<VU5&aZemrlKd<LJ(XudR<t!<(?z?dm0
z3bM~YmAugGmC^OGVVMcH@-q27p|~_4laU~q0R;kn3)Rp^Y91ay-h;9T%vt6ok2+2J
zJXrme&OczKyy-X$2sGk~^YcN{jrs9fFA#&nrMrzPfUgk)6-5LOm;!-gZ`vAg2<FJg
zl<rIIB*1D$+aH#Dc%;EZDHGITSWbcw=vji@@gwseL1v<<|HinW@GzIOBCklcdZA{m
zdtbGR0P)$CL~((*g7NdEpk5zF=UYzdZ|-?x>kNX^7{1vx>!al~1X!(Zds(hmA6k#c
z%IjIQvis;4Jc`u^UJa}o(8pY<FvZ3s7+!v<?$*<3r<ki=Z3y^M?Ek_`aJQCKUApIu
z^2z#uoj2PuCr{i5qn?uoJi4-<SHaF7#3J4HJ$5IpgsvJ33loyj1<^r0w`1+<{~Nrt
z*nnKV2Flup9wBWM=1e{oN<hapx|#RpBavW}CyfO9#<i@1JgeVvWES^|5@3pGD&`xx
ze*!`srwm4+AmI}Pd4upMbv=)ltgQ0oW2o7{(g8$K4&=Ez>l+&n+v=^P7PU@FqJ6aC
zIR>!*wuca0Dm7J?{Ju>nbm3(gPWIFluu@k6(N5!h=7$3CqJR)8upZc8peb7cgVn{Q
zjWs9sTyO5~LGdxn6^SDS9=~BS80M^5I`MQj`8$$H0m!43Ml=L~yoWZ_ymQUDAx!3r
zvE!swIbA!=Us>tA$r5Dl$kwo4yt-N$XL18+6o0tPu!{eFnS<Om)dSEs^c~!j-pn5O
z`3$i<`CJ@e%$jwyC3NdsnWj8th4seo|7dy(sH(E}fB1rkC@CNz9Yc3_w{&+S(jeU>
zptN*%r*wl#cXxMQy7S$BfA9aS8P_a_b-Cx9y`TNWC!V@RZ+v%9I|9)j_UK-be1n+F
zx%zDL`reL@cAVZ5mEXk<?he0`iV4L#zT0Qae4Hj*)xASSQ0=W1&ulNZA1kFEPSroT
z_ISD2=6!KIxH|E$e)_(jaUniK*^&l=hr#XRE+Kdc+G^d}EHDI{6S63mv8WIs<3jz)
zL}COvzGFcLk?b!Gi-0c-CoFWH7gz|=($}wF;{b<iSM$9((kr@hb-bwF#l=O|nZPYe
zW+W>+yV2-dk&+s^!QjsIY;*&O-1po>fbIPKui)ITyu5GZKyqPhb~e#y?`so~*c!b6
z4KND{jRkwK!61GBK6`@3pV8XN*QTE-lMfnTtPDgJvJi-3x>qpCz-y3+WviOyj_<j&
zkbK*tp$fIMwS@;c38{1p5c5pd_Z|{tvYMgW%=*lwW$I8RJb4L1-SzSE;`lfq-<D|M
zdUXI~y4ZCL`CE~quwaF64Pltb5Mt?=0t-w9k?@{c65}{y9H0hOVkZ6RLx8ZXdd992
zR7o5Aqci*`!6X1&Vv~;HiX^i=G!&gmA%OTXko@C1fo9AD1~PI%8n6s0IRFBMnhAiA
zT1^Mgs4;o!rRks>1_+123Q0^xz?Rvig+Gv95=~5Be}%hk_q#vn2NGPnN40gBh{z##
z|MI3EmaMmiEg3kt`Y1p8l_!Dq3l~5K&3v(Kd|0V%Dgj1*-vqFsRdge-;;Pn0PG0s$
zPRs=&Qz@Sfmpv(8F6QoIeGP{~GoL+QgHu)VYB;LZ-7z>m+wfk4$k*aq;@?7n$3qRG
z5lnL4W+<PA06IXPng#1uzATvwK8*UlERqezQpoXm-JSv_5Ort(T0t`DKeFV<U?ZRm
zQOU7RM?>n2&y|lHJYk9}bmmn!$xXK`mSC1rFgOsb)Gr!L_P!>%Xzo|AIp&AS!z&9$
zRuhEYGoya}FY-0>;1fa!#M`1ISn#PO{u^YGjv>=~{SD2%7Ax$(VQPRh8L`K$X>1<9
z*~xwF`Z$y~o3lD~klJzkcT<o>R{)nQhMfH)vM}12TWd}ZEcH&FB&*Nu8%xw}g(R_5
zdeM;I`)8Zs<4VEsQ5lnICSSfExPeT^(a{l%V6Qy*OQa=pi_jhC9z6c3(it-00JXqB
z#GOozJ3}xKzPqhhP+YUg;YIYQ(){-pw%mw04FYMDkx3Wtrqpme&nz2^@75#*92FlR
zU=RXW4yy`$Mr=Bu2>HIm;ula>XzRuWx8Ez7>>fAnV8&H}6)5q_UkE6&Q&V%N?nbd!
zl$1?=2u%_+&2T-ba_%rfK=K2sHSTZ39BPfc>X)F-$C3JZQYXZ6p?<GhG))+Z6OwRR
zdb++C(@BT3OPCxHgd>{>R~d?S7k!tiBA)m;0yArO^I&onK#cQ*sknPHyL>!yjI^{Y
zz<bd?il`X=#}Y~es_cEb%{QooTwGjivEQU##MsR2Z;iY$4m<Mo?0B3HfEX)zZ2xoE
zJ`hv}inrj(Arm?}I+B%@1!_&R<vNmhG5dRafDr^Xs?E%Jky7p#Pbp0Cm%H6el^?S<
ztzNZH!dd%BTiz23f~;s@#Q~|Rzqe<#bbwm`B#bNK<EH_N0$}ym9W_gZDq$>EC+Z*Q
z9W636IeE7kQhUvRX>*T0Vg#3z6>o-#^D#tyX@_5VfD<2wM=3%Gz`?@<>TRDte+Hpo
z^Oo}Fm3$0&)IXvE(1?L$l!>CkeAlF_ucPsBx-Ob5y93sbaTPi`IuP@eRe(A+=?Y|4
zB6E+8k0<kcazH&q@y@1ZE3dqTvK%<`g6_83Cv32?6>&b`Hre#b|I`E$z(4y<C)D2W
zn{hSi%Ey=j0U7Q5i7#wO5VntUn}*atGj?B5Lt{3JiVNa*&F*am=yk$xzK{8TymB2E
z&<7q;@dNO6Z7>j7idYmhK(0gn?1~1re&*&>B=d?=8Un^UPI`l&G24T@uO^=?JCl{v
z?1E`o*DTzX>T-)IKE8s1d_7@yqCfxZjLMWn>uE8&qT*Pm#fMFy#RW?E<k(r~$Z0b_
zaGgOA$D(z;Y)Xqds?pB^2ceo=<?W8qR<oTcI}@Eer#<3pk-a-WME#_M__Dme^c#Lv
z5>s4GRu+s0hy=WVdn_$hn8l*V0!yF-5Z9)m)uN(6|FK7&-Lp#OVWH2E$XP-2sYOBs
zQw_u@VqAP-w1razidgp3_}7fOxiF9jtIdhyKy9KOcuychlsY`~h3$$T(*mTU1o+@=
z7UwP9-r5@T$K4&x1iY2PKq7<yMQp5(${))&|7kIR;RrP8`=;AdC2MGo9JhxMaXI3Y
zsSUzUDa;SJRW`jS1pKdPDJh$DeKvcL7k<TM)MtlL0yg8%b^$2NB3EnrH5L%Tj+Mzr
z9<ehu1xT5_xP17QZz9?4;0I<GvPc5|FtM_dA>@GTjFAG&Wh*tojjXw5{xJlmq7wx_
zBc8ed<_i3{o`06GC`GJvW)08`eohbW85@fp-JPADW^XqF`4<>PhvtCRIUefj&`@}{
zF&J5MjNoM9%@Dng+yr$O2q5*mKuQmITzWA`G9&^iepj4LLF2CP1iY>*3S1}X3qcG`
z-#3xoya+*l$F}rC`T16-gNW_mX1i8~F`+!4A9qQwD%4gZp7>70hIkOyTNJ$Q;I$Rl
zZX*M+tB;<a9zar3tg4JDzPMzSX(JV*TCAD`#Q&1i81<(li&c$rt_SpL`*Nc!tj9z-
z^9Zc)GGjBEty;rI=!MX>;*r&8J^%~$JO3VF)I$X*E7CEwdB9`{%w>9dg0T|0BF*(%
z$MSQdQd9AkLp#^m>@)p^76cSy>*#@jtE%bN40O@)c=-wltV^E*^b86L3Q%wXbS_X*
z3Om1!>iwTC=J;zYgMM1FRSTeP2XsSI7Csg7w4*O5Ru#7mYc8fHC9$!CPLmreGUzC!
zRwqo4k7wr|>;C|eftI%M737<U5o@R27;X&t|EULqSM~I8bJb<+?%?3%<s~KtCmjP+
zIKlh=-;C66q`~f>cKG-9SF)an#X75-r%!K3NYZg@=}egLpI*U0=BfLUI9e-f`K0!W
zlG7z}knly(PM;<!qg+zl1skSB=5pE<5dttVF%5BM<{UHiZU0TJatCUonAtv<jW&|L
zSF$UvDE@>7H$Ie@!jN|@vsYp)2esKrliR)rnFyv$L3U&$iZEL3d%-sGY{e|90nc?#
zm|0w%*IF(UKcOAn5|S9eYg=e`=rC%zqv4^0PEx=ki^7U5`oAcZ$#?MtfnK1U^zCf3
z!Fn+vJ>dTK76O@=m>2@>HdD{Rz#OXdGf>)h89N9`c04#w8yOj0>ca_F*VLS>w3RAS
z?G5}>0|fmsAOe%EI5jf^c1D2P(+~ip;PJW)9WS>`GW^&0&8fhAkO{tXTre7TsxuD{
z?s2w(3G<F>TN!z=e{mcR;#VY%{)f3WKd|LJoW*R#KS`B;u^WW9hO_F2!f-Lt{^4P7
zl~}~S7I7^-rCeh=t*XD%B18~B48BgRbh+m_zmzgLMU8WN+bQ!i_Z?1_5iK+!a&);A
zCZoxn;U<I)C=?lzMK24`Rx<e|z#>WIr6mpMneY9kYi3aK24|=cu@D+4mDw~gh0wl@
z?oO8GM9jVc6WJ#^9iaM7%oK)lZOnv26JO$ghYRsbU<5Q%@|YeNn@f`hSqdvJt8DC#
zdr3__Fl<geo}iB1^yBas`fbXx-z<UbG9J(Q9uCYmEYQYg9;uRKDfBwho?ECfLxFS+
z;Htug#!P`m;223K$T==m^HpiU7_}Uv3E=$eR0q#`b7Mm)1{A=?RdYbEd2X(;$|eR&
zAXCugXl~i_I!{mD9_3jhf%`QaOw2%IhUW14!S9>|`o1>SmJR{ky5m`Q3G1uuh|)_Q
z7gwuU)8d>Pxbl7=&<Vr|%gf5ZiVuwMQb1l%EU)u~aeo`68_YC+Ju*_EE~h>d@aPH_
zi#L_p@_IyvfT8Bw-yPawRG9L~eD0eCa_$Hlhnu3xamvL*{ryVR@c?L+bZ(GtYG`N(
zWFnP|0j^+bZK(DmATR=%R$(d}sBZz#bcvU#ss5<m4>IpU4Ls1H$CJW>69y~J*~tkU
z^LUFD6N7P1avSg@zYzk)vI&6}bj0SwtylYxA3sF0fh_(Pw09C{-(q7iLUXjVv?A~T
zIOO2qKqg;#VYN;GNNcXF5O)3oYT@w6N|@}dte{f@zF350c4_ISm*u^HuFFPXlf?wL
zH9MWo=MLK8Dapym1U#4!MP=oHO)fn;1y6V|o>x(s1Ro48py>#@R^j1DL_|cu&>%vh
z#-tZT^nVwael(--(4Y52;?@B4>_Z}kZf!M^IoIpgDwut(8Q+N!43;LU*DziSzZurd
zH!PdKoD^w-uemyCcU&+vdA2$b<$M*n87HHrr(H7XMBT693LeDJ5O_DhCj+P@E*STB
z*kDk^&daL8=cz^aAF3+P7cb+v@X=1!d?n&?`>x~3pA>XCBP2I-5RJ+}Fy}7E;d|eA
zQg~9_Au1_p4t&LZAMeajVCi(1K-Hg{YjOrnB@hvDLef7In<!Ke)i9>_@$r#^ZrF1T
zvmw2EXIWsx8Wb217?`@Sy|y-{I`uK6(?a)DCk(7cVDqkmoQfNin5d!B!i6u-`Zm5c
zBoC`jS@<)58YVLcLeR_gyNg|r5bgUBg2e?|-pxy2Ri;yvFhMd)PfP20dun8COiHi}
z2Cfg_pYyv%aMHCsTran{t#DI)>o+E$3SDn>v`2ub?nGEYAejEbtn@G=<3y~kcS%hr
zJJzH>i!>T4dPll=zd2#Gu6t3^4+6QgkqrJpu$S|jfuL}_%+=+-O1TMQuu(!n0-Ufa
zXfe&qs9|F%tGx)w0a>N7nknE$fjp6>MVAm1^g7rGh*?EYrUGfcx+{^7LlX{M_&l|=
zf`T0m=LMjrCX5Eo)4RJn<chw{Gq6adV}Qo06`z@{xp~N@8<1}X-IqW;rzW7Vl_a}E
zx%X;<gTw)rN^NwjEPCxXt?$W4cQ&7sb?=NGd!7q=f1*S6f(ovu!fDSpqtC;7oVjQE
z9`)A$Vibhg)gef@%mxNi{BTz<5{EelfSB^~wt$@`sB(db3JHSoRFRTMHh5%;fF~6s
z0*-jJF2ox<vDA<7kStb(X-js3GQim#CjGu~M45PT?$+GQmn|i!usYW>^B<RLuDam;
zE`<)U;f0~w{2TZHVkhe_NBgXi*?O<#dPg{?mxhMu`1l@yUN+b-;Nbz21Dg1OkrDZa
zR6pYHid1=4K7G|Sa63D6%f6L4aw68cO4z*){z5{Y+3YA)g`#A8DY(da9|(lrLb4{A
z@Ypbah~Wf}PyPl0F#A2GxCI8^AsJTKKG#T3YyeGsXho4QrS`vM!u|K_VfC)oO}{Dw
zn)VblGsCRahOpf={ih9vr7QgHm-~ch4H$MUE0N+ulro(P7ozv$!};WQA5xGY#F*~{
zGubRAGgBSk8y-I%Xpu@Citr9t>UHmXf@e}zcJwz05s0je?uLnx0;duHJ3vGXyZ3+L
zikuxyIs~(uC_;V^bX=h;PkBXizk#WHiE_CzvnCw|-r`cNs)gEr0mS$OFyb+mz(qnC
zU_aG4YJCIQ0wtE;nN_2~on2o`pl$UPbM00b*)<~p!OM&ceIU`TkSUN3<jO59z=FFS
z2w@8#2BB<xe7!^Fjy+apKAf2)*r>T#&ZV)KPv#$P$oe2u8}F}%NL0?53;3F$IUst<
zQUC)p21Z6-pv?w=SgJI`jD4ehYh(LdPL8)|i?BrpC8jK|%V%)mQhSYPrcQVFp#CXk
z?ENhS4J<nzV`wd_8qdl~Zk3G({Rt7i;bCcpv|jzKIv{#yu$_$#r#ey#c`pdJ6TxY_
z$^e-GZ~?m&-Tz&hnr_ESO+e9Rette46lIi@smaO7DJj*Obj_b@u91L#iiqk|J#?{j
zKt_+(wbLFSl4Ui+V7ir5#@pa{Qivtsl{Nk)gWp3+DNj3lNTFqwNA!t_ggu2*Psoch
zi%O>O2%pE{Z@DL9#@+`t-Bd~7I&i%F(DefYJUE*H=yq<1OBJvZvgTNvnMtUq3d^BN
z?4)O4h>VB;V)fvEG&eVojEoTQy5v1A?iJTz9n**d1d{hsohD(%a<!eSi2aQZ%=aDn
zrRSFa($3oDQrLv`L0Vt_cCpWACr#@sIs{@W3oD6Bhvgrj!M6w$TKZ!tY!+IZ3UWm9
zqiON-iA1y^bu!W%ET>-dIKkl7!8L<wMTQVS-e0OwxwEwe6n4Nl&&tXQTor%-OenzZ
zZ+vaS$jJf9h+i4k(>xQIFgxwxMZXyCP1iJ-2#sCMiN=J=^o}g6vREj6ZdUm+>&m0p
z_aMTn<FYo#9Hsx{W@jrfw0U-ab?ACD$NWQVV`oR=FdV*roLt&6HW6q&Z(7SWm!XL(
zMa1U5hsz!<Rxiby)$4Y^7mn88$Rle@vD54@dC3QG9GqbG>FD9lR)NAG$VrhO9328S
zlmun=q*4Y15+w4TfIv?}!~DyaFW`s)*cYDNlw<TSSnz=)@v5;Ok?(iR7=k2v=3Qgm
zjrG$0+<rGpcFd&qNo3iHf-=5pjI8PmaMo$&ZRcshKR38FWW)8&wS?-onq1QBQu^F^
zakD(k{K*Y*><uTf8~@f6CPkjY;(k6;uEQEhV9~@NqAx**)e&20LqL!Y5Vzx%|Dj8R
z7z%XU2u^f0G+ZBV9Ri3!pbbIBt9LzFiMuf8Z+4c{r|YtZgRLa=W;kBdQ@Iac^O^YJ
zMnsAb;O6e0P1Ss}Mw0pbiaUS;SuZtUaQpgXRgi>4ICnfLC4~kv6pzz(3Ux?Xnp{dN
z+4~J6L<r>Wn}+V#*w~_?ouC2+EqL}Z;EMR&rBq%{&PMmNb8zqz9WahiTLyB6{!%P#
zY)+1jxl{+>CXkPuLAIEgNt8OwKt<Kp_XQ_!yh4wcXMgu#x)gcHcFwvU3=+e-LEN-W
z(}Zqp#Kgsk70%+|;#x={OUF3f*@L73nnE4c{gK&N4CK?``~`y4-C@{3AQ{{?r9y@*
z+BY#$qkjXHSK>;|E}l=H8o_$cFQCZeU(WVq+>%olc@I~01Es1gA8dY#s!K$D{<=I7
z^){UT7;VCJ@Ve^WE=dK+{Xe2$3X__7(6$GPxL|h|k`-7G!9e^Z@&{VIs){X#&A1i(
zcSD$?XxJQ{zp8(euR%2d$vn{3q@N~}LKNmr<>ld-umdXyXo&+_m8InYKo-&elmg27
z+s<{*uR^}0q5<ACoq;(;>Hot5Fbuj6*z%CRnfNS>)-^eaAsn4P&ipJI+%7%=4@9RH
z8yKHbMt7|YCdmn(2S*0O<R5G(cER7=0EZG#p>642+&z9y({6FvpO~un{IvoGvQQY;
zGiC%q=1|7`4=W7VUjY3RWfheV-17rVg&_ZH^SH8%*8FW;wa{Q!8F32-0q#YU%<!P9
zbZ~IM6i-e{s<oOO`B&2n$)+CqZQcTniJPN&M_z#n(Hnick6Ak_`LwV_Ws98UZC<NI
zBwMlDonxlpfg@tkxq*ZQ3|D}bJji|k_~h#H5+NW7?Zb2<=5fdE!}vJ74bjgZlX=DR
z<;#dthk=`oV4zc1=PV_K00E>Oz@th^O5#0p$XNDR5^yXjC<~{=Dvg)fWiU$CU>L9>
zAI)G(6Y+a=#T<V0YvlyT$>}LhRL<h{@f8tEcxTAMeMI&J;pOEeP|<;b+>Ca=(bP#v
zO`ZId0FEs=dHMQZzp^l__Hx=5>K8mzfKdgwfOCPMpdbL^f{75&NyX)|&k76_6&LRY
zLk)U5I$&Smvi945>vl53h=0ZD{bZe^h99w3!^=3k&G=v4Xw*1V(O41$5e<tx+7Fi4
z%u1L~isaaE6MkvDlsiKY%r#=!sw96zDd?X(a!5|{U!#<}1_uRo1CZ-K7mgd<R>_9r
z^MwU-g-k4S!Txq{73x?eObBL>NJ~LO5Its!PQ6GgA2bx5-QzOy>B0LA(mID;Jo*IR
zf?X)E2AWaj;N#<CV`Bp`6&%&TuO65$^qx_x6oCB$e4HXAM6n0*7SZ8^1rd}$KbxGN
zNaA5CudopG<#MRBb#=?4r3rZ8zel7xv3Z%T>WcEbg6nT~L<Gt{mWs9cVNSvV!~%V*
zXHi`G=;y^k>R$XVt?L@&I+Ax5CDo)J$hy$Iy9=oo4WH`w|Kv%4_|x()2a|(txCVx0
zS!?K5)(LNT?;gV{>4@O|&z+3jAqez?J(g?_C<Mo24KF(19z|0EDwY|5CD`(^bc|~0
zH0a568^b_A34ub$N0m4T+Fqde1homs^1F<g(-pFY{WzGIaM->?*V}+SCL$uTb0yYs
z3iGj!V%v)^wj0>UgX+6oGjU$*5ef9{loHp_g#3pNe{8$EH-Cn%keWcwf1C<ivLI{)
zN~&FP8GP8;EbH8a$!II2zA$om{aQ%nF90cEg|Tm6jJ_<ja<i|uMl>)65K{$%Z5iPx
zr4t-UpG!qUah)VND$q(nlOwY7QTsFS$<b@6t*ZmxHXKl)RNyoQY`wt0AQQcNco;OT
z?u`OUjK%;M06Y~s%7Kh=db*yItgPkV*NM?1*FMi5#Dik0C;o<@nmX5EN*|=^G&>)|
zLKLYnaRQYDZ;`~A!F~Aoy_v;;fpwBIT6XFy^-n>9wLnhVQThq<;G<{5Q5=<XIx?@k
zY3I?@!QqqCxh|W+<uI5~Zjs1n`7!yukboi@-2PbhE9saJg|x445C33yRN3KfMF=Uy
z4FhVrHCRI6iXs2-E@(!LoSq(t37G@kZBQRhO~plu=gI*Q)t*zMP858uXqUSjM|&#h
z*FxYAL6a|2p-3GMU})q-tIb+*S4sV!!@|Q~d))K={7#F>tVOq*WeDdNe)z-B@_Zlh
z{KqSHT)2sk#6@<y?Vi|ni`DsxcXX*%c!oBQnKR{$y0)rQw}4g|zoY))a{;U7uaY6&
z0zSqDg7Jxw*R^)I(lJ$qqg<|UH#u!mP~CUO&?9{4(Gb8kEdU9(vIWS*0XhIQt%1hh
z7de;pzppt`q1)SPuG=yT@nIwBA`TQ7oeqG>bA2JJ04Hqi{WzqzwB=kk{ojB2+zJ=0
ztg7#t6jtpyqU^ShmGm`F=^oyOuJc6DXbBT5#tIXc`xQKnbTO*S&wCnMM-x!f8!$Wn
z&QX)Re&$KIA|lMZ^LhqEwb+=LIGO(-zbZO;dj^(<W@hTQE5nGtgHm2xsdqHP6$uJ!
zre!7E`xfZMRcZc1)AAf-uH@(DUI3vE;F0-5OgS19jrz9h8=tLB!dR29Qa5EOMohng
zieF9^=5(rPTBa#{%^qx&1#OD3{9HR?YWkmX@jp{`&DHzOWSUwFa&qq4sq>WmjM=x%
zPR#d)v*V<7o^vw)|C`v*>Je=k>+Mhb%}mT>@5I96$Sz(1Q}%Klfr$VI&e)TtsOTGr
zxPWKM&t%3cl+$&~%~tmFH#aBS@iPb!bbPH|5J>N6x+}sy-I#Zc@^<u%@Kcd5P#ytS
zoi;e~z$AThb5q4ucaoj8GkK`6_+|j{Ts&9;@D{P6dUwvMu#3lVBCY86E*FvyQ&Ol+
zC(@=ogQAGycLYA{?dReYC{s&ENaROzuHr^$BP&M8th{s9<q~<{!A=w7Z7C!N?_Uc`
zcM<u#VUE{;fA`goQ!5>hAoZBTi4T>+#CxjfgR@5Dv1a{wLmX*aX;j7hWSCyw*B1qq
z+4Lb6jy0?uY^Ti2L06?^s!$$xl0O1mm;==u2$7pWhn5ZtR5&SIPJ3tNsq(@k_d8ff
z0Yx6_x|dtb`6Mt|ntU0eLTCz96GhXQ(#DK5bR<**8|O#7@S5%}Qf+R++Ilv|C&ZP-
z)T4tn$7|D%`4>c{kTj#6tqIasLA9{|TSJh6nq?3B-DYQ<-BDgYMa>YUu=Y@-PNSt0
zNxA)49i5kUL63ns$}su1ISFZ&$l*mVN*)q{qPE-dZ8FJY4Jvj_n|9xGuK-ag^TzM<
zp8@nL6B+mG=~(t9ZDJZvP}0JD>cTwW^(a&2#aI@L1#BWggq$s;L5F}Vka~)9jG1lg
z^bRg$kQjmD&sRjK2jjZedZ27}wvts8`~-#Hzqmp`x71R;vI06>;|0WsL@N<Lel$H0
z8;HYrJI-e3O5X9Nn)uPt5A-_8b}c3PRI)HW*R6m3b7P<|OZGghIo$5JH>;<RP@X~Q
zF`P%Hb<XA9d#%?Wm3fA26C4?JurW^PIhvwry<g{0W8mE5pT8i78E=1?=iWfVA!g&D
z%{)kB?-EV&N)!eHvOP@k^j7;n%(aLHNM>4iK?!EQ1bj(8S1~Guip@)0v0zrKDe_A6
z9*4mDRib>hvb6Jb%|>A{HOmh;bC%(B-Jx_nCV1P;?R1|j<@MYUYt0rcSP+3=Ey$*J
zmOpYDr}-3)cS<^&!MacE?=h2&9)&*f_NiVRwvKaPpenp8#vTz-@<>48mtq|m1ew(N
zsjkR*&aB=i{gh|%Ev~x3H3ySsf%dOo&gb5aQ${@`JFY$z9SHw&hl5OC)hl8qC&#|6
zF6~H50lq)yGI?a5pLtwm&0&Ec5Ol7JEP(jmHab{B{%BgOrAaq4HY)INxb~-Z@GZli
z^eTz%9%g*(z5TbBm*ERKuOLzp5;Q-@#y`_E@j(a7f>gCNw6bFci9p%;Sa(oT3JDk>
zpB^Tffh|&#@oUw(Qt0!A0S6u21Vvw4YFs(97Po6c3pE~OjGpT^D}z4vPyFS2b`Fwx
z)n*|n`B_@N^taYUk{Ut}$9m;vvoV>k;atf3QV=Cw2wLJus_k8v-F#X|*+at*9XwIf
zOO+voS?%uwTh26J7*I4cRJGtxl38}~@c2cJC!dGzgd0wF?~{4!Z?mYIWKz_6zyF9}
zo+VL6p+Bs#w^+4ZAuIqO$IzTh(w$ZF(Pk%Pzy+YQ%KfETW+WQYgU@CE@-^U-6n5FX
zh&ffTMY{i6lv&?3LwB3<yqq-E(m!F<{&GK4e@3$4ejcefDLb@7l>as7<zscTK?e7K
zcg$2(^+6`HOtZ%5x+@zR(XAWF^;o0ir^?EEyPx}AWA<4fQo=mSK;PME_SgR8LvtPD
zrw9R;<8^G69G0l(^X_UyRxB?YF)Zsv%u-leKT_6B&Jcw~;Gv8da5XmEA=?uFaFO8t
zDw&aD8VMSx{)7)yX#7g3sN<-#phMc-bnnJL;BdR$;$9>{AiMIyj|Y10AJDa9{jsZ&
zgrnys3zwU3cK&XBviSgH%~0{MFfd|0&H{9HD_+_>&;y9A1zrRXQcEV#<A}qd_|H)p
zA`5Kx$M0eNYu|p0T)wd}==b=V<LOtZA=H;NYK|`>r371+UYDCRT<dsN$ASq9k(#w#
z^~S|rGrG_<P&&U&>R1jQO=q$Ey~eCkfO=d6<EN}{yI4PIcht%#5$t-rV!nJ6`@GPa
zt2<tF4kNCHUGT)@`_tw&jVV$6`@9Gm+uZ1l&p^6mehRFrnY=AM3A&@fIZT1s{BQlX
zm`!=9#osv}@~FYYm)ZDRlV9T3^|F}>Bv^N{HNO9Cw5)64z=qez&!XD#32B(FN58|@
z+uqfuXB<%$`9|yUI`X`G<~Ci<6=|x9AbAIY6lItHhmlvhUA^>Jm{>u7&V9=Y@l#eS
z4dWW!qA?lis1m>w|4@+rV0<p?5XFtR8)ek?vOD?su#1w`uJhBgCWpdqF(tkScKr0M
zlC5aL^>g`V^!NE!Uf~<Y^)5Jki*<UPF^RS5bOFT7RaNazZ~n07av;X`<MUfi0Xbpi
zk#5g#S6Rc1E3}y6N_oNh7h-6b!ape(Wt7N)8uy=Hi2{e@#n}K*IzH=iy=p~rdB&vl
zxcq9jpBPOlHH(gt^-78PEhLug66xp+s&GgVw9ut*fpZlOdzr5T6HxV5OuaB`mD=(@
zO9&JG=36nPPxX1g0=}SNWpqfbpOwEW?H+o5^2qOXE1M~e$Ki{e-D&;9K%`DYTSaFO
zjaM%9L$}iO%=DBc6$Bwy?=5G?p0OT%eh<S6#?vGw%;tx5P)PRnnr~(Kb4KFFP0!Q|
z*^$`23H*b@X`2S#4L*@%8-u{ni&$<6e}~a{tzyRyVVIvQ$CJU~ZE2TMqA*!Mb$PKg
z7?j$*8V~-S_apkOOs|n+`=tKz`jY@3!Vyf$UK}zzf^dz0xQ&Ioz<S*!_aW__7wT{K
z|3#Wc!WPS&gd=3nh9Gy%EI*pQMtAP=F|sm#04`7_(P(YPr8&J}UvxY7n%`3-D7ox2
zcuW3Ed~TycfFJ>%htJ%pC`pKA<YX@X&x6B&c~=;f?_JG<N+H9+HMHQ43|40dYh;4<
z@lEBaM!25|lhgU-EQcgo*We(ca5TR!!6^}6cz58;TN_NIP9WjgU^QE@Rb$Qv`XS>t
zdAI#b7>5mFf|@>i0r@bfkgIY6!N=!i(!o6&ii1lN=5dTRQK#1nISQC>PZ?I9-^;Y+
zvpVakFNFp@2ngEutoAdVJQA&XzvPC^`siS!yRzdTAWxa1A>_%8d2UqyEeTphI$=?g
zHx>%KK=O=y^!QVx2v2(U*6E&v^mHG!W3eNLfWz=0YL~+6_|TzCXmxIKF)&dHMsa72
zTd;661%2<BbdBYJ>Fm*fbN$Lz!nLwXuQ@iE&2B5w4mbt~X>KOk=(Djj1_j^O6C(hN
z<+qSI$J5HlSAv0INj6yJbS<vWWjcwrFZ^3C$XR`%0p_x-w5*~8NUtG=?e7seQKs$B
z&gBdk2IbvreeN|j6MY`OT;$OGbY3nOn;iYTxSc60#QV%;S4)1laNo3wHC26ChL6mx
z<_7{>^WUjgv_A4p2C}5TX-xa(_LV#yeGlDOoOBCpRyLrP;iTW28Zw1&_0(p&OwhMp
zApaMHe{;cObUb)(4e<miBSU^C(-9^Hnowmq?bz`>iKnNno#UGJF@*|C-gu%Wd)H-3
z{FY!k{7pbvIopp(6}a}xe*cCBCe+Zll!&u;r2MBPRp_4$gvO)$bw>TC_92?@WgWk|
zHs1XX0p|#)g#iW=pMPW74kGzRUbR+<2?-2TkB-59X3X#{7w%c~GrabO$0bO*zDhb+
zBh2Pt7~vR+^nyX5I*vZuoOpMmaPJ-z_<{D5%FV6%c-qw)I;1%bU;Z4i={#-17B9ki
zqXWjtdrKOkvX87-sC#dx*sDIZ)3Xu&+N&f%Qggq2EcU$7mP?&b;q8{h(Y*htIZ-(|
z+U<Y6f(3tCb94N!TJx{{0DwK%2zc_(#0as&lO~g&oZ$|;Kf{NSd7K4zZ`U-AF209L
zZ}`3@TQ7B!k1s)w6p;JLUcadn8kkcAx@7-Kw~W)h>5$S_$Sm4#l<7ws9qI3c8AK3x
zoshn=&Ax&tew^-X`N4T2DE?Ndmmys;>SLYV%X96&A$vB?%9#B1%d2&5cyHf5`-x(c
ze$EX${KuLcEJR#EtG-`^NC7``aQ=KY!3lsE?k!J`=%Qw40MFC7eB1`dz`~pLx3}Ma
z4{}c3N1_)EPS~z6UO}lCJd#i#O!ebYpQPgCj{9vIk4xmkmVY_-rZWpx&SImTIB}st
zR!S*UX0`}<e`*Roq;+GoxZP={eqVKR=J)A+<X+4?UhEjITYg|)@WG3_+w?@uhuyKg
zb6Dejv8qU4z7rBxd@Shpg8gg)$O5}#$w`>v-5HUN(3Mz9>!gym?k8XJ9uj1A2okrq
z_sJOtpYICUX<Y$wAD!sI=J-*s;pvI5ulfyQLRl0y0q@tY?@#0778(rPuY=Yibe~^{
z(BQUlwOu39rrm3E<dx%NF<Wf^+<dMeBxEG&8Q*NjWR)KHPdTo<Qv$><qqAG7*NU^#
z)T$G`v!iNe_7F&(_TK(@g;8*Tgp0%G-`vC!QjMqM9n*J{46AQWlw$is?Zm?I`>o9F
zN`U1;^hYx->9^hN>FuUhm)Y4}sZ$B84HpFHEBa9{37I=XpFHJvb>xYr_n*AJO^G*g
z$3Z>&zm-t+duwao`>41QqxHQ3rUM`&CYL4@#6Fp*Ynk~-IKe_iTZ56=hgJSm)6oGc
z(j*>YFT2NxH5dKfO*?tLs@<^T`nVh#n%bw8In&ijo(OzqQXUG5%>%REEP1ZLyZf4#
zH&S(vnW8$FpZNm=t(D;a%(8=a;$qT+r-L|w8~0PLznchCMzn%VXbBafH(U|zc^DOn
zVdy2Cb-G_4u8~hY1d=bCBRqHHp#p<h+*)tpb0pUjepN~r;gm(}snB{-_sMi_h#s+*
zixi@m*IYvGIXr7Xm;O{2wc9^}a$jlp1RL^OI1F~R9x;y3r*{X(dP*agr!9o(69P!n
z2|(KdWXj@7lo%u2&c_2G-HF<0!E-*-1;cP*5u~a%YVPrAwKKDnw-kcV@7w#h*$@~c
zjz|(&ESOq`mp3S;x^3qbrnmt)S@rfe0sGUF1@#SMg>e%@HRIi0qs%D5n)4<RwU0{Y
zHt$b902h^l3RmpeutlE+68M30+vP?LlFMAhZ0}vg!x~@PaY+e#zpaUOXj=5BB=WiM
z`1hE+^o7UXY2i;AQWq?f7ZAwz_ViN!)9EUh=!Ts(D;5+3&~B6bWa<4X7uP_ZkHxg7
zDST<9zN@)u@|cAe)sb4i=5QnFu2j;ss<Ll%uF2-pQvM<f`^w6)pz+2xG!0rLafYX>
z$%&-LtB#ig!@uKHhsS9zp1bX$#R{O$z0(#V8IC(A-uD**`RKmA-{Er1<JA01+34^+
zpT&TffzC`+Y@ziJxeAg{k-sBPr-uc$gdSJTlG6#FPZpZ@yYBKna$6We-!mJ@j6x(S
zg143g_-)oJ`d7X5oSaYmZ&PV>3lG<dEH*n|%Yuo2oQuWuz?F1V=oKA5g*;&vg|JCo
zVNu@`1<l{DeS8u<RePU;d^7lVsE99ADs5Mr50%eFV~QuMdb5hEL~W4VL5`d=^rX{}
z(dyKqojq>1%jR};o4xF9F-Q(aMzr*Oxz$1PvvB;6wv#C>R@n5#Y{-|mI?fFgn<#;M
zrjE4!S!5{d`NenPdp>wbb+@SJz!OM+w5u(4tIX^2N_|%?CVO~O-oZdDo3;*$MO#cF
z`Dn_paCA@!@z`@TC^^Duz6lA)Dq>#U+!RtBTy`U(kmwl}pJTlec3{9}^mejYNfR$t
z$V_(qc-Ac>h-rd@g9}(wGS1Gc3fMGlQCF_{l&O0uNpGI-Y01t)O@t`Z?p_91;LPsB
zpMq^Xj?Rm7`<>qjW%rD<J93~@b6`P!y50~HHatg;TM2#|?de|nR-xqoo$d%&-6QlI
zahpe6Hq=Qz&l!F>S>*g`Eu0P$P~AMFJMw-0Lx(q65Yv#?{nfie#S=?aH6LkSjnufg
zbm~#_zUZ}NK~cHu5sUp6=0App2wCMj?}!RA+@oU?i<lfy#<0KUnI#(_qbfvedhtHc
zvPK^D)w7G<cb)KK%noXG02)GsPL$;b@~kNhW1q;Rnq@nwDQs2=tmTeI%bui`d60DS
z;=D@f6jVrUUOzacR`E<>RycYx>HF@Bf<MpUc653%i#|ByNuR{o)xD9wXA~Ou%*O4t
zY15O&ty13Ix9^SDgWOiVd)eQ9ToAety7*&vmadG&;p*du_wI~FFXzb^jTDLI!Mqma
zQKaywBU^XgyjJiE_fC@&+c#mQ%XkrSs--Nkm8<32>ct!PP44JiRB~Z>W3GIi`8VJC
zCr?u-eV?y4^YeyYEhZ$tSEJpPU2sTEVI~_8>1*`dJ{_tO#bi#*Y1V{+#5K+CKItyx
z+BIe#%ojLYQEttqaW{@D`z`Z7FaDCY@(Pi?_wqZesU4*4NVTt2y_ntjiSYZYa)m*g
zMzZN9*OxUM#f0YPhVGk;1cH_JuIT*7ha0;NP50`X#?e9`G2{j9dMwJ0QWtx{e7}JH
z;)wuvPAH_rMobKW*x#eanb9M!j(z@H%e{YND@GJ1=utO2r4MF`5jpp_J4D#frUsu?
z6!RxvHALX1V5YTGSBl~<{PJRrJAm1E^>K5?O16{0`Uz&Wf>5Z#Mf={1?C<GE-r2Af
zyDx&4^F+uxZ)Kb1?hAA<607&vEi5XfU(7>DD)YD5aK(SteGq?$!mrri`nHe^>z75P
ztgppqtFx43_({m3!Zz8NN~;<=@I0<B0rAr(sSvjH$9v&ha32<fN@JR#YLqX3p0?{r
zaCUr+^zGx^ED~mvUYqqj_N(;%xDyR1YAR97L25e^CNA0$lgIhrQdh*?t83<|^^!$y
z*AuJ>dyY>^`kl2I9Zt__<Igy-9(OqU$oEHMP2nrY1qiTcX1E&@j+-eKy7<M1`;HH3
zvv+xitPk7;1gp;1*&YwW<(sV$?;)*n$t1H3oP?nJj!+;?y+NrjxIt_TBUG;aU{dk>
z=3pek<=z_N7xkLbdTL_s(1fzN6ys#&0<X>JdN)@Q0*`tyOU)!9`QmO)o6)y(^pe|9
z`X1JgA(?k$m$xYh{S!=Op6a;ST%H-NuXXPqDYeW2$p#H5zxGq76iGl1S6_wU$x35W
zZW9XRqe<=8q4UlLLZ5MIO4qBNr}P~4mn!lAA(;6}%2Jxgmi!J`B3#Q$TT*;b1kwr(
zvV`W0=*rk5K$jMl@5mJgE#JwMB1Oo(YPm)_S`t@vcxj2~rMNurGW)QIrLgJ|wuD}@
zIxEv5ycDmVU`DR*jC6EqEBZ`A1aqPj`&05I>29wU1r{P*^2w1fZ+Avs^A|<m2*i&E
z;#ZaIO-Y0Zrio@670V_Eu1FV5IXdXU$IDAsuI9RleYv%TwEaG(Yf5TTzUj3(oEb!)
zjSCXeWLq_iilp2$!-kGN{1$o-@tezDJVVrDMtWVHR=^YTYuI<ux2Se&J~jk(id<Sc
z4nJH=kHWJ`r%-Yh!Vn38;8<5>Ge20^AVao><2>WW>S}Tsp7d(25Ua;*1`1wRLD6@F
zb`Mq5G)7nI?2{F5*KF@h0Q}No4mR@Acw&<}{gDp}y|y)|(Ue^7aP&(qR*L!=TU!;+
zLnsWxF^cW$95IX=YQ++d4*6W!zxrlYH38GioJI{ZpHvgJx<EDC{h6`u?cSEDn8G{b
zV}1YTfex1QPjrLFW|%p`9$o2qK)wz_CCaK|F>;n?e((%kyZ;aF940X5i=iPgWNoM$
zu^PY=jz@YGhGSDrfYeXD<!qMUb)2@ovU(>Vaz`9(5g~Fe_n?Xi`d-^2(GMD<;P=V4
zZa~a%x6D=^(lztUKbzQqqD)DmSDxPo2?_bK+T56G4E$Cd!#;CT2t+2gtcY6D`RMj#
z#LS4##8%IeK&!Gu-9}HYT-U*LbHkDM#aV*c%Dkc^VOmuk_BYmZYU&Te%1=;oHFn;F
z(PeukVNx{<s&~-?ITIg&7;@TLRpuhmc%2@#kP<}olMJNy+f7b5?Euz!YFdDV+?7*i
z$JEpZjhKB66tTSY`vx5({$Sl#jXj97Hj{c6hwaXYzAs7^#@l0_x8EuYk+5qVAds-p
zzxr`ILbYbb)N(!Bfva@-?t+Ce<kIgH>ptZBoz7&;DQ1<HPY&%OXv&KJP^F;!WIAwQ
zL%Ty@`1Ii0P&<|=gE>}=0(&=>ZH5~_?0F5P9U3a4rnP|*)=|X=y@t5e&22j!wZg-H
zWXKQ@SeX@9N%keHv+ijYJ~!Y0<TKxYj#uk2&v|K75A#HcyuXz7jXHIhP{3N#X~;jk
zYY=qbPgdlwJ43)U(&C2*#eZ`|B~fdOXYGRTzU-OT_@=B)#Bqs??KZy^#??(g`L8ri
z#qZ+rOGK@V%<0=aeQq9()M1K7gL1*k;W59&QL$};O-NQziJB4w1m{rYk~oBkT4^Hj
z?Tlr%?GUKS41p$BMMH%jF|F-`J6ly!1(XQ_rvg!}dw1R>8YH2lSE$axtQ2gxhZymr
zT8xjpXq2;;W7s$w`GqjYr2<j?B%>vdh5atmkupbvE=e(B-!^5XwAhwvkjRJE^K-#3
z$)Wvi!WjxK#Yb+(y~{lsC`_G9a`5H#RP6u50(d+pDpGCJurrV&P<#?Y3l@a7Gm#9{
z-x-MzN29*?c&|c-CFprwR`)P5;pX%X*t*zN4Q3;)UuL?zS#6BD=OQu-k~diBA6`(T
zpiZiL<kV{G3=elUaDM=(1~N&cRU(3PpY0qKd#Yezt!s3EEkM0KYbW$w;4h>6bN9_u
zEA<ZF(AvOUXL}XfcDMiU&^&0_sM}x2Cp)5I6CSP>v%bQhWyLt!xO%7wzQv$|rIJxj
zOIG%ScbmuaaR0$PE%>&fw<TnqDyL*1jnlJAdajK76%sgugDtnt#s6FFS7f%Kh&svb
zHGvf}Ta_QEEMd-N8@~4^kyoF|qPy0w8fxT!1(U_}q_1ywb!a7<wSgjl<-5|Rr(eqQ
zfe8juvasQ+e38Dqj5~0SCw+7DC^Gbov@+|MaHXA_dUvzJ;FX+Jb@N8fQrbX8Y_R8I
z3Pgyy<dZEqrAl1>PB#py@?_~j93LRp)6nwPq_KQun_%7H)<P&jS2-4Usp&=&Qc?xw
zr<M#qk-5sA6r#Tueiqfi*1v0Nf#B{|I2pfkjZpupB(M5)0a<QNitQOX9hTMB_xBIi
zCkic2<NxJdcozS-BbfxIs*vATHHFJ9H>7i!G6CkPP1s~^YkOxE0o9cq_xB^4j~j)_
zrMq4ZO)~hn+%Y<K8Q~^7JVtIOcr6L^PZuX?0e`2g3I&Lke76z>)r%mLWf$1%gm2V`
z=Ugt8B^ICtSVM~reDg~^2;#V2@VVp{3&Wlk-D+29dhfgD`_YvImWe#e%UZV%`k{_K
zyhFxc!w8qCixW81?HRSRVFKZk-~YEd{Jh@NHni2Ha*<$D5xa3&{kbM<vtT4gIZ96F
z_Tj1Qu`s#8C*?i6;ohxP)ZjrV>#jT@yTf($AvNm7*BVBGC~2t*gv(Rw9bJ5Hes7cK
z_)SXIub;lM+P-kTlpa~O7f?`gx2XQJFMPE48(>f`I6&`V&w89#rxW27z{{0#u=o&u
zOyssTE8aoS7_AZQdwBHc=c{}1k=3lnsFz#?oEUQ8&cR{3#09)ofpUFfT%01~^E$b-
zfZOR>FOhE=zvuOOXUKo(Z`(fQE0-H|JOkqA0@?Q0yArihU@xgrt{oo}BQpKa7Z)FI
zv)o))U!Q%!B(9`Usn73vtQ!`N4*D@v-~|rtjrUlSUTwS%B{x`I!^j1K-Om*a)aLTk
zrq#1VPfBB-*>E8|P*@a#h^i8~qb+MAYiRpZf@NoRhvaR8r^N>Le-c9m4Mr<#bD5Ny
z$;tQEkww+}-Wy{?_kjE8xE`5X^RV!OHoe$iw>ffH*KxeSIk$+97X1EapxXW$(MM1G
zw(G`~CbY%&N#Fa{#h(6U<Oq86iXSfryXk^*hlxJth1r|fYV1!?L#=NFOEm#9^B8;U
zb0(Ltqkwvm7SVUFWG831X!CXH`9ln?uhTEq1_#7Q@4rjGIGaOKtJ*~i({+4I1b9}4
zd*_!XKJOhl`^Wld@tP>I7Lp>RC(`Q-xdFTQiMbY6@(rve!Fvr#C9}qm!FgEFu`^%W
zWf(ZjCY(Wm2@Dk{@wL^pE$)OM{h5mAO^~>=OJna^8sc-hr4wHJw%Oui$nN+b0t8c>
zfX{7gbybjzOq34GpxKF<oSdAMHFl-Wv_A$Q+*Q@pL%TPC3jOOhZ&sU~0aUS0n*gAn
zadDI5<Ha*2^Od11Z7qPNPx`Y=zb&Qg2Ez-8zxqDkg>)OUH`>a`3;;k9@G+Q=*$fgQ
zriTZOdrp@!=2WRB`$!LH?o*-UYP<K-vTm$Grs}|U`Ep_c<c?SXPI6WvLhp%ceS+GK
zcI7bYc}hvaerF4w?)hQVPSZx}tMBi;{QL+!&f`(Rm&@~WSV+<I0g&{jrl#Hp6MZ0&
z2q@-)fO6OVe9?@_3*g07CX&|HbYb29KyL2tO+e#RRu(CBxT3iD?Ynn8Kq)Zr&*{}w
zo9|0|RTVP?ustqMR(;q1205+ZM~Gz4{W9tV1R+3K=!qhFfi|qx56qFENg-ybmx9ku
zHlVyfIvgJ|!kxuX3iyTk&Y~*v30oPZTNam7{Q>V*fXn${Dt@blz~kDezDj&(DhwOx
z2p`Et|J``I<A$aJqk^yROMy0fXNbe3I*k=Id}zD$4_SmQJqI8b;OFOuLXgp-y~!m2
z9wT7u0544db{fqTG*CEM-9YOMK?MpR0F;}%3jEnifx<ob?<<8l06_o!`*$p*!i0ht
z_>S*?h#em@;-acxnwgkj;NdMA^1{L1-TrpqBPIPC*V*jJ@En*#Wntt2q~-c2$?h(t
zgJc&Lyxlu$-Q^03Zyo0Y3^;pI=jQef|B7=+(VlMgW9aDU<YnBE38TUK12kBnLgvE!
z{PZethXrg1pn<*D0C0!^Qunk_XEix7QQJ$5z+y3xx3~8t8BliSYRnY?$)-uy_9?7d
zv+VTL9-wXDupWYh1Ym&gLU=fv5ylNe8YO$mEaz>UkWwx1l}zVxTj9sJaTu)TsO*<g
zB>Q=Xl2q*sHC929{dC7joE{v0GTl$`T($Cn^ruw5a@$wz$jt~>fQn((zZD`yK*z*n
zv!43}R05>)U!LPY)9gQhB_kutEh++QR8p_SC1?Q=_O2`2d@y`|2n=-<6WAf*<ACMk
zda#y^?yF7m^u<gob=DN=EX_dOPPc_`ZoKwqDfU*p;U<<{3U@tc?(mD9ASS>V+b%Vd
zRAL*Y#KyAPuL%W-tREhFf_Hg8eh=&iK1oYg0n1lgfPn-MEHx_3#d@1bTY$JFlZ;&I
z@O=T#Y`Iv45`3`)Ttv`P8)U)>cQEPwSoOV@7MtDD*~+!h#@|R&v%yxF@R-_MP+JTB
zWD%{;Y7abUK;{8|0<0xf=)mys(|QOI5)u*@{)mQwcO5VS2S}TlsVN8qux>i;cJLGw
zM&X4G!D|wHJncN`c&X5vgoA_=^527L3^OzHD|jR!rCYggLLdVJWFOF;HWOo-#HbX~
znt|Z=cK-bvz_)ag&U>LWz~V;fC!`;kk+H(q@dzNHQ{_6GsHjmSqQPri?WX<cAj;t5
z1Cg{klio-Mz2?T27D+KNzz+m2l;KZbzoG>Y1F&<NfOj)sYuQ?_wzmO;G=G18fVv8c
zdJ7`I9vHUoDdmPIxk+q%aK`Y*$fT>KLv(U|pfVp6Kw?Bf$eS)MFZVv*`XNh!@h%7p
z3+t+_3ouN;eVVZ(|NQy0xR^R}^B?Gmm4zid0x-s_t^tPzNkc=U+<0(w6bz-NrfQ4B
zV#q;M>t44{;f9Cx_loN?t@eJnNi1SRdG^N%gb^@e4DSJf=C;NhLq&P1)1%&4ZpigG
zX#BG!tuV0-9$^$P)&wL$;2eI&0B{iL>3H}%hle`8Vadr_7ue!TQ!`jtlwgI6&sksh
zy(hbt>^0R_sJ%h$!j)6Edxrbt(CVp)lU3ApJ4v|gS;xLw-2h*qCqj${Kmk}ltpQ|(
zoTeM76_kI)hv3(^00i;|;@Z$+z7cmGm3NE&Z@L8s9+nb#-{03ek#TMV4}W~58q|mH
zJp*8MxW*QWOo9A+s!+y1b^m&`fPTnuX+8hEx91ykqV065EOn^eBFQ-3tZs=D49rBr
za*C`96<qe_L;l?VYTM4H2n6z3V15SFHzgwQrGJ`gOEi;nHq^CTpwnU2R-~ea=6K&<
zl1Q`c4^c-Ewj~o)(*^#Uqku_3Jxc0uWO#U9Iocv77qBILoPoXyAYNZz#}r=<kGj}f
zIqZx#`-ipxcW@5lx7jE~4lviXdiOUc3QALg|2~QN`QP0E(u{a9kZo2jRxbXC!wCeQ
zm5bwK@_|v(aZZ#*$5CyCa`9duJL8-Au{WGt^x{gF7J$WG8~RP?*%uxOXYFt=+G{6H
zCi@Gty%qWYeqppsK9KYQ2v}NSG$3aM1dPT)%R#gmPUD^gSURBCX9tdgm$r%upn0ua
z9E^lJ0d%hJ>E(a~E`W~x8(bRx3AnF50x<-Du>c@>f!?yt1Gu(;We6wibT|VAM+ZH-
zI>MGof$z(+k9?%7e5NXmK&z~55tu^v6-MeZWU`veD=SYFs}xRIUI86-V`Jk4IxUeH
zQvhu>&-9i+>jDM9wB;30uV&C~Je}4sppKVmh5{1<fXAO$`=(ML+vK#btfm(4G6=qh
zsE7!_)z<0&a&{{?yTM?!c=`Y^Ykpu**8fo?LD&){=i_V5tx@^9M7<sIIU434#O7|Q
z3H&V3G6ElIY-Sc@l$4*p4HUnmL6Q0O;Tj4EJD(Eh0D+Mp*a%Sh0Hk@e4-AYiD3&=$
zNsYmR0ifh7g*_;=T#>3mw@IX<yr`%VIBG<bhysEqVea^s1O`1z3yU8{b4ko5z=9zQ
ztf-Z@2mzPS#o5*AyTF>*pnvPd>tLzLk&>8LNGBOn7|qJ+5TM65xd1^q!sqE8Fa!AK
zrlzJwM*-LWhtJ{hu>jB%08vL-yAq!T6r_Sak08<9{>lwm1CVv#2@Qy}SBxLAus95V
zL++i`)e}-vQ-z4#Dmy`QnE>E=KzNtLE{waKl$n1+Hu&#55ohuMRums$<uiDHUk395
zy1J(Y#L?P!NS(ic_7g(TU+2S_X~8ms4j+(l0Gh64-95130r4^`Y?2e$9q%O?_-M7d
zF((dsC*}qS3D|t$)e^+sjdL`|B>Mh;yTa=Li2PxLBWcbJ97I7P<8F!C<?;#&v*5h4
z1w3|;z^pbpY=YNbhX<&)yC)~U8(lvDGJ1My3JwmAt(`$!3H)YY6asQ<7a<{`r+7KA
zd0qlH-6}KqpVHEmN7I%1@+$4d7XAV7gan*<T#k~Glig2N`0v5S0OnHof<CRX6nIRA
zzptT-fX-I;2IOK7;OS*&XD`qr1T;1^{WGBgIvN1=eJOz{E*XyF0P+KHQaVEy)LNKI
zg2?*;$4Ej#!slk$75uf09;Fjl=0O!d_79KZJdUjwGwB06V9^k^RC)&nXh{EE#+|{)
zh4SeOjSePaVqH1nVQ{d*nF8Jb8Ql|(TLT^}I26k@tBk=f0)ZQd<z0>X<$O_91^Rqm
zAi%qTBZNSUZU*38TL6J#EffS=L0<tHniB9ffEo~h#e+|uIN%0B6Uo_3!AIxTEOBh?
z*gM3U*SVz~!_Zk>#7(jHgXBcN2Ochx#3&)ZS2}^pXm^8y7QFW(3D}P)!A3xUt>M?N
z?~#!LUbnxRn(n}%y59tt{8tyFg1x=HMBHD=fmE<)2nwx61;{`DGKG^%gTn+Q#`omb
z9RHkrCkx~zgAZx|M5;5`%-mc@ON+fRM<Tb$`7oidaQB}*0+FD+os)};Em{k_yOmT7
z9Rq{U<=*=MViy+|@I)8%KK_4=ooPIj?c2x4T{lXiJIhTZWveV9#M~%Zk|Hf+mwkzn
zeFl?|J8MW}Eo2SZvrdwI-!mB5cVpkinEx^SKhNiR@w|Avql>QVI?wYse#bfI?|TGK
z!DVnJ8BE}Hb;{)@4B(SRP`4*JWvm{^96x>>ocZ9;1Lj6$Ma5jZIbfuG&(VUhb2qKE
zvC(~ZJp*j_;3IwgL`bYd<K%svqDdlK{pCfBeqQF(=+0*~ujLiwSZXr@@iwz*l8yy`
zr`%Mh@A-%D_vSB?>=TgVec3b-t@goH*t17tRBl3So-kG5iy-Y22_W`T*9<k8;*qZQ
zZf@5QA3=74r3zeBmm-hXI|xT1LFs{}<ERhiL`s53pZ<)bqOx+=^gxCC8rYww+Tx+H
zPR#rF7RJWcpMidbH%VG;76DB$BLt8!n1|4nH)-S~o0wVO7abwl1r49B7CIm07G1{p
zj_w<ZnUQ|(f1ZEfD17?J%d$c{e3IMm`}?et>&rM*d~iS2qf3wU^@m!6?reQcAf#;H
zpR1*#b~*W4vUo!ZFYWLpEh-HfXX(W;9%k2xaqpyb2m?mno<D)G9VKE^<+d^gl1TzA
z1~5*bxB(F!&Ql5|*q=Xtid(j|#7R4@jMY+9_aHO|-k9%~cU%6re+*pp6_u6n%&GA|
zzB$jcygCO%fHMUp0+D#HSp>_&$7c@@H`x>k3|1k@35|fHxSrd7UJhOR=?=-t8Zi#y
z8g(WnFOxF!b3E3F8?|cFv-z?52w+%4w%>Giy7fkWSaQ+qFc~-0-!wO$^gcoZ5L1vI
z0MmhkLx4Sprr&iIaB95(ssyY}?<D{<GAT)OsKZ%VO1g4ivnadb1LEr5y^gWkKxPqb
zH4Zp^0c(FkWNAz#gkyHFBsU}^WE1h}9y~>#dc2G`YtdcRd@se*6Y-kb9};7uyhwQ_
z4|`G!mxiRrmE$mi+>yP_7VRhF^O7Qh({~okIqKnMEsk4Zq~vnw`+oW!3IQ$#US3|>
zS3Md(00{$I0#_v@S{oXkJ3CLV?QDcH(9;9H=yRX|<55v~^WZOmL9w;0WjuO}fk8xA
zxUB)$(7*tPpXNH?!S;VCvMo^_Gtm$h$RgC$-;W9P+4HTYEe;8robFC_J$8~!+QBf|
z&Om>t=yIo;h88+#-bUxXzWxR6eyylSw^Q!^X)$^YI$j%b(GVxP(;hYz^B%^3#)f+E
zCJ6*ob?K!**2{Ag@u|N47IQNT3=UP~eIjMNJ+|+mPee7-Gcb%`F<9kfX{a{+{r&Uu
z9LCp7|DIZ0l!YPy?sTLQ&HCCF%it6or+A`BKbsnHPN2zH^xY~>d}X?)wnUkMy#q`B
z9*qf5e;Qq6R^wpaEgt#zYd?Q~H1Zd3qFisP#sA!t&tpM8)%=tc#d5!-^zH3!D6{LU
zt4AS90N7-IWs5MP!U(t>s}f&dUx0Rv4?e?YV_&f`Gdn}^0VNp(9R^OBLhs^ktE+JI
z{si@SLAzY6oF@vH4`;Mf{VPtlaPC#yeK=UcDVUwv^37q_<b{E8)Z0lSL9XT2{xnQ8
zsa%bP9{cT*a&3%wkd@Z&j{;CUe?M6)*=RY2YkSIeC_;DKu3Wte8FL7Nynz9y61Z8I
znV1Cizg-cbkl%uvCv%Ljh6k{XaI+^*MqyS$W-~B~5dF{s9&7j_Ky&1Bpn&3WI26<k
zV3lla%&1nPJAPc@=FRTzZYl%?z8%(+yN+=8zoMws?4@_HvJgL5TS<N=Q4?jyIk(Xz
zChB7>x=hZrqh>feRNW`Db4Txukk9^sOx{V4hUYdjGlMmZbe;or6pSkY)DFBvu9~kP
zgkS^`k*;k(kXL`dFoGq+VgEC6H^9ONi<hF4M6kd-p`<MMX7`Vfx}QJIzQ3e`lm#LX
z$`=qrFo(Ts^YN$a)D1S{@7kE~?+<?}l#&^QzeMiGlT?(P@$MX$rJ<eCqi;vfozhfP
zP-VuHr_B}L_EDmuQQ}ow)|Rxfo_oU=6D?sI8X;(bjN_?~;loL$=-Qgr{Tr*COnW|%
z-SNt+zwJ_#XZ|qZ+3R5Q`|xCGUsOg(p*!rTMM;H`zkD=0k@!zLiQt$(93n6iH$7i$
z|Ms0=D$*jhrfuE5mv{oP9GaS(JPrsdVB^We3rkD8gVI=fAQ_tt2JMN)2=HCO)8ys3
z2B;Z8TW&}}Ad;U)qMfgC5GvL}40B--R%Lnj+5r68sQ9T)=x0yBxq;S%jlJaLWP0iv
zZEUI^xtY>$SUf7Tp1-}*$~nPkBfFpfLIha$sP1aM1E2e<TJN}JYv0U2!R9?d0My!+
zEWMO)Cjg!PBWu#E(F_38A0y1Ag(RF!^cPB#{VSQ98{mfsg>0>~&#CGj0>zm#|7Me1
z$QmzUe*tmMm(iw3hL_h|VIe&;DY<x%#CdqVD{z7V#dIuB^ih4t=9&%L2XG$!&b`VG
zg5QxZC?MmK94--kc<mN6j0c4`u2TDunR!!BkaD{Sr?gJ1`w#Bd)z>{_X2lVC--k8A
zZJ?g>zpoAYA}G_vF!Ga!a^3o!<W9;iRUn&=j@z#%1sk)E&BWk%9#$3Z_{@oMlce^u
z(lcKNiT3Bj*r3Ah4>Na%hM(igA$t=w47<XIe|P_6<n73?Fi}a=Pkye=-v8b%A%e%c
zGfC;#g(NxYA^yA|$`k+20Na)CsHP?HH#Z;Uq1FvS453hE>S^fT-ZU&ChWdJX^D;RC
zO>OZ=3X1ojm<b2>(_|BK|HXm<4i*+IEv=~~GuV&-UGvY2iCX~fsIPp)4`Llmr{rFT
zY%N<TP2f;d;_$#op;c#+@64&9NajVBz@nnh^NE$oTahi4QTZ4JDy0zgI%s!j0|lB^
zNl~!{vJp@(XPv56n=a>K_o15|9wJI}b2A_V;Y^^{&Z>F8A2{u#*n_3)hh}htL2Tl%
z+#!7ps|XubfbPF)hby!n$1S=<H1-skW5Ri?t6%*|CpM+#EMb&z4ZlICzEK9d4(mC3
z8zo3J1meo|$Aa%6lRHE803D4YcvNZgth@Oz81n8e3c4Oab^`@{f>Y)p_@THGA+Q4-
z6gqockdo4-J;lW(()_~EFrm}R$u+&xiUmq9SnR-Y&+N6~_8oYte*5-udiq64CkKZW
z2*RNLh$}9HPxmt@_+)|Gp=qdrX{lInWB>gTdKt&5PXi)P-}?^j==gX6$U-R8Ai@Z9
zb90M{b$Q-?KMBq`Fn|Wup7&ke+Jct=cvOLl7ad#$RJ&Fuo54Ij{Ogy1FH{A}*47!w
z<x+zx4|iyk00<Fa29q4qWC8a6LZhs8Q%+{jWts+op#B#c39E&tbk7c{@Ti8H>(Dxo
zcAbI#V8004ZM>?`QzR$x*s)_-TG?Xk!N3e!l7ylh452{sg_h;%=>jq`DA@coK!VkV
zFM{&2A%_sM&aC}KnX)s%-@ji^>z>u!dXWXq6moN6->w1-t0kLIFa#uh6OR*Y1`jJ0
z6%~+8XPwGovvrGXVAl^i#}KNB_P`qJZ{ha^P%lKv!)@5>U^;hq-|?ek-JROsJ##RN
zl@%2gVIG2u+R`#@{uUI3pvD1823==*e*J<jZZmiX^SsBfy(9rZLco}~GQj_+oV)^Y
z)*#Us)JyN^^XOKKtX>)S)d_o_+u_gvq!{E0g)0juxxumskzC_h!N}KoE?70Mm6LS}
zELtNp<OTpy6&-C=FaTVt4HO*givgroqnR%JYdEhO-<{|V9IgyVuu7V+(Nu0%5)c~N
z-q@&9Vs8Lt=KQ}}Suj95kTes~OVUP0BlbS9_XXRIpA~3hTQ`51hPHkXulF1!yHdhJ
zhZ+<%!=S<7P`e>&mKUguV9Jk=mvvh<=2iq(FceF$;}fiGYGpM81u{MLO=IJajOVXi
zb8Cqf%{gnP1IU1!^mM3MfP(|TJvurFy!{n50;(W+^x-vejhK+bP#(djAzG2%kSMVe
zuflf(fmoKK4Cc$NgE&?ZdJs!IDffgR90a@}|4rLRsY6j3{e|mp#idi@6+-UQ^MbMo
zB=&==xP7g%%HNiVTCUbySobN^UHr&>p(=JGVp@KGZ3E3NjY}|kUs{qcD_KEQ=`*1A
zuYcSY%Vm)FsnG?K{6L%qKA7*(K8tc1&8J&@5-RMzKoe27oo?@mDqDBO4wG0nZW1{5
zeycFhl*hzxw-z*>Imxx1ZZ_5M9W8%)GSx%JAm*wmzA$l-Sljd=;V}49SA^8Xy<<bm
zGYlJi<+onHr^9=Uo5|90cqh3zN|v-dUaE(&VN{?+o9gO+3Ez(ySl>REQQ7gdYUt@A
z?(XxJo}iSY83nQ!684KHd2_&#MrBUcldo}0m<sXzdjCDc?dp2%E-CD8L-ehkrMak-
z@Cz#PtA-9AbP2mmW%pg5wvV#UW`A~c&gYRqw#5rx%(8QlsOr5BEkfj0R;0wj|Kib+
zLaR=_mP+{Lnu4n2ZmW@g3rXbrb?pu7M-R}qRtpK~1W%5O857l1-Z^CETKB?qMfc~p
zAWf=pMukmG(|Vna5-qpPV5||^Vx?513Ok$O6v?P?>&AstuV>LO7R)YEmd^JFc2ew%
zJ_-snMEp+nFA)!qxvpNMt#N+cR-&csm`I?!r{Ss9;ZkpQ9a?VRls+pz#vj&bj3vH!
zi@f?HmwaVy^m#uod*#UT_VO#bsQ9zkd#aQPb7c4k3KKbOVn_J4re`f*=s39f<2d&_
zJ)wSGj#FfVwE;IH6<bf+TX){tmJAp)`I(j{kaw1L+U<$`6Al>9rHi&cf1HoGJjkJ7
zB^!IB=nCb@S&^$qs-&vI%)Z&(>{WM@wPFfWE-5)TS9Hkk^EDi@$fjuKug0n`!sy)(
zto}T*8$X&#eaimXe#fIKWXmBL+dcl@cgVU{$)(Q5Z05Dr8W^0Tn|q1;7CydtO&`cH
zmxe8G<89nE_KfWv%hBoV)UwqtN$tbnE{n*qEoIH(K8-m&epa+gz2mm87*1IS)H2T3
zhv;IpkYp$0+S3}CZB27huWdi}o6F=GM#VA&f~rYX@y5fhG~$RDX;4I-Hk6Y!iCH22
zY|vS+#dd9^OuPbZt=HOORN}w`-pcL7*l`7?;&OdD)A-_$tc{H@hf@ibVw)7TaEtsd
z&n3-b8GEKgSRE}dAgGwjVkguJx_Wmgac{S@R};m}WR3dqaK5dk*RHtniB0oolkO0C
zoU<lUGS_R~e&aEgX57(;=pK~ok(He~wii50$B(Z+k<JDOj(&I9?<6JdEkwRfRZm-H
zc=NZc#nbf;<1L**GE-<keB@46s9{?|&w$5dsEP(Ye<UK=$?KE+9rfc~0pi$^YqHZ@
zgoK@|5`9~jebx`f2DiU;W1(GQ=>ANN)P<$t>wS?V3s3dkmV#5w8l)w62BR{j1%66!
ziAMFd7jJtOKbx=WcCl>F(hV59X0Tj`iSH-X*KwiGHXJ>es*XOUe_@K8H*$uY>$76f
zPdnBJ@+=))bHzwv(sIjHsHp*<l)v1{Q>Q4M{~ev&xDI}wcpwJ<e*x|P`jsm3pUi&v
V29DepPayCiRF%{fa}<nU{RfJlei#4%

literal 0
HcmV?d00001

diff --git a/doc/design/gan_api.md b/doc/design/gan_api.md
index b5f37051c6..eb0bc1c003 100644
--- a/doc/design/gan_api.md
+++ b/doc/design/gan_api.md
@@ -6,9 +6,13 @@ GAN implementation, just a demo.
 import paddle.v2 as pd
 import numpy as np
 import logging
-
-X = pd.data(pd.float_vector(784))
 ```
+
+<p align="center">
+<img src="./dcgan.png" width = "90%" align="center"/><br/>
+The original GAN paper.
+</p>
+
 # Conditional-GAN should be a class. 
 ### Class member function: the initializer.
 ```python
@@ -21,7 +25,7 @@ class DCGAN(object):
     self.z_dim = z_dim # input noise dimension
 
     # define parameters of discriminators
-    self.D_W0 = pd.Variable(shape=[784, 128], data=pd.gaussian_normal_randomizer())
+    self.D_W0 = pd.Variable(shape=[3,3, 1, 128], data=pd.gaussian_normal_randomizer())
     self.D_b0 = pd.Variable(np.zeros(128)) # a Variable also supports initialization from numpy data
     self.D_W1 = pd.Variable(shape=[784, 128], data=pd.gaussian_normal_randomizer())
     self.D_b1 = pd.Variable(np.zeros(128)) # a Variable also supports initialization from numpy data
@@ -51,7 +55,7 @@ def generator(self, z, y = None):
     G_h0_bn = pd.batch_norm(G_h0)
     G_h0_relu = pd.relu(G_h0_bn)
     
-    G_h1 = pd.fc(G_h0_relu, self.G_w1, self.G_b1)
+    G_h1 = pd.deconv(G_h0_relu, self.G_w1, self.G_b1)
     G_h1_bn = pd.batch_norm(G_h1)
     G_h1_relu = pd.relu(G_h1_bn)
     

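The hunk above swaps `pd.fc` for `pd.deconv` in the generator; later patches in this series note that the deconv (transposed convolution) operator is still missing. As a rough reference for what such an operator computes, here is a minimal numpy sketch of a single-channel, no-padding transposed convolution; the function name and the assumed `pd.deconv` semantics are illustrative only, not the operator's actual API.

```python
import numpy as np

def deconv2d(x, w, stride=2):
    """Single-channel transposed convolution, no padding (illustrative sketch)."""
    h_in, w_in = x.shape
    k = w.shape[0]
    out = np.zeros((stride * (h_in - 1) + k, stride * (w_in - 1) + k))
    for i in range(h_in):
        for j in range(w_in):
            # each input pixel scatters a scaled copy of the kernel into the output
            out[i * stride:i * stride + k, j * stride:j * stride + k] += x[i, j] * w
    return out

feature_map = np.random.randn(4, 4)         # e.g. a small generator feature map
kernel = np.random.randn(5, 5)
print(deconv2d(feature_map, kernel).shape)  # (11, 11): spatially upsampled
```

This scatter-add view shows why the generator needs deconv rather than `pd.fc`: each low-resolution activation paints a kernel-sized patch of a higher-resolution output, which is how DC-GAN grows noise into an image.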
From 4147c7f22836fe7ae7b0c6e616adaba0bbfe3b3a Mon Sep 17 00:00:00 2001
From: zchen0211 <chenzhuoyuan07@gmail.com>
Date: Wed, 4 Oct 2017 15:52:23 -0700
Subject: [PATCH 17/82] gan design modified

---
 doc/design/gan_api.md | 82 ++++++++++++++++++++++++++++++++-----------
 1 file changed, 62 insertions(+), 20 deletions(-)

diff --git a/doc/design/gan_api.md b/doc/design/gan_api.md
index eb0bc1c003..b107f2fc00 100644
--- a/doc/design/gan_api.md
+++ b/doc/design/gan_api.md
@@ -1,20 +1,45 @@
-'''
-GAN implementation, just a demo.
-'''
-```python
-# pd for short, should be more concise.
-import paddle.v2 as pd
-import numpy as np
-import logging
-```
+# Design for GAN
+
+GAN (Generative Adversarial Network) is an important model for unsupervised learning and is widely used in many areas.
+
+It contains several important machine learning concepts, including building and running subgraphs, dependency tracing, and running different optimizers in one executor.
+
+In our GAN design, we wrap it as a user-friendly, easily customized Python API for designing different models. We take the conditional DC-GAN as an example because of its good performance on image generation.
+
+## The Conditional-GAN might be a class. 
+In this design we adopt the popular open-source designs in https://github.com/carpedm20/DCGAN-tensorflow and https://github.com/rajathkmp/DCGAN. It contains the following data structures:
+
+### DCGAN(object):
+which contains everything required to build a GAN model. It provides the following member functions as its API:
+
+### __init__(...):
+Initialize hyper-parameters (such as the conv dimensions), and declare the model parameters of the discriminator and the generator as well.
+
+### generator(z, y=None):
+Generate a fake image from input noise z. If the label y is provided, the conditional GAN model will be chosen.
+Returns a generated image.
+
+### discriminator(image):
+Given an image, decide if it is from a real source or a fake one. 
+Returns a 0/1 binary label.
+
+### build_model(self):
+Build the whole GAN model and define the training losses for both the generator and the discriminator.
 
 <p align="center">
 <img src="./dcgan.png" width = "90%" align="center"/><br/>
-The original GAN paper.
+Figure borrowed from the original DC-GAN paper.
 </p>
 
-# Conditional-GAN should be a class. 
-### Class member function: the initializer.
+## Discussion on Engine Functions required to build GAN
+- Trace the tensor and variable dependencies in the engine executor. (Very critical; otherwise the GAN cannot be trained correctly.)
+- Different optimizers, each responsible for optimizing a different loss.
+
+In more detail, we introduce our design of DCGAN as follows:
+
+### Class member function: Initializer
+- Set up hyper-parameters, including the conditional dimension, noise dimension, batch size, and so forth.
+- Declare and define all the model variables. All the discriminator parameters are included in the list self.theta_D and all the generator parameters are included in the list self.theta_G.
 ```python
 class DCGAN(object):
   def __init__(self, y_dim=None):
@@ -43,11 +68,16 @@ class DCGAN(object):
     self.theta_G = [self.G_W0, self.G_b0, self.G_W1, self.G_b1, self.G_W2, self.G_b2]
 ```
 
-### Class member function: Generator Net
+### Class member function: Generator
+- Given a noise input z, returns a fake image.
+- Concatenation, batch-norm, FC operations required;
+- Deconv layer required, which is missing now...
 ```python
 def generator(self, z, y = None):
-
-    # Generator Net
+    # input z: the random noise
+    # input y: input data label (optional)
+    # output G_im: generated fake images
+    
     if self.y_dim:
       z = pd.concat(1, [z, y])
       
@@ -64,11 +94,14 @@ def generator(self, z, y = None):
     return G_im
 ```
 
-### Class member function: Discriminator Net
+### Class member function: Discriminator
+- Given an image (either real or generated), returns a binary logit deciding real vs. fake.
+- Concatenation, Convolution, batch-norm, FC, Leaky-ReLU operations required;
 ```python
 def discriminator(self, image):
+    # input image: either generated images or real ones
+    # output D_h2: binary logit of the label
 
-    # Discriminator Net
     D_h0 = pd.conv2d(image, self.D_W0, self.D_b0)
     D_h0_bn = pd.batch_norm(D_h0)
     D_h0_relu = pd.lrelu(D_h0_bn)
@@ -82,6 +115,9 @@ def discriminator(self, image):
 ```
 
 ### Class member function: Build the model
+- Define data readers as placeholders to hold the data;
+- Build the generator and the discriminator;
+- Define two training losses, for the discriminator and the generator respectively.
 ```python
 def build_model(self):
 
@@ -92,8 +128,8 @@ def build_model(self):
     self.faked_images = pd.data(pd.float32, [self.batch_size, self.im_size, self.im_size])
     self.z = pd.data(pd.float32, [None, self.z_size])
     
-    # if conditional GAN
-    if self.y_dim:
+    # step 1: generate images by generator, classify real/fake images with discriminator
+    if self.y_dim: # if conditional GAN, includes label
       self.G = self.generator(self.z, self.y)
       self.D_t = self.discriminator(self.images)
       # generated fake images
@@ -106,6 +142,7 @@ def build_model(self):
       self.sampled = self.sampler(self.z)
       self.D_f = self.discriminator(self.images)
     
+    # step 2: define the two losses
     self.d_loss_real = pd.reduce_mean(pd.cross_entropy(self.D_t, np.ones(self.batch_size)))
     self.d_loss_fake = pd.reduce_mean(pd.cross_entropy(self.D_f, np.zeros(self.batch_size)))
     self.d_loss = self.d_loss_real + self.d_loss_fake
@@ -113,8 +150,13 @@ def build_model(self):
     self.g_loss = pd.reduce_mean(pd.cross_entropy(self.D_f, np.ones(self.batch_size)))
 ```
 
-# Main function for the demo:
+## Main function for the demo:
 ```python
+# "pd" for short, to keep the code concise.
+import paddle.v2 as pd
+import numpy as np
+import logging
+
 if __name__ == "__main__":
 
     # dcgan

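The `build_model` hunks above encode the standard GAN objectives: the discriminator is trained toward label 1 on real images and label 0 on generated ones, while the generator is trained so that the discriminator outputs 1 on its fakes. A small numpy sketch of the three losses follows, assuming `pd.cross_entropy` acts like a sigmoid binary cross-entropy on logits (an assumption; the design does not pin the signature down).

```python
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def binary_cross_entropy(logits, labels):
    # elementwise BCE on sigmoid logits; assumed stand-in for pd.cross_entropy
    p = sigmoid(logits)
    return -(labels * np.log(p) + (1 - labels) * np.log(1 - p))

batch_size = 8
d_logits_real = np.random.randn(batch_size)  # D(x) on a batch of real images
d_logits_fake = np.random.randn(batch_size)  # D(G(z)) on a batch of fakes

d_loss_real = np.mean(binary_cross_entropy(d_logits_real, np.ones(batch_size)))
d_loss_fake = np.mean(binary_cross_entropy(d_logits_fake, np.zeros(batch_size)))
d_loss = d_loss_real + d_loss_fake           # discriminator objective
g_loss = np.mean(binary_cross_entropy(d_logits_fake, np.ones(batch_size)))
print(d_loss, g_loss)
```

Note that `g_loss` reuses the discriminator's output on fake images but with flipped labels, which is exactly why the two losses must be minimized with respect to disjoint parameter lists.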
From 79c8bb9e7acbe2bc91625e4a2e396994c4fef168 Mon Sep 17 00:00:00 2001
From: zchen0211 <chenzhuoyuan07@gmail.com>
Date: Wed, 4 Oct 2017 16:02:07 -0700
Subject: [PATCH 18/82] gan design new version

---
 doc/design/gan_api.md | 23 +++++++++++++----------
 1 file changed, 13 insertions(+), 10 deletions(-)

diff --git a/doc/design/gan_api.md b/doc/design/gan_api.md
index b107f2fc00..8521bc8bf2 100644
--- a/doc/design/gan_api.md
+++ b/doc/design/gan_api.md
@@ -6,6 +6,11 @@ It contains several important machine learning concepts, including building and
 
 In our GAN design, we wrap it as a user-friendly, easily customized Python API for designing different models. We take the conditional DC-GAN as an example because of its good performance on image generation.
 
+<p align="center">
+<img src="./dcgan.png" width = "90%" align="center"/><br/>
+Figure borrowed from the original DC-GAN paper.
+</p>
+
 ## The Conditional-GAN might be a class. 
 In this design we adopt the popular open-source designs in https://github.com/carpedm20/DCGAN-tensorflow and https://github.com/rajathkmp/DCGAN. It contains the following data structures:
 
@@ -26,11 +31,6 @@ Returns a 0/1 binary label.
 ### build_model(self):
 Build the whole GAN model and define the training losses for both the generator and the discriminator.
 
-<p align="center">
-<img src="./dcgan.png" width = "90%" align="center"/><br/>
-Figure borrowed from the original DC-GAN paper.
-</p>
-
 ## Discussion on Engine Functions required to build GAN
 - Trace the tensor and variable dependencies in the engine executor. (Very critical; otherwise the GAN cannot be trained correctly.)
 - Different optimizers, each responsible for optimizing a different loss.
@@ -151,6 +151,10 @@ def build_model(self):
 ```
 
 ## Main function for the demo:
+Generally, a user of the GAN API just needs to do the following:
+- Instantiate an object of the DCGAN class;
+- Build the DCGAN model;
+- Specify two optimizers for two different losses with respect to different parameters.
 ```python
 # "pd" for short, to keep the code concise.
 import paddle.v2 as pd
@@ -158,7 +162,6 @@ import numpy as np
 import logging
 
 if __name__ == "__main__":
-
     # dcgan
     dcgan = DCGAN()
     dcgan.build_model()
@@ -167,8 +170,8 @@ if __name__ == "__main__":
     data_X, data_y = dcgan.load_mnist()
     
     # Two subgraphs required!!!
-    d_optim = pd.train.Adam(lr = .001, beta= .1).minimize(self.d_loss, )
-    g_optim = pd.train.Adam(lr = .001, beta= .1).minimize(self.g_loss)
+    d_optim = pd.train.Adam(lr=0.001, beta=0.1).minimize(dcgan.d_loss, dcgan.theta_D)
+    g_optim = pd.train.Adam(lr=0.001, beta=0.1).minimize(dcgan.g_loss, dcgan.theta_G)
 
     # executor
     sess = pd.executor()
@@ -183,11 +186,11 @@ if __name__ == "__main__":
         batch_z = np.random.uniform(-1., 1., [batch_size, z_dim])
 
         if batch_id % 2 == 0:
-          sess.eval(d_optim, 
+          sess.run(d_optim, 
                    feed_dict = {dcgan.images: batch_im,
                                 dcgan.y: batch_label,
                                 dcgan.z: batch_z})
         else:
-          sess.eval(g_optim,
+          sess.run(g_optim,
                    feed_dict = {dcgan.z: batch_z})
 ```
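The key point of the loop above is that each optimizer only updates its own parameter subset (`theta_D` or `theta_G`) while the other stays frozen. A toy stand-alone sketch of that alternating schedule (plain NumPy SGD stands in for `pd.train.Adam`, and the gradients are random placeholders):

```python
import numpy as np

theta_D = np.zeros(3)  # stand-in for discriminator parameters
theta_G = np.zeros(3)  # stand-in for generator parameters

def sgd_step(params, grads, lr=0.001):
    return params - lr * grads

for batch_id in range(100):
    if batch_id % 2 == 0:
        grads_D = np.random.randn(3)          # would come from d_loss
        theta_D = sgd_step(theta_D, grads_D)  # theta_G is untouched
    else:
        grads_G = np.random.randn(3)          # would come from g_loss
        theta_G = sgd_step(theta_G, grads_G)  # theta_D is untouched
```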

From 3db38fce8ba88bd68a51a5e0232d60eeac7c50d1 Mon Sep 17 00:00:00 2001
From: zchen0211 <chenzhuoyuan07@gmail.com>
Date: Wed, 4 Oct 2017 16:14:08 -0700
Subject: [PATCH 19/82] gan design new version

---
 doc/design/gan_api.md | 13 +++++--------
 1 file changed, 5 insertions(+), 8 deletions(-)

diff --git a/doc/design/gan_api.md b/doc/design/gan_api.md
index 8521bc8bf2..e0e3440d49 100644
--- a/doc/design/gan_api.md
+++ b/doc/design/gan_api.md
@@ -14,21 +14,18 @@ This figure is borrowed from the original DC-GAN paper.
 ## The Conditional-GAN might be a class. 
 In this design, we adopt the popular open source designs from https://github.com/carpedm20/DCGAN-tensorflow and https://github.com/rajathkmp/DCGAN. It contains the following data structure:
 
-### DCGAN(object):
-which contains everything required to build a GAN model. It provides following member functions methods as API:
+- DCGAN(object): contains everything required to build a GAN model. It provides the following member functions as its API:
 
-### __init__(...):
-Initialize hyper-parameters (like conv dimension and so forth), and declare model parameters of discriminator and generator as well.
+- __init__(...): Initialize hyper-parameters (such as conv dimensions) and declare the model parameters of the discriminator and the generator.
 
-### generator(z, y=None):
-Generate a fake image from input noise z. If the label y is provided, the conditional GAN model will be chosen.
+- generator(z, y=None): Generate a fake image from input noise z. If the label y is provided, the conditional GAN model will be chosen.
 Returns a generated image.
 
-### discriminator(image):
+- discriminator(image):
 Given an image, decide if it is from a real source or a fake one. 
 Returns a 0/1 binary label.
 
-### build_model(self):
+- build_model(self):
 Builds the whole GAN model and defines the training losses for both the generator and the discriminator.
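Putting the four bullets together, a hypothetical skeleton of the class (method names taken from this document; bodies elided, defaults assumed for illustration):

```python
class DCGAN(object):
    def __init__(self, y_dim=None, batch_size=64, z_size=100):
        # hyper-parameters, plus parameter declarations for G and D
        self.y_dim, self.batch_size, self.z_size = y_dim, batch_size, z_size

    def generator(self, z, y=None):
        """Generate a fake image from noise z (conditional if y is given)."""

    def discriminator(self, image):
        """Return a 0/1 binary label: real or fake."""

    def build_model(self):
        """Wire generator and discriminator together; define d_loss and g_loss."""
```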
 
 ## Discussion on Engine Functions required to build GAN

From 583c94e4e641297820910d15fbc604cd9281834b Mon Sep 17 00:00:00 2001
From: zchen0211 <chenzhuoyuan07@gmail.com>
Date: Thu, 5 Oct 2017 23:35:48 -0700
Subject: [PATCH 20/82] new gan

---
 doc/design/gan_api.md | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/doc/design/gan_api.md b/doc/design/gan_api.md
index e0e3440d49..b7c0fab201 100644
--- a/doc/design/gan_api.md
+++ b/doc/design/gan_api.md
@@ -6,6 +6,17 @@ It contains several important machine learning concepts, including building and
 
 In our GAN design, we wrap it as a user-friendly, easily customized Python API to design different models. We take the conditional DC-GAN as an example due to its good performance on image generation.
 
+| important building blocks | People in Charge  |
+|---------------------------|-------------------|
+| convolution 2d (done)     | Chengduo          |
+| deconv 2d (missing)       | Zhuoyuan          |
+| batch norm (missing)      | Zhuoyuan, Jiayi   |
+| Dependency Engine (done)  | Jiayi             |
+| Executor (done)           | Tony              |
+| Multi optimizer           | ?                 |
+| Optimizer with any para   | ?                 |
+
+
 <p align="center">
 <img src="./dcgan.png" width = "90%" align="center"/><br/>
 This figure is borrowed from the original DC-GAN paper.

From 672f70ccba17a28af6842faede7c6349a399527b Mon Sep 17 00:00:00 2001
From: zchen0211 <chenzhuoyuan07@gmail.com>
Date: Thu, 5 Oct 2017 23:43:12 -0700
Subject: [PATCH 21/82] gan api

---
 doc/design/gan_api.md | 28 +++++++++++++++++++---------
 1 file changed, 19 insertions(+), 9 deletions(-)

diff --git a/doc/design/gan_api.md b/doc/design/gan_api.md
index b7c0fab201..14e34a9839 100644
--- a/doc/design/gan_api.md
+++ b/doc/design/gan_api.md
@@ -6,15 +6,25 @@ It contains several important machine learning concepts, including building and
 
 In our GAN design, we wrap it as a user-friendly, easily customized Python API to design different models. We take the conditional DC-GAN as an example due to its good performance on image generation.
 
-| important building blocks | People in Charge  |
-|---------------------------|-------------------|
-| convolution 2d (done)     | Chengduo          |
-| deconv 2d (missing)       | Zhuoyuan          |
-| batch norm (missing)      | Zhuoyuan, Jiayi   |
-| Dependency Engine (done)  | Jiayi             |
-| Executor (done)           | Tony              |
-| Multi optimizer           | ?                 |
-| Optimizer with any para   | ?                 |
+| important building blocks | People in Charge  | Required |
+|---------------------------|-------------------|----------|
+| convolution 2d (done)     | Chengduo          | Y        |
+| cudnn conv 2d (missing)   | Chengduo          | N        |
+| deconv 2d (missing)       | Zhuoyuan          | Y        |
+| cudnn deconv 2d (missing) | Zhuoyuan          | N        |
+| batch norm (missing)      | Zhuoyuan, Jiayi   | Y        |
+| cudnn batch norm (missing)| Zhuoyuan, Jiayi   | N        |
+| max-pooling (done)        | ?                 | Y        |
+| fc (done)                 | ?                 | Y        |
+| softmax loss (done)       | ?                 | Y        |
+| reshape op (done)         | ?                 | Y        |
+| Dependency Engine (done)  | Jiayi             | Y *      |
+| Python API (done)         | Jiayi             | Y *      |
+| Executor (done)           | Tony              | Y *      |
+| Multi optimizer           | ?                 | Y *      |
+| Optimizer with any para   | ?                 | Y *      |
+| Concat op                 | ?                 | N (Cond) |
+| Repmat op                 | ?                 | N (Cond) |
 
 
 <p align="center">

From b8a5b7f9bbe9c182ac70685005ef1662fd6d118b Mon Sep 17 00:00:00 2001
From: zchen0211 <chenzhuoyuan07@gmail.com>
Date: Thu, 5 Oct 2017 23:43:58 -0700
Subject: [PATCH 22/82] gan api

---
 doc/design/gan_api.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/doc/design/gan_api.md b/doc/design/gan_api.md
index 14e34a9839..d35309079f 100644
--- a/doc/design/gan_api.md
+++ b/doc/design/gan_api.md
@@ -10,8 +10,8 @@ In our GAN design, we wrap it as a user-friendly, easily customized Python API to
 |---------------------------|-------------------|----------|
 | convolution 2d (done)     | Chengduo          | Y        |
 | cudnn conv 2d (missing)   | Chengduo          | N        |
-| deconv 2d (missing)       | Zhuoyuan          | Y        |
-| cudnn deconv 2d (missing) | Zhuoyuan          | N        |
+| deconv 2d (missing)       | Zhuoyuan, Zhihong | Y        |
+| cudnn deconv 2d (missing) | Zhuoyuan, Zhihong | N        |
 | batch norm (missing)      | Zhuoyuan, Jiayi   | Y        |
 | cudnn batch norm (missing)| Zhuoyuan, Jiayi   | N        |
 | max-pooling (done)        | ?                 | Y        |

From 564b8c6cede75f844ba238a4573a6514d899a90d Mon Sep 17 00:00:00 2001
From: zchen0211 <chenzhuoyuan07@gmail.com>
Date: Fri, 6 Oct 2017 16:07:57 -0700
Subject: [PATCH 23/82] gan api

---
 doc/design/gan_api.md | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)

diff --git a/doc/design/gan_api.md b/doc/design/gan_api.md
index d35309079f..9864e8b7de 100644
--- a/doc/design/gan_api.md
+++ b/doc/design/gan_api.md
@@ -19,12 +19,12 @@ In our GAN design, we wrap it as a user-friendly easily customized python API to
 | softmax loss (done)       | ?                 | Y        |
 | reshape op (done)         | ?                 | Y        |
 | Dependency Engine (done)  | Jiayi             | Y *      |
-| Python API (done)         | Jiayi             | Y *      |
+| Python API (done)         | Longfei, Jiayi    | Y *      |
 | Executor (done)           | Tony              | Y *      |
-| Multi optimizer           | ?                 | Y *      |
+| Multi optimizer (working) | Longfei           | Y *      |
 | Optimizer with any para   | ?                 | Y *      |
-| Concat op                 | ?                 | N (Cond) |
-| Repmat op                 | ?                 | N (Cond) |
+| Concat op (done)          | ?                 | N (Cond) |
+| Repmat op (done)          | ?                 | N (Cond) |
 
 
 <p align="center">
@@ -91,7 +91,8 @@ class DCGAN(object):
 - Concatenation, batch-norm, FC operations required;
 - Deconv layer required, which is missing now...
 ```python
-def generator(self, z, y = None):
+class DCGAN(object):
+  def generator(self, z, y = None):
     # input z: the random noise
     # input y: input data label (optional)
     # output G_im: generated fake images
@@ -116,7 +117,8 @@ def generator(self, z, y = None):
 - Given an image, returns a 0/1 binary label (real or fake).
 - Concatenation, Convolution, batch-norm, FC, Leaky-ReLU operations required;
 ```python
-def discriminator(self, image):
+class DCGAN(object):
+  def discriminator(self, image):
     # input image: either generated images or real ones
     # output D_h2: binary logit of the label
 
@@ -137,8 +139,8 @@ def discriminator(self, image):
 - Build the generator and the discriminator;
 - Define the two training losses for the discriminator and the generator, respectively.
 ```python
-def build_model(self):
-
+class DCGAN(object):
+  def build_model(self):
     # input data
     if self.y_dim:
         self.y = pd.data(pd.float32, [self.batch_size, self.y_dim])

From 806796cea3e8bad3706f82bb47073a2313b09f3e Mon Sep 17 00:00:00 2001
From: zchen0211 <chenzhuoyuan07@gmail.com>
Date: Fri, 6 Oct 2017 16:10:30 -0700
Subject: [PATCH 24/82] gan api

---
 doc/design/gan_api.md | 32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)

diff --git a/doc/design/gan_api.md b/doc/design/gan_api.md
index 9864e8b7de..d0f8b47ca3 100644
--- a/doc/design/gan_api.md
+++ b/doc/design/gan_api.md
@@ -98,18 +98,18 @@ class DCGAN(object):
     # output G_im: generated fake images
     
     if self.y_dim:
-      z = pd.concat(1, [z, y])
+      z = pd.layer.concat(1, [z, y])
       
-    G_h0 = pd.fc(z, self.G_w0, self.G_b0)
-    G_h0_bn = pd.batch_norm(G_h0)
-    G_h0_relu = pd.relu(G_h0_bn)
+    G_h0 = pd.layer.fc(z, self.G_w0, self.G_b0)
+    G_h0_bn = pd.layer.batch_norm(G_h0)
+    G_h0_relu = pd.layer.relu(G_h0_bn)
     
-    G_h1 = pd.deconv(G_h0_relu, self.G_w1, self.G_b1)
-    G_h1_bn = pd.batch_norm(G_h1)
-    G_h1_relu = pd.relu(G_h1_bn)
+    G_h1 = pd.layer.deconv(G_h0_relu, self.G_w1, self.G_b1)
+    G_h1_bn = pd.layer.batch_norm(G_h1)
+    G_h1_relu = pd.layer.relu(G_h1_bn)
     
-    G_h2 = pd.deconv(G_h1_relu, self.G_W2, self.G_b2))
-    G_im = pd.tanh(G_im)
+    G_h2 = pd.layer.deconv(G_h1_relu, self.G_W2, self.G_b2)
+    G_im = pd.layer.tanh(G_h2)
     return G_im
 ```
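Assuming `pd.layer.deconv` is a standard transposed convolution (the document does not pin this down), the usual output-size formula explains how the two deconv layers upsample the feature maps:

```python
def deconv_out_size(in_size, kernel, stride=2, pad=0):
    # standard transposed-convolution output size, under the stated assumption
    return (in_size - 1) * stride - 2 * pad + kernel

# e.g. two stride-2 deconvs with kernel 2 upsample 7 -> 14 -> 28 (MNIST-sized)
print(deconv_out_size(7, 2), deconv_out_size(14, 2))  # 14 28
```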
 
@@ -122,15 +122,15 @@ class DCGAN(object):
     # input image: either generated images or real ones
     # output D_h2: binary logit of the label
 
-    D_h0 = pd.conv2d(image, self.D_w0, self.D_b0)
-    D_h0_bn = pd.batchnorm(h0)
-    D_h0_relu = pd.lrelu(h0_bn)
+    D_h0 = pd.layer.conv2d(image, w=self.D_w0, b=self.D_b0)
+    D_h0_bn = pd.layer.batchnorm(D_h0)
+    D_h0_relu = pd.layer.lrelu(D_h0_bn)
     
-    D_h1 = pd.conv2d(D_h0_relu, self.D_w1, self.D_b1)
-    D_h1_bn = pd.batchnorm(D_h1)
-    D_h1_relu = pd.lrelu(D_h1_bn)
+    D_h1 = pd.layer.conv2d(D_h0_relu, w=self.D_w1, b=self.D_b1)
+    D_h1_bn = pd.layer.batchnorm(D_h1)
+    D_h1_relu = pd.layer.lrelu(D_h1_bn)
     
-    D_h2 = pd.fc(D_h1_relu, self.D_w2, self.D_b2)
+    D_h2 = pd.layer.fc(D_h1_relu, w=self.D_w2, b=self.D_b2)
     return D_h2
 ```
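`pd.layer.lrelu` is presumably a leaky ReLU, the usual choice for GAN discriminators; a short NumPy sketch (the slope `alpha` is an assumption, not specified in this document):

```python
import numpy as np

def lrelu(x, alpha=0.2):
    # leaky ReLU keeps a small negative slope so the discriminator
    # does not kill gradients for negative activations
    return np.maximum(x, alpha * x)

print(lrelu(np.array([-1.0, 0.5])))  # [-0.2  0.5]
```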
 

From a0767228bd70ff8809a71dc4f9273d9dfac2aa46 Mon Sep 17 00:00:00 2001
From: qiaolongfei <qiaolongfei@baidu.com>
Date: Sat, 7 Oct 2017 00:39:08 -0700
Subject: [PATCH 25/82] merge InferShapeContext and ExecutionContext

---
 paddle/framework/operator.cc |  8 +++---
 paddle/framework/operator.h  | 49 +++++++++++++++---------------------
 2 files changed, 24 insertions(+), 33 deletions(-)
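The diff below folds the former `InferShapeContext` base class into `ExecutionContext`: the merged class now owns the op, the scope, and the device context directly, and the `Tensor` template specializations move with it. A language-neutral sketch of the before/after shape (in Python, class members taken from the diff; names suffixed for illustration):

```python
# Before: ExecutionContext derived from a base that held op/scope.
class InferShapeContext_Before:
    def __init__(self, op, scope):
        self.op, self.scope = op, scope

class ExecutionContext_Before(InferShapeContext_Before):
    def __init__(self, op, scope, device_context):
        super().__init__(op, scope)
        self.device_context = device_context

# After: a single ExecutionContext owns all three members directly.
class ExecutionContext_After:
    def __init__(self, op, scope, device_context):
        self.op, self.scope, self.device_context = op, scope, device_context
```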

diff --git a/paddle/framework/operator.cc b/paddle/framework/operator.cc
index 2ca838f838..2fca816f35 100644
--- a/paddle/framework/operator.cc
+++ b/paddle/framework/operator.cc
@@ -205,13 +205,13 @@ void OperatorBase::GenerateTemporaryNames() {
 }
 
 template <>
-const Tensor* InferShapeContext::Input<Tensor>(const std::string& name) const {
+const Tensor* ExecutionContext::Input<Tensor>(const std::string& name) const {
   auto* var = InputVar(name);
   return var == nullptr ? nullptr : GetTensorFromVar(var);
 }
 
 template <>
-const std::vector<const Tensor*> InferShapeContext::MultiInput<Tensor>(
+const std::vector<const Tensor*> ExecutionContext::MultiInput<Tensor>(
     const std::string& name) const {
   auto names = op().Inputs(name);
   std::vector<const Tensor*> res;
@@ -225,13 +225,13 @@ const std::vector<const Tensor*> InferShapeContext::MultiInput<Tensor>(
 }
 
 template <>
-Tensor* InferShapeContext::Output<Tensor>(const std::string& name) const {
+Tensor* ExecutionContext::Output<Tensor>(const std::string& name) const {
   auto var = OutputVar(name);
   return var == nullptr ? nullptr : var->GetMutable<LoDTensor>();
 }
 
 template <>
-std::vector<Tensor*> InferShapeContext::MultiOutput<Tensor>(
+std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
     const std::string& name) const {
   auto names = op().Outputs(name);
   std::vector<Tensor*> res;
diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h
index d7bc9c9ffb..af8989dc4f 100644
--- a/paddle/framework/operator.h
+++ b/paddle/framework/operator.h
@@ -57,7 +57,6 @@ inline std::string GradVarName(const std::string& var_name) {
 }
 
 class OperatorBase;
-class InferShapeContext;
 class ExecutionContext;
 
 extern const Tensor* GetTensorFromVar(const Variable* var);
@@ -169,10 +168,11 @@ class NOP : public OperatorBase {
   }
 };
 
-class InferShapeContext {
+class ExecutionContext {
  public:
-  InferShapeContext(const OperatorBase& op, const Scope& scope)
-      : op_(op), scope_(scope) {}
+  ExecutionContext(const OperatorBase& op, const Scope& scope,
+                   const platform::DeviceContext& device_context)
+      : op_(op), scope_(scope), device_context_(device_context) {}
 
   const OperatorBase& op() const { return op_; }
 
@@ -278,31 +278,6 @@ class InferShapeContext {
     out_tensor->set_lod(in_tensor.lod());
   }
 
- private:
-  const OperatorBase& op_;
-  const Scope& scope_;
-};
-
-template <>
-const Tensor* InferShapeContext::Input<Tensor>(const std::string& name) const;
-
-template <>
-const std::vector<const Tensor*> InferShapeContext::MultiInput<Tensor>(
-    const std::string& name) const;
-
-template <>
-Tensor* InferShapeContext::Output<Tensor>(const std::string& name) const;
-
-template <>
-std::vector<Tensor*> InferShapeContext::MultiOutput<Tensor>(
-    const std::string& name) const;
-
-class ExecutionContext : public InferShapeContext {
- public:
-  ExecutionContext(const OperatorBase& op, const Scope& scope,
-                   const platform::DeviceContext& device_context)
-      : InferShapeContext(op, scope), device_context_(device_context) {}
-
   template <typename PlaceType,
             typename DeviceType = typename platform::EigenDeviceConverter<
                 PlaceType>::EigenDeviceType>
@@ -315,9 +290,25 @@ class ExecutionContext : public InferShapeContext {
   }
 
  private:
+  const OperatorBase& op_;
+  const Scope& scope_;
   const platform::DeviceContext& device_context_;
 };
 
+template <>
+const Tensor* ExecutionContext::Input<Tensor>(const std::string& name) const;
+
+template <>
+const std::vector<const Tensor*> ExecutionContext::MultiInput<Tensor>(
+    const std::string& name) const;
+
+template <>
+Tensor* ExecutionContext::Output<Tensor>(const std::string& name) const;
+
+template <>
+std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
+    const std::string& name) const;
+
 class CompileTimeInferShapeContext : public InferShapeContextBase {
  public:
   CompileTimeInferShapeContext(const OpDescBind& op, const BlockDescBind& block)

From c0a34e1c645381bbf4c80d13b257f62c828662f7 Mon Sep 17 00:00:00 2001
From: qiaolongfei <qiaolongfei@baidu.com>
Date: Sat, 7 Oct 2017 00:47:55 -0700
Subject: [PATCH 26/82] rename InferShapeContextBase to InferShapeContext

---
 paddle/framework/operator.h                              | 6 +++---
 paddle/framework/operator_test.cc                        | 2 +-
 paddle/framework/shape_inference.h                       | 6 +++---
 paddle/operators/accuracy_op.cc                          | 2 +-
 paddle/operators/activation_op.cc                        | 4 ++--
 paddle/operators/adadelta_op.cc                          | 2 +-
 paddle/operators/adagrad_op.cc                           | 2 +-
 paddle/operators/clip_op.cc                              | 4 ++--
 paddle/operators/concat_op.cc                            | 4 ++--
 paddle/operators/conv2d_op.cc                            | 4 ++--
 paddle/operators/cos_sim_op.cc                           | 4 ++--
 paddle/operators/crop_op.cc                              | 4 ++--
 paddle/operators/cross_entropy_op.cc                     | 4 ++--
 paddle/operators/dropout_op.cc                           | 4 ++--
 paddle/operators/elementwise_op.h                        | 4 ++--
 paddle/operators/fill_zeros_like_op.cc                   | 2 +-
 paddle/operators/gather_op.cc                            | 4 ++--
 paddle/operators/gaussian_random_op.cc                   | 2 +-
 paddle/operators/lookup_table_op.cc                      | 4 ++--
 paddle/operators/lstm_unit_op.cc                         | 4 ++--
 paddle/operators/mean_op.cc                              | 4 ++--
 paddle/operators/minus_op.cc                             | 2 +-
 paddle/operators/modified_huber_loss_op.cc               | 4 ++--
 paddle/operators/mul_op.cc                               | 4 ++--
 paddle/operators/multiplex_op.cc                         | 4 ++--
 paddle/operators/pad_op.cc                               | 4 ++--
 paddle/operators/pool_op.cc                              | 4 ++--
 paddle/operators/prelu_op.cc                             | 4 ++--
 paddle/operators/rank_loss_op.cc                         | 4 ++--
 paddle/operators/reduce_op.cc                            | 4 ++--
 paddle/operators/reshape_op.cc                           | 4 ++--
 paddle/operators/rmsprop_op.cc                           | 2 +-
 paddle/operators/scale_op.cc                             | 2 +-
 paddle/operators/scatter_op.cc                           | 4 ++--
 paddle/operators/sequence_pool_op.cc                     | 4 ++--
 paddle/operators/sequence_softmax_op.cc                  | 4 ++--
 paddle/operators/sgd_op.cc                               | 2 +-
 paddle/operators/sigmoid_cross_entropy_with_logits_op.cc | 4 ++--
 paddle/operators/smooth_l1_loss_op.cc                    | 4 ++--
 paddle/operators/softmax_op.cc                           | 4 ++--
 paddle/operators/softmax_with_cross_entropy_op.cc        | 4 ++--
 paddle/operators/split_op.cc                             | 2 +-
 paddle/operators/squared_l2_distance_op.cc               | 4 ++--
 paddle/operators/sum_op.cc                               | 2 +-
 paddle/operators/top_k_op.cc                             | 2 +-
 paddle/operators/transpose_op.cc                         | 4 ++--
 paddle/operators/uniform_random_op.cc                    | 2 +-
 47 files changed, 82 insertions(+), 82 deletions(-)

diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h
index af8989dc4f..1e9ace9987 100644
--- a/paddle/framework/operator.h
+++ b/paddle/framework/operator.h
@@ -309,7 +309,7 @@ template <>
 std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
     const std::string& name) const;
 
-class CompileTimeInferShapeContext : public InferShapeContextBase {
+class CompileTimeInferShapeContext : public InferShapeContext {
  public:
   CompileTimeInferShapeContext(const OpDescBind& op, const BlockDescBind& block)
       : op_(op), block_(block) {}
@@ -405,7 +405,7 @@ class CompileTimeInferShapeContext : public InferShapeContextBase {
   const BlockDescBind& block_;
 };
 
-class RuntimeInferShapeContext : public InferShapeContextBase {
+class RuntimeInferShapeContext : public InferShapeContext {
  public:
   RuntimeInferShapeContext(const OperatorBase& op, const Scope& scope)
       : op_(op), scope_(scope) {}
@@ -603,7 +603,7 @@ class OperatorWithKernel : public OperatorBase {
                        });
   }
 
-  virtual void InferShape(InferShapeContextBase* ctx) const = 0;
+  virtual void InferShape(InferShapeContext* ctx) const = 0;
 
  protected:
   // indicate kernel DataType by input data. By default all input data must be
diff --git a/paddle/framework/operator_test.cc b/paddle/framework/operator_test.cc
index a0c17b41f2..a02f4668bc 100644
--- a/paddle/framework/operator_test.cc
+++ b/paddle/framework/operator_test.cc
@@ -113,7 +113,7 @@ class OpWithKernelTest : public OperatorWithKernel {
   using OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {}
+  void InferShape(framework::InferShapeContext* ctx) const override {}
   DataType IndicateDataType(const ExecutionContext& ctx) const override {
     return DataType::FP32;
   }
diff --git a/paddle/framework/shape_inference.h b/paddle/framework/shape_inference.h
index 74e0371e32..64aab16ae5 100644
--- a/paddle/framework/shape_inference.h
+++ b/paddle/framework/shape_inference.h
@@ -20,11 +20,11 @@ namespace paddle {
 namespace framework {
 
 // TODO(longfei): Once both CompileTimeInferShapeContext and
-// RuntimeInferShapeContext get merged, we can rename InferShapeContextBase into
+// RuntimeInferShapeContext get merged, we can unify them under the single
 // InferShapeContext interface.
-class InferShapeContextBase {
+class InferShapeContext {
  public:
-  virtual ~InferShapeContextBase() {}
+  virtual ~InferShapeContext() {}
   virtual bool HasInput(const std::string &name) const = 0;
   virtual bool HasOutput(const std::string &name) const = 0;
 
diff --git a/paddle/operators/accuracy_op.cc b/paddle/operators/accuracy_op.cc
index 82010bfb53..c5fb113e0f 100644
--- a/paddle/operators/accuracy_op.cc
+++ b/paddle/operators/accuracy_op.cc
@@ -22,7 +22,7 @@ class AccuracyOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase *ctx) const override {
+  void InferShape(framework::InferShapeContext *ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("Inference"),
                    "Input(Inference) of AccuracyOp should not be null.");
     PADDLE_ENFORCE(ctx->HasInput("Label"),
diff --git a/paddle/operators/activation_op.cc b/paddle/operators/activation_op.cc
index 66e9d2c401..5df875cd61 100644
--- a/paddle/operators/activation_op.cc
+++ b/paddle/operators/activation_op.cc
@@ -22,7 +22,7 @@ class ActivationOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase *ctx) const override {
+  void InferShape(framework::InferShapeContext *ctx) const override {
     ctx->SetOutputDim("Y", ctx->GetInputDim("X"));
     ctx->ShareLoD("X", /*->*/ "Y");
   }
@@ -33,7 +33,7 @@ class ActivationOpGrad : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase *ctx) const override {
+  void InferShape(framework::InferShapeContext *ctx) const override {
     ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("Y"));
   }
 };
diff --git a/paddle/operators/adadelta_op.cc b/paddle/operators/adadelta_op.cc
index bd8c93b4a1..cf1bca1658 100644
--- a/paddle/operators/adadelta_op.cc
+++ b/paddle/operators/adadelta_op.cc
@@ -22,7 +22,7 @@ class AdadeltaOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase *ctx) const override {
+  void InferShape(framework::InferShapeContext *ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("Param"),
                    "Input(Param) of AdadeltaOp should not be null.");
     PADDLE_ENFORCE(ctx->HasInput("Grad"),
diff --git a/paddle/operators/adagrad_op.cc b/paddle/operators/adagrad_op.cc
index ea2ff3c503..a17747efb7 100644
--- a/paddle/operators/adagrad_op.cc
+++ b/paddle/operators/adagrad_op.cc
@@ -22,7 +22,7 @@ class AdagradOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase *ctx) const override {
+  void InferShape(framework::InferShapeContext *ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("Param"),
                    "Input(Param) of AdagradOp should not be null.");
     PADDLE_ENFORCE(ctx->HasInput("Grad"),
diff --git a/paddle/operators/clip_op.cc b/paddle/operators/clip_op.cc
index b3dd060fd7..3e9b0d82ba 100644
--- a/paddle/operators/clip_op.cc
+++ b/paddle/operators/clip_op.cc
@@ -22,7 +22,7 @@ class ClipOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("X"),
                    "Input(X) of ClipOp should not be null.");
     PADDLE_ENFORCE(ctx->HasOutput("Out"),
@@ -61,7 +61,7 @@ class ClipOpGrad : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null");
     PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
                    "Input(Out@GRAD) should not be null");
diff --git a/paddle/operators/concat_op.cc b/paddle/operators/concat_op.cc
index 1ffa02c8f9..235c4449ac 100644
--- a/paddle/operators/concat_op.cc
+++ b/paddle/operators/concat_op.cc
@@ -24,7 +24,7 @@ class ConcatOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase *ctx) const override {
+  void InferShape(framework::InferShapeContext *ctx) const override {
     PADDLE_ENFORCE_GE(ctx->Inputs("X").size(), 1UL,
                       "Inputs(X) of ConcatOp should be empty.")
     PADDLE_ENFORCE(ctx->HasOutput("Out"),
@@ -83,7 +83,7 @@ class ConcatOpGrad : public framework::OperatorWithKernel {
       : OperatorWithKernel(type, inputs, outputs, attrs) {}
 
  protected:
-  void InferShape(framework::InferShapeContextBase *ctx) const override {
+  void InferShape(framework::InferShapeContext *ctx) const override {
     ctx->SetOutputsDim(framework::GradVarName("X"), ctx->GetInputsDim("X"));
   }
 };
diff --git a/paddle/operators/conv2d_op.cc b/paddle/operators/conv2d_op.cc
index 5cc82944bb..6325d4248f 100644
--- a/paddle/operators/conv2d_op.cc
+++ b/paddle/operators/conv2d_op.cc
@@ -27,7 +27,7 @@ class Conv2DOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("Input"),
                    "Input(Input) of Conv2DOp should not be null.");
     PADDLE_ENFORCE(ctx->HasInput("Filter"),
@@ -106,7 +106,7 @@ class Conv2DOpGrad : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     auto in_dims = ctx->GetInputDim("Input");
     auto filter_dims = ctx->GetInputDim("Filter");
     if (ctx->HasOutput(framework::GradVarName("Input"))) {
diff --git a/paddle/operators/cos_sim_op.cc b/paddle/operators/cos_sim_op.cc
index 040546f1a6..2b4c4b9c45 100644
--- a/paddle/operators/cos_sim_op.cc
+++ b/paddle/operators/cos_sim_op.cc
@@ -24,7 +24,7 @@ class CosSimOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     // notnull check
     PADDLE_ENFORCE(ctx->HasInput("X"),
                    "Input(X) of CosSimOp should not be null.");
@@ -98,7 +98,7 @@ class CosSimOpGrad : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     // notnull check
     PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) must not be null.");
     PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) must not be null.");
diff --git a/paddle/operators/crop_op.cc b/paddle/operators/crop_op.cc
index 9b2305e90e..a1424993cc 100644
--- a/paddle/operators/crop_op.cc
+++ b/paddle/operators/crop_op.cc
@@ -25,7 +25,7 @@ class CropOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("X"),
                    "Input(X) of CropOp should not be null.");
     PADDLE_ENFORCE(ctx->HasOutput("Out"),
@@ -115,7 +115,7 @@ class CropOpGrad : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null");
     PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
                    "Input(Out@GRAD) should not be null");
diff --git a/paddle/operators/cross_entropy_op.cc b/paddle/operators/cross_entropy_op.cc
index 4b67887f36..708e80e96a 100644
--- a/paddle/operators/cross_entropy_op.cc
+++ b/paddle/operators/cross_entropy_op.cc
@@ -22,7 +22,7 @@ class CrossEntropyOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should be not null.");
     PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) should be not null.");
     PADDLE_ENFORCE(ctx->HasOutput("Y"), "Output(Y) should be not null.");
@@ -60,7 +60,7 @@ class CrossEntropyGradientOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should be not null.");
     PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) should be not null.");
     PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Y")),
diff --git a/paddle/operators/dropout_op.cc b/paddle/operators/dropout_op.cc
index a669b5cf00..708ccfa0bf 100644
--- a/paddle/operators/dropout_op.cc
+++ b/paddle/operators/dropout_op.cc
@@ -24,7 +24,7 @@ class DropoutOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) must not be null.");
     PADDLE_ENFORCE_GE(ctx->Attrs().Get<float>("dropout_prob"), 0);
     PADDLE_ENFORCE_LE(ctx->Attrs().Get<float>("dropout_prob"), 1);
@@ -70,7 +70,7 @@ class DropoutOpGrad : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE_EQ(ctx->Attrs().Get<bool>("is_training"), 1,
                       "GradOp is only callable when is_training is true");
 
diff --git a/paddle/operators/elementwise_op.h b/paddle/operators/elementwise_op.h
index 3082f37422..66f1910a47 100644
--- a/paddle/operators/elementwise_op.h
+++ b/paddle/operators/elementwise_op.h
@@ -25,7 +25,7 @@ class ElementwiseOp : public framework::OperatorWithKernel {
 
  protected:
   using Tensor = framework::Tensor;
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("X"),
                    "Input(X) of elementwise op should not be null");
     PADDLE_ENFORCE(ctx->HasInput("Y"),
@@ -106,7 +106,7 @@ class ElementwiseOpGrad : public framework::OperatorWithKernel {
   using Tensor = framework::Tensor;
 
  protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null");
     PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) should not be null");
     PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
diff --git a/paddle/operators/fill_zeros_like_op.cc b/paddle/operators/fill_zeros_like_op.cc
index e164de6584..4c70b9a36b 100644
--- a/paddle/operators/fill_zeros_like_op.cc
+++ b/paddle/operators/fill_zeros_like_op.cc
@@ -22,7 +22,7 @@ class FillZerosLikeOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase *ctx) const override {
+  void InferShape(framework::InferShapeContext *ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("X"),
                    "Input(X) of FillZerosLikeOp should not be null.");
     PADDLE_ENFORCE(ctx->HasOutput("Y"),
diff --git a/paddle/operators/gather_op.cc b/paddle/operators/gather_op.cc
index fe305337cb..fb99c6c016 100644
--- a/paddle/operators/gather_op.cc
+++ b/paddle/operators/gather_op.cc
@@ -23,7 +23,7 @@ class GatherOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("X"),
                    "Input(X) of GatherOp should not be null.");
     PADDLE_ENFORCE(ctx->HasInput("Index"),
@@ -51,7 +51,7 @@ class GatherGradOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
   }
 
diff --git a/paddle/operators/gaussian_random_op.cc b/paddle/operators/gaussian_random_op.cc
index 5cd2c7d2c0..ca7fb38505 100644
--- a/paddle/operators/gaussian_random_op.cc
+++ b/paddle/operators/gaussian_random_op.cc
@@ -43,7 +43,7 @@ class GaussianRandomOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(ctx->HasOutput("Out"),
                    "Output(Out) of GaussianRandomOp should not be null.");
     auto dims = ctx->Attrs().Get<std::vector<int>>("dims");
diff --git a/paddle/operators/lookup_table_op.cc b/paddle/operators/lookup_table_op.cc
index 929008fbcb..3f8d4ab857 100644
--- a/paddle/operators/lookup_table_op.cc
+++ b/paddle/operators/lookup_table_op.cc
@@ -22,7 +22,7 @@ class LookupTableOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("W"),
                    "Input(W) of LookupTableOp should not be null.");
     PADDLE_ENFORCE(ctx->HasInput("Ids"),
@@ -70,7 +70,7 @@ class LookupTableOpGrad : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     auto table_dims = ctx->GetInputDim("W");
     ctx->SetOutputDim(framework::GradVarName("W"), table_dims);
   }
diff --git a/paddle/operators/lstm_unit_op.cc b/paddle/operators/lstm_unit_op.cc
index dad56731de..13a45ec246 100644
--- a/paddle/operators/lstm_unit_op.cc
+++ b/paddle/operators/lstm_unit_op.cc
@@ -22,7 +22,7 @@ class LstmUnitOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of LSTM should not be null.");
     PADDLE_ENFORCE(ctx->HasInput("C_prev"),
                    "Input(C_prev) of LSTM should not be null.");
@@ -77,7 +77,7 @@ class LstmUnitGradOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("C")),
                    "Input(C@GRAD) should not be null");
     PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("H")),
diff --git a/paddle/operators/mean_op.cc b/paddle/operators/mean_op.cc
index 2332c9546b..441543049f 100644
--- a/paddle/operators/mean_op.cc
+++ b/paddle/operators/mean_op.cc
@@ -22,7 +22,7 @@ class MeanOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("X"),
                    "Input(X) of MeanOp should not be null.");
     PADDLE_ENFORCE(ctx->HasOutput("Out"),
@@ -47,7 +47,7 @@ class MeanGradOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
   }
 };
diff --git a/paddle/operators/minus_op.cc b/paddle/operators/minus_op.cc
index 7057dcbd6e..d7fd2f901b 100644
--- a/paddle/operators/minus_op.cc
+++ b/paddle/operators/minus_op.cc
@@ -26,7 +26,7 @@ class MinusOp : public framework::OperatorWithKernel {
       : OperatorWithKernel(type, inputs, outputs, attrs) {}
 
  protected:
-  void InferShape(framework::InferShapeContextBase *ctx) const override {
+  void InferShape(framework::InferShapeContext *ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("X"),
                    "Input(X) of MinusOp should not be null.");
     PADDLE_ENFORCE(ctx->HasInput("Y"),
diff --git a/paddle/operators/modified_huber_loss_op.cc b/paddle/operators/modified_huber_loss_op.cc
index 84212a2b3b..6522327fdc 100644
--- a/paddle/operators/modified_huber_loss_op.cc
+++ b/paddle/operators/modified_huber_loss_op.cc
@@ -22,7 +22,7 @@ class ModifiedHuberLossOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("X"), "X must be initialized.");
     PADDLE_ENFORCE(ctx->HasInput("Y"), "Y must be initialized.");
 
@@ -74,7 +74,7 @@ class ModifiedHuberLossGradOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("X"), "X must be initialized.");
     PADDLE_ENFORCE(ctx->HasInput("Y"), "Y must be initialized.");
     PADDLE_ENFORCE(ctx->HasInput("IntermediateVal"),
diff --git a/paddle/operators/mul_op.cc b/paddle/operators/mul_op.cc
index 3c8fe04d2e..ec0683d887 100644
--- a/paddle/operators/mul_op.cc
+++ b/paddle/operators/mul_op.cc
@@ -24,7 +24,7 @@ class MulOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of MulOp should not be null.");
     PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) of MulOp should not be null.");
     PADDLE_ENFORCE(ctx->HasOutput("Out"),
@@ -97,7 +97,7 @@ class MulOpGrad : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null");
     PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) should not be null");
     PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
diff --git a/paddle/operators/multiplex_op.cc b/paddle/operators/multiplex_op.cc
index a069127a19..a86685b6dd 100644
--- a/paddle/operators/multiplex_op.cc
+++ b/paddle/operators/multiplex_op.cc
@@ -24,7 +24,7 @@ class MultiplexOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("Ids"), "Input(Ids) shouldn't be null.");
     PADDLE_ENFORCE(!ctx->Inputs("X").empty(),
                    "MultiInput(X) shouldn't be empty.");
@@ -90,7 +90,7 @@ class MultiplexGradOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(!ctx->Inputs("X").empty(), "Input(X) should not be null.");
     PADDLE_ENFORCE(!ctx->Outputs(framework::GradVarName("X")).empty(),
                    "Output(X@Grad) should not be null.");
diff --git a/paddle/operators/pad_op.cc b/paddle/operators/pad_op.cc
index 15aa05f266..2f26ada85e 100644
--- a/paddle/operators/pad_op.cc
+++ b/paddle/operators/pad_op.cc
@@ -24,7 +24,7 @@ class PadOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of PadOp should not be null.");
     PADDLE_ENFORCE(ctx->HasOutput("Out"),
                    "Output(Out) of PadOp should not be null.");
@@ -98,7 +98,7 @@ class PadOpGrad : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null");
     PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
                    "Input(Out@GRAD) should not be null");
diff --git a/paddle/operators/pool_op.cc b/paddle/operators/pool_op.cc
index c29f51f056..ba3b5ed207 100644
--- a/paddle/operators/pool_op.cc
+++ b/paddle/operators/pool_op.cc
@@ -27,7 +27,7 @@ class PoolOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase *ctx) const override {
+  void InferShape(framework::InferShapeContext *ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("X"),
                    "X(Input) of Pooling should not be null.");
     PADDLE_ENFORCE(ctx->HasOutput("Out"),
@@ -74,7 +74,7 @@ class PoolOpGrad : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase *ctx) const override {
+  void InferShape(framework::InferShapeContext *ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("X"),
                    "X(Input) of Pooling should not be null.");
     PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")),
diff --git a/paddle/operators/prelu_op.cc b/paddle/operators/prelu_op.cc
index 1692464f28..166fe26824 100644
--- a/paddle/operators/prelu_op.cc
+++ b/paddle/operators/prelu_op.cc
@@ -26,7 +26,7 @@ class PReluOp : public framework::OperatorWithKernel {
       : OperatorWithKernel(type, inputs, outputs, attrs) {}
 
  protected:
-  void InferShape(framework::InferShapeContextBase *ctx) const override {
+  void InferShape(framework::InferShapeContext *ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null");
     PADDLE_ENFORCE(ctx->HasInput("Alpha"), "Input(Alpha) should not be null");
     PADDLE_ENFORCE(product(ctx->GetInputDim("Alpha")) == 1,
@@ -63,7 +63,7 @@ class PReluGradOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase *ctx) const override {
+  void InferShape(framework::InferShapeContext *ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) must not be null.");
     PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
                    "Input(Out@GRAD) should not be null");
diff --git a/paddle/operators/rank_loss_op.cc b/paddle/operators/rank_loss_op.cc
index 1ba22006f2..e0abbc4db1 100644
--- a/paddle/operators/rank_loss_op.cc
+++ b/paddle/operators/rank_loss_op.cc
@@ -25,7 +25,7 @@ class RankLossOp : public framework::OperatorWithKernel {
       : OperatorWithKernel(type, inputs, outputs, attrs) {}
 
  protected:
-  void InferShape(framework::InferShapeContextBase *ctx) const override {
+  void InferShape(framework::InferShapeContext *ctx) const override {
     // input check
     PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) shouldn't be null");
     PADDLE_ENFORCE(ctx->HasInput("Left"), "Input(Left) shouldn't be null");
@@ -90,7 +90,7 @@ class RankLossGradOp : public framework::OperatorWithKernel {
       : OperatorWithKernel(type, inputs, outputs, attrs) {}
 
  protected:
-  void InferShape(framework::InferShapeContextBase *ctx) const override {
+  void InferShape(framework::InferShapeContext *ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) shouldn't be null.");
     PADDLE_ENFORCE(ctx->HasInput("Left"), "Input(Left) shouldn't be null.");
     PADDLE_ENFORCE(ctx->HasInput("Right"), "Input(Right) shouldn't be null.");
diff --git a/paddle/operators/reduce_op.cc b/paddle/operators/reduce_op.cc
index 3ef443d1c7..12081ee6f0 100644
--- a/paddle/operators/reduce_op.cc
+++ b/paddle/operators/reduce_op.cc
@@ -24,7 +24,7 @@ class ReduceOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase *ctx) const override {
+  void InferShape(framework::InferShapeContext *ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("X"),
                    "Input(X) of ReduceOp should not be null.");
     PADDLE_ENFORCE(ctx->HasOutput("Out"),
@@ -58,7 +58,7 @@ class ReduceGradOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase *ctx) const override {
+  void InferShape(framework::InferShapeContext *ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null.");
     PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
                    "Input(Out@GRAD) should not be null.");
diff --git a/paddle/operators/reshape_op.cc b/paddle/operators/reshape_op.cc
index a3c3fa2716..3cd54930a0 100644
--- a/paddle/operators/reshape_op.cc
+++ b/paddle/operators/reshape_op.cc
@@ -26,7 +26,7 @@ class ReshapeOp : public framework::OperatorWithKernel {
       : OperatorWithKernel(type, inputs, outputs, attrs) {}
 
  protected:
-  void InferShape(framework::InferShapeContextBase *ctx) const override {
+  void InferShape(framework::InferShapeContext *ctx) const override {
     // input check
     PADDLE_ENFORCE(ctx->HasInput("X"),
                    "Input(X) of ReshapeOp should not be null.");
@@ -94,7 +94,7 @@ class ReshapeGradOp : public framework::OperatorWithKernel {
       : OperatorWithKernel(type, inputs, outputs, attrs) {}
 
  protected:
-  void InferShape(framework::InferShapeContextBase *ctx) const override {
+  void InferShape(framework::InferShapeContext *ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) shouldn't be null.");
     PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
                    "Input(Out@GRAD) shouldn't be null.");
diff --git a/paddle/operators/rmsprop_op.cc b/paddle/operators/rmsprop_op.cc
index 8f61c7fdda..ada6f2bc3c 100644
--- a/paddle/operators/rmsprop_op.cc
+++ b/paddle/operators/rmsprop_op.cc
@@ -22,7 +22,7 @@ class RmspropOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase *ctx) const override {
+  void InferShape(framework::InferShapeContext *ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("Param"),
                    "Input(Param) of RmspropOp should not be null.");
     PADDLE_ENFORCE(ctx->HasInput("MeanSquare"),
diff --git a/paddle/operators/scale_op.cc b/paddle/operators/scale_op.cc
index e225aecc27..ac297da6b7 100644
--- a/paddle/operators/scale_op.cc
+++ b/paddle/operators/scale_op.cc
@@ -26,7 +26,7 @@ class ScaleOp : public framework::OperatorWithKernel {
       : OperatorWithKernel(type, inputs, outputs, attrs) {}
 
  protected:
-  void InferShape(framework::InferShapeContextBase *ctx) const override {
+  void InferShape(framework::InferShapeContext *ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("X"),
                    "Input(X) of ScaleOp should not be null.");
     PADDLE_ENFORCE(ctx->HasOutput("Out"),
diff --git a/paddle/operators/scatter_op.cc b/paddle/operators/scatter_op.cc
index d15ba15153..fbea01a8db 100644
--- a/paddle/operators/scatter_op.cc
+++ b/paddle/operators/scatter_op.cc
@@ -23,7 +23,7 @@ class ScatterOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("Ref"),
                    "Input(Ref) of ScatterOp should not be null.");
     PADDLE_ENFORCE(ctx->HasInput("Index"),
@@ -60,7 +60,7 @@ class ScatterGradOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     ctx->SetOutputDim(framework::GradVarName("Updates"),
                       ctx->GetInputDim("Updates"));
     ctx->SetOutputDim(framework::GradVarName("Ref"), ctx->GetInputDim("Ref"));
diff --git a/paddle/operators/sequence_pool_op.cc b/paddle/operators/sequence_pool_op.cc
index bc4af2f704..06c00d31ea 100644
--- a/paddle/operators/sequence_pool_op.cc
+++ b/paddle/operators/sequence_pool_op.cc
@@ -22,7 +22,7 @@ class SequencePoolOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("X"),
                    "Input(X) of SequencePoolOp should not be null.");
     PADDLE_ENFORCE(ctx->HasOutput("Out"),
@@ -74,7 +74,7 @@ class SequencePoolGradOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
                    "Gradient of Out should not be null.");
     PADDLE_ENFORCE(ctx->HasInput("X"), "The input X should not be null.");
diff --git a/paddle/operators/sequence_softmax_op.cc b/paddle/operators/sequence_softmax_op.cc
index 621779ab61..ea217ba459 100644
--- a/paddle/operators/sequence_softmax_op.cc
+++ b/paddle/operators/sequence_softmax_op.cc
@@ -22,7 +22,7 @@ class SequenceSoftmaxOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("X"),
                    "Input(X) of SequenceSoftmaxOp should not be null.");
     PADDLE_ENFORCE(ctx->HasOutput("Out"),
@@ -67,7 +67,7 @@ class SequenceSoftmaxGradOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("Out"),
                    "Input(Out) of SequenceSoftmaxGradOp should not be null.");
     PADDLE_ENFORCE(
diff --git a/paddle/operators/sgd_op.cc b/paddle/operators/sgd_op.cc
index 31d491f130..2a6a162a02 100644
--- a/paddle/operators/sgd_op.cc
+++ b/paddle/operators/sgd_op.cc
@@ -22,7 +22,7 @@ class SGDOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase *ctx) const override {
+  void InferShape(framework::InferShapeContext *ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("Param"),
                    "Input(Param) of SGDOp should not be null.");
     PADDLE_ENFORCE(ctx->HasInput("Grad"),
diff --git a/paddle/operators/sigmoid_cross_entropy_with_logits_op.cc b/paddle/operators/sigmoid_cross_entropy_with_logits_op.cc
index ede458e011..b6653e1cc7 100644
--- a/paddle/operators/sigmoid_cross_entropy_with_logits_op.cc
+++ b/paddle/operators/sigmoid_cross_entropy_with_logits_op.cc
@@ -24,7 +24,7 @@ class SigmoidCrossEntropyWithLogitsOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should be not null.");
     PADDLE_ENFORCE(ctx->HasInput("Labels"),
                    "Input(Labels) should be not null.");
@@ -53,7 +53,7 @@ class SigmoidCrossEntropyWithLogitsGradOp
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should be not null.");
     PADDLE_ENFORCE(ctx->HasInput("Labels"),
                    "Input(Labels) should be not null.");
diff --git a/paddle/operators/smooth_l1_loss_op.cc b/paddle/operators/smooth_l1_loss_op.cc
index 2d197e3b1b..91391dc945 100644
--- a/paddle/operators/smooth_l1_loss_op.cc
+++ b/paddle/operators/smooth_l1_loss_op.cc
@@ -22,7 +22,7 @@ class SmoothL1LossOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("X"), "X must be initialized.");
     PADDLE_ENFORCE(ctx->HasInput("Y"), "Y must be initialized.");
 
@@ -94,7 +94,7 @@ class SmoothL1LossGradOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     auto in_dims = ctx->GetInputDim("X");
     auto out_dims = ctx->GetInputDim(framework::GradVarName("Out"));
 
diff --git a/paddle/operators/softmax_op.cc b/paddle/operators/softmax_op.cc
index e353afee3e..4c131ed44d 100644
--- a/paddle/operators/softmax_op.cc
+++ b/paddle/operators/softmax_op.cc
@@ -22,7 +22,7 @@ class SoftmaxOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("X"),
                    "Input(X) of SoftmaxOp should not be null.");
     PADDLE_ENFORCE(ctx->HasOutput("Y"),
@@ -69,7 +69,7 @@ class SoftmaxOpGrad : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) should be not null.");
     PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Y")),
                    "Input(Y@GRAD) should be not null.");
diff --git a/paddle/operators/softmax_with_cross_entropy_op.cc b/paddle/operators/softmax_with_cross_entropy_op.cc
index 42c1ba6fdf..5431a1657c 100644
--- a/paddle/operators/softmax_with_cross_entropy_op.cc
+++ b/paddle/operators/softmax_with_cross_entropy_op.cc
@@ -83,7 +83,7 @@ class SoftmaxWithCrossEntropyOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("Logits"),
                    "Input(Logits) should be not null.");
     PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) should be not null.");
@@ -128,7 +128,7 @@ class SoftmaxWithCrossEntropyOpGrad : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Loss")),
                    "Input(Loss@Grad) should not be null.");
     PADDLE_ENFORCE(ctx->HasInput("Softmax"),
diff --git a/paddle/operators/split_op.cc b/paddle/operators/split_op.cc
index 5f4b5539af..d5dd4df2a2 100644
--- a/paddle/operators/split_op.cc
+++ b/paddle/operators/split_op.cc
@@ -24,7 +24,7 @@ class SplitOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase *ctx) const override {
+  void InferShape(framework::InferShapeContext *ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("X"),
                    "Input(X) of SplitOp should not be null.");
     PADDLE_ENFORCE_GE(ctx->Outputs("Out").size(), 1UL,
diff --git a/paddle/operators/squared_l2_distance_op.cc b/paddle/operators/squared_l2_distance_op.cc
index 5a0cb59600..cce4e527c3 100644
--- a/paddle/operators/squared_l2_distance_op.cc
+++ b/paddle/operators/squared_l2_distance_op.cc
@@ -22,7 +22,7 @@ class SquaredL2DistanceOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("X"),
                    "Input(X) of SquaredL2DistanceOp should not be null.");
     PADDLE_ENFORCE(ctx->HasInput("Y"),
@@ -86,7 +86,7 @@ class SquaredL2DistanceGradOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
                    "Gradient of Out should not be null");
     auto out_dims = ctx->GetInputDim(framework::GradVarName("Out"));
diff --git a/paddle/operators/sum_op.cc b/paddle/operators/sum_op.cc
index c701ee8dde..ffb0cb9211 100644
--- a/paddle/operators/sum_op.cc
+++ b/paddle/operators/sum_op.cc
@@ -22,7 +22,7 @@ class SumOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(ctx->HasInputs("X"), "Inputs(X) should not be null");
     auto x_dims = ctx->GetInputsDim("X");
     PADDLE_ENFORCE(ctx->HasOutput("Out"),
diff --git a/paddle/operators/top_k_op.cc b/paddle/operators/top_k_op.cc
index 5f22bf1df8..c954819912 100644
--- a/paddle/operators/top_k_op.cc
+++ b/paddle/operators/top_k_op.cc
@@ -22,7 +22,7 @@ class TopkOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase *ctx) const override {
+  void InferShape(framework::InferShapeContext *ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("X"),
                    "Input(X) of TopkOp should not be null.");
     PADDLE_ENFORCE(ctx->HasOutput("Out"),
diff --git a/paddle/operators/transpose_op.cc b/paddle/operators/transpose_op.cc
index 0672f9342d..1101bbe3ef 100644
--- a/paddle/operators/transpose_op.cc
+++ b/paddle/operators/transpose_op.cc
@@ -24,7 +24,7 @@ class TransposeOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null");
     PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) should not be null");
     auto x_dims = ctx->GetInputDim("X");
@@ -93,7 +93,7 @@ class TransposeOpGrad : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null");
     PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
                    "Input(Out@GRAD) should not be null");
diff --git a/paddle/operators/uniform_random_op.cc b/paddle/operators/uniform_random_op.cc
index 97b1d0bed4..e330877fc4 100644
--- a/paddle/operators/uniform_random_op.cc
+++ b/paddle/operators/uniform_random_op.cc
@@ -47,7 +47,7 @@ class UniformRandomOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(ctx->HasOutput("Out"),
                    "Output(Out) of UniformRandomOp should not be null.");
 

From adad8d9ed2cd722e6ac45b18596099b31fdb9929 Mon Sep 17 00:00:00 2001
From: hedaoyuan <hedaoyuan@github.com>
Date: Mon, 9 Oct 2017 11:20:09 +0800
Subject: [PATCH 27/82] Open WITH_TESTING option.

---
 CMakeLists.txt                                |  4 --
 paddle/capi/tests/CMakeLists.txt              | 17 ++---
 paddle/gserver/tests/CMakeLists.txt           | 70 +++++++++++--------
 paddle/gserver/tests/LayerGradUtil.h          |  1 -
 paddle/gserver/tests/test_ActivationGrad.cpp  |  1 -
 paddle/gserver/tests/test_BatchNorm.cpp       |  1 -
 paddle/gserver/tests/test_CRFLayerGrad.cpp    |  1 -
 paddle/gserver/tests/test_ConvTrans.cpp       |  1 -
 paddle/gserver/tests/test_ConvUnify.cpp       |  1 -
 .../tests/test_CrossEntropyOverBeamGrad.cpp   |  1 -
 paddle/gserver/tests/test_KmaxSeqScore.cpp    |  1 -
 paddle/gserver/tests/test_LayerGrad.cpp       |  1 -
 .../gserver/tests/test_SelectiveFCLayer.cpp   |  1 -
 .../gserver/tests/test_SeqSliceLayerGrad.cpp  |  1 -
 14 files changed, 48 insertions(+), 54 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 7d549b864b..4783095194 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -94,10 +94,6 @@ if(ANDROID OR IOS)
     endif()
     set(MOBILE_INFERENCE ON)
     add_definitions(-DPADDLE_MOBILE_INFERENCE)
-
-    # TODO: Need Open the WITH_TESTING
-    set(WITH_TESTING OFF CACHE STRING "Disable TESTING when cross-compiling
-                    for Android and iOS" FORCE)
 endif()
 
 set(THIRD_PARTY_PATH "${CMAKE_BINARY_DIR}/third_party" CACHE STRING
diff --git a/paddle/capi/tests/CMakeLists.txt b/paddle/capi/tests/CMakeLists.txt
index 8208808b94..bb38ace628 100644
--- a/paddle/capi/tests/CMakeLists.txt
+++ b/paddle/capi/tests/CMakeLists.txt
@@ -4,11 +4,12 @@ add_unittest(capi_test_mats test_Vector.cpp
 target_include_directories(capi_test_mats PUBLIC ${PADDLE_CAPI_INC_PATH})
 target_link_libraries(capi_test_mats paddle_capi)
 
-
-add_unittest_without_exec(capi_test_gradientMachine test_GradientMachine.cpp)
-target_include_directories(capi_test_gradientMachine PUBLIC
-  ${PADDLE_CAPI_INC_PATH})
-target_link_libraries(capi_test_gradientMachine paddle_capi)
-add_test(NAME capi_test_gradientMachine
-  COMMAND ${PADDLE_SOURCE_DIR}/paddle/.set_python_path.sh -d ${PADDLE_SOURCE_DIR}/python ${CMAKE_CURRENT_BINARY_DIR}/capi_test_gradientMachine
-  WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle/capi/tests)
+if(NOT MOBILE_INFERENCE)
+    add_unittest_without_exec(capi_test_gradientMachine test_GradientMachine.cpp)
+    target_include_directories(capi_test_gradientMachine PUBLIC
+      ${PADDLE_CAPI_INC_PATH})
+    target_link_libraries(capi_test_gradientMachine paddle_capi)
+    add_test(NAME capi_test_gradientMachine
+      COMMAND ${PADDLE_SOURCE_DIR}/paddle/.set_python_path.sh -d ${PADDLE_SOURCE_DIR}/python ${CMAKE_CURRENT_BINARY_DIR}/capi_test_gradientMachine
+      WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle/capi/tests)
+endif()
diff --git a/paddle/gserver/tests/CMakeLists.txt b/paddle/gserver/tests/CMakeLists.txt
index de9b8e63df..fcee19415c 100644
--- a/paddle/gserver/tests/CMakeLists.txt
+++ b/paddle/gserver/tests/CMakeLists.txt
@@ -1,15 +1,17 @@
 # gserver package unittests
 
+if(NOT MOBILE_INFERENCE)
 ################### test_ProtoDataProvider ############
-add_unittest_without_exec(test_ProtoDataProvider
-    test_ProtoDataProvider.cpp)
-
-# test_ProtoDataProvider will mkdir as same name,
-# so if WORKING_DIRECTORY is default directory, then
-# mkdir will get error.
-add_test(NAME test_ProtoDataProvider
-    COMMAND ${CMAKE_CURRENT_BINARY_DIR}/test_ProtoDataProvider
-    WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle)
+    add_unittest_without_exec(test_ProtoDataProvider
+        test_ProtoDataProvider.cpp)
+
+    # test_ProtoDataProvider creates a directory with the same name,
+    # so if WORKING_DIRECTORY is the default directory,
+    # mkdir will fail with an error.
+    add_test(NAME test_ProtoDataProvider
+        COMMAND ${CMAKE_CURRENT_BINARY_DIR}/test_ProtoDataProvider
+        WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle)
+endif()
 
 ################# test_LayerGrad #######################
 add_unittest_without_exec(test_LayerGrad
@@ -98,9 +100,11 @@ add_unittest_without_exec(test_KmaxSeqScore
 add_test(NAME test_KmaxSeqScore
     COMMAND test_KmaxSeqScore)
 
+if(NOT MOBILE_INFERENCE)
 ################## test_Evaluator #######################
-add_unittest(test_Evaluator
-    test_Evaluator.cpp)
+    add_unittest(test_Evaluator
+        test_Evaluator.cpp)
+endif()
 
 ################ test_LinearChainCRF ####################
 add_simple_unittest(test_LinearChainCRF)
@@ -131,27 +135,31 @@ if(NOT WITH_DOUBLE)
         WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle)
 endif()
 
+if(NOT MOBILE_INFERENCE)
 ############### test_RecurrentGradientMachine ###############
-# TODO(yuyang18): There is some bug in test_RecurrentGradientMachine
-# I will fix it.
-add_unittest_without_exec(test_RecurrentGradientMachine
-    test_RecurrentGradientMachine.cpp)
-add_test(NAME test_RecurrentGradientMachine
-    COMMAND .set_python_path.sh -d
-            ${PADDLE_SOURCE_DIR}/python:${PADDLE_SOURCE_DIR}/paddle/gserver/tests
-            ${CMAKE_CURRENT_BINARY_DIR}/test_RecurrentGradientMachine
-    WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle)
-
-add_unittest_without_exec(test_NetworkCompare
-    test_NetworkCompare.cpp)
-if(WITH_GPU)
-    add_test(NAME test_NetworkCompare
-        COMMAND .set_python_path.sh -d ${PADDLE_SOURCE_DIR}/python ${CMAKE_CURRENT_BINARY_DIR}/test_NetworkCompare --use_gpu=true
-        WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle)
-else()
-    add_test(NAME test_NetworkCompare
-        COMMAND .set_python_path.sh -d ${PADDLE_SOURCE_DIR}/python ${CMAKE_CURRENT_BINARY_DIR}/test_NetworkCompare --use_gpu=false
-        WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle)
+  # TODO(yuyang18): There is a bug in test_RecurrentGradientMachine;
+  # I will fix it.
+  add_unittest_without_exec(test_RecurrentGradientMachine
+      test_RecurrentGradientMachine.cpp)
+  add_test(NAME test_RecurrentGradientMachine
+      COMMAND .set_python_path.sh -d
+              ${PADDLE_SOURCE_DIR}/python:${PADDLE_SOURCE_DIR}/paddle/gserver/tests
+              ${CMAKE_CURRENT_BINARY_DIR}/test_RecurrentGradientMachine
+      WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle)
+endif()
+
+if(NOT MOBILE_INFERENCE)
+    add_unittest_without_exec(test_NetworkCompare
+        test_NetworkCompare.cpp)
+    if(WITH_GPU)
+        add_test(NAME test_NetworkCompare
+            COMMAND .set_python_path.sh -d ${PADDLE_SOURCE_DIR}/python ${CMAKE_CURRENT_BINARY_DIR}/test_NetworkCompare --use_gpu=true
+            WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle)
+    else()
+        add_test(NAME test_NetworkCompare
+            COMMAND .set_python_path.sh -d ${PADDLE_SOURCE_DIR}/python ${CMAKE_CURRENT_BINARY_DIR}/test_NetworkCompare --use_gpu=false
+            WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}/paddle)
+    endif()
 endif()
 
 
diff --git a/paddle/gserver/tests/LayerGradUtil.h b/paddle/gserver/tests/LayerGradUtil.h
index 88e831f78b..e10a27eedf 100644
--- a/paddle/gserver/tests/LayerGradUtil.h
+++ b/paddle/gserver/tests/LayerGradUtil.h
@@ -15,7 +15,6 @@ limitations under the License. */
 #pragma once
 #include "ModelConfig.pb.h"
 #include "paddle/gserver/layers/DataLayer.h"
-#include "paddle/trainer/Trainer.h"
 
 #include "paddle/testing/TestUtil.h"
 using namespace std;  // NOLINT
diff --git a/paddle/gserver/tests/test_ActivationGrad.cpp b/paddle/gserver/tests/test_ActivationGrad.cpp
index de93972a58..f4c2a07c44 100644
--- a/paddle/gserver/tests/test_ActivationGrad.cpp
+++ b/paddle/gserver/tests/test_ActivationGrad.cpp
@@ -17,7 +17,6 @@ limitations under the License. */
 #include <vector>
 #include "ModelConfig.pb.h"
 #include "paddle/gserver/layers/DataLayer.h"
-#include "paddle/trainer/Trainer.h"
 
 #include "LayerGradUtil.h"
 #include "paddle/testing/TestUtil.h"
diff --git a/paddle/gserver/tests/test_BatchNorm.cpp b/paddle/gserver/tests/test_BatchNorm.cpp
index 659eefa31b..38bcbb880d 100644
--- a/paddle/gserver/tests/test_BatchNorm.cpp
+++ b/paddle/gserver/tests/test_BatchNorm.cpp
@@ -17,7 +17,6 @@ limitations under the License. */
 #include <vector>
 #include "ModelConfig.pb.h"
 #include "paddle/gserver/layers/DataLayer.h"
-#include "paddle/trainer/Trainer.h"
 #include "paddle/utils/GlobalConstants.h"
 
 #include "LayerGradUtil.h"
diff --git a/paddle/gserver/tests/test_CRFLayerGrad.cpp b/paddle/gserver/tests/test_CRFLayerGrad.cpp
index df14449291..f010066ebc 100644
--- a/paddle/gserver/tests/test_CRFLayerGrad.cpp
+++ b/paddle/gserver/tests/test_CRFLayerGrad.cpp
@@ -16,7 +16,6 @@ limitations under the License. */
 #include "ModelConfig.pb.h"
 #include "paddle/gserver/layers/DataLayer.h"
 #include "paddle/gserver/layers/LinearChainCRF.h"
-#include "paddle/trainer/Trainer.h"
 
 #include "LayerGradUtil.h"
 #include "paddle/testing/TestUtil.h"
diff --git a/paddle/gserver/tests/test_ConvTrans.cpp b/paddle/gserver/tests/test_ConvTrans.cpp
index 6035a866b4..5f2f966547 100644
--- a/paddle/gserver/tests/test_ConvTrans.cpp
+++ b/paddle/gserver/tests/test_ConvTrans.cpp
@@ -18,7 +18,6 @@ limitations under the License. */
 #include "ModelConfig.pb.h"
 #include "paddle/gserver/layers/DataLayer.h"
 #include "paddle/math/MathUtils.h"
-#include "paddle/trainer/Trainer.h"
 #include "paddle/utils/GlobalConstants.h"
 
 #include "LayerGradUtil.h"
diff --git a/paddle/gserver/tests/test_ConvUnify.cpp b/paddle/gserver/tests/test_ConvUnify.cpp
index e7325e0cc3..bcc10a6197 100644
--- a/paddle/gserver/tests/test_ConvUnify.cpp
+++ b/paddle/gserver/tests/test_ConvUnify.cpp
@@ -18,7 +18,6 @@ limitations under the License. */
 #include "ModelConfig.pb.h"
 #include "paddle/gserver/layers/DataLayer.h"
 #include "paddle/math/MathUtils.h"
-#include "paddle/trainer/Trainer.h"
 #include "paddle/utils/GlobalConstants.h"
 
 #include "LayerGradUtil.h"
diff --git a/paddle/gserver/tests/test_CrossEntropyOverBeamGrad.cpp b/paddle/gserver/tests/test_CrossEntropyOverBeamGrad.cpp
index c922237d33..477638426f 100644
--- a/paddle/gserver/tests/test_CrossEntropyOverBeamGrad.cpp
+++ b/paddle/gserver/tests/test_CrossEntropyOverBeamGrad.cpp
@@ -18,7 +18,6 @@ limitations under the License. */
 #include <gtest/gtest.h>
 #include "ModelConfig.pb.h"
 #include "paddle/gserver/layers/DataLayer.h"
-#include "paddle/trainer/Trainer.h"
 
 #include "LayerGradUtil.h"
 #include "paddle/testing/TestUtil.h"
diff --git a/paddle/gserver/tests/test_KmaxSeqScore.cpp b/paddle/gserver/tests/test_KmaxSeqScore.cpp
index 308abe6816..483e382f6d 100644
--- a/paddle/gserver/tests/test_KmaxSeqScore.cpp
+++ b/paddle/gserver/tests/test_KmaxSeqScore.cpp
@@ -18,7 +18,6 @@ limitations under the License. */
 #include <vector>
 #include "ModelConfig.pb.h"
 #include "paddle/gserver/layers/DataLayer.h"
-#include "paddle/trainer/Trainer.h"
 #include "paddle/utils/GlobalConstants.h"
 
 #include "LayerGradUtil.h"
diff --git a/paddle/gserver/tests/test_LayerGrad.cpp b/paddle/gserver/tests/test_LayerGrad.cpp
index 090bde7b20..876a935fb0 100644
--- a/paddle/gserver/tests/test_LayerGrad.cpp
+++ b/paddle/gserver/tests/test_LayerGrad.cpp
@@ -21,7 +21,6 @@ limitations under the License. */
 #include "ModelConfig.pb.h"
 #include "paddle/gserver/layers/DataLayer.h"
 #include "paddle/math/MathUtils.h"
-#include "paddle/trainer/Trainer.h"
 
 #include "LayerGradUtil.h"
 #include "paddle/testing/TestUtil.h"
diff --git a/paddle/gserver/tests/test_SelectiveFCLayer.cpp b/paddle/gserver/tests/test_SelectiveFCLayer.cpp
index ab23d00a2c..1da935dfc9 100644
--- a/paddle/gserver/tests/test_SelectiveFCLayer.cpp
+++ b/paddle/gserver/tests/test_SelectiveFCLayer.cpp
@@ -24,7 +24,6 @@ limitations under the License. */
 #include "paddle/gserver/layers/Layer.h"
 #include "paddle/gserver/layers/SelectiveFullyConnectedLayer.h"
 #include "paddle/math/CpuSparseMatrix.h"
-#include "paddle/trainer/Trainer.h"
 
 using namespace paddle;  // NOLINT
 using namespace std;     // NOLINT
diff --git a/paddle/gserver/tests/test_SeqSliceLayerGrad.cpp b/paddle/gserver/tests/test_SeqSliceLayerGrad.cpp
index e1d4ae1617..8e04ccd16a 100644
--- a/paddle/gserver/tests/test_SeqSliceLayerGrad.cpp
+++ b/paddle/gserver/tests/test_SeqSliceLayerGrad.cpp
@@ -15,7 +15,6 @@ limitations under the License. */
 #include <gtest/gtest.h>
 #include "ModelConfig.pb.h"
 #include "paddle/gserver/layers/DataLayer.h"
-#include "paddle/trainer/Trainer.h"
 
 #include "LayerGradUtil.h"
 #include "paddle/testing/TestUtil.h"

From 5b862fedf1feb78b7dc63451e5219cef2fde33a3 Mon Sep 17 00:00:00 2001
From: Luo Tao <luotao02@baidu.com>
Date: Mon, 9 Oct 2017 11:31:13 +0800
Subject: [PATCH 28/82] remove debug log in interp_op.cc

---
 paddle/operators/interp_op.cc | 1 -
 1 file changed, 1 deletion(-)

diff --git a/paddle/operators/interp_op.cc b/paddle/operators/interp_op.cc
index 04bcb9ade8..fc8b9a11b8 100644
--- a/paddle/operators/interp_op.cc
+++ b/paddle/operators/interp_op.cc
@@ -58,7 +58,6 @@ class InterpOp : public NetOp {
                                              {{"Out", {Output("Out")}}}, {}));
 
     CompleteAddOp(false);
-    LOG(INFO) << DebugString();
   }
 };
 

From 8f2c48cf4c46b3b3d1b58fb8fec13a9c95d2327c Mon Sep 17 00:00:00 2001
From: hedaoyuan <hedaoyuan@github.com>
Date: Mon, 9 Oct 2017 11:32:17 +0800
Subject: [PATCH 29/82] Fix Layer.cpp

---
 paddle/gserver/layers/Layer.cpp | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/paddle/gserver/layers/Layer.cpp b/paddle/gserver/layers/Layer.cpp
index 075e8166ef..01f2aae6cf 100644
--- a/paddle/gserver/layers/Layer.cpp
+++ b/paddle/gserver/layers/Layer.cpp
@@ -15,11 +15,14 @@ limitations under the License. */
 #include "paddle/utils/Util.h"
 
 #include "CostLayer.h"
-#include "ValidationLayer.h"
 #include "paddle/math/SparseMatrix.h"
 #include "paddle/utils/Error.h"
 #include "paddle/utils/Logging.h"
 
+#ifndef PADDLE_MOBILE_INFERENCE
+#include "ValidationLayer.h"
+#endif
+
 DEFINE_bool(log_error_clipping, false, "enable log error clipping or not");
 
 namespace paddle {

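Guarding the ValidationLayer include behind PADDLE_MOBILE_INFERENCE is plain C++ conditional compilation. For the Python side of the tree, the closest runnable analogue is an optional import that degrades gracefully when a dependency is excluded from a slimmed-down build; the sketch below is illustrative only, and both the environment flag and the `validation_layer` module name are hypothetical.

    import os

    # Hypothetical flag mirroring the PADDLE_MOBILE_INFERENCE build definition.
    MOBILE_INFERENCE = os.environ.get("PADDLE_MOBILE_INFERENCE") == "1"

    validation = None
    if not MOBILE_INFERENCE:
        try:
            # Hypothetical optional module, analogous to ValidationLayer.h.
            import validation_layer as validation
        except ImportError:
            validation = None  # full build without the module installed

    print("validation support: %s" % (validation is not None))
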
From 707d144c93aa6053cd02c58bc92bf1d7306c95c3 Mon Sep 17 00:00:00 2001
From: Luo Tao <luotao02@baidu.com>
Date: Mon, 9 Oct 2017 14:45:01 +0800
Subject: [PATCH 30/82] Unify Reduce functions and simplify registration code

---
 paddle/operators/activation_op.cc |  8 +++----
 paddle/operators/activation_op.cu | 10 ++++-----
 paddle/operators/reduce_op.cc     | 34 +++++++++--------------------
 paddle/operators/reduce_op.cu     | 36 ++++++++-----------------------
 paddle/operators/reduce_op.h      |  6 ++++++
 5 files changed, 33 insertions(+), 61 deletions(-)

diff --git a/paddle/operators/activation_op.cc b/paddle/operators/activation_op.cc
index 66e9d2c401..2afa8a68b0 100644
--- a/paddle/operators/activation_op.cc
+++ b/paddle/operators/activation_op.cc
@@ -285,11 +285,9 @@ REGISTER_OP(stanh, ops::ActivationOp, ops::STanhOpMaker<float>, stanh_grad,
 #define REGISTER_ACTIVATION_CPU_KERNEL(act_type, functor, grad_functor)        \
   REGISTER_OP_CPU_KERNEL(                                                      \
       act_type,                                                                \
-      paddle::operators::ActivationKernel<paddle::platform::CPUPlace,          \
-                                          paddle::operators::functor<float>>); \
+      ops::ActivationKernel<paddle::platform::CPUPlace, ops::functor<float>>); \
   REGISTER_OP_CPU_KERNEL(act_type##_grad,                                      \
-                         paddle::operators::ActivationGradKernel<              \
-                             paddle::platform::CPUPlace,                       \
-                             paddle::operators::grad_functor<float>>);
+                         ops::ActivationGradKernel<paddle::platform::CPUPlace, \
+                                                   ops::grad_functor<float>>);
 
 FOR_EACH_KERNEL_FUNCTOR(REGISTER_ACTIVATION_CPU_KERNEL);
diff --git a/paddle/operators/activation_op.cu b/paddle/operators/activation_op.cu
index 93e9f1c694..7b7644519d 100644
--- a/paddle/operators/activation_op.cu
+++ b/paddle/operators/activation_op.cu
@@ -15,14 +15,14 @@
 #define EIGEN_USE_GPU
 #include "paddle/operators/activation_op.h"
 
+namespace ops = paddle::operators;
+
 #define REGISTER_ACTIVATION_GPU_KERNEL(act_type, functor, grad_functor)        \
   REGISTER_OP_GPU_KERNEL(                                                      \
       act_type,                                                                \
-      paddle::operators::ActivationKernel<paddle::platform::GPUPlace,          \
-                                          paddle::operators::functor<float>>); \
+      ops::ActivationKernel<paddle::platform::GPUPlace, ops::functor<float>>); \
   REGISTER_OP_GPU_KERNEL(act_type##_grad,                                      \
-                         paddle::operators::ActivationGradKernel<              \
-                             paddle::platform::GPUPlace,                       \
-                             paddle::operators::grad_functor<float>>);
+                         ops::ActivationGradKernel<paddle::platform::GPUPlace, \
+                                                   ops::grad_functor<float>>);
 
 FOR_EACH_KERNEL_FUNCTOR(REGISTER_ACTIVATION_GPU_KERNEL);
diff --git a/paddle/operators/reduce_op.cc b/paddle/operators/reduce_op.cc
index 3ef443d1c7..87f66e1e93 100644
--- a/paddle/operators/reduce_op.cc
+++ b/paddle/operators/reduce_op.cc
@@ -168,36 +168,22 @@ namespace ops = paddle::operators;
 
 REGISTER_OP(reduce_sum, ops::ReduceOp, ops::ReduceSumOpMaker, reduce_sum_grad,
             ops::ReduceGradOp);
-REGISTER_OP_CPU_KERNEL(
-    reduce_sum,
-    ops::ReduceKernel<paddle::platform::CPUPlace, float, ops::SumFunctor>);
-REGISTER_OP_CPU_KERNEL(reduce_sum_grad,
-                       ops::ReduceGradKernel<paddle::platform::CPUPlace, float,
-                                             ops::SumGradFunctor>);
 
 REGISTER_OP(reduce_mean, ops::ReduceOp, ops::ReduceMeanOpMaker,
             reduce_mean_grad, ops::ReduceGradOp);
-REGISTER_OP_CPU_KERNEL(
-    reduce_mean,
-    ops::ReduceKernel<paddle::platform::CPUPlace, float, ops::MeanFunctor>);
-REGISTER_OP_CPU_KERNEL(reduce_mean_grad,
-                       ops::ReduceGradKernel<paddle::platform::CPUPlace, float,
-                                             ops::MeanGradFunctor>);
 
 REGISTER_OP(reduce_max, ops::ReduceOp, ops::ReduceMaxOpMaker, reduce_max_grad,
             ops::ReduceGradOp);
-REGISTER_OP_CPU_KERNEL(
-    reduce_max,
-    ops::ReduceKernel<paddle::platform::CPUPlace, float, ops::MaxFunctor>);
-REGISTER_OP_CPU_KERNEL(reduce_max_grad,
-                       ops::ReduceGradKernel<paddle::platform::CPUPlace, float,
-                                             ops::MaxOrMinGradFunctor>);
 
 REGISTER_OP(reduce_min, ops::ReduceOp, ops::ReduceMaxOpMaker, reduce_min_grad,
             ops::ReduceGradOp);
-REGISTER_OP_CPU_KERNEL(
-    reduce_min,
-    ops::ReduceKernel<paddle::platform::CPUPlace, float, ops::MinFunctor>);
-REGISTER_OP_CPU_KERNEL(reduce_min_grad,
-                       ops::ReduceGradKernel<paddle::platform::CPUPlace, float,
-                                             ops::MaxOrMinGradFunctor>);
+
+#define REGISTER_REDUCE_CPU_KERNEL(reduce_type, functor, grad_functor)     \
+  REGISTER_OP_CPU_KERNEL(                                                  \
+      reduce_type,                                                         \
+      ops::ReduceKernel<paddle::platform::CPUPlace, float, ops::functor>); \
+  REGISTER_OP_CPU_KERNEL(reduce_type##_grad,                               \
+                         ops::ReduceGradKernel<paddle::platform::CPUPlace, \
+                                               float, ops::grad_functor>);
+
+FOR_EACH_KERNEL_FUNCTOR(REGISTER_REDUCE_CPU_KERNEL);
diff --git a/paddle/operators/reduce_op.cu b/paddle/operators/reduce_op.cu
index 595127b858..d306e1a240 100644
--- a/paddle/operators/reduce_op.cu
+++ b/paddle/operators/reduce_op.cu
@@ -17,30 +17,12 @@
 
 namespace ops = paddle::operators;
 
-REGISTER_OP_GPU_KERNEL(
-    reduce_sum,
-    ops::ReduceKernel<paddle::platform::GPUPlace, float, ops::SumFunctor>);
-REGISTER_OP_GPU_KERNEL(reduce_sum_grad,
-                       ops::ReduceGradKernel<paddle::platform::GPUPlace, float,
-                                             ops::SumGradFunctor>);
-
-REGISTER_OP_GPU_KERNEL(
-    reduce_mean,
-    ops::ReduceKernel<paddle::platform::GPUPlace, float, ops::MeanFunctor>);
-REGISTER_OP_GPU_KERNEL(reduce_mean_grad,
-                       ops::ReduceGradKernel<paddle::platform::GPUPlace, float,
-                                             ops::MeanGradFunctor>);
-
-REGISTER_OP_GPU_KERNEL(
-    reduce_max,
-    ops::ReduceKernel<paddle::platform::GPUPlace, float, ops::MaxFunctor>);
-REGISTER_OP_GPU_KERNEL(reduce_max_grad,
-                       ops::ReduceGradKernel<paddle::platform::GPUPlace, float,
-                                             ops::MaxOrMinGradFunctor>);
-
-REGISTER_OP_GPU_KERNEL(
-    reduce_min,
-    ops::ReduceKernel<paddle::platform::GPUPlace, float, ops::MinFunctor>);
-REGISTER_OP_GPU_KERNEL(reduce_min_grad,
-                       ops::ReduceGradKernel<paddle::platform::GPUPlace, float,
-                                             ops::MaxOrMinGradFunctor>);
+#define REGISTER_REDUCE_GPU_KERNEL(reduce_type, functor, grad_functor)     \
+  REGISTER_OP_GPU_KERNEL(                                                  \
+      reduce_type,                                                         \
+      ops::ReduceKernel<paddle::platform::GPUPlace, float, ops::functor>); \
+  REGISTER_OP_GPU_KERNEL(reduce_type##_grad,                               \
+                         ops::ReduceGradKernel<paddle::platform::GPUPlace, \
+                                               float, ops::grad_functor>);
+
+FOR_EACH_KERNEL_FUNCTOR(REGISTER_REDUCE_GPU_KERNEL);
diff --git a/paddle/operators/reduce_op.h b/paddle/operators/reduce_op.h
index ba3f3db81d..45043c440b 100644
--- a/paddle/operators/reduce_op.h
+++ b/paddle/operators/reduce_op.h
@@ -198,3 +198,9 @@ class ReduceGradKernel : public framework::OpKernel<T> {
 
 }  // namespace operators
 }  // namespace paddle
+
+#define FOR_EACH_KERNEL_FUNCTOR(__macro)                \
+  __macro(reduce_sum, SumFunctor, SumGradFunctor);      \
+  __macro(reduce_mean, MeanFunctor, MeanGradFunctor);   \
+  __macro(reduce_max, MaxFunctor, MaxOrMinGradFunctor); \
+  __macro(reduce_min, MinFunctor, MaxOrMinGradFunctor);

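The FOR_EACH_KERNEL_FUNCTOR X-macro means every reduce op is registered from one table, so the CPU and GPU files stay in sync by construction: adding a new op is one new row. A rough Python analogue of that table-driven registration, with illustrative functor names, might look like this sketch:

    # One table drives all registrations, like FOR_EACH_KERNEL_FUNCTOR.
    def sum_functor(xs):
        return sum(xs)

    def mean_functor(xs):
        return sum(xs) / float(len(xs))

    KERNEL_FUNCTORS = [
        ("reduce_sum", sum_functor),
        ("reduce_mean", mean_functor),
        ("reduce_max", max),
        ("reduce_min", min),
    ]

    registry = {}
    for op_name, functor in KERNEL_FUNCTORS:
        registry[op_name] = functor  # stands in for REGISTER_OP_CPU_KERNEL

    print(registry["reduce_mean"]([1.0, 2.0, 3.0]))  # 2.0
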
From 3f874143fe62062607f341f2559840fc23f4bbd7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E6=AD=A6=E6=AF=85?= <typhoonzero1986@gmail.com>
Date: Mon, 9 Oct 2017 14:55:03 +0800
Subject: [PATCH 31/82] fix grad debug event (#4536)

---
 python/paddle/v2/event.py   | 14 +++++++++++++-
 python/paddle/v2/trainer.py |  9 +++++++--
 2 files changed, 20 insertions(+), 3 deletions(-)

diff --git a/python/paddle/v2/event.py b/python/paddle/v2/event.py
index e66bf67d79..a0ffd31c54 100644
--- a/python/paddle/v2/event.py
+++ b/python/paddle/v2/event.py
@@ -10,7 +10,8 @@ There are:
 * EndPass
 """
 __all__ = [
-    'EndIteration', 'BeginIteration', 'BeginPass', 'EndPass', 'TestResult'
+    'EndIteration', 'BeginIteration', 'BeginPass', 'EndPass', 'TestResult',
+    'EndForwardBackward'
 ]
 
 
@@ -73,6 +74,17 @@ class BeginIteration(object):
         self.batch_id = batch_id
 
 
+class EndForwardBackward(object):
+    """
+    Event fired when the forward/backward computation of one batch completes.
+    """
+
+    def __init__(self, pass_id, batch_id, gm):
+        self.pass_id = pass_id
+        self.batch_id = batch_id
+        self.gm = gm
+
+
 class EndIteration(WithMetric):
     """
     Event On One Batch Training Complete.
diff --git a/python/paddle/v2/trainer.py b/python/paddle/v2/trainer.py
index ca95ef13bd..076e755939 100644
--- a/python/paddle/v2/trainer.py
+++ b/python/paddle/v2/trainer.py
@@ -164,11 +164,18 @@ class SGD(object):
                                                           pass_type)
                 self.__gradient_machine__.eval(pass_evaluator)
                 self.__gradient_machine__.eval(batch_evaluator)
+                event_handler(
+                    v2_event.EndForwardBackward(
+                        pass_id=pass_id,
+                        batch_id=batch_id,
+                        gm=self.__gradient_machine__))
                 for each_param in self.__gradient_machine__.getNonStaticParameters(
                 ):
                     self.__parameter_updater__.update(each_param)
                 cost_sum = out_args.sum()
                 cost = cost_sum / len(data_batch)
+                self.__parameter_updater__.finishBatch(cost)
+                batch_evaluator.finish()
                 event_handler(
                     v2_event.EndIteration(
                         pass_id=pass_id,
@@ -176,8 +183,6 @@ class SGD(object):
                         cost=cost,
                         evaluator=batch_evaluator,
                         gm=self.__gradient_machine__))
-                self.__parameter_updater__.finishBatch(cost)
-                batch_evaluator.finish()
 
             self.__parameter_updater__.finishPass()
             pass_evaluator.finish()

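With EndForwardBackward emitted before the parameter update (and finishBatch/batch_evaluator.finish() now correctly ordered before EndIteration), a user-supplied event handler can look at the gradient machine while the batch gradients are still live. A minimal sketch of such a handler, assuming it is passed to SGD.train as event_handler in the usual way:

    import paddle.v2.event as v2_event

    def event_handler(event):
        if isinstance(event, v2_event.EndForwardBackward):
            # Parameters are not updated yet, so event.gm (the gradient
            # machine) can still be inspected for this batch's gradients.
            print("pass %d, batch %d: forward/backward done" %
                  (event.pass_id, event.batch_id))
        elif isinstance(event, v2_event.EndIteration):
            print("pass %d, batch %d: cost %f" %
                  (event.pass_id, event.batch_id, event.cost))
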
From fcfce48421650f983b484af9fe20d2e843dc042b Mon Sep 17 00:00:00 2001
From: chengduoZH <zhaochengduo@163.com>
Date: Mon, 9 Oct 2017 19:02:24 +0800
Subject: [PATCH 32/82] follow comments

---
 paddle/operators/CMakeLists.txt               |  3 +-
 paddle/operators/math/pooling.h               | 42 +++++++++++++++++--
 paddle/operators/pool_with_index_op.cc        | 20 ++++-----
 paddle/operators/pool_with_index_op.cu        |  8 ++--
 .../v2/framework/tests/test_pool_max_op.py    | 21 +++++-----
 5 files changed, 65 insertions(+), 29 deletions(-)

diff --git a/paddle/operators/CMakeLists.txt b/paddle/operators/CMakeLists.txt
index 49da132049..39af318ca5 100644
--- a/paddle/operators/CMakeLists.txt
+++ b/paddle/operators/CMakeLists.txt
@@ -75,10 +75,11 @@ function(op_library TARGET)
         file(APPEND ${pybind_file} "USE_OP(reduce_sum);\n")
     endif()
 
+    # pool_with_index_op contains several operators
     if ("${TARGET}" STREQUAL "pool_with_index_op")
         set(pybind_flag 1)
         # It's enough to just add one operator to pybind
-        file(APPEND ${pybind_file} "USE_OP(maxPool2dWithIndex);\n")
+        file(APPEND ${pybind_file} "USE_OP(max_pool2d_with_index);\n")
     endif()
 
     # pybind USE_NO_KERNEL_OP
diff --git a/paddle/operators/math/pooling.h b/paddle/operators/math/pooling.h
index d819e5986e..f15ddca69a 100644
--- a/paddle/operators/math/pooling.h
+++ b/paddle/operators/math/pooling.h
@@ -21,15 +21,26 @@ limitations under the License. */
 namespace paddle {
 namespace operators {
 namespace math {
-//////////////////////
-#define FLT_MAX __FLT_MAX__  //
 
+#define FLT_MAX \
+  __FLT_MAX__  // TODO: this definition may belong in a shared header; its
+               // final location is still undecided.
+
+/*
+ * \brief Extract the elementary operations shared by the pooling kernels.
+ *        Both MaxPool and AvgPool need an initial, a compute and a finalize
+ *        operation. MaxPool initializes its temporary variable to the
+ *        negative maximum so that the true maximum of the pooling field is
+ *        found. AvgPool initializes its temporary variable to zero,
+ *        accumulates all values in the pooling field, then takes the average.
+ *        MaxPoolGrad and AvgPoolGrad are the corresponding gradient ops.
+ */
 template <class T>
 class MaxPool {
  public:
   DEVICE inline T initial() { return static_cast<T>(-FLT_MAX); }
   DEVICE inline void compute(T& y, const T& x) { y = y > x ? y : x; }
-  DEVICE inline void finalize(T& y, const T& poo_size) {}
+  DEVICE inline void finalize(T& y, const T& pool_field) {}
 };
 
 template <class T>
@@ -37,8 +48,9 @@ class AvgPool {
  public:
   DEVICE inline T initial() { return static_cast<T>(0); }
   DEVICE inline void compute(T& y, const T& x) { y += x; }
-  DEVICE inline void finalize(T& y, const T& poo_size) { y /= poo_size; }
+  DEVICE inline void finalize(T& y, const T& pool_field) { y /= pool_field; }
 };
+
 template <class T>
 class MaxPoolGrad {
  public:
@@ -57,6 +69,20 @@ class AvgPoolGrad {
   }
 };
 
+/*
+ * \brief Compute the pooling result and calculate its gradient.
+ *
+ * In pool2d, all tensors are in NCHW format. In pool3d, all tensors are in
+ * NCDHW format.
+ *
+ * In max pooling, a pooling region may contain more than one maximum
+ * element.
+ * In that case the gradient is propagated only to the first maximum element,
+ * which differs from average pooling, so max_pool_grad gets its own functors:
+ * MaxPool2dGradFunctor and MaxPool3dGradFunctor.
+ *
+ */
+
 template <typename Place, typename PoolProcess, typename T>
 class Pool2dFunctor {
  public:
@@ -117,6 +143,14 @@ class MaxPool3dGradFunctor {
                   std::vector<int>& strides, std::vector<int>& paddings);
 };
 
+/*
+ * \brief Compute the max pooling result and the index of each maximum
+ * element, and calculate the corresponding gradient.
+ * Sub-sampling pooling needs the index of the maximum element.
+ * In pool2d, all tensors are in NCHW format. In pool3d, all tensors are in
+ * NCDHW format.
+ *
+ */
 template <typename Place, typename T>
 class MaxPool2dWithIndexFunctor {
  public:
diff --git a/paddle/operators/pool_with_index_op.cc b/paddle/operators/pool_with_index_op.cc
index c51145b923..2e6a5f2555 100644
--- a/paddle/operators/pool_with_index_op.cc
+++ b/paddle/operators/pool_with_index_op.cc
@@ -17,8 +17,8 @@ limitations under the License. */
 namespace paddle {
 namespace operators {
 
-int OutputSizeMaxPool(int input_size, int filter_size, int padding,
-                      int stride) {
+inline int OutputSizeMaxPool(int input_size, int filter_size, int padding,
+                             int stride) {
   int output_size = (input_size - filter_size + 2 * padding) / stride + 1;
   return output_size;
 }
@@ -194,24 +194,24 @@ the input and ksize, strides, paddings parameters.
 
 namespace ops = paddle::operators;
 
-REGISTER_OP(maxPool2dWithIndex, ops::MaxPoolWithIndexOp,
-            ops::MaxPool2dWithIndexOpMaker, maxPool2dWithIndex_grad,
+REGISTER_OP(max_pool2d_with_index, ops::MaxPoolWithIndexOp,
+            ops::MaxPool2dWithIndexOpMaker, max_pool2d_with_index_grad,
             ops::MaxPoolWithIndexOpGrad);
 
 REGISTER_OP_CPU_KERNEL(
-    maxPool2dWithIndex,
+    max_pool2d_with_index,
     ops::MaxPoolWithIndexKernel<paddle::platform::CPUPlace, float>);
 REGISTER_OP_CPU_KERNEL(
-    maxPool2dWithIndex_grad,
+    max_pool2d_with_index_grad,
     ops::MaxPoolWithIndexGradKernel<paddle::platform::CPUPlace, float>)
 
-REGISTER_OP(maxPool3dWithIndex, ops::MaxPoolWithIndexOp,
-            ops::MaxPool3dWithIndexOpMaker, maxPool3dWithIndex_grad,
+REGISTER_OP(max_pool3d_with_index, ops::MaxPoolWithIndexOp,
+            ops::MaxPool3dWithIndexOpMaker, max_pool3d_with_index_grad,
             ops::MaxPoolWithIndexOpGrad);
 
 REGISTER_OP_CPU_KERNEL(
-    maxPool3dWithIndex,
+    max_pool3d_with_index,
     ops::MaxPoolWithIndexKernel<paddle::platform::CPUPlace, float>);
 REGISTER_OP_CPU_KERNEL(
-    maxPool3dWithIndex_grad,
+    max_pool3d_with_index_grad,
     ops::MaxPoolWithIndexGradKernel<paddle::platform::CPUPlace, float>)
diff --git a/paddle/operators/pool_with_index_op.cu b/paddle/operators/pool_with_index_op.cu
index 8007fc7ccf..287657d4b1 100644
--- a/paddle/operators/pool_with_index_op.cu
+++ b/paddle/operators/pool_with_index_op.cu
@@ -17,15 +17,15 @@ limitations under the License. */
 namespace ops = paddle::operators;
 
 REGISTER_OP_GPU_KERNEL(
-    maxPool2dWithIndex,
+    max_pool2d_with_index,
     ops::MaxPoolWithIndexKernel<paddle::platform::GPUPlace, float>);
 REGISTER_OP_GPU_KERNEL(
-    maxPool2dWithIndex_grad,
+    max_pool2d_with_index_grad,
     ops::MaxPoolWithIndexGradKernel<paddle::platform::GPUPlace, float>)
 
 REGISTER_OP_GPU_KERNEL(
-    maxPool3dWithIndex,
+    max_pool3d_with_index,
     ops::MaxPoolWithIndexKernel<paddle::platform::GPUPlace, float>);
 REGISTER_OP_GPU_KERNEL(
-    maxPool3dWithIndex_grad,
+    max_pool3d_with_index_grad,
     ops::MaxPoolWithIndexGradKernel<paddle::platform::GPUPlace, float>)
diff --git a/python/paddle/v2/framework/tests/test_pool_max_op.py b/python/paddle/v2/framework/tests/test_pool_max_op.py
index 17028c3bf6..f0f8aa6089 100644
--- a/python/paddle/v2/framework/tests/test_pool_max_op.py
+++ b/python/paddle/v2/framework/tests/test_pool_max_op.py
@@ -100,7 +100,8 @@ class TestMaxPoolWithIndex_Op(OpTest):
 
     def initTestCase(self):
         self.global_pool = True
-        self.op_type = "maxPool3dWithIndex"
+        self.index = "max_pool3d_with_index"
+        self.op_type = "%s" % self.index
         self.pool_forward_naive = max_pool3D_forward_naive
         self.shape = [2, 3, 5, 5, 5]
         self.ksize = [3, 3, 3]
@@ -111,7 +112,7 @@ class TestMaxPoolWithIndex_Op(OpTest):
 class TestCase1(TestMaxPoolWithIndex_Op):
     def initTestCase(self):
         self.global_pool = True
-        self.op_type = "maxPool3dWithIndex"
+        self.op_type = "max_pool3d_with_index"
         self.pool_forward_naive = max_pool3D_forward_naive
         self.shape = [2, 3, 5, 5, 5]
         self.ksize = [3, 3, 3]
@@ -122,7 +123,7 @@ class TestCase1(TestMaxPoolWithIndex_Op):
 class TestCase2(TestMaxPoolWithIndex_Op):
     def initTestCase(self):
         self.global_pool = False
-        self.op_type = "maxPool3dWithIndex"
+        self.op_type = "max_pool3d_with_index"
         self.pool_forward_naive = max_pool3D_forward_naive
         self.shape = [2, 3, 7, 7, 7]
         self.ksize = [3, 3, 3]
@@ -133,7 +134,7 @@ class TestCase2(TestMaxPoolWithIndex_Op):
 class TestCase3(TestMaxPoolWithIndex_Op):
     def initTestCase(self):
         self.global_pool = False
-        self.op_type = "maxPool3dWithIndex"
+        self.op_type = "max_pool3d_with_index"
         self.pool_forward_naive = max_pool3D_forward_naive
         self.shape = [2, 3, 7, 7, 7]
         self.ksize = [3, 3, 3]
@@ -144,7 +145,7 @@ class TestCase3(TestMaxPoolWithIndex_Op):
 class TestCase4(TestMaxPoolWithIndex_Op):
     def initTestCase(self):
         self.global_pool = True
-        self.op_type = "maxPool3dWithIndex"
+        self.op_type = "max_pool3d_with_index"
         self.pool_forward_naive = max_pool3D_forward_naive
         self.shape = [2, 3, 5, 5, 5]
         self.ksize = [3, 3, 3]
@@ -155,7 +156,7 @@ class TestCase4(TestMaxPoolWithIndex_Op):
 class TestCase5(TestMaxPoolWithIndex_Op):
     def initTestCase(self):
         self.global_pool = True
-        self.op_type = "maxPool3dWithIndex"
+        self.op_type = "max_pool3d_with_index"
         self.pool_forward_naive = max_pool3D_forward_naive
         self.shape = [2, 3, 5, 5, 5]
         self.ksize = [3, 3, 3]
@@ -166,7 +167,7 @@ class TestCase5(TestMaxPoolWithIndex_Op):
 class TestCase6(TestMaxPoolWithIndex_Op):
     def initTestCase(self):
         self.global_pool = False
-        self.op_type = "maxPool2dWithIndex"
+        self.op_type = "max_pool2d_with_index"
         self.pool_forward_naive = max_pool2D_forward_naive
         self.shape = [2, 3, 7, 7]
         self.ksize = [3, 3]
@@ -177,7 +178,7 @@ class TestCase6(TestMaxPoolWithIndex_Op):
 class TestCase7(TestMaxPoolWithIndex_Op):
     def initTestCase(self):
         self.global_pool = False
-        self.op_type = "maxPool2dWithIndex"
+        self.op_type = "max_pool2d_with_index"
         self.pool_forward_naive = max_pool2D_forward_naive
         self.shape = [2, 3, 7, 7]
         self.ksize = [3, 3]
@@ -188,7 +189,7 @@ class TestCase7(TestMaxPoolWithIndex_Op):
 class TestCase8(TestMaxPoolWithIndex_Op):
     def initTestCase(self):
         self.global_pool = True
-        self.op_type = "maxPool2dWithIndex"
+        self.op_type = "max_pool2d_with_index"
         self.pool_forward_naive = max_pool2D_forward_naive
         self.shape = [2, 3, 5, 5]
         self.ksize = [3, 3]
@@ -199,7 +200,7 @@ class TestCase8(TestMaxPoolWithIndex_Op):
 class TestCase9(TestMaxPoolWithIndex_Op):
     def initTestCase(self):
         self.global_pool = True
-        self.op_type = "maxPool2dWithIndex"
+        self.op_type = "max_pool2d_with_index"
         self.pool_forward_naive = max_pool2D_forward_naive
         self.shape = [2, 3, 5, 5]
         self.ksize = [3, 3]

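The initial/compute/finalize decomposition documented in pooling.h above is what lets one pooling loop serve both max and average pooling. Below is a small Python sketch of that same three-step protocol on a 1-D array, with the window stride equal to the window size; the class and function names are illustrative, and the real functors operate on NCHW/NCDHW tensors.

    class MaxPool(object):
        def initial(self):
            return float("-inf")   # mirrors static_cast<T>(-FLT_MAX)

        def compute(self, y, x):
            return x if x > y else y

        def finalize(self, y, pool_field):
            return y               # max pooling ignores the field size

    class AvgPool(object):
        def initial(self):
            return 0.0

        def compute(self, y, x):
            return y + x

        def finalize(self, y, pool_field):
            return y / pool_field  # average over the pooling field

    def pool1d(data, ksize, process):
        # Naive 1-D pooling driven by the initial/compute/finalize protocol.
        out = []
        for start in range(0, len(data) - ksize + 1, ksize):
            y = process.initial()
            for x in data[start:start + ksize]:
                y = process.compute(y, x)
            out.append(process.finalize(y, ksize))
        return out

    print(pool1d([1, 5, 2, 4, 3, 6], 2, MaxPool()))  # [5, 4, 6]
    print(pool1d([1, 5, 2, 4, 3, 6], 2, AvgPool()))  # [3.0, 3.0, 4.5]
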
From a06f099d9f54b47ce4df7d1ae32c928fb8d7593e Mon Sep 17 00:00:00 2001
From: Luo Tao <luotao02@baidu.com>
Date: Mon, 9 Oct 2017 16:34:05 +0800
Subject: [PATCH 33/82] refine comment of interp_op

---
 paddle/operators/interp_op.cc                 | 43 +++++++++++--------
 .../v2/framework/tests/test_interp_op.py      |  6 +--
 2 files changed, 28 insertions(+), 21 deletions(-)

diff --git a/paddle/operators/interp_op.cc b/paddle/operators/interp_op.cc
index fc8b9a11b8..d02b01c3f3 100644
--- a/paddle/operators/interp_op.cc
+++ b/paddle/operators/interp_op.cc
@@ -30,27 +30,26 @@ class InterpOp : public NetOp {
                       "Input(Y) of InterpOp should not be null.");
     PADDLE_ENFORCE_NE(Input("W"), framework::kEmptyVarName,
                       "Input(W) of InterpOp should not be null.");
-    PADDLE_ENFORCE_NE(Output("MinusOut"), framework::kEmptyVarName,
-                      "Output(MinusOut) of InterpOp should not be null.");
+    PADDLE_ENFORCE_NE(Output("SubOut"), framework::kEmptyVarName,
+                      "Output(SubOut) of InterpOp should not be null.");
     PADDLE_ENFORCE_NE(Output("MulOut"), framework::kEmptyVarName,
                       "Output(MulOut) of InterpOp should not be null.");
     PADDLE_ENFORCE_NE(Output("Out"), framework::kEmptyVarName,
                       "Output(Out) of InterpOp should not be null.");
 
-    // MinusOut = X - Y
+    // SubOut = X - Y
     auto x = Input("X");
     auto y = Input("Y");
-    auto minus_out = Output("MinusOut");
-    AppendOp(framework::OpRegistry::CreateOp("elementwise_sub",
-                                             {{"X", {x}}, {"Y", {y}}},
-                                             {{"Out", {minus_out}}}, {}));
+    auto sub_out = Output("SubOut");
+    AppendOp(framework::OpRegistry::CreateOp(
+        "elementwise_sub", {{"X", {x}}, {"Y", {y}}}, {{"Out", {sub_out}}}, {}));
 
-    // MulOut = MinusOut * W = (X - Y) * W
+    // MulOut = SubOut * W = (X - Y) * W
     auto w = Input("W");
     auto mul_out = Output("MulOut");
     AppendOp(framework::OpRegistry::CreateOp(
-        "elementwise_mul", {{"X", {minus_out}}, {"Y", {w}}},
-        {{"Out", {mul_out}}}, {{"axis", 0}}));
+        "elementwise_mul", {{"X", {sub_out}}, {"Y", {w}}}, {{"Out", {mul_out}}},
+        {{"axis", 0}}));
 
     // Out = MulOut + Y = (X - Y) * W + Y = X * W + Y * (1 - W)
     AppendOp(framework::OpRegistry::CreateOp("elementwise_add",
@@ -65,18 +64,26 @@ class InterpOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   InterpOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
-    AddInput("X", "A 2-D Tensor, the first input of interp_op");
-    AddInput("Y", "A 2-D Tensor, the second input of interp_op");
-    AddInput("W", "A 1-D Tensor, the interpolated values");
-    AddOutput("MinusOut",
-              "A 2-D Tensor, the intermediate outputs, saving X - Y.")
+    AddInput("X",
+             "(Tensor), 2-D Matrix of shape [batch_size, data_dim]"
+             "containing data samples, the first input of interp_op");
+    AddInput("Y",
+             "(Tensor), 2-D Matrix of shape `[batch_size, data_dim]`"
+             "containing data samples, the second input of interp_op");
+    AddInput("W",
+             "(Tensor), 1-D Vector of shape [batch_size],"
+             "the interpolated values in the half-open interval [0.0, 1.0)");
+    AddOutput("SubOut",
+              "(Tensor), the intermediate subtraction outputs, saving X - Y.")
         .AsIntermediate();
     AddOutput("MulOut",
-              "A 2-D Tensor, the intermediate outputs,"
-              "saving the mul mul of (X - Y) and W")
+              "(Tensor), the intermediate multiplication outputs,"
+              "saving the elementwise multiplication of (X - Y) and W.")
         .AsIntermediate();
     AddOutput("Out",
-              "A 2-D Tensor, the output of interp_op, same shape with X");
+              "(Tensor), the output of interp_op, same shape with X,"
+              "returns the first-dimensional piecewise linear interpolant "
+              "between X and Y");
     AddComment(R"DOC(
     Linear Interpolation with two inputs, used in NEURAL TURING MACHINE.
 
diff --git a/python/paddle/v2/framework/tests/test_interp_op.py b/python/paddle/v2/framework/tests/test_interp_op.py
index f82dcc7f50..066569b96c 100644
--- a/python/paddle/v2/framework/tests/test_interp_op.py
+++ b/python/paddle/v2/framework/tests/test_interp_op.py
@@ -10,12 +10,12 @@ class TestInterpOp(OpTest):
         y = np.random.random((2, 3)).astype("float32")
         w = np.random.random(2).astype("float32")
 
-        minus_out = x - y
-        mul_out = minus_out * w.reshape(2, 1)
+        sub_out = x - y
+        mul_out = sub_out * w.reshape(2, 1)
         out = mul_out + y
 
         self.inputs = {'X': x, 'Y': y, 'W': w}
-        self.outputs = {'Out': out, 'MinusOut': minus_out, 'MulOut': mul_out}
+        self.outputs = {'Out': out, 'SubOut': sub_out, 'MulOut': mul_out}
 
     def test_check_output(self):
         self.check_output()

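The renamed intermediates make the arithmetic of interp_op explicit: SubOut = X - Y, MulOut = SubOut * W (with W broadcast along the batch axis), and Out = MulOut + Y = X * W + Y * (1 - W). A numpy check mirroring the updated unit test:

    import numpy as np

    x = np.random.random((2, 3)).astype("float32")
    y = np.random.random((2, 3)).astype("float32")
    w = np.random.random(2).astype("float32")

    sub_out = x - y                      # SubOut
    mul_out = sub_out * w.reshape(2, 1)  # MulOut, W broadcast along axis 0
    out = mul_out + y                    # Out = X * W + Y * (1 - W)

    # The two forms agree up to float rounding.
    assert np.allclose(out, x * w.reshape(2, 1) + y * (1 - w.reshape(2, 1)))
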
From 597299074efb2e926954219c4afac9a6b189904d Mon Sep 17 00:00:00 2001
From: Luo Tao <luotao02@baidu.com>
Date: Mon, 9 Oct 2017 20:11:01 +0800
Subject: [PATCH 34/82] fix bug in REGISTER_OP(reduce_min)

---
 paddle/operators/reduce_op.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/paddle/operators/reduce_op.cc b/paddle/operators/reduce_op.cc
index 87f66e1e93..55f294a9be 100644
--- a/paddle/operators/reduce_op.cc
+++ b/paddle/operators/reduce_op.cc
@@ -175,7 +175,7 @@ REGISTER_OP(reduce_mean, ops::ReduceOp, ops::ReduceMeanOpMaker,
 REGISTER_OP(reduce_max, ops::ReduceOp, ops::ReduceMaxOpMaker, reduce_max_grad,
             ops::ReduceGradOp);
 
-REGISTER_OP(reduce_min, ops::ReduceOp, ops::ReduceMaxOpMaker, reduce_min_grad,
+REGISTER_OP(reduce_min, ops::ReduceOp, ops::ReduceMinOpMaker, reduce_min_grad,
             ops::ReduceGradOp);
 
 #define REGISTER_REDUCE_CPU_KERNEL(reduce_type, functor, grad_functor)     \

From b14c122a949402169678aa1a154349b2fbd8ddc0 Mon Sep 17 00:00:00 2001
From: fengjiayi <fengjiayi@baidu.com>
Date: Mon, 9 Oct 2017 10:28:26 -0700
Subject: [PATCH 35/82] Fix bug

---
 paddle/framework/block_desc.cc | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/paddle/framework/block_desc.cc b/paddle/framework/block_desc.cc
index 01f50e1393..509aa235d3 100644
--- a/paddle/framework/block_desc.cc
+++ b/paddle/framework/block_desc.cc
@@ -74,6 +74,12 @@ void BlockDescBind::Sync() {
     for (auto &op_desc : ops_) {
       op_field.AddAllocated(op_desc->Proto());
     }
+    auto &var_field = *this->desc_->mutable_vars();
+    var_field.Clear();
+    var_field.Reserve(static_cast<int>(vars_.size()));
+    for (auto &var_desc : vars_) {
+      var_field.AddAllocated(var_desc.second->Proto());
+    }
     need_update_ = false;
   }
 }

From 3c39df197e2fbb0e8666bd8bb20e2a60e5a47d9b Mon Sep 17 00:00:00 2001
From: Yu Yang <yuyang18@baidu.com>
Date: Mon, 9 Oct 2017 10:30:20 -0700
Subject: [PATCH 36/82] Init Python API

Following the design
* https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/python_api.md

So far only `Program`, `Block`, and a unittest of `Program` are written.
---
 python/paddle/v2/framework/graph.py           | 45 +++++++++++++++++++
 .../paddle/v2/framework/tests/test_program.py | 36 +++++++++++++++
 2 files changed, 81 insertions(+)
 create mode 100644 python/paddle/v2/framework/graph.py
 create mode 100644 python/paddle/v2/framework/tests/test_program.py

diff --git a/python/paddle/v2/framework/graph.py b/python/paddle/v2/framework/graph.py
new file mode 100644
index 0000000000..5211b0f166
--- /dev/null
+++ b/python/paddle/v2/framework/graph.py
@@ -0,0 +1,45 @@
+import paddle.v2.framework.core as core
+
+
+class Block(object):
+    def __init__(self, program, idx):
+        self.proto = program.proto.block(idx)
+        self.vars = dict()  # var_name --> var
+        self.ops = list()  # operator list
+        self.program = program
+
+    @property
+    def parent_idx(self):
+        return self.proto.parent
+
+    @property
+    def idx(self):
+        return self.proto.id
+
+
+class Program(object):
+    def __init__(self):
+        self.proto = core.ProgramDesc.instance()
+        assert self.proto.num_blocks() == 1
+        self.blocks = [Block(self, 0)]
+        self.current_block_idx = 0
+
+    def global_block(self):
+        return self.blocks[0]
+
+    def current_block(self):
+        return self.blocks[self.current_block_idx]
+
+    def create_block(self):
+        new_block_idx = len(self.blocks)
+        self.proto.append_block(self.current_block().proto)
+        self.current_block_idx = new_block_idx
+        self.blocks.append(Block(self, self.current_block_idx))
+        return self.current_block()
+
+    def rollback(self):
+        self.current_block_idx = self.current_block().parent_idx
+
+
+# program is a global instance.
+g_program = Program()
diff --git a/python/paddle/v2/framework/tests/test_program.py b/python/paddle/v2/framework/tests/test_program.py
new file mode 100644
index 0000000000..b82d1760d6
--- /dev/null
+++ b/python/paddle/v2/framework/tests/test_program.py
@@ -0,0 +1,36 @@
+import unittest
+from paddle.v2.framework.graph import g_program
+
+
+class TestProgram(unittest.TestCase):
+    def test_program(self):
+        b = g_program.current_block()
+        self.assertEqual(-1, b.parent_idx)
+        self.assertEqual(0, b.idx)
+
+        b = g_program.create_block()
+        self.assertEqual(1, b.idx)
+        self.assertEqual(0, b.parent_idx)
+
+        b = g_program.create_block()
+        self.assertEqual(2, b.idx)
+        self.assertEqual(1, b.parent_idx)
+
+        g_program.rollback()
+
+        b = g_program.current_block()
+        self.assertEqual(1, b.idx)
+        self.assertEqual(0, b.parent_idx)
+
+        b = g_program.create_block()
+        self.assertEqual(3, b.idx)
+        self.assertEqual(1, b.parent_idx)
+
+        g_program.rollback()
+        b = g_program.current_block()
+        self.assertEqual(1, b.idx)
+        self.assertEqual(0, b.parent_idx)
+
+
+if __name__ == '__main__':
+    unittest.main()

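In this design, create_block appends a child block whose parent_idx is the previous current block, and rollback pops back to that parent, which is exactly what the unittest exercises. A self-contained mock of the same bookkeeping, without the core.ProgramDesc dependency (MockBlock and MockProgram are illustrative names):

    class MockBlock(object):
        def __init__(self, idx, parent_idx):
            self.idx = idx
            self.parent_idx = parent_idx

    class MockProgram(object):
        def __init__(self):
            self.blocks = [MockBlock(0, -1)]  # global block has no parent
            self.current_block_idx = 0

        def current_block(self):
            return self.blocks[self.current_block_idx]

        def create_block(self):
            new_idx = len(self.blocks)
            self.blocks.append(MockBlock(new_idx, self.current_block_idx))
            self.current_block_idx = new_idx
            return self.current_block()

        def rollback(self):
            self.current_block_idx = self.current_block().parent_idx

    p = MockProgram()
    b1 = p.create_block()  # idx 1, parent 0
    b2 = p.create_block()  # idx 2, parent 1
    p.rollback()           # back to block 1
    assert p.current_block().idx == 1
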
From ee545e47ccfa79a793bb0c7adabe6f0e852afc13 Mon Sep 17 00:00:00 2001
From: zchen0211 <chenzhuoyuan07@gmail.com>
Date: Mon, 9 Oct 2017 10:34:59 -0700
Subject: [PATCH 37/82] gan api

---
 doc/design/gan_api.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/doc/design/gan_api.md b/doc/design/gan_api.md
index d0f8b47ca3..0db18f92a7 100644
--- a/doc/design/gan_api.md
+++ b/doc/design/gan_api.md
@@ -15,6 +15,7 @@ In our GAN design, we wrap it as a user-friendly easily customized python API to
 | batch norm (missing)      | Zhuoyuan, Jiayi   | Y        |
 | cudnn batch norm (missing)| Zhuoyuan, Jiayi   | N        |
 | max-pooling (done)        | ?                 | Y        |
+| cudnn-max-pool (missing)  | Chengduo          | Y        |
 | fc (done)                 | ?                 | Y        |
 | softmax loss (done)       | ?                 | Y        |
 | reshape op (done)         | ?                 | Y        |

From bedcf074a2c497afeb057cb8a1ecfaa3eb39a7dd Mon Sep 17 00:00:00 2001
From: Yu Yang <yuyang18@baidu.com>
Date: Mon, 9 Oct 2017 10:37:31 -0700
Subject: [PATCH 38/82] Implementation singleton

---
 python/paddle/v2/framework/graph.py | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/python/paddle/v2/framework/graph.py b/python/paddle/v2/framework/graph.py
index 5211b0f166..7468279438 100644
--- a/python/paddle/v2/framework/graph.py
+++ b/python/paddle/v2/framework/graph.py
@@ -18,9 +18,18 @@ class Block(object):
 
 
 class Program(object):
+    @classmethod
+    def instance(cls):
+        # From https://stackoverflow.com/questions/8212053
+        # Make Program a singleton class.
+        if not hasattr(cls, '_instance'):
+            cls._instance = cls()
+        return cls._instance
+
     def __init__(self):
+        assert not hasattr(self.__class__,
+                           '_instance'), 'Do not call constructor directly!'
         self.proto = core.ProgramDesc.instance()
-        assert self.proto.num_blocks() == 1
         self.blocks = [Block(self, 0)]
         self.current_block_idx = 0
 
@@ -42,4 +51,4 @@ class Program(object):
 
 
 # program is a global instance.
-g_program = Program()
+g_program = Program.instance()
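
The classmethod-plus-assert idiom above deserves a standalone illustration. The sketch below is a minimal reconstruction of the same singleton pattern (the class name `Singleton` is illustrative, not part of the Paddle API): `instance()` lazily constructs and caches the object on the class, and the assertion rejects direct construction once the cached instance exists.

```python
class Singleton(object):
    @classmethod
    def instance(cls):
        # Lazily create and cache the single instance on the class itself.
        if not hasattr(cls, '_instance'):
            cls._instance = cls()
        return cls._instance

    def __init__(self):
        # `_instance` is set only after the first construction finishes,
        # so this guard only rejects direct calls made afterwards.
        assert not hasattr(self.__class__,
                           '_instance'), 'Do not call constructor directly!'


a = Singleton.instance()
b = Singleton.instance()
assert a is b  # every call returns the same object
```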

From f30a1f42f0b90b17c2664d7e9a65070ee1c3a473 Mon Sep 17 00:00:00 2001
From: kavyasrinet <kavyasrinet@baidu.com>
Date: Mon, 9 Oct 2017 10:49:21 -0700
Subject: [PATCH 39/82] Adding relu6 activation function (#4607)

---
 paddle/operators/activation_op.cc             | 16 ++++++++++
 paddle/operators/activation_op.h              | 31 +++++++++++++++++++
 .../v2/framework/tests/test_activation_op.py  | 19 +++++++-----
 3 files changed, 59 insertions(+), 7 deletions(-)

diff --git a/paddle/operators/activation_op.cc b/paddle/operators/activation_op.cc
index 2afa8a68b0..43081d2326 100644
--- a/paddle/operators/activation_op.cc
+++ b/paddle/operators/activation_op.cc
@@ -201,6 +201,19 @@ class SoftReluOpMaker : public framework::OpProtoAndCheckerMaker {
   }
 };
 
+template <typename AttrType>
+class Relu6OpMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+  Relu6OpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
+      : OpProtoAndCheckerMaker(proto, op_checker) {
+    AddInput("X", "Input of Relu6 operator");
+    AddOutput("Y", "Output of Relu6 operator");
+    AddComment("Relu6 activation operator, relu6 = min(max(0, x), 6)");
+    AddAttr<AttrType>("threshold", "The threshold value of Relu6")
+        .SetDefault(static_cast<AttrType>(6));
+  }
+};
+
 template <typename AttrType>
 class PowOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
@@ -276,6 +289,9 @@ REGISTER_OP(leaky_relu, ops::ActivationOp, ops::LeakyReluOpMaker<float>,
 REGISTER_OP(soft_relu, ops::ActivationOp, ops::SoftReluOpMaker<float>,
             soft_relu_grad, ops::ActivationOpGrad);
 
+REGISTER_OP(relu6, ops::ActivationOp, ops::Relu6OpMaker<float>, relu6_grad,
+            ops::ActivationOpGrad);
+
 REGISTER_OP(pow, ops::ActivationOp, ops::PowOpMaker<float>, pow_grad,
             ops::ActivationOpGrad);
 
diff --git a/paddle/operators/activation_op.h b/paddle/operators/activation_op.h
index 2450601742..f127468125 100644
--- a/paddle/operators/activation_op.h
+++ b/paddle/operators/activation_op.h
@@ -280,6 +280,36 @@ struct BReluGradFunctor : public BaseActivationFunctor<T> {
   }
 };
 
+// relu6(x) = min(max(0, x), 6)
+template <typename T>
+struct Relu6Functor : public BaseActivationFunctor<T> {
+  float threshold;
+
+  // NOTE: This GetAttrs explicitly hides `BaseActivationFunctor<T>::GetAttrs`;
+  // we deliberately avoid virtual dispatch (polymorphism) for speed.
+  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
+    return {{"threshold", &threshold}};
+  }
+
+  template <typename Device, typename X, typename Y>
+  void operator()(Device d, X x, Y y) const {
+    y.device(d) = x.cwiseMax(static_cast<T>(0)).cwiseMin(threshold);
+  }
+};
+
+template <typename T>
+struct Relu6GradFunctor : public BaseActivationFunctor<T> {
+  float threshold;
+  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
+    return {{"threshold", &threshold}};
+  }
+  template <typename Device, typename X, typename Y, typename dY, typename dX>
+  void operator()(Device d, X x, Y y, dY dy, dX dx) const {
+    dx.device(d) =
+        dy * ((x > static_cast<T>(0)) * (x < threshold)).template cast<T>();
+  }
+};
+
 // softsign(x) = x / (1 + |x|)
 template <typename T>
 struct SoftsignFunctor : public BaseActivationFunctor<T> {
@@ -425,5 +455,6 @@ struct STanhGradFunctor : public BaseActivationFunctor<T> {
   __macro(pow, PowFunctor, PowGradFunctor);                      \
   __macro(stanh, STanhFunctor, STanhGradFunctor);                \
   __macro(softsign, SoftsignFunctor, SoftsignGradFunctor);       \
+  __macro(relu6, Relu6Functor, Relu6GradFunctor);                \
   __macro(leaky_relu, LeakyReluFunctor, LeakyReluGradFunctor);   \
   __macro(tanh_shrink, TanhShrinkFunctor, TanhShrinkGradFunctor)
diff --git a/python/paddle/v2/framework/tests/test_activation_op.py b/python/paddle/v2/framework/tests/test_activation_op.py
index 701e1a1aee..8b76decaec 100644
--- a/python/paddle/v2/framework/tests/test_activation_op.py
+++ b/python/paddle/v2/framework/tests/test_activation_op.py
@@ -137,21 +137,26 @@ class TestBRelu(OpTest):
         self.check_grad(['X'], 'Y', max_relative_error=0.02)
 
 
-class TestLeakyRelu(OpTest):
+class TestRelu6(OpTest):
     def setUp(self):
-        self.op_type = "leaky_relu"
-        alpha = 0.02
-        self.attrs = {'alpha': alpha}
-        self.inputs = {'X': np.random.uniform(-3, 3, [4, 4]).astype("float32")}
+        self.op_type = "relu6"
+        x = np.random.uniform(-1, 1, [4, 10]).astype("float32")
+        threshold = 6.0
+        # As in TestAbs, nudge x away from the non-differentiable points.
+        x[np.abs(x) < 0.005] = 0.02
+        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
+
+        self.inputs = {'X': x}
+        self.attrs = {'threshold': threshold}
         self.outputs = {
-            'Y': np.maximum(self.inputs['X'], alpha * self.inputs['X'])
+            'Y': np.minimum(np.maximum(self.inputs['X'], 0), threshold)
         }
 
     def test_check_output(self):
         self.check_output()
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.007)
+        self.check_grad(['X'], 'Y', max_relative_error=0.02)
 
 
 class TestSoftRelu(OpTest):
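
As a quick cross-check of the functors above, here is a NumPy sketch of relu6 and its gradient (illustration only; it mirrors Relu6Functor and Relu6GradFunctor with the default threshold of 6). The gradient is nonzero only strictly inside (0, threshold), which is also why the test nudges inputs away from the two kinks.

```python
import numpy as np


def relu6(x, threshold=6.0):
    # Forward: clamp to [0, threshold].
    return np.minimum(np.maximum(x, 0.0), threshold)


def relu6_grad(x, dy, threshold=6.0):
    # Gradient flows only where 0 < x < threshold; relu6 is flat elsewhere.
    mask = (x > 0.0) & (x < threshold)
    return dy * mask.astype(x.dtype)


x = np.array([-1.0, 0.5, 3.0, 7.0], dtype=np.float32)
print(relu6(x))                        # [0.  0.5 3.  6. ]
print(relu6_grad(x, np.ones_like(x)))  # [0. 1. 1. 0.]
```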

From 8f4771be226e19593e0434db2293cc6bbcbbdc69 Mon Sep 17 00:00:00 2001
From: Yu Yang <yuyang18@baidu.com>
Date: Mon, 9 Oct 2017 10:49:53 -0700
Subject: [PATCH 40/82] Add skeleton of Variable

---
 python/paddle/v2/framework/graph.py | 35 +++++++++++++++++++++++++++++
 1 file changed, 35 insertions(+)

diff --git a/python/paddle/v2/framework/graph.py b/python/paddle/v2/framework/graph.py
index 7468279438..5b93115b3e 100644
--- a/python/paddle/v2/framework/graph.py
+++ b/python/paddle/v2/framework/graph.py
@@ -1,5 +1,37 @@
 import paddle.v2.framework.core as core
 
+__all__ = ['Block', 'Variable', 'Program']
+
+
+class Variable(object):
+    def __init__(self, block, name=None, shape=None, dtype=None,
+                 lod_level=None):
+        self.block = block
+
+        if name is None:
+            name = Variable._unique_var_name_()
+        self.proto = self.block.proto.new_var(name)
+
+        if shape is not None:
+            self.proto.set_shape(shape)
+
+        if dtype is not None:
+            # TODO(yuyang18): Convert dtype from numpy.dtype
+            self.proto.set_data_type(dtype)
+
+        if lod_level is not None:
+            # TODO(yuyang18): set_lod_level is not defined.
+            self.proto.set_lod_level(lod_level)
+
+        self.block.vars[name] = self
+
+    # TODO(yuyang18): Get methods
+
+    @staticmethod
+    def _unique_var_name_():
+        uid = core.unique_integer()  # unique during whole process.
+        return "_generated_var_%d" % uid
+
 
 class Block(object):
     def __init__(self, program, idx):
@@ -16,6 +48,9 @@ class Block(object):
     def idx(self):
         return self.proto.id
 
+    def create_var(self, *args, **kwargs):
+        return Variable(self, *args, **kwargs)
+
 
 class Program(object):
     @classmethod

From 4cb5bd90218082998f990d0977f05acef8da61e7 Mon Sep 17 00:00:00 2001
From: Abhinav Arora <abhinavarora28@gmail.com>
Date: Mon, 9 Oct 2017 10:56:56 -0700
Subject: [PATCH 41/82] Implementing the Adamax optimizer operator (#4538)

* Implementing the Adamax optimizer step operator
* Adding unit tests for adamax_op

* Changing learning rate and time step to inputs from attributes

* Changing learning rate and time step to input(tensors)

* Making the Adamax operator conform to naming convention

* Removing Tensor<float> from comments

* Rectifying the Adamax implementation

* Changing Unit Test values and adding comments

* Changing Unit Test to test multiple steps
---
 paddle/operators/adamax_op.cc                 | 139 ++++++++++++++
 paddle/operators/adamax_op.cu                 |  20 ++
 paddle/operators/adamax_op.h                  |  72 +++++++
 .../v2/framework/tests/test_adamax_op.py      | 178 ++++++++++++++++++
 4 files changed, 409 insertions(+)
 create mode 100644 paddle/operators/adamax_op.cc
 create mode 100644 paddle/operators/adamax_op.cu
 create mode 100644 paddle/operators/adamax_op.h
 create mode 100644 python/paddle/v2/framework/tests/test_adamax_op.py

diff --git a/paddle/operators/adamax_op.cc b/paddle/operators/adamax_op.cc
new file mode 100644
index 0000000000..c348e0a0b2
--- /dev/null
+++ b/paddle/operators/adamax_op.cc
@@ -0,0 +1,139 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/operators/adamax_op.h"
+
+namespace paddle {
+namespace operators {
+
+class AdamaxOp : public framework::OperatorWithKernel {
+ public:
+  using framework::OperatorWithKernel::OperatorWithKernel;
+
+ protected:
+  void InferShape(framework::InferShapeContextBase *ctx) const override {
+    PADDLE_ENFORCE(ctx->HasInput("Param"),
+                   "Input(Param) of AdamaxOp should not be null.");
+    PADDLE_ENFORCE(ctx->HasInput("Grad"),
+                   "Input(Grad) of AdamaxOp should not be null.");
+    PADDLE_ENFORCE(ctx->HasInput("Moment"),
+                   "Input(Moment) of AdamaxOp should not be null.");
+    PADDLE_ENFORCE(ctx->HasInput("InfNorm"),
+                   "Input(InfNorm) of AdamaxOp should not be null.");
+    PADDLE_ENFORCE(ctx->HasInput("LearningRate"),
+                   "Input(LearningRate) of AdamaxOp should not be null.");
+    PADDLE_ENFORCE(ctx->HasInput("Beta1Pow"),
+                   "Input(Beta1Pow) of AdamaxOp should not be null.");
+
+    PADDLE_ENFORCE(ctx->HasOutput("ParamOut"),
+                   "Output(ParamOut) of AdamaxOp should not be null.");
+    PADDLE_ENFORCE(ctx->HasOutput("MomentOut"),
+                   "Output(MomentOut) of AdamaxOp should not be null.");
+    PADDLE_ENFORCE(ctx->HasOutput("InfNormOut"),
+                   "Output(InfNormOut) of AdamaxOp should not be null.");
+    PADDLE_ENFORCE(ctx->HasOutput("Beta1PowOut"),
+                   "Output(Beta1PowOut) of AdamaxOp should not be null.");
+
+    auto lr_dims = ctx->GetInputDim("LearningRate");
+    PADDLE_ENFORCE_EQ(framework::product(lr_dims), 1,
+                      "Learning rate should have 1 dimension");
+    auto beta1_pow_dims = ctx->GetInputDim("Beta1Pow");
+    PADDLE_ENFORCE_EQ(framework::product(beta1_pow_dims), 1,
+                      "Beta1 power accumulator should have 1 dimension");
+    auto param_dims = ctx->GetInputDim("Param");
+    PADDLE_ENFORCE_EQ(
+        param_dims, ctx->GetInputDim("Grad"),
+        "Param and Grad input of AdamaxOp should have same dimension");
+    PADDLE_ENFORCE_EQ(
+        param_dims, ctx->GetInputDim("Moment"),
+        "Param and Moment input of AdamaxOp should have same dimension");
+    PADDLE_ENFORCE_EQ(
+        param_dims, ctx->GetInputDim("InfNorm"),
+        "Param and InfNorm input of AdamaxOp should have same dimension");
+
+    ctx->SetOutputDim("ParamOut", param_dims);
+    ctx->SetOutputDim("MomentOut", param_dims);
+    ctx->SetOutputDim("InfNormOut", param_dims);
+    ctx->SetOutputDim("Beta1PowOut", beta1_pow_dims);
+  }
+};
+
+class AdamaxOpMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+  AdamaxOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
+      : OpProtoAndCheckerMaker(proto, op_checker) {
+    AddInput("Param", "(Tensor) Input parameter");
+    AddInput("Grad", "(Tensor) Input gradient");
+    AddInput("LearningRate", "(Tensor) Learning rate");
+    AddInput("Moment", "(Tensor) First moment");
+    AddInput("InfNorm",
+             "(Tensor) "
+             "Input exponentially weighted infinity norm");
+    AddInput("Beta1Pow", "(Tensor) Input beta1 power accumulator");
+
+    AddOutput("ParamOut", "(Tensor) Output parameter");
+    AddOutput("MomentOut", "(Tensor) Output first moment");
+    AddOutput("InfNormOut",
+              "(Tensor) "
+              "Output exponentially weighted infinity norm");
+    AddOutput("Beta1PowOut", "(Tensor) Output beta1 power accumulator");
+
+    AddAttr<float>("beta1",
+                   "(float, default 0.9) "
+                   "Exponential decay rate for the "
+                   "1st moment estimates.")
+        .SetDefault(0.9f);
+    AddAttr<float>("beta2",
+                   "(float, default 0.999) "
+                   "exponential decay rate for the weighted "
+                   "infinity norm estimates.")
+        .SetDefault(0.999f);
+    AddAttr<float>("epsilon",
+                   "(float, default 1.0e-8) "
+                   "Constant for numerical stability")
+        .SetDefault(1.0e-8f);
+    AddComment(R"DOC(
+Adamax Updates Operator.
+
+This implements the Adamax optimizer from Section 7 of the Adam
+paper [1]. Adamax is a variant of the
+Adam algorithm based on the infinity norm.
+
+Adamax updates:
+
+moment_out = beta1 * moment + (1 - beta1) * grad
+inf_norm_out = max(beta2 * inf_norm + epsilon, abs(grad))
+beta1_pow_out = beta1_pow * beta1
+learning_rate_t = learning_rate/(1 - beta1_pow_out)
+param_out = param - learning_rate_t * moment_out/inf_norm_out
+
+The original paper does not have an epsilon attribute.
+However, one is added here for numerical stability,
+to prevent division by zero.
+
+References:
+  [1] Adam: A Method for Stochastic Optimization
+      (https://arxiv.org/abs/1412.6980)
+
+)DOC");
+  }
+};
+
+}  // namespace operators
+}  // namespace paddle
+
+namespace ops = paddle::operators;
+REGISTER_OP_WITHOUT_GRADIENT(adamax, ops::AdamaxOp, ops::AdamaxOpMaker);
+REGISTER_OP_CPU_KERNEL(adamax,
+                       ops::AdamaxOpKernel<paddle::platform::CPUPlace, float>);
diff --git a/paddle/operators/adamax_op.cu b/paddle/operators/adamax_op.cu
new file mode 100644
index 0000000000..fee3b6fc6b
--- /dev/null
+++ b/paddle/operators/adamax_op.cu
@@ -0,0 +1,20 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License. */
+
+#define EIGEN_USE_GPU
+#include "paddle/operators/adamax_op.h"
+
+namespace ops = paddle::operators;
+REGISTER_OP_GPU_KERNEL(adamax,
+                       ops::AdamaxOpKernel<paddle::platform::GPUPlace, float>);
diff --git a/paddle/operators/adamax_op.h b/paddle/operators/adamax_op.h
new file mode 100644
index 0000000000..9677b1bb78
--- /dev/null
+++ b/paddle/operators/adamax_op.h
@@ -0,0 +1,72 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+#include "paddle/framework/eigen.h"
+#include "paddle/framework/op_registry.h"
+
+namespace paddle {
+namespace operators {
+
+template <typename Place, typename T>
+class AdamaxOpKernel : public framework::OpKernel<T> {
+ public:
+  void Compute(const framework::ExecutionContext& ctx) const override {
+    auto param_out_tensor = ctx.Output<framework::Tensor>("ParamOut");
+    auto moment_out_tensor = ctx.Output<framework::Tensor>("MomentOut");
+    auto inf_norm_out_tensor = ctx.Output<framework::Tensor>("InfNormOut");
+    auto beta1_pow_out_tensor = ctx.Output<framework::Tensor>("Beta1PowOut");
+
+    param_out_tensor->mutable_data<T>(ctx.GetPlace());
+    moment_out_tensor->mutable_data<T>(ctx.GetPlace());
+    inf_norm_out_tensor->mutable_data<T>(ctx.GetPlace());
+    beta1_pow_out_tensor->mutable_data<T>(ctx.GetPlace());
+
+    float beta1 = ctx.Attr<float>("beta1");
+    float beta2 = ctx.Attr<float>("beta2");
+    float epsilon = ctx.Attr<float>("epsilon");
+
+    auto param = framework::EigenVector<T>::Flatten(
+        *ctx.Input<framework::Tensor>("Param"));
+    auto grad = framework::EigenVector<T>::Flatten(
+        *ctx.Input<framework::Tensor>("Grad"));
+    auto moment = framework::EigenVector<T>::Flatten(
+        *ctx.Input<framework::Tensor>("Moment"));
+    auto inf_norm = framework::EigenVector<T>::Flatten(
+        *ctx.Input<framework::Tensor>("InfNorm"));
+    auto lr = framework::EigenVector<T>::Flatten(
+        *ctx.Input<framework::Tensor>("LearningRate"));
+    auto beta1_pow = framework::EigenVector<T>::Flatten(
+        *ctx.Input<framework::Tensor>("Beta1Pow"));
+    auto param_out = framework::EigenVector<T>::Flatten(*param_out_tensor);
+    auto moment_out = framework::EigenVector<T>::Flatten(*moment_out_tensor);
+    auto inf_norm_out =
+        framework::EigenVector<T>::Flatten(*inf_norm_out_tensor);
+    auto beta1_pow_out =
+        framework::EigenVector<T>::Flatten(*beta1_pow_out_tensor);
+    auto place = ctx.GetEigenDevice<Place>();
+
+    moment_out.device(place) = beta1 * moment + (1 - beta1) * grad;
+    inf_norm_out.device(place) =
+        grad.abs().cwiseMax((beta2 * inf_norm) + epsilon);
+    beta1_pow_out.device(place) = beta1_pow * beta1;
+    auto lr_t = lr / (1 - beta1_pow_out);
+    Eigen::DSizes<int, 1> m_dsize(moment_out_tensor->numel());
+    param_out.device(place) =
+        param - lr_t.broadcast(m_dsize) * (moment_out / inf_norm_out);
+  }
+};
+
+}  // namespace operators
+}  // namespace paddle
diff --git a/python/paddle/v2/framework/tests/test_adamax_op.py b/python/paddle/v2/framework/tests/test_adamax_op.py
new file mode 100644
index 0000000000..af81075d6a
--- /dev/null
+++ b/python/paddle/v2/framework/tests/test_adamax_op.py
@@ -0,0 +1,178 @@
+import unittest
+import numpy as np
+from op_test import OpTest
+
+
+class TestAdamaxOp1(OpTest):
+    def setUp(self):
+        '''Test Adamax Operator with supplied attributes
+        '''
+        self.op_type = "adamax"
+        param = np.random.uniform(-1, 1, (102, 105)).astype("float32")
+        grad = np.random.uniform(-1, 1, (102, 105)).astype("float32")
+        moment = np.random.uniform(-1, 1, (102, 105)).astype("float32")
+        # The infinity norm is positive
+        inf_norm = np.random.random((102, 105)).astype("float32")
+
+        learning_rate = 0.002
+        beta1 = 0.78
+        beta2 = 0.899
+        epsilon = 1e-5
+        beta1_pow = beta1**10
+
+        self.inputs = {
+            'Param': param,
+            'Grad': grad,
+            'Moment': moment,
+            'InfNorm': inf_norm,
+            'LearningRate': np.array([learning_rate]).astype("float32"),
+            'Beta1Pow': np.array([beta1_pow]).astype("float32")
+        }
+
+        self.attrs = {'beta1': beta1, 'beta2': beta2, 'epsilon': epsilon}
+
+        param_out, moment_out, inf_norm_out, beta1_pow_out = adamax_step(
+            self.inputs, self.attrs)
+
+        self.outputs = {
+            'ParamOut': param_out,
+            'MomentOut': moment_out,
+            'InfNormOut': inf_norm_out,
+            'Beta1PowOut': beta1_pow_out
+        }
+
+    def test_check_output(self):
+        self.check_output()
+
+
+class TestAdamaxOp2(OpTest):
+    '''Test Adamax Operator with default attributes
+    '''
+
+    def setUp(self):
+        self.op_type = "adamax"
+        param = np.random.uniform(-1, 1, (102, 105)).astype("float32")
+        grad = np.random.uniform(-1, 1, (102, 105)).astype("float32")
+        moment = np.random.uniform(-1, 1, (102, 105)).astype("float32")
+        # The infinity norm is positive
+        inf_norm = np.random.random((102, 105)).astype("float32")
+
+        learning_rate = 0.002
+        beta1 = 0.9
+        beta2 = 0.999
+        epsilon = 1e-8
+        beta1_pow = beta1**8
+
+        self.inputs = {
+            'Param': param,
+            'Grad': grad,
+            'Moment': moment,
+            'InfNorm': inf_norm,
+            'LearningRate': np.array([learning_rate]).astype("float32"),
+            'Beta1Pow': np.array([beta1_pow]).astype("float32")
+        }
+
+        attrs = {'beta1': beta1, 'beta2': beta2, 'epsilon': epsilon}
+        param_out, moment_out, inf_norm_out, beta1_pow_out = adamax_step(
+            self.inputs, attrs)
+
+        self.outputs = {
+            'ParamOut': param_out,
+            'MomentOut': moment_out,
+            'InfNormOut': inf_norm_out,
+            'Beta1PowOut': beta1_pow_out
+        }
+
+    def test_check_output(self):
+        self.check_output()
+
+
+class TestAdamaxOpMultipleSteps(OpTest):
+    def setUp(self):
+        '''Test Adamax Operator with supplied attributes
+        '''
+        self.op_type = "adamax"
+        self.num_steps = 10
+
+        param = np.random.uniform(-1, 1, (102, 105)).astype("float32")
+        grad = np.random.uniform(-1, 1, (102, 105)).astype("float32")
+        moment = np.random.uniform(-1, 1, (102, 105)).astype("float32")
+        # The infinity norm is positive
+        inf_norm = np.random.random((102, 105)).astype("float32")
+
+        learning_rate = 0.002
+        beta1 = 0.8
+        beta2 = 0.99
+        epsilon = 1e-5
+        beta1_pow = 1
+
+        self.inputs = {
+            'Param': param,
+            'Grad': grad,
+            'Moment': moment,
+            'InfNorm': inf_norm,
+            'LearningRate': np.array([learning_rate]).astype("float32"),
+            'Beta1Pow': np.array([beta1_pow]).astype("float32")
+        }
+
+        self.attrs = {'beta1': beta1, 'beta2': beta2, 'epsilon': epsilon}
+
+        # Outputs are computed step by step in test_check_output below.
+
+    def test_check_output(self):
+        for _ in range(self.num_steps):
+            param_out, moment_out, inf_norm_out, beta1_pow_out = adamax_step(
+                self.inputs, self.attrs)
+
+            self.outputs = {
+                'ParamOut': param_out,
+                'MomentOut': moment_out,
+                'InfNormOut': inf_norm_out,
+                'Beta1PowOut': beta1_pow_out
+            }
+
+            # Verify output for this step
+            self.check_output()
+
+            # Output of this step becomes input for next step
+            self.inputs['Param'] = param_out
+            self.inputs['Moment'] = moment_out
+            self.inputs['InfNorm'] = inf_norm_out
+            self.inputs['Beta1Pow'] = beta1_pow_out
+
+            # Randomize gradient for next step
+            self.inputs['Grad'] = np.random.uniform(
+                -1, 1, (102, 105)).astype("float32")
+
+
+def adamax_step(inputs, attributes):
+    '''
+    Simulate one step of the adamax optimizer
+    :param inputs: dict of inputs
+    :param attributes: dict of attributes
+    :return tuple: tuple of output param, moment, inf_norm and
+    beta1 power accumulator
+    '''
+    param = inputs['Param']
+    grad = inputs['Grad']
+    moment = inputs['Moment']
+    inf_norm = inputs['InfNorm']
+    lr = inputs['LearningRate']
+    beta1_pow = inputs['Beta1Pow']
+
+    beta1 = attributes['beta1']
+    beta2 = attributes['beta2']
+    epsilon = attributes['epsilon']
+
+    moment_out = beta1 * moment + (1 - beta1) * grad
+    inf_norm_out = np.maximum(beta2 * inf_norm + epsilon, np.abs(grad))
+    beta1_pow_out = beta1_pow * beta1
+    lr_t = (lr / (1 - beta1_pow_out))
+    param_out = param - lr_t * np.divide(moment_out, inf_norm_out)
+
+    return param_out, moment_out, inf_norm_out, beta1_pow_out
+
+
+if __name__ == "__main__":
+    unittest.main()
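
For a concrete feel of the update rules, the snippet below pushes a single scalar through the `adamax_step` helper defined in the test file above (assumed to be in scope; the numbers are illustrative and not taken from the unit tests).

```python
import numpy as np

inputs = {
    'Param': np.array([1.0]),
    'Grad': np.array([0.5]),
    'Moment': np.array([0.0]),
    'InfNorm': np.array([0.0]),
    'LearningRate': np.array([0.002]),
    'Beta1Pow': np.array([0.9]),  # beta1**t accumulated before this step
}
attrs = {'beta1': 0.9, 'beta2': 0.999, 'epsilon': 1e-8}

param_out, moment_out, inf_norm_out, beta1_pow_out = adamax_step(inputs, attrs)
# moment_out    = 0.9*0 + 0.1*0.5            = 0.05
# inf_norm_out  = max(0.999*0 + 1e-8, |0.5|) = 0.5
# beta1_pow_out = 0.9*0.9                    = 0.81
# lr_t          = 0.002 / (1 - 0.81)         ~ 0.010526
# param_out     = 1 - lr_t * 0.05/0.5        ~ 0.998947
```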

From 61a5181e31a073a2b23cc76028fc24119d4970c7 Mon Sep 17 00:00:00 2001
From: Yu Yang <yuyang18@baidu.com>
Date: Mon, 9 Oct 2017 11:05:25 -0700
Subject: [PATCH 42/82] Add skeleton of Operator

---
 python/paddle/v2/framework/graph.py | 44 +++++++++++++++++++++++++++--
 1 file changed, 42 insertions(+), 2 deletions(-)

diff --git a/python/paddle/v2/framework/graph.py b/python/paddle/v2/framework/graph.py
index 5b93115b3e..6f2a76a983 100644
--- a/python/paddle/v2/framework/graph.py
+++ b/python/paddle/v2/framework/graph.py
@@ -1,6 +1,7 @@
 import paddle.v2.framework.core as core
+import collections
 
-__all__ = ['Block', 'Variable', 'Program']
+__all__ = ['Block', 'Variable', 'Program', 'Operator']
 
 
 class Variable(object):
@@ -24,6 +25,7 @@ class Variable(object):
             self.proto.set_lod_level(lod_level)
 
         self.block.vars[name] = self
+        self.op = None
 
     # TODO(yuyang18): Get methods
 
@@ -33,11 +35,37 @@ class Variable(object):
         return "_generated_var_%d" % uid
 
 
+class Operator(object):
+    def __init__(self,
+                 block,
+                 proto,
+                 type=None,
+                 inputs=None,
+                 outputs=None,
+                 attrs=None):
+        self.block = block
+        self.proto = proto
+        if type is not None:
+            # TODO.
+            pass
+        if inputs is not None:
+            # TODO
+            pass
+        if outputs is not None:
+            # TODO
+            pass
+        if attrs is not None:
+            # TODO
+            pass
+
+        # TODO: Getters
+
+
 class Block(object):
     def __init__(self, program, idx):
         self.proto = program.proto.block(idx)
         self.vars = dict()  # var_name --> var
-        self.ops = list()  # operator list
+        self.ops = collections.deque()  # operator list
         self.program = program
 
     @property
@@ -51,6 +79,18 @@ class Block(object):
     def create_var(self, *args, **kwargs):
         return Variable(self, *args, **kwargs)
 
+    def append_op(self, *args, **kwargs):
+        op_proto = self.proto.append_op()
+        op = Operator(self, op_proto, *args, **kwargs)
+        self.ops.append(op)
+        return op
+
+    def prepend_op(self, *args, **kwargs):
+        op_proto = self.proto.prepend_op()
+        op = Operator(self, op_proto, *args, **kwargs)
+        self.ops.appendleft(op)
+        return op
+
 
 class Program(object):
     @classmethod
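
The switch from `list` to `collections.deque` above is motivated by `prepend_op`: `appendleft` is O(1) on a deque, whereas `list.insert(0, x)` is O(n). A minimal stand-in (no protos; strings instead of Operator objects) showing the resulting ordering:

```python
import collections

ops = collections.deque()
ops.append('op_b')      # append_op  -> goes to the back
ops.append('op_c')
ops.appendleft('op_a')  # prepend_op -> goes to the front in O(1)
print(list(ops))        # ['op_a', 'op_b', 'op_c']
```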

From c464ec21d8b0a1e7ad6da7115b78cd047d9a2041 Mon Sep 17 00:00:00 2001
From: Yu Yang <yuyang18@baidu.com>
Date: Mon, 9 Oct 2017 12:09:39 -0700
Subject: [PATCH 43/82] Fix bug of forward default attribute not passed to
 backward

---
 paddle/framework/backward.cc    |  2 +-
 paddle/framework/op_desc.h      |  5 +++++
 paddle/framework/op_registry.cc | 11 ++++++++---
 paddle/framework/op_registry.h  |  2 +-
 4 files changed, 15 insertions(+), 5 deletions(-)

diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc
index c970e01dd1..0a4688db9c 100644
--- a/paddle/framework/backward.cc
+++ b/paddle/framework/backward.cc
@@ -302,7 +302,7 @@ std::vector<std::unique_ptr<OpDescBind>> MakeOpGrad(
     return grad_op_descs;  // empty vector
   }
 
-  grad_op_descs = OpRegistry::CreateGradOpDescs(*op_desc);
+  grad_op_descs = OpRegistry::CreateGradOpDescs(op_desc.get());
 
   std::list<std::unique_ptr<OpDescBind>> pending_fill_zeros_ops;
   for (auto& desc : grad_op_descs) {
diff --git a/paddle/framework/op_desc.h b/paddle/framework/op_desc.h
index b39808dad1..b729029412 100644
--- a/paddle/framework/op_desc.h
+++ b/paddle/framework/op_desc.h
@@ -97,6 +97,11 @@ class OpDescBind {
 
   const VariableNameMap &Outputs() const { return outputs_; }
 
+  AttributeMap *MutableAttrMap() {
+    this->need_update_ = true;
+    return &this->attrs_;
+  }
+
  private:
   template <typename MapType>
   static std::vector<typename MapType::key_type> MapKeys(const MapType &map) {
diff --git a/paddle/framework/op_registry.cc b/paddle/framework/op_registry.cc
index 66043f6e04..b118edae17 100644
--- a/paddle/framework/op_registry.cc
+++ b/paddle/framework/op_registry.cc
@@ -60,9 +60,14 @@ std::unique_ptr<OperatorBase> OpRegistry::CreateOp(const OpDescBind& op_desc) {
 }
 
 std::vector<std::unique_ptr<OpDescBind>> OpRegistry::CreateGradOpDescs(
-    const OpDescBind& op_desc) {
-  auto& info = OpInfoMap::Instance().Get(op_desc.Type());
-  return info.grad_op_maker_(op_desc);
+    OpDescBind* op_desc) {
+  auto& info = OpInfoMap::Instance().Get(op_desc->Type());
+
+  if (info.Checker() != nullptr) {
+    info.Checker()->Check(*op_desc->MutableAttrMap());
+  }
+
+  return info.grad_op_maker_(*op_desc);
 }
 
 }  // namespace framework
diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h
index cce3605fd4..5ca3af52a6 100644
--- a/paddle/framework/op_registry.h
+++ b/paddle/framework/op_registry.h
@@ -80,7 +80,7 @@ class OpRegistry {
   static std::unique_ptr<OperatorBase> CreateOp(const OpDesc& op_desc);
 
   static std::vector<std::unique_ptr<OpDescBind>> CreateGradOpDescs(
-      const OpDescBind& op_desc);
+      OpDescBind* op_desc);
 
   static std::unique_ptr<OperatorBase> CreateOp(const OpDescBind& op_desc);
 };
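
A hedged Python sketch of what the fix above accomplishes: the attribute checker is run against the forward op's mutable attribute map, materializing default values in place, so the generated grad op sees fully populated attributes. The names `create_grad_op_descs`, `attr_defaults`, and `make_grad_op` here are hypothetical stand-ins, not Paddle APIs.

```python
def create_grad_op_descs(op_attrs, attr_defaults, make_grad_op):
    # Analogue of info.Checker()->Check(*op_desc->MutableAttrMap()):
    # fill in missing attributes with their declared defaults, in place.
    for name, default in attr_defaults.items():
        op_attrs.setdefault(name, default)
    return make_grad_op(op_attrs)


attrs = {}  # forward op created without explicit attrs
grad = create_grad_op_descs(attrs, {'x_num_col_dims': 1}, dict)
assert grad['x_num_col_dims'] == 1   # grad op sees the default value...
assert attrs['x_num_col_dims'] == 1  # ...because the forward op was updated
```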

From 49ca0b4831037a3faa955ecf45ebedbee21ea833 Mon Sep 17 00:00:00 2001
From: Yu Yang <yuyang18@baidu.com>
Date: Mon, 9 Oct 2017 13:10:04 -0700
Subject: [PATCH 44/82] Stash

---
 paddle/framework/var_desc.cc | 8 ++++++++
 paddle/framework/var_desc.h  | 4 ++++
 paddle/pybind/protobuf.cc    | 4 +++-
 3 files changed, 15 insertions(+), 1 deletion(-)

diff --git a/paddle/framework/var_desc.cc b/paddle/framework/var_desc.cc
index 13b9c5f3cd..a88e813b5e 100644
--- a/paddle/framework/var_desc.cc
+++ b/paddle/framework/var_desc.cc
@@ -32,5 +32,13 @@ std::vector<int64_t> VarDescBind::Shape() const {
 DataType VarDescBind::GetDataType() const {
   return desc_.lod_tensor().data_type();
 }
+
+void VarDescBind::SetLoDLevel(int32_t lod_level) {
+  desc_.mutable_lod_tensor()->set_lod_level(lod_level);
+}
+
+int32_t VarDescBind::GetLodLevel() const {
+  return desc_.lod_tensor().lod_level();
+}
 }  // namespace framework
 }  // namespace paddle
diff --git a/paddle/framework/var_desc.h b/paddle/framework/var_desc.h
index 4763bf09d0..464fece85f 100644
--- a/paddle/framework/var_desc.h
+++ b/paddle/framework/var_desc.h
@@ -66,6 +66,10 @@ class VarDescBind {
 
   DataType GetDataType() const;
 
+  void SetLoDLevel(int32_t lod_level);
+
+  int32_t GetLodLevel() const;
+
  private:
   VarDesc desc_;
 };
diff --git a/paddle/pybind/protobuf.cc b/paddle/pybind/protobuf.cc
index 218821b35b..7e18bf1c07 100644
--- a/paddle/pybind/protobuf.cc
+++ b/paddle/pybind/protobuf.cc
@@ -167,7 +167,9 @@ void BindVarDsec(py::module &m) {
       .def("set_shape", &VarDescBind::SetShape)
       .def("set_data_type", &VarDescBind::SetDataType)
       .def("shape", &VarDescBind::Shape, py::return_value_policy::reference)
-      .def("data_type", &VarDescBind::GetDataType);
+      .def("data_type", &VarDescBind::GetDataType)
+      .def("lod_level", &VarDescBind::GetLodLevel)
+      .def("set_lod_level", &VarDescBind::SetLoDLevel);
 }
 
 void BindOpDesc(py::module &m) {
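
For readers unfamiliar with LoD ("level of detail"), the `lod_level` exposed above records how deeply a tensor nests variable-length sequences. A common representation, sketched here purely as an illustration (Paddle's internal layout may differ), keeps flat data plus one offset table per nesting level; `lod_level` is the number of such tables.

```python
# Two sentences of lengths 3 and 2, stored flat with one offset table,
# i.e. lod_level = 1. lod_level = 0 would mean a plain dense tensor.
data = [1, 2, 3, 4, 5]
lod = [[0, 3, 5]]  # sentence i spans data[lod[0][i]:lod[0][i+1]]


def sequences(flat, offsets):
    # Recover the variable-length sequences from the offsets.
    return [flat[b:e] for b, e in zip(offsets, offsets[1:])]


print(sequences(data, lod[0]))  # [[1, 2, 3], [4, 5]]
```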

From 92add2a29b4d0e5e5d4abe44d86e3a7c7af23645 Mon Sep 17 00:00:00 2001
From: Yu Yang <yuyang18@baidu.com>
Date: Mon, 9 Oct 2017 13:20:08 -0700
Subject: [PATCH 45/82] Fix compile error in develop branch

---
 paddle/operators/adamax_op.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/paddle/operators/adamax_op.cc b/paddle/operators/adamax_op.cc
index c348e0a0b2..5cf727742c 100644
--- a/paddle/operators/adamax_op.cc
+++ b/paddle/operators/adamax_op.cc
@@ -22,7 +22,7 @@ class AdamaxOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase *ctx) const override {
+  void InferShape(framework::InferShapeContext *ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("Param"),
                    "Input(Param) of AdamaxOp should not be null.");
     PADDLE_ENFORCE(ctx->HasInput("Grad"),

From 1e41a675d4111a826ffac45cbd197054d193d72e Mon Sep 17 00:00:00 2001
From: Yu Yang <yuyang18@baidu.com>
Date: Mon, 9 Oct 2017 13:39:38 -0700
Subject: [PATCH 46/82] Convert np.dtype to core.DataType

---
 python/paddle/v2/framework/graph.py           | 25 +++++++++++++++++--
 .../v2/framework/tests/test_variable.py       | 22 ++++++++++++++++
 2 files changed, 45 insertions(+), 2 deletions(-)
 create mode 100644 python/paddle/v2/framework/tests/test_variable.py

diff --git a/python/paddle/v2/framework/graph.py b/python/paddle/v2/framework/graph.py
index 6f2a76a983..a7a3ca62c7 100644
--- a/python/paddle/v2/framework/graph.py
+++ b/python/paddle/v2/framework/graph.py
@@ -1,5 +1,6 @@
 import paddle.v2.framework.core as core
 import collections
+import numpy as np
 
 __all__ = ['Block', 'Variable', 'Program', 'Operator']
 
@@ -17,11 +18,11 @@ class Variable(object):
             self.proto.set_shape(shape)
 
         if dtype is not None:
-            # TODO(yuyang18): Convert dtype from numpy.dtype
+            if not isinstance(dtype, core.DataType):
+                dtype = Variable._convert_np_dtype_to_dtype_(dtype)
             self.proto.set_data_type(dtype)
 
         if lod_level is not None:
-            # TODO(yuyang18): set_lod_level is not defined.
             self.proto.set_lod_level(lod_level)
 
         self.block.vars[name] = self
@@ -34,6 +35,26 @@ class Variable(object):
         uid = core.unique_integer()  # unique during whole process.
         return "_generated_var_%d" % uid
 
+    @staticmethod
+    def _convert_np_dtype_to_dtype_(np_dtype):
+        dtype = np.dtype(np_dtype)
+        if dtype == np.float32:
+            return core.DataType.FP32
+        elif dtype == np.float64:
+            return core.DataType.FP64
+        elif dtype == np.float16:
+            return core.DataType.FP16
+        elif dtype == np.int32:
+            return core.DataType.INT32
+        elif dtype == np.int16:
+            return core.DataType.INT16
+        elif dtype == np.int64:
+            return core.DataType.INT64
+        elif dtype == np.bool:
+            return core.DataType.BOOL
+        else:
+            raise ValueError("Not supported numpy dtype " + str(dtype))
+
 
 class Operator(object):
     def __init__(self,
diff --git a/python/paddle/v2/framework/tests/test_variable.py b/python/paddle/v2/framework/tests/test_variable.py
new file mode 100644
index 0000000000..dd23eac0cd
--- /dev/null
+++ b/python/paddle/v2/framework/tests/test_variable.py
@@ -0,0 +1,22 @@
+import unittest
+from paddle.v2.framework.graph import Variable
+import paddle.v2.framework.core as core
+import numpy as np
+
+
+class TestVariable(unittest.TestCase):
+    def test_np_dtype_convert(self):
+        DT = core.DataType
+        convert = Variable._convert_np_dtype_to_dtype_
+        self.assertEqual(DT.FP32, convert(np.float32))
+        self.assertEqual(DT.FP16, convert("float16"))
+        self.assertEqual(DT.FP64, convert("float64"))
+        self.assertEqual(DT.INT32, convert("int32"))
+        self.assertEqual(DT.INT16, convert("int16"))
+        self.assertEqual(DT.INT64, convert("int64"))
+        self.assertEqual(DT.BOOL, convert("bool"))
+        self.assertRaises(ValueError, lambda: convert("int8"))
+
+
+if __name__ == '__main__':
+    unittest.main()

From 569616b329db71bfc4739021d55e0a74179732e2 Mon Sep 17 00:00:00 2001
From: Yu Yang <yuyang18@baidu.com>
Date: Mon, 9 Oct 2017 14:04:36 -0700
Subject: [PATCH 47/82] Complete Variable for Python API

---
 python/paddle/v2/framework/graph.py           | 59 ++++++++++++++++---
 .../v2/framework/tests/test_variable.py       | 20 ++++++-
 2 files changed, 71 insertions(+), 8 deletions(-)

diff --git a/python/paddle/v2/framework/graph.py b/python/paddle/v2/framework/graph.py
index a7a3ca62c7..a66e7a9d73 100644
--- a/python/paddle/v2/framework/graph.py
+++ b/python/paddle/v2/framework/graph.py
@@ -12,23 +12,68 @@ class Variable(object):
 
         if name is None:
             name = Variable._unique_var_name_()
-        self.proto = self.block.proto.new_var(name)
+        try:
+            self.proto = self.block.proto.var(name)
+            is_new_var = False
+        except core.EnforceNotMet:
+            self.proto = self.block.proto.new_var(name)
+            is_new_var = True
 
         if shape is not None:
-            self.proto.set_shape(shape)
-
+            if is_new_var:
+                self.proto.set_shape(shape)
+            else:
+                old_shape = self.shape
+                shape = tuple(shape)
+                if shape != old_shape:
+                    raise ValueError(
+                        "Variable {0} has been created before. the previous "
+                        "shape is {1}; the new shape is {2}. They are not "
+                        "matched.".format(self.name, old_shape, shape))
         if dtype is not None:
             if not isinstance(dtype, core.DataType):
                 dtype = Variable._convert_np_dtype_to_dtype_(dtype)
-            self.proto.set_data_type(dtype)
+            if is_new_var:
+                self.proto.set_data_type(dtype)
+            else:
+                old_dtype = self.data_type
+                if dtype != old_dtype:
+                    raise ValueError("Variable {0} has been created before. "
+                                     "The previous data type is {1}; the new "
+                                     "data type is {2}. They do not "
+                                     "match.".format(self.name, old_dtype,
+                                                       dtype))
 
         if lod_level is not None:
-            self.proto.set_lod_level(lod_level)
+            if is_new_var:
+                self.proto.set_lod_level(lod_level)
+            else:
+                if lod_level != self.lod_level:
+                    raise ValueError("Variable {0} has been created before. "
+                                     "The previous lod_level is {1}; the new "
+                                     "lod_level is {2}. They are not "
+                                     "matched".format(self.name, self.lod_level,
+                                                      lod_level))
 
         self.block.vars[name] = self
         self.op = None
 
-    # TODO(yuyang18): Get methods
+    @property
+    def name(self):
+        return self.proto.name()
+
+    @property
+    def shape(self):
+        # Convert to a tuple so it matches the numpy API.
+        return tuple(self.proto.shape())
+
+    @property
+    def data_type(self):
+        return self.proto.data_type()
+
+    @property
+    def lod_level(self):
+        return self.proto.lod_level()
 
     @staticmethod
     def _unique_var_name_():
@@ -79,7 +124,7 @@ class Operator(object):
             # TODO
             pass
 
-        # TODO: Getters
+            # TODO: Getters
 
 
 class Block(object):
diff --git a/python/paddle/v2/framework/tests/test_variable.py b/python/paddle/v2/framework/tests/test_variable.py
index dd23eac0cd..8ea1083ff6 100644
--- a/python/paddle/v2/framework/tests/test_variable.py
+++ b/python/paddle/v2/framework/tests/test_variable.py
@@ -1,5 +1,5 @@
 import unittest
-from paddle.v2.framework.graph import Variable
+from paddle.v2.framework.graph import Variable, g_program
 import paddle.v2.framework.core as core
 import numpy as np
 
@@ -17,6 +17,24 @@ class TestVariable(unittest.TestCase):
         self.assertEqual(DT.BOOL, convert("bool"))
         self.assertRaises(ValueError, lambda: convert("int8"))
 
+    def test_var(self):
+        b = g_program.current_block()
+        w = b.create_var(
+            dtype="float64", shape=[784, 100], lod_level=0, name="fc.w")
+        self.assertEqual(core.DataType.FP64, w.data_type)
+        self.assertEqual((784, 100), w.shape)
+        self.assertEqual("fc.w", w.name)
+        self.assertEqual(0, w.lod_level)
+
+        w = b.create_var(name='fc.w')
+        self.assertEqual(core.DataType.FP64, w.data_type)
+        self.assertEqual((784, 100), w.shape)
+        self.assertEqual("fc.w", w.name)
+        self.assertEqual(0, w.lod_level)
+
+        self.assertRaises(ValueError,
+                          lambda: b.create_var(name="fc.w", shape=(24, 100)))
+
 
 if __name__ == '__main__':
     unittest.main()

From dcb09e932d57701b553a5308aaab5b16bf214910 Mon Sep 17 00:00:00 2001
From: Yu Yang <yuyang18@baidu.com>
Date: Mon, 9 Oct 2017 14:21:58 -0700
Subject: [PATCH 48/82] Use PROTO_LITE when refactoring Paddle

It will significantly reduce binary size. It is useful for mobile
deployment.
---
 paddle/framework/framework.proto | 1 +
 paddle/framework/op_desc.h       | 2 --
 paddle/framework/program_desc.h  | 2 --
 paddle/operators/net_op.h        | 1 +
 paddle/pybind/protobuf.cc        | 3 ---
 5 files changed, 2 insertions(+), 7 deletions(-)

diff --git a/paddle/framework/framework.proto b/paddle/framework/framework.proto
index ac2827e547..b7a63f9ba1 100644
--- a/paddle/framework/framework.proto
+++ b/paddle/framework/framework.proto
@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 
 syntax = "proto2";
+option optimize_for = LITE_RUNTIME;
 package paddle.framework;
 
 enum AttrType {
diff --git a/paddle/framework/op_desc.h b/paddle/framework/op_desc.h
index b729029412..d0c314771c 100644
--- a/paddle/framework/op_desc.h
+++ b/paddle/framework/op_desc.h
@@ -52,8 +52,6 @@ class OpDescBind {
   void SetOutput(const std::string &param_name,
                  const std::vector<std::string> &args);
 
-  std::string DebugString() { return this->Proto()->DebugString(); }
-
   bool HasAttr(const std::string &name) const {
     return attrs_.find(name) != attrs_.end();
   }
diff --git a/paddle/framework/program_desc.h b/paddle/framework/program_desc.h
index 9b34a06aef..d684b08d16 100644
--- a/paddle/framework/program_desc.h
+++ b/paddle/framework/program_desc.h
@@ -31,8 +31,6 @@ class ProgramDescBind {
 
   BlockDescBind *Block(size_t idx) { return blocks_[idx].get(); }
 
-  std::string DebugString() { return Proto()->DebugString(); }
-
   size_t Size() const { return blocks_.size(); }
 
   ProgramDesc *Proto();
diff --git a/paddle/operators/net_op.h b/paddle/operators/net_op.h
index 2388b094d2..ebeb262d96 100644
--- a/paddle/operators/net_op.h
+++ b/paddle/operators/net_op.h
@@ -14,6 +14,7 @@ limitations under the License. */
 
 #pragma once
 
+#include <set>
 #include "paddle/framework/framework.pb.h"
 #include "paddle/framework/op_registry.h"
 
diff --git a/paddle/pybind/protobuf.cc b/paddle/pybind/protobuf.cc
index 218821b35b..47bd7bc3bb 100644
--- a/paddle/pybind/protobuf.cc
+++ b/paddle/pybind/protobuf.cc
@@ -117,7 +117,6 @@ void BindProgramDesc(py::module &m) {
       .def("append_block", &ProgramDescBind::AppendBlock,
            py::return_value_policy::reference)
       .def("block", &ProgramDescBind::Block, py::return_value_policy::reference)
-      .def("__str__", &ProgramDescBind::DebugString)
       .def("num_blocks", &ProgramDescBind::Size);
 }
 
@@ -191,8 +190,6 @@ void BindOpDesc(py::module &m) {
       .def("output", &OpDescBind::Output)
       .def("output_names", &OpDescBind::OutputNames)
       .def("set_output", &OpDescBind::SetOutput)
-      .def("__str__", &OpDescBind::DebugString)
-      .def("__repr__", &OpDescBind::DebugString)
       .def("has_attr", &OpDescBind::HasAttr)
       .def("attr_type", &OpDescBind::GetAttrType)
       .def("attr_names", &OpDescBind::AttrNames)

From 383faaf71907605b92feef2ffc91f955623fa82d Mon Sep 17 00:00:00 2001
From: Abhinav Arora <abhinavarora28@gmail.com>
Date: Mon, 9 Oct 2017 14:25:38 -0700
Subject: [PATCH 49/82] Adding implementation for copying a vector to a tensor
 (#4635)

* Adding implementation for copying a vector to tensor
* Changing Tensor test to access gpu memory indirectly
---
 paddle/framework/tensor.h       | 13 +++++
 paddle/framework/tensor_impl.h  | 23 +++++++++
 paddle/framework/tensor_test.cc | 87 +++++++++++++++++++++++++++++++++
 3 files changed, 123 insertions(+)

diff --git a/paddle/framework/tensor.h b/paddle/framework/tensor.h
index 80a3f0a393..ba82127d9c 100644
--- a/paddle/framework/tensor.h
+++ b/paddle/framework/tensor.h
@@ -95,6 +95,19 @@ class Tensor {
   template <typename T>
   inline void CopyFrom(const Tensor& src, const platform::Place& dst_place);
 
+  /**
+   * @brief   Copy the content of an external vector to a tensor.
+   *
+   * @param[in] src   The external vector.
+   * @param[in] ctx   The device context contains place where to store.
+   *
+   * @note    CopyFromVector assumes that the tensor has been resized
+   *          before it is invoked.
+   */
+  template <typename T>
+  inline void CopyFromVector(const std::vector<T>& src,
+                             const platform::Place& dst_place);
+
   /**
    * @brief   Return the slice of the tensor.
    *
diff --git a/paddle/framework/tensor_impl.h b/paddle/framework/tensor_impl.h
index 379eac94f9..8ee9941982 100644
--- a/paddle/framework/tensor_impl.h
+++ b/paddle/framework/tensor_impl.h
@@ -123,6 +123,29 @@ inline void Tensor::CopyFrom(const Tensor& src,
 #endif
 }
 
+template <typename T>
+inline void Tensor::CopyFromVector(const std::vector<T>& src,
+                                   const platform::Place& dst_place) {
+  auto src_ptr = static_cast<const void*>(src.data());
+  platform::CPUPlace src_place;
+  auto dst_ptr = static_cast<void*>(mutable_data<T>(dst_place));
+  auto size = src.size() * sizeof(T);
+
+  if (platform::is_cpu_place(dst_place)) {
+    memory::Copy(boost::get<platform::CPUPlace>(dst_place), dst_ptr, src_place,
+                 src_ptr, size);
+  }
+#ifdef PADDLE_WITH_CUDA
+  else if (platform::is_gpu_place(dst_place)) {
+    memory::Copy(boost::get<platform::GPUPlace>(dst_place), dst_ptr, src_place,
+                 src_ptr, size, 0);
+  }
+  PADDLE_ENFORCE(cudaStreamSynchronize(0),
+                 "cudaStreamSynchronize failed in Tensor CopyFromVector");
+
+#endif
+}
+
 template <typename T>
 inline Tensor Tensor::Slice(const int& begin_idx, const int& end_idx) const {
   check_memory_size<T>();
diff --git a/paddle/framework/tensor_test.cc b/paddle/framework/tensor_test.cc
index 58cf0fc3cb..492eba69e1 100644
--- a/paddle/framework/tensor_test.cc
+++ b/paddle/framework/tensor_test.cc
@@ -263,6 +263,93 @@ TEST(Tensor, CopyFrom) {
 #endif
 }
 
+TEST(Tensor, CopyFromVector) {
+  using namespace paddle::framework;
+  using namespace paddle::platform;
+  {
+    std::vector<int> src_vec = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    Tensor cpu_tensor;
+
+    // Copy to CPU Tensor
+    cpu_tensor.Resize(make_ddim({3, 3}));
+    auto cpu_place = new paddle::platform::CPUPlace();
+    cpu_tensor.CopyFromVector<int>(src_vec, *cpu_place);
+
+    // Compare Tensors
+    const int* cpu_ptr = cpu_tensor.data<int>();
+    const int* src_ptr = src_vec.data();
+    ASSERT_NE(src_ptr, cpu_ptr);
+    for (size_t i = 0; i < 9; ++i) {
+      EXPECT_EQ(src_ptr[i], cpu_ptr[i]);
+    }
+
+    src_vec.erase(src_vec.begin(), src_vec.begin() + 5);
+    cpu_tensor.Resize(make_ddim({2, 2}));
+    cpu_tensor.CopyFromVector<int>(src_vec, *cpu_place);
+    cpu_ptr = cpu_tensor.data<int>();
+    src_ptr = src_vec.data();
+    ASSERT_NE(src_ptr, cpu_ptr);
+    for (size_t i = 0; i < 5; ++i) {
+      EXPECT_EQ(src_ptr[i], cpu_ptr[i]);
+    }
+
+    delete cpu_place;
+  }
+
+#ifdef PADDLE_WITH_CUDA
+  {
+    std::vector<int> src_vec = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+    Tensor cpu_tensor;
+    Tensor gpu_tensor;
+    Tensor dst_tensor;
+
+    // Copy to CPU Tensor
+    cpu_tensor.Resize(make_ddim({3, 3}));
+    auto cpu_place = new paddle::platform::CPUPlace();
+    cpu_tensor.CopyFromVector<int>(src_vec, *cpu_place);
+
+    // Copy to GPUTensor
+    gpu_tensor.Resize(make_ddim({3, 3}));
+    auto gpu_place = new paddle::platform::GPUPlace();
+    gpu_tensor.CopyFromVector<int>(src_vec, *gpu_place);
+    // Copy from GPU to CPU tensor for comparison
+    dst_tensor.CopyFrom<int>(gpu_tensor, *cpu_place);
+
+    // Compare Tensors
+    const int* src_ptr = src_vec.data();
+    const int* cpu_ptr = cpu_tensor.data<int>();
+    const int* dst_ptr = dst_tensor.data<int>();
+    ASSERT_NE(src_ptr, cpu_ptr);
+    ASSERT_NE(src_ptr, dst_ptr);
+    for (size_t i = 0; i < 9; ++i) {
+      EXPECT_EQ(src_ptr[i], cpu_ptr[i]);
+      EXPECT_EQ(src_ptr[i], dst_ptr[i]);
+    }
+
+    src_vec.erase(src_vec.begin(), src_vec.begin() + 5);
+
+    cpu_tensor.Resize(make_ddim({2, 2}));
+    cpu_tensor.CopyFromVector<int>(src_vec, *cpu_place);
+    gpu_tensor.Resize(make_ddim({2, 2}));
+    gpu_tensor.CopyFromVector<int>(src_vec, *gpu_place);
+    dst_tensor.CopyFrom<int>(gpu_tensor, *cpu_place);
+
+    src_ptr = src_vec.data();
+    cpu_ptr = cpu_tensor.data<int>();
+    dst_ptr = dst_tensor.data<int>();
+    ASSERT_NE(src_ptr, cpu_ptr);
+    ASSERT_NE(src_ptr, dst_ptr);
+    for (size_t i = 0; i < 5; ++i) {
+      EXPECT_EQ(src_ptr[i], cpu_ptr[i]);
+      EXPECT_EQ(src_ptr[i], dst_ptr[i]);
+    }
+
+    delete cpu_place;
+    delete gpu_place;
+  }
+#endif
+}
+
 TEST(Tensor, ReshapeToMatrix) {
   using namespace paddle::framework;
   using namespace paddle::platform;

From 5984cbca47a4663b47b16390fc028829dbc9f183 Mon Sep 17 00:00:00 2001
From: fengjiayi <fengjiayi@baidu.com>
Date: Mon, 9 Oct 2017 14:30:31 -0700
Subject: [PATCH 50/82] Add Attr test

---
 paddle/framework/backward_test.cc | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc
index 30225a4a99..05ebf356ba 100644
--- a/paddle/framework/backward_test.cc
+++ b/paddle/framework/backward_test.cc
@@ -440,6 +440,25 @@ TEST(Backward, simple_single_op) {
             std::vector<std::string>({f::GradVarName("b")}));
 }
 
+TEST(Backward, default_attribute) {
+  f::ProgramDesc *program_desc = GetNewProgramDesc();
+  f::ProgramDescBind &program = f::ProgramDescBind::Instance(program_desc);
+  f::BlockDescBind *block = program.Block(0);
+  f::OpDescBind *op = block->AppendOp();
+  op->SetType("mul");
+  op->SetInput("X", {"x"});
+  op->SetInput("Y", {"y"});
+  op->SetOutput("Out", {"out"});
+
+  AppendBackward(program, {});
+
+  ASSERT_EQ(block->AllOps().size(), 2UL);
+  f::OpDescBind *grad_op = block->AllOps()[1];
+  ASSERT_EQ(grad_op->Type(), "mul_grad");
+  EXPECT_EQ(boost::get<int>(grad_op->GetAttr("x_num_col_dims")), 1);
+  EXPECT_EQ(boost::get<int>(grad_op->GetAttr("y_num_col_dims")), 1);
+}
+
 TEST(Backward, simple_mult_op) {
   f::ProgramDesc *program_desc = GetNewProgramDesc();
   f::ProgramDescBind &program = f::ProgramDescBind::Instance(program_desc);

From 4238b9b95cda29618828a9a477afecb3bbed984e Mon Sep 17 00:00:00 2001
From: zchen0211 <chenzhuoyuan07@gmail.com>
Date: Mon, 9 Oct 2017 14:35:45 -0700
Subject: [PATCH 51/82] gan_api

---
 doc/design/gan_api.md | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/doc/design/gan_api.md b/doc/design/gan_api.md
index 0db18f92a7..4fcff8b70a 100644
--- a/doc/design/gan_api.md
+++ b/doc/design/gan_api.md
@@ -1,10 +1,10 @@
 # Design for GAN
 
-GAN (General Adversarial Net) is an important model for unsupervised learning and widely used in many areas. 
+GAN (Generative Adversarial Net [https://arxiv.org/abs/1406.2661]) is an important model for unsupervised learning and widely used in many areas.
 
-It contains several important machine learning concepts, including building and running subgraphs, dependency tracing, different optimizers in one executor and so forth.
+It applies several important concepts in machine learning system design, including building and running subgraphs, dependency tracing, different optimizers in one executor and so forth.
 
-In our GAN design, we wrap it as a user-friendly easily customized python API to design different models. We take the conditional DC-GAN as an example due to its good performance on image generation.
+In our GAN design, we wrap it as a user-friendly, easily customized Python API to design different models. We take the conditional DC-GAN (Unsupervised Representation Learning with Deep Convolutional Generative Adversarial Networks [https://arxiv.org/abs/1511.06434]) as an example due to its good performance on image generation.
 
 | important building blocks | People in Charge  | Required |
 |---------------------------|-------------------|----------|
@@ -51,7 +51,7 @@ Returns a 0/1 binary label.
 build the whole GAN model, define training losses for both generator and discriminator.
 
 ## Discussion on Engine Functions required to build GAN
-- Trace the ternsor and variable dependency in the engine executor. (Very critical, otherwise GAN can'be be trained correctly)
+- Trace the tensor and variable dependency in the engine executor. (Very critical; otherwise GAN cannot be trained correctly.)
 - Different optimizers responsible for optimizing different loss.
 
 To be more detailed, we introduce our design of DCGAN as follows:

From ce901b1186b671781cd86b91ce530e2be3408f37 Mon Sep 17 00:00:00 2001
From: fengjiayi <fengjiayi@baidu.com>
Date: Mon, 9 Oct 2017 15:16:34 -0700
Subject: [PATCH 52/82] Refine unit test

---
 paddle/framework/backward_test.cc | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc
index 05ebf356ba..3b7cbcd989 100644
--- a/paddle/framework/backward_test.cc
+++ b/paddle/framework/backward_test.cc
@@ -58,6 +58,8 @@ class MulOpMaker : public OpProtoAndCheckerMaker {
     AddInput("X", "A");
     AddInput("Y", "B");
     AddOutput("Out", "Out");
+    AddAttr<int>("x_num_col_dims", "").SetDefault(1).EqualGreaterThan(1);
+    AddAttr<int>("y_num_col_dims", "").SetDefault(1).EqualGreaterThan(1);
     AddComment("Mul");
   }
 };
@@ -453,6 +455,9 @@ TEST(Backward, default_attribute) {
   AppendBackward(program, {});
 
   ASSERT_EQ(block->AllOps().size(), 2UL);
+  EXPECT_EQ(boost::get<int>(op->GetAttr("x_num_col_dims")), 1);
+  EXPECT_EQ(boost::get<int>(op->GetAttr("y_num_col_dims")), 1);
+
   f::OpDescBind *grad_op = block->AllOps()[1];
   ASSERT_EQ(grad_op->Type(), "mul_grad");
   EXPECT_EQ(boost::get<int>(grad_op->GetAttr("x_num_col_dims")), 1);

From d2217ffa39f30ec719e93cd2cb6b9449467bcbc1 Mon Sep 17 00:00:00 2001
From: Yu Yang <yuyang18@baidu.com>
Date: Mon, 9 Oct 2017 16:03:39 -0700
Subject: [PATCH 53/82] Fix CI

---
 paddle/framework/block_desc.h   | 1 +
 paddle/framework/program_desc.h | 1 +
 2 files changed, 2 insertions(+)

diff --git a/paddle/framework/block_desc.h b/paddle/framework/block_desc.h
index 2de270f60e..3437e89923 100644
--- a/paddle/framework/block_desc.h
+++ b/paddle/framework/block_desc.h
@@ -15,6 +15,7 @@ limitations under the License. */
 #pragma once
 
 #include <deque>
+#include <memory>
 #include <unordered_map>
 #include <vector>
 #include "paddle/framework/op_desc.h"
diff --git a/paddle/framework/program_desc.h b/paddle/framework/program_desc.h
index d684b08d16..f29b1c54e7 100644
--- a/paddle/framework/program_desc.h
+++ b/paddle/framework/program_desc.h
@@ -14,6 +14,7 @@ limitations under the License. */
 
 #pragma once
 
+#include <memory>
 #include <vector>
 #include "paddle/framework/framework.pb.h"
 #include "paddle/platform/macros.h"

From 4767fb6719694ee400d3a6c9344aa21edde8bd36 Mon Sep 17 00:00:00 2001
From: zchen0211 <chenzhuoyuan07@gmail.com>
Date: Mon, 9 Oct 2017 16:05:14 -0700
Subject: [PATCH 54/82] gan api modified

---
 doc/design/gan_api.md | 67 ++++++++++++++++++++++++++++++++++---------
 1 file changed, 53 insertions(+), 14 deletions(-)

diff --git a/doc/design/gan_api.md b/doc/design/gan_api.md
index 4fcff8b70a..77c867bac7 100644
--- a/doc/design/gan_api.md
+++ b/doc/design/gan_api.md
@@ -139,10 +139,10 @@ class DCGAN(object):
 - Define data readers as placeholders to hold the data;
 - Build generator and discriminators;
 - Define two training losses for discriminator and generator, respectively. 
+If we have an execution dependency engine that can back-trace all tensors, the module building our GAN model will look like this:
 ```python
 class DCGAN(object):
   def build_model(self):
-    # input data
     if self.y_dim:
         self.y = pd.data(pd.float32, [self.batch_size, self.y_dim])
     self.images = pd.data(pd.float32, [self.batch_size, self.im_size, self.im_size])
@@ -151,17 +151,17 @@ class DCGAN(object):
     
     # step 1: generate images by generator, classify real/fake images with discriminator
     if self.y_dim: # if conditional GAN, includes label
-      self.G = self.generator(self.z, self.y)
-      self.D_t = self.discriminator(self.images)
-      # generated fake images
-      self.sampled = self.sampler(self.z, self.y)
-      self.D_f = self.discriminator(self.images)
+        self.G = self.generator(self.z, self.y)
+        self.D_t = self.discriminator(self.images)
+        # generated fake images
+        self.sampled = self.sampler(self.z, self.y)
+        self.D_f = self.discriminator(self.G)
     else: # original version of GAN
-      self.G = self.generator(self.z)
-      self.D_t = self.discriminator(self.images)
-      # generate fake images
-      self.sampled = self.sampler(self.z)
-      self.D_f = self.discriminator(self.images)
+        self.G = self.generator(self.z)
+        self.D_t = self.discriminator(self.images)
+        # generate fake images
+        self.sampled = self.sampler(self.z)
+        self.D_f = self.discriminator(self.G)
     
     # step 2: define the two losses
     self.d_loss_real = pd.reduce_mean(pd.cross_entropy(self.D_t, np.ones(self.batch_size)))
@@ -171,6 +171,44 @@ class DCGAN(object):
     self.g_loss = pd.reduce_mean(pd.cross_entropy(self.D_f, np.ones(self.batch_size)))
 ```
 
+If we do not have a dependency engine but only blocks, the module building our GAN model will look like this:
+```python
+class DCGAN(object):
+  def build_model(self, default_block):
+    # input data in the default block
+    if self.y_dim:
+        self.y = pd.data(pd.float32, [self.batch_size, self.y_dim])
+    self.images = pd.data(pd.float32, [self.batch_size, self.im_size, self.im_size])
+    # self.faked_images = pd.data(pd.float32, [self.batch_size, self.im_size, self.im_size])
+    self.z = pd.data(pd.float32, [None, self.z_size])
+
+    # step 1: generate images by generator, classify real/fake images with discriminator
+    with pd.default_block().g_block():
+      if self.y_dim: # if conditional GAN, includes label
+        self.G = self.generator(self.z, self.y)
+        self.D_g = self.discriminator(self.G, self.y)
+      else: # original version of GAN
+        self.G = self.generator(self.z)
+        self.D_g = self.discriminator(self.G)
+      self.g_loss = pd.reduce_mean(pd.cross_entropy(self.D_g, np.ones(self.batch_size)))
+    
+    with pd.default_block().d_block():
+      if self.y_dim: # if conditional GAN, includes label
+        self.D_t = self.discriminator(self.images, self.y)
+        self.D_f = self.discriminator(self.G, self.y)
+      else: # original version of GAN
+        self.D_t = self.discriminator(self.images)
+        self.D_f = self.discriminator(self.G)
+
+      # step 2: define the two losses
+      self.d_loss_real = pd.reduce_mean(pd.cross_entropy(self.D_t, np.ones(self.batch_size)))
+      self.d_loss_fake = pd.reduce_mean(pd.cross_entropy(self.D_f, np.zeros(self.batch_size)))
+      self.d_loss = self.d_loss_real + self.d_loss_fake
+```
+Some points of confusion and problems with this design:
+- D\_g and D\_f are actually the same thing, but have to be written twice;
+- It requires the ability to create a block at any time, rather than only inside if-else or RNN;
+
 ## Main function for the demo:
 Generally, the user of GAN just needs to do the following things:
 - Define an object as DCGAN class;
@@ -183,9 +221,10 @@ import numpy as np
 import logging
 
 if __name__ == "__main__":
-    # dcgan
-    dcgan = DCGAN()
-    dcgan.build_model()
+    # dcgan class in the default graph/block
+    with pd.block() as def_block:
+      dcgan = DCGAN()
+      dcgan.build_model(def_block)
 
     # load mnist data
     data_X, data_y = load_mnist()

From 35a5b9b99756188f2782ed19b4eaca57cb44ceea Mon Sep 17 00:00:00 2001
From: zchen0211 <chenzhuoyuan07@gmail.com>
Date: Mon, 9 Oct 2017 16:22:49 -0700
Subject: [PATCH 55/82] gan api

---
 doc/design/gan_api.md | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)

diff --git a/doc/design/gan_api.md b/doc/design/gan_api.md
index 77c867bac7..ed7622920b 100644
--- a/doc/design/gan_api.md
+++ b/doc/design/gan_api.md
@@ -222,6 +222,10 @@ import logging
 
 if __name__ == "__main__":
     # dcgan class in the default graph/block
+    # if we use a dependency engine as TensorFlow does,
+    # the code will be slightly different:
+    # dcgan = DCGAN()
+    # dcgan.build_model()
     with pd.block() as def_block:
       dcgan = DCGAN()
       dcgan.build_model(def_block)
@@ -230,8 +234,12 @@ if __name__ == "__main__":
     data_X, data_y = load_mnist()
     
     # Two subgraphs required!!!
-    d_optim = pd.train.Adam(lr = .001, beta= .1).minimize(dcgan.d_loss, dcgan.theta_D)
-    g_optim = pd.train.Adam(lr = .001, beta= .1).minimize(dcgan.g_loss, dcgan.theta_G)
+    with pd.block().d_block():
+      d_optim = pd.train.Adam(lr = .001, beta= .1)
+      d_step = d_optim.minimize(dcgan.d_loss, dcgan.theta_D)
+    with pd.block().g_block():
+      g_optim = pd.train.Adam(lr = .001, beta= .1)
+      g_step = g_optim.minimize(dcgan.g_loss, dcgan.theta_G)
 
     # executor
     sess = pd.executor()
@@ -246,11 +254,11 @@ if __name__ == "__main__":
         batch_z = np.random.uniform(-1., 1., [batch_size, z_dim])
 
         if batch_id % 2 == 0:
-          sess.run(d_optim, 
+          sess.run(d_step, 
                    feed_dict = {dcgan.images: batch_im,
                                 dcgan.y: batch_label,
                                 dcgan.z: batch_z})
         else:
-          sess.run(g_optim,
+          sess.run(g_step,
                    feed_dict = {dcgan.z: batch_z})
 ```

From 67c77b57fc8ad8eff7e9e46e842aa54b1e39047a Mon Sep 17 00:00:00 2001
From: zchen0211 <chenzhuoyuan07@gmail.com>
Date: Mon, 9 Oct 2017 16:47:05 -0700
Subject: [PATCH 56/82] gan api

---
 doc/design/gan_api.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/design/gan_api.md b/doc/design/gan_api.md
index ed7622920b..689f359564 100644
--- a/doc/design/gan_api.md
+++ b/doc/design/gan_api.md
@@ -206,7 +206,7 @@ class DCGAN(object):
       self.d_loss = self.d_loss_real + self.d_loss_fake
 ```
 Some points of confusion and problems with this design:
-- D\_g and D\_f are actually the same thing, but have to be written twice;
+- D\_g and D\_f are actually the same thing, but have to be written twice; i.e., if we conceptually want to run two sub-graphs that share code, the shared code has to be written twice.
 - It requires the ability to create a block at any time, rather than only inside if-else or RNN;
 
 ## Main function for the demo:

From 63912dcc198729b5e29e9080da6d76e649fd9394 Mon Sep 17 00:00:00 2001
From: zchen0211 <chenzhuoyuan07@gmail.com>
Date: Mon, 9 Oct 2017 17:09:04 -0700
Subject: [PATCH 57/82] gan design

---
 doc/design/gan_api.md | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/doc/design/gan_api.md b/doc/design/gan_api.md
index 689f359564..1a7d0df116 100644
--- a/doc/design/gan_api.md
+++ b/doc/design/gan_api.md
@@ -262,3 +262,7 @@ if __name__ == "__main__":
           sess.run(g_step,
                    feed_dict = {dcgan.z: batch_z})
 ```
+
+## More thinking about the dependency engine vs. the block design:
+- What if we just want to run an intermediate result? Do we need to run the whole block/graph?
+- Should we call eval() to get the fake images in the first stage, and then train the discriminator in the second stage? (See the sketch below.)
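A hedged sketch of that two-stage alternative (`eval()` and the `faked_images` feed are hypothetical, following the placeholder commented out in `build_model` above):

```python
# stage 1: evaluate only the generator sub-graph to materialize fake images
fake_images = dcgan.G.eval(feed_dict={dcgan.z: batch_z})

# stage 2: train the discriminator on real and materialized fake images
sess.run(d_step, feed_dict={dcgan.images: batch_im,
                            dcgan.faked_images: fake_images,
                            dcgan.y: batch_label})
```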

From 6efacc14d857bd117d5918bf02afc9cca702bd78 Mon Sep 17 00:00:00 2001
From: Abhinav Arora <aroraabhinav@baidu.com>
Date: Mon, 9 Oct 2017 17:24:21 -0700
Subject: [PATCH 58/82] Implementing the fill constant op for the executor

---
 paddle/operators/fill_constant_op.cc          | 68 +++++++++++++++++++
 paddle/operators/fill_constant_op.cu          | 22 ++++++
 paddle/operators/fill_constant_op.h           | 37 ++++++++++
 .../framework/tests/test_fill_constant_op.py  | 35 ++++++++++
 4 files changed, 162 insertions(+)
 create mode 100644 paddle/operators/fill_constant_op.cc
 create mode 100644 paddle/operators/fill_constant_op.cu
 create mode 100644 paddle/operators/fill_constant_op.h
 create mode 100644 python/paddle/v2/framework/tests/test_fill_constant_op.py

diff --git a/paddle/operators/fill_constant_op.cc b/paddle/operators/fill_constant_op.cc
new file mode 100644
index 0000000000..65d03d5fa4
--- /dev/null
+++ b/paddle/operators/fill_constant_op.cc
@@ -0,0 +1,68 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/operators/fill_constant_op.h"
+
+namespace paddle {
+namespace operators {
+
+class FillConstantOp : public framework::OperatorWithKernel {
+ public:
+  using framework::OperatorWithKernel::OperatorWithKernel;
+
+ protected:
+  void InferShape(framework::InferShapeContext *ctx) const override {
+    PADDLE_ENFORCE(ctx->HasOutput("Out"),
+                   "Output(Out) of FillConstantOp should not be null.");
+    auto &shape = ctx->Attrs().Get<std::vector<int>>("shape");
+    std::vector<int64_t> shape_int64(shape.size(), 0);
+    std::transform(shape.begin(), shape.end(), shape_int64.begin(),
+                   [](int a) { return static_cast<int64_t>(a); });
+    auto dims = framework::make_ddim(shape_int64);
+    ctx->SetOutputDim("Out", dims);
+  }
+
+  framework::DataType IndicateDataType(
+      const framework::ExecutionContext &ctx) const override {
+    return static_cast<framework::DataType>(ctx.Attr<int>("dataType"));
+  }
+};
+
+class FillConstantOpMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+  FillConstantOpMaker(framework::OpProto *proto,
+                      framework::OpAttrChecker *op_checker)
+      : framework::OpProtoAndCheckerMaker(proto, op_checker) {
+    AddAttr<int>("dataType",
+                 "(int, default 5 (FP32)) "
+                 "Output data type")
+        .SetDefault(framework::DataType::FP32);
+    AddAttr<std::vector<int>>("shape", "(vector<int>) The shape of the output");
+    AddAttr<float>("value", "(float, default 0) The value to be filled")
+        .SetDefault(0.0f);
+    AddOutput("Out",
+              "(Tensor) Tensor of specified shape will be filled "
+              "with the specified value");
+    AddComment(R"DOC(Fill up a variable with specified constant value.)DOC");
+  }
+};
+}  // namespace operators
+}  // namespace paddle
+
+namespace ops = paddle::operators;
+REGISTER_OP_WITHOUT_GRADIENT(fill_constant, ops::FillConstantOp,
+                             ops::FillConstantOpMaker);
+REGISTER_OP_CPU_KERNEL(
+    fill_constant,
+    ops::FillConstantOpKernel<paddle::platform::CPUPlace, float>);
diff --git a/paddle/operators/fill_constant_op.cu b/paddle/operators/fill_constant_op.cu
new file mode 100644
index 0000000000..eef8fcbd7f
--- /dev/null
+++ b/paddle/operators/fill_constant_op.cu
@@ -0,0 +1,22 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License. */
+
+#define EIGEN_USE_GPU
+#include "paddle/framework/op_registry.h"
+#include "paddle/operators/fill_constant_op.h"
+
+namespace ops = paddle::operators;
+REGISTER_OP_GPU_KERNEL(
+    fill_constant,
+    ops::FillConstantOpKernel<paddle::platform::GPUPlace, float>);
diff --git a/paddle/operators/fill_constant_op.h b/paddle/operators/fill_constant_op.h
new file mode 100644
index 0000000000..53b8b548ec
--- /dev/null
+++ b/paddle/operators/fill_constant_op.h
@@ -0,0 +1,37 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+#include "paddle/framework/eigen.h"
+#include "paddle/framework/op_registry.h"
+
+namespace paddle {
+namespace operators {
+
+template <typename Place, typename T>
+class FillConstantOpKernel : public framework::OpKernel<T> {
+ public:
+  void Compute(const framework::ExecutionContext& ctx) const override {
+    auto* out = ctx.Output<framework::Tensor>("Out");
+    out->mutable_data<T>(ctx.GetPlace());
+    auto value = ctx.Attr<T>("value");
+
+    auto out_eigen = framework::EigenVector<T>::Flatten(*out);
+    auto place = ctx.GetEigenDevice<Place>();
+    out_eigen.device(place) = out_eigen.constant(static_cast<T>(value));
+  }
+};
+
+}  // namespace operators
+}  // namespace paddle
diff --git a/python/paddle/v2/framework/tests/test_fill_constant_op.py b/python/paddle/v2/framework/tests/test_fill_constant_op.py
new file mode 100644
index 0000000000..dff7b615aa
--- /dev/null
+++ b/python/paddle/v2/framework/tests/test_fill_constant_op.py
@@ -0,0 +1,35 @@
+import unittest
+import numpy as np
+from op_test import OpTest
+
+
+class TestFillConstantOp1(OpTest):
+    def setUp(self):
+        '''Test fill_constant op with specified value
+        '''
+        self.op_type = "fill_constant"
+
+        self.inputs = {}
+        self.attrs = {'shape': [123, 92], 'value': 3.8}
+        self.outputs = {'Out': np.full((123, 92), 3.8)}
+
+    def test_check_output(self):
+        self.check_output()
+
+
+class TestFillConstantOp2(OpTest):
+    def setUp(self):
+        '''Test fill_constant op with default value
+        '''
+        self.op_type = "fill_constant"
+
+        self.inputs = {}
+        self.attrs = {'shape': [123, 92]}
+        self.outputs = {'Out': np.full((123, 92), 0.0)}
+
+    def test_check_output(self):
+        self.check_output()
+
+
+if __name__ == "__main__":
+    unittest.main()
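As a usage note: the op's forward semantics reduce to `numpy.full`. The extra test below is an illustrative sketch, not part of the patch; it passes the `dataType` attribute explicitly, assuming the enum value 5 means FP32, as the attribute comment in `fill_constant_op.cc` above states.

```python
import unittest
import numpy as np
from op_test import OpTest


class TestFillConstantOpFP32(OpTest):
    def setUp(self):
        '''Test fill_constant op with an explicit dataType attribute
        (5 == FP32 is an assumption from the attribute's default).
        '''
        self.op_type = "fill_constant"

        self.inputs = {}
        self.attrs = {'shape': [4, 5], 'value': 1.25, 'dataType': 5}
        self.outputs = {'Out': np.full((4, 5), 1.25)}

    def test_check_output(self):
        self.check_output()


if __name__ == "__main__":
    unittest.main()
```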

From c876cacc7b7936f356b0c712dff8f32534eab2ae Mon Sep 17 00:00:00 2001
From: Luo Tao <luotao02@baidu.com>
Date: Tue, 10 Oct 2017 10:24:18 +0800
Subject: [PATCH 59/82] fix compiler error: ‘unique_ptr’ is not a member of ‘std’

---
 paddle/framework/type_defs.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/paddle/framework/type_defs.h b/paddle/framework/type_defs.h
index a5b9472213..6f65a942ba 100644
--- a/paddle/framework/type_defs.h
+++ b/paddle/framework/type_defs.h
@@ -15,6 +15,7 @@
 #pragma once
 #include <functional>
 #include <map>
+#include <memory>
 #include "paddle/platform/variant.h"
 
 namespace paddle {

From d350cdbc973a81d3a1e1b3cd90aaaddf2bdfac2f Mon Sep 17 00:00:00 2001
From: Yu Yang <yuyang18@baidu.com>
Date: Mon, 9 Oct 2017 19:59:38 -0700
Subject: [PATCH 60/82] Polish CompileTime InferShape

1. InferShape should be a method for `OpDescBind` not `Operator`, since
  `OpDescBind` is a compile-time concept.
2. Pre-create operators and store them into a map to make InferShape
   faster.
---
 paddle/framework/op_desc.cc                   | 35 +++++++++++++++++++
 paddle/framework/op_desc.h                    |  2 ++
 paddle/pybind/protobuf.cc                     |  3 +-
 paddle/pybind/pybind.cc                       | 15 --------
 .../v2/framework/tests/test_infer_shape.py    |  6 ++--
 5 files changed, 42 insertions(+), 19 deletions(-)

diff --git a/paddle/framework/op_desc.cc b/paddle/framework/op_desc.cc
index 02aa74a842..6ce453cc99 100644
--- a/paddle/framework/op_desc.cc
+++ b/paddle/framework/op_desc.cc
@@ -13,7 +13,10 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 
 #include "paddle/framework/op_desc.h"
+#include <functional>
+#include <unordered_map>
 #include "paddle/framework/block_desc.h"
+#include "paddle/framework/operator.h"
 
 namespace paddle {
 namespace framework {
@@ -184,5 +187,37 @@ void OpDescBind::Sync() {
     need_update_ = false;
   }
 }
+
+using InferShapeFuncMap =
+    std::unordered_map<std::string /*op_type*/,
+                       std::function<void(InferShapeContext *)>>;
+
+static InferShapeFuncMap &InferShapeFuncs() {
+  static InferShapeFuncMap *g_map = nullptr;
+  if (g_map == nullptr) {
+    g_map = new InferShapeFuncMap();
+    auto &info_map = OpInfoMap::Instance();
+    // all registered kernels
+    for (auto &pair : OperatorWithKernel::AllOpKernels()) {
+      auto &info = info_map.Get(pair.first);
+      auto op =
+          static_cast<OperatorWithKernel *>(info.Creator()("", {}, {}, {}));
+      g_map->insert(
+          {pair.first, [op](InferShapeContext *ctx) { op->InferShape(ctx); }});
+    }
+  }
+  return *g_map;
+}
+
+void OpDescBind::InferShape(const BlockDescBind &block) const {
+  auto &funcs = InferShapeFuncs();
+  auto it = funcs.find(this->Type());
+  if (it == funcs.end()) {
+    PADDLE_THROW("Operator %s has not been registered", this->Type());
+  }
+  CompileTimeInferShapeContext ctx(*this, block);
+  it->second(&ctx);
+}
+
 }  // namespace framework
 }  // namespace paddle
diff --git a/paddle/framework/op_desc.h b/paddle/framework/op_desc.h
index d0c314771c..81c4225041 100644
--- a/paddle/framework/op_desc.h
+++ b/paddle/framework/op_desc.h
@@ -100,6 +100,8 @@ class OpDescBind {
     return &this->attrs_;
   }
 
+  void InferShape(const BlockDescBind &block) const;
+
  private:
   template <typename MapType>
   static std::vector<typename MapType::key_type> MapKeys(const MapType &map) {
diff --git a/paddle/pybind/protobuf.cc b/paddle/pybind/protobuf.cc
index 47bd7bc3bb..6333cc332e 100644
--- a/paddle/pybind/protobuf.cc
+++ b/paddle/pybind/protobuf.cc
@@ -196,7 +196,8 @@ void BindOpDesc(py::module &m) {
       .def("set_attr", &OpDescBind::SetAttr)
       .def("attr", &OpDescBind::GetAttr)
       .def("set_block_attr", &OpDescBind::SetBlockAttr)
-      .def("get_block_attr", &OpDescBind::GetBlockAttr);
+      .def("get_block_attr", &OpDescBind::GetBlockAttr)
+      .def("infer_shape", &OpDescBind::InferShape);
 }
 
 }  // namespace pybind
diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc
index 356c4986e2..0f6e3101e2 100644
--- a/paddle/pybind/pybind.cc
+++ b/paddle/pybind/pybind.cc
@@ -231,21 +231,6 @@ All parameter, weight, gradient are variables in Paddle.
                                    desc.InitializationErrorString());
                     return OpRegistry::CreateOp(desc);
                   })
-      .def_static("infer_shape",
-                  [](OpDescBind &op_desc, BlockDescBind &block) {
-                    auto op = OpRegistry::CreateOp(*op_desc.Proto());
-                    auto *op_with_kernel =
-                        dynamic_cast<OperatorWithKernel *>(op.get());
-                    if (op_with_kernel != nullptr) {
-                      auto ctx = CompileTimeInferShapeContext(op_desc, block);
-                      op_with_kernel->InferShape(&ctx);
-                    } else {
-                      PADDLE_THROW(
-                          "OP(%s) is not type of OperatorWithKernel, "
-                          "should not call this function",
-                          op_desc.Type());
-                    }
-                  })
       .def("backward",
            [](const OperatorBase &forwardOp,
               const std::unordered_set<std::string> &no_grad_vars) {
diff --git a/python/paddle/v2/framework/tests/test_infer_shape.py b/python/paddle/v2/framework/tests/test_infer_shape.py
index b38ec9c037..99562890fd 100644
--- a/python/paddle/v2/framework/tests/test_infer_shape.py
+++ b/python/paddle/v2/framework/tests/test_infer_shape.py
@@ -1,6 +1,6 @@
 import unittest
+
 import paddle.v2.framework.core as core
-from paddle.v2.framework.op import Operator
 
 
 class TestInferShape(unittest.TestCase):
@@ -26,7 +26,7 @@ class TestInferShape(unittest.TestCase):
         sum_op_desc.set_input("X", ["x1", "x2"])
         sum_op_desc.set_output("Out", ["out"])
 
-        core.Operator.infer_shape(sum_op_desc, block)
+        sum_op_desc.infer_shape(block)
         self.assertEqual(out.shape(), shape)
 
     def test_mul_op(self):
@@ -55,7 +55,7 @@ class TestInferShape(unittest.TestCase):
         mul_op_desc.set_attr("x_num_col_dims", 1)
         mul_op_desc.set_attr("y_num_col_dims", 1)
 
-        core.Operator.infer_shape(mul_op_desc, block)
+        mul_op_desc.infer_shape(block)
         self.assertEqual(out.shape(), [x_shape[0], y_shape[1]])
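For clarity, the new compile-time path can be assembled as below. The op-desc calls (`append_op`, `set_type`, `set_input`, `set_output`, `set_attr`, `infer_shape`) are the ones exercised in the test above; the program/variable setup is not shown in this diff, so those binding names are assumptions.

```python
import paddle.v2.framework.core as core

# program/block/variable setup (assumed binding names; elided in the diff)
prog = core.ProgramDesc.__create_program_desc__()
block = prog.block(0)
x = block.new_var("x"); x.set_shape([10, 20])
y = block.new_var("y"); y.set_shape([20, 30])
out = block.new_var("out")

mul = block.append_op()  # the calls below match the updated test
mul.set_type("mul")
mul.set_input("X", ["x"])
mul.set_input("Y", ["y"])
mul.set_output("Out", ["out"])
mul.set_attr("x_num_col_dims", 1)
mul.set_attr("y_num_col_dims", 1)

mul.infer_shape(block)  # was: core.Operator.infer_shape(mul, block)
assert out.shape() == [10, 30]
```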
 
 

From 6c6474cbd8514011b1c63d3439d49bd4700e46c8 Mon Sep 17 00:00:00 2001
From: chengduoZH <zhaochengduo@163.com>
Date: Tue, 10 Oct 2017 10:32:19 +0800
Subject: [PATCH 61/82] follow comments

---
 paddle/operators/CMakeLists.txt        | 15 +++----
 paddle/operators/math/pooling.h        | 23 ++++++-----
 paddle/operators/pool_with_index_op.cc | 57 +++++++++++++++-----------
 3 files changed, 54 insertions(+), 41 deletions(-)

diff --git a/paddle/operators/CMakeLists.txt b/paddle/operators/CMakeLists.txt
index 39af318ca5..31ae4b2cc1 100644
--- a/paddle/operators/CMakeLists.txt
+++ b/paddle/operators/CMakeLists.txt
@@ -55,12 +55,20 @@ function(op_library TARGET)
         set(pybind_flag 1)
     endif()
 
+    # pool_op contains several operators
     if ("${TARGET}" STREQUAL "pool_op")
         set(pybind_flag 1)
         # It's enough to just add one operator to pybind
         file(APPEND ${pybind_file} "USE_OP(pool2d);\n")
     endif()
 
+    # pool_with_index_op contains several operators
+    if ("${TARGET}" STREQUAL "pool_with_index_op")
+        set(pybind_flag 1)
+        # It's enough to just add one operator to pybind
+        file(APPEND ${pybind_file} "USE_OP(max_pool2d_with_index);\n")
+    endif()
+
     # activation_op contains several operators
     if ("${TARGET}" STREQUAL "activation_op")
         set(pybind_flag 1)
@@ -75,13 +83,6 @@ function(op_library TARGET)
         file(APPEND ${pybind_file} "USE_OP(reduce_sum);\n")
     endif()
 
-    # pool_with_index_op contains several operators
-    if ("${TARGET}" STREQUAL "pool_with_index_op")
-        set(pybind_flag 1)
-        # It's enough to just adding one operator to pybind
-        file(APPEND ${pybind_file} "USE_OP(max_pool2d_with_index);\n")
-    endif()
-
     # pybind USE_NO_KERNEL_OP
     file(READ ${TARGET}.cc TARGET_CONTENT)
     string(REGEX MATCH "OperatorWithKernel" regex_result "${TARGET_CONTENT}")
diff --git a/paddle/operators/math/pooling.h b/paddle/operators/math/pooling.h
index f15ddca69a..c50c57b5c5 100644
--- a/paddle/operators/math/pooling.h
+++ b/paddle/operators/math/pooling.h
@@ -24,15 +24,16 @@ namespace math {
 
 #define FLT_MAX \
   __FLT_MAX__  // It might need to be placed in another file, but I'm still
-               // wondering where to put it
+               // wondering where to put it.
 
 /*
  * \brief Extracting simple operations from pooling.
- *        Both MaxPool and AvgPool need initial, compute and finalize operation.
+ *        Both MaxPool and AvgPool need "initial", "compute" and "finalize"
+ * operations.
  *        MaxPool initializes temp variable to the negative maximum to find the
  * maximum value in the pooling field.
  *        AvgPool initializes temp variable to zero to accumulate all values
- * in pool pooling, and takes the average.
+ * in the pooling window, and finally takes the average.
  *        MaxPoolGrad and AvgPoolGrad are gradient operations respectively.
  */
 template <class T>
@@ -72,17 +73,17 @@ class AvgPoolGrad {
 /*
  * \brief Getting pooling results, and calculating gradient.
  *
- * In pool2d, all tensors are in NCHW format. In pool3d, all tensors are in
- * NCDHW format.
+ * In pool2d, all tensors are in NCHW format, where N is the batch size, C is
+ * the number of channels, and H and W are the height and width of the feature.
+ * In pool3d, all tensors are in NCDHW format, where N is the batch size, C is
+ * the number of channels, and D, H and W are the depth, height and width of
+ * the feature.
  *
  * In max pooling, it is possible that the pooling region has multiple maximum
- * elements.
- * In this case, we should compute the gradient of the first maximum element.
+ * elements. In this case, we should compute the gradient of the first maximum
+ * element.
  * This is different from average pooling. So we rewrite the max_pool_grad:
  * MaxPool2dGradFunctor, MaxPool3dGradFunctor.
- *
  */
-
 template <typename Place, typename PoolProcess, typename T>
 class Pool2dFunctor {
  public:
@@ -146,10 +147,9 @@ class MaxPool3dGradFunctor {
 /*
  * \brief Getting max pooling results and corresponding max index, and
  * calculating gradient.
- * In sub-sampling-pooling, it is necessary to know max element index.
+ * In up-sampling (unpooling), it is necessary to know the index of the max element.
  * In pool2d, all tensors are in NCHW format. In pool3d, all tensors are in
  * NCDHW format.
- *
  */
 template <typename Place, typename T>
 class MaxPool2dWithIndexFunctor {
@@ -188,6 +188,7 @@ class MaxPool3dWithIndexGradFunctor {
                   const framework::Tensor& mask, std::vector<int>& ksize,
                   std::vector<int>& strides, std::vector<int>& paddings);
 };
+
 }  // namespace math
 }  // namespace operators
 }  // namespace paddle
diff --git a/paddle/operators/pool_with_index_op.cc b/paddle/operators/pool_with_index_op.cc
index 2e6a5f2555..ab933a3400 100644
--- a/paddle/operators/pool_with_index_op.cc
+++ b/paddle/operators/pool_with_index_op.cc
@@ -34,7 +34,7 @@ class MaxPoolWithIndexOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE(ctx->HasOutput("Out"),
                    "Out(Output) of Pooling should not be null.");
     PADDLE_ENFORCE(ctx->HasOutput("Mask"),
-                   "Out(Output) of Pooling should not be null.");
+                   "Mask(Output) of Pooling should not be null.");
 
     auto in_x_dims = ctx->GetInputDim("X");
 
@@ -52,13 +52,11 @@ class MaxPoolWithIndexOp : public framework::OperatorWithKernel {
     }
 
     PADDLE_ENFORCE(in_x_dims.size() - ksize.size() == 2U,
-                   "Pooling intput size and pooling size should be consistent");
-    PADDLE_ENFORCE(ksize.size() == 2 || ksize.size() == 3,
-                   "Pooling size size should be 2 elements. or 3 elements.");
+                   "Intput size and pooling size should be consistent.");
     PADDLE_ENFORCE_EQ(ksize.size(), strides.size(),
-                      "strides size and pooling size should be the same.");
+                      "Strides size and pooling size should be the same.");
     PADDLE_ENFORCE_EQ(ksize.size(), paddings.size(),
-                      "paddings size and pooling size should be the same.");
+                      "Paddings size and pooling size should be the same.");
 
     std::vector<int64_t> output_shape({in_x_dims[0], in_x_dims[1]});
     for (size_t i = 0; i < ksize.size(); ++i) {
@@ -76,11 +74,9 @@ class MaxPoolWithIndexOpGrad : public framework::OperatorWithKernel {
 
  protected:
   void InferShape(framework::InferShapeContextBase *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "X(Input) of Pooling should not be null.");
-    PADDLE_ENFORCE(
-        ctx->HasOutput(framework::GradVarName("X")),
-        "X@GRAD(Input@GRAD) of MaxPoolWithIndexOpGrad should not be null.");
+    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) must not be null.");
+    PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")),
+                   "Input(X@GRAD) should not be null.");
     ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
   }
 };
@@ -110,9 +106,10 @@ class MaxPool2dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker {
 
     AddAttr<std::vector<int>>(
         "ksize",
-        "Pooling size(height, width) of pooling operator."
+        "The pooling size(height, width) of pooling operator."
         "If globalPooling = true, ksize is ignored and need not be "
-        "specified.");  // TODO(Add checker)
+        "specified.");  // TODO(Chengduo): Add checker. (Currently,
+                        // TypedAttrChecker doesn't support vector types.)
     AddAttr<bool>(
         "globalPooling",
         "Whether to use the globalPooling."
@@ -123,15 +120,21 @@ class MaxPool2dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker {
     AddAttr<std::vector<int>>("strides",
                               "Strides(height, width) of pooling operator."
                               "Default {1,1}.")
-        .SetDefault({1, 1});  // TODO(Add checker)
+        .SetDefault({1, 1});  // TODO(Chengduo): Add checker. (Currently,
+                              // TypedAttrChecker doesn't support vector types.)
     AddAttr<std::vector<int>>("paddings",
                               "Paddings(height, width) of pooling operator."
                               "Default {0,0}.")
-        .SetDefault({0, 0});  // TODO(Add checker)
+        .SetDefault({0, 0});  // TODO(Chengduo): Add checker. (Currently,
+                              // TypedAttrChecker doesn't support vector types.)
 
     AddComment(R"DOC(
-The maxPooling2d with index operation calculates the output and the mask based on
-the input and ksize, strides, paddings parameters.
+The maxPooling2d with index operation calculates the output and the mask
+based on the input and the ksize, strides, and paddings parameters. Input(X)
+and output(Out, Mask) are in NCHW format, where N is the batch size, C is the
+number of channels, and H and W are the height and width of the feature.
+Parameters (ksize, strides, paddings) have two elements, which represent
+height and width, respectively.
 )DOC");
   }
 };
@@ -162,9 +165,10 @@ class MaxPool3dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker {
 
     AddAttr<std::vector<int>>(
         "ksize",
-        "Pooling size(depth, height, width) of pooling operator."
+        "The pooling size(depth, height, width) of pooling operator."
         "If globalPooling = true, ksize is ignored and need not be "
-        "specified.");  // TODO(Add checker)
+        "specified.");  // TODO(Chengduo): Add checker. (Currently,
+                        // TypedAttrChecker doesn't support vector types.)
     AddAttr<bool>(
         "globalPooling",
         "Whether to use the globalPooling."
@@ -176,19 +180,26 @@ class MaxPool3dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker {
         "strides",
         "Strides(depth, height, width) of pooling operator."
         "Default {1,1,1}.")
-        .SetDefault({1, 1, 1});  // TODO(Add checker)
+        .SetDefault({1, 1, 1});  // TODO(Chengduo): Add checker. (Currently,
+                                 // TypedAttrChecker doesn't support vector types.)
     AddAttr<std::vector<int>>(
         "paddings",
         "Paddings(depth, height, width) of pooling operator."
         "Default {0,0,0}.")
-        .SetDefault({0, 0, 0});  // TODO(Add checker)
+        .SetDefault({0, 0, 0});  // TODO(Chengduo): Add checker. (Currently,
+                                 // TypedAttrChecker doesn't support vector types.)
 
     AddComment(R"DOC(
-The maxpooling3d with index operation calculates the output and the mask based on
-the input and ksize, strides, paddings parameters.
+The maxPooling3d with index operation calculates the output and the mask
+based on the input and the ksize, strides, and paddings parameters.
+Input(X) and output(Out, Mask) are in NCDHW format, where N is the batch
+size, C is the number of channels, and D, H and W are the depth, height and
+width of the feature. Parameters (ksize, strides, paddings) have three
+elements, which represent depth, height and width, respectively.
 )DOC");
   }
 };
+
 }  // namespace operators
 }  // namespace paddle
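To make the mask semantics concrete, here is a minimal NumPy sketch of the forward pass (illustrative only; the mask layout, i.e. the flattened `h * W + w` offset of the first maximum within each input channel, is an assumption based on the kernel code):

```python
import numpy as np

def max_pool2d_with_index(x, ksize, strides, paddings=(0, 0)):
    """x: (N, C, H, W) -> (out, mask); mask stores, per output element,
    the flattened h * W + w position of the (first) maximum in x."""
    N, C, H, W = x.shape
    kh, kw = ksize
    sh, sw = strides
    ph, pw = paddings
    Ho = (H + 2 * ph - kh) // sh + 1
    Wo = (W + 2 * pw - kw) // sw + 1
    out = np.zeros((N, C, Ho, Wo), dtype=x.dtype)
    mask = np.zeros((N, C, Ho, Wo), dtype=np.int64)
    for n in range(N):
        for c in range(C):
            for i in range(Ho):
                for j in range(Wo):
                    # clip the window to the feature map, ignoring padding
                    h0, w0 = max(i * sh - ph, 0), max(j * sw - pw, 0)
                    h1 = min(i * sh - ph + kh, H)
                    w1 = min(j * sw - pw + kw, W)
                    win = x[n, c, h0:h1, w0:w1]
                    r, col = np.unravel_index(np.argmax(win), win.shape)
                    out[n, c, i, j] = win[r, col]
                    mask[n, c, i, j] = (h0 + r) * W + (w0 + col)
    return out, mask
```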
 

From 0e1f21a57050df474a4bbf47f538ddf1b4ef4f61 Mon Sep 17 00:00:00 2001
From: fengjiayi <fengjiayi@baidu.com>
Date: Mon, 9 Oct 2017 20:43:29 -0700
Subject: [PATCH 62/82] Fix bug

---
 paddle/framework/op_desc.cc | 1 +
 1 file changed, 1 insertion(+)

diff --git a/paddle/framework/op_desc.cc b/paddle/framework/op_desc.cc
index 02aa74a842..c2e796b7c1 100644
--- a/paddle/framework/op_desc.cc
+++ b/paddle/framework/op_desc.cc
@@ -25,6 +25,7 @@ OpDescBind::OpDescBind(const std::string &type, const VariableNameMap &inputs,
   inputs_ = inputs;
   outputs_ = outputs;
   attrs_ = attrs;
+  need_update_ = true;
 }
 
 OpDesc *OpDescBind::Proto() {

From cb2ef7d97de9b011f2dc9de060e0a641794ccbe1 Mon Sep 17 00:00:00 2001
From: Yu Yang <yuyang18@baidu.com>
Date: Mon, 9 Oct 2017 20:46:44 -0700
Subject: [PATCH 63/82] Add comment

---
 paddle/framework/op_desc.cc | 1 +
 1 file changed, 1 insertion(+)

diff --git a/paddle/framework/op_desc.cc b/paddle/framework/op_desc.cc
index 6ce453cc99..c4dcb2210d 100644
--- a/paddle/framework/op_desc.cc
+++ b/paddle/framework/op_desc.cc
@@ -200,6 +200,7 @@ static InferShapeFuncMap &InferShapeFuncs() {
     // all registered kernels
     for (auto &pair : OperatorWithKernel::AllOpKernels()) {
       auto &info = info_map.Get(pair.first);
+      // use empty type here to avoid runtime checks.
       auto op =
           static_cast<OperatorWithKernel *>(info.Creator()("", {}, {}, {}));
       g_map->insert(

From 32cb74be3ebea9c9c59602576f45086934308789 Mon Sep 17 00:00:00 2001
From: Yu Yang <yuyang18@baidu.com>
Date: Mon, 9 Oct 2017 21:06:01 -0700
Subject: [PATCH 64/82] Removed unreached code

---
 paddle/framework/data_type.h | 1 -
 1 file changed, 1 deletion(-)

diff --git a/paddle/framework/data_type.h b/paddle/framework/data_type.h
index 55e3931f87..649899d425 100644
--- a/paddle/framework/data_type.h
+++ b/paddle/framework/data_type.h
@@ -28,7 +28,6 @@ inline DataType ToDataType(std::type_index type) {
     return DataType::INT32;
   } else {
     PADDLE_THROW("Not supported");
-    return static_cast<DataType>(-1);
   }
 }
 

From e21e5646a574b9e2fa299bacb3a8ee85472e84b5 Mon Sep 17 00:00:00 2001
From: chengduoZH <zhaochengduo@163.com>
Date: Tue, 10 Oct 2017 13:55:27 +0800
Subject: [PATCH 65/82] fix atomicAdd -> CudaAtomicAdd

---
 paddle/operators/math/pooling.cu | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)

diff --git a/paddle/operators/math/pooling.cu b/paddle/operators/math/pooling.cu
index 06263737a9..4d50121de4 100644
--- a/paddle/operators/math/pooling.cu
+++ b/paddle/operators/math/pooling.cu
@@ -144,7 +144,7 @@ __global__ void KernelMaxPool2DGrad(
 
     if (maxIndex != -1) {
       // atomic add
-      atomicAdd(input_grad + maxIndex, output_grad[index]);
+      platform::CudaAtomicAdd(input_grad + maxIndex, output_grad[index]);
     }
   }
 }
@@ -278,9 +278,7 @@ class MaxPool2dGradFunctor<platform::GPUPlace, T> {
 };
 
 template class MaxPool2dGradFunctor<platform::GPUPlace, float>;
-// template class MaxPool2dGradFunctor<platform::GPUPlace, double>; // The
-// 64-bit floating-point version of atomicAdd() is only supported by devices of
-// compute capability 6.x and higher.
+template class MaxPool2dGradFunctor<platform::GPUPlace, double>;
 
 template class Pool2dFunctor<platform::GPUPlace,
                              paddle::operators::math::MaxPool<float>, float>;
@@ -453,7 +451,7 @@ __global__ void KernelMaxPool3DGrad(
     }
     if (maxIdx != -1) {
       // atomic add
-      atomicAdd(input_grad + maxIdx, output_grad[index]);
+      platform::CudaAtomicAdd(input_grad + maxIdx, output_grad[index]);
     }
   }
 }
@@ -609,9 +607,7 @@ class MaxPool3dGradFunctor<platform::GPUPlace, T> {
 };
 
 template class MaxPool3dGradFunctor<platform::GPUPlace, float>;
-// template class MaxPool3dGradFunctor<platform::GPUPlace, double>;  // The
-// 64-bit floating-point version of atomicAdd() is only supported by devices of
-// compute capability 6.x and higher.
+template class MaxPool3dGradFunctor<platform::GPUPlace, double>;
 
 template class Pool3dFunctor<platform::GPUPlace,
                              paddle::operators::math::MaxPool<float>, float>;

From 871a3f6e76f57432d64b0410f49277a6e4f7d477 Mon Sep 17 00:00:00 2001
From: Luo Tao <luotao02@baidu.com>
Date: Tue, 10 Oct 2017 15:18:02 +0800
Subject: [PATCH 66/82] remove unused PADDLE_ONLY_CPU comments

---
 paddle/math/tests/test_GpuProfiler.cpp        | 2 +-
 paddle/memory/detail/buddy_allocator.cc       | 2 +-
 paddle/memory/detail/system_allocator.cc      | 2 +-
 paddle/memory/detail/system_allocator.h       | 2 +-
 paddle/memory/detail/system_allocator_test.cc | 2 +-
 paddle/memory/memcpy.cc                       | 2 +-
 paddle/memory/memcpy.h                        | 2 +-
 paddle/memory/memory.cc                       | 2 +-
 paddle/memory/memory_test.cc                  | 2 +-
 paddle/platform/device_context.cc             | 2 +-
 paddle/platform/enforce.h                     | 2 +-
 paddle/platform/gpu_info.h                    | 2 +-
 12 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/paddle/math/tests/test_GpuProfiler.cpp b/paddle/math/tests/test_GpuProfiler.cpp
index 9402bd3ec4..d9f146f0d1 100644
--- a/paddle/math/tests/test_GpuProfiler.cpp
+++ b/paddle/math/tests/test_GpuProfiler.cpp
@@ -162,4 +162,4 @@ int main(int argc, char** argv) {
   return RUN_ALL_TESTS();
 }
 
-#endif /* PADDLE_ONLY_CPU */
+#endif
diff --git a/paddle/memory/detail/buddy_allocator.cc b/paddle/memory/detail/buddy_allocator.cc
index fdc5ed19dc..e212f7737a 100644
--- a/paddle/memory/detail/buddy_allocator.cc
+++ b/paddle/memory/detail/buddy_allocator.cc
@@ -182,7 +182,7 @@ BuddyAllocator::PoolSet::iterator BuddyAllocator::RefillPool() {
       max_chunk_size_ = platform::GpuMaxChunkSize();
     }
   }
-#endif  // PADDLE_ONLY_CPU
+#endif
 
   // Allocate a new maximum sized block
   size_t index = 0;
diff --git a/paddle/memory/detail/system_allocator.cc b/paddle/memory/detail/system_allocator.cc
index 6c9a46dd09..33166d9ce2 100644
--- a/paddle/memory/detail/system_allocator.cc
+++ b/paddle/memory/detail/system_allocator.cc
@@ -134,7 +134,7 @@ void GPUAllocator::Free(void* p, size_t size, size_t index) {
 
 bool GPUAllocator::UseGpu() const { return true; }
 
-#endif  // PADDLE_ONLY_CPU
+#endif
 
 }  // namespace detail
 }  // namespace memory
diff --git a/paddle/memory/detail/system_allocator.h b/paddle/memory/detail/system_allocator.h
index ee9b012f91..552cab4f96 100644
--- a/paddle/memory/detail/system_allocator.h
+++ b/paddle/memory/detail/system_allocator.h
@@ -51,7 +51,7 @@ class GPUAllocator : public SystemAllocator {
   size_t gpu_alloc_size_ = 0;
   size_t fallback_alloc_size_ = 0;
 };
-#endif  // PADDLE_ONLY_CPU
+#endif
 
 }  // namespace detail
 }  // namespace memory
diff --git a/paddle/memory/detail/system_allocator_test.cc b/paddle/memory/detail/system_allocator_test.cc
index cd563844e7..6a8558937b 100644
--- a/paddle/memory/detail/system_allocator_test.cc
+++ b/paddle/memory/detail/system_allocator_test.cc
@@ -62,4 +62,4 @@ TEST(GPUAllocator, Alloc) {
   TestAllocator(a, 2048);
   TestAllocator(a, 0);
 }
-#endif  // PADDLE_ONLY_CPU
+#endif
diff --git a/paddle/memory/memcpy.cc b/paddle/memory/memcpy.cc
index 790420a8ab..1df88a6da9 100644
--- a/paddle/memory/memcpy.cc
+++ b/paddle/memory/memcpy.cc
@@ -89,7 +89,7 @@ void Copy<platform::GPUPlace, platform::GPUPlace>(platform::GPUPlace dst_place,
   platform::GpuMemcpySync(dst, src, num, cudaMemcpyDeviceToDevice);
 }
 
-#endif  // PADDLE_ONLY_CPU
+#endif
 
 }  // namespace memory
 }  // namespace paddle
diff --git a/paddle/memory/memcpy.h b/paddle/memory/memcpy.h
index 0bccee58c3..9b36182c2b 100644
--- a/paddle/memory/memcpy.h
+++ b/paddle/memory/memcpy.h
@@ -53,7 +53,7 @@ template <typename DstPlace, typename SrcPlace>
 void Copy(DstPlace, void* dst, SrcPlace, const void* src, size_t num,
           cudaStream_t stream);
 
-#endif  // PADDLE_ONLY_CPU
+#endif
 
 }  // namespace memory
 }  // namespace paddle
diff --git a/paddle/memory/memory.cc b/paddle/memory/memory.cc
index 30ce8a82e1..5087c02385 100644
--- a/paddle/memory/memory.cc
+++ b/paddle/memory/memory.cc
@@ -111,7 +111,7 @@ size_t Used<platform::GPUPlace>(platform::GPUPlace place) {
   return GetGPUBuddyAllocator(place.device)->Used();
 }
 
-#endif  // PADDLE_ONLY_CPU
+#endif
 
 }  // namespace memory
 }  // namespace paddle
diff --git a/paddle/memory/memory_test.cc b/paddle/memory/memory_test.cc
index 0d402038a0..2444931e26 100644
--- a/paddle/memory/memory_test.cc
+++ b/paddle/memory/memory_test.cc
@@ -135,4 +135,4 @@ TEST(BuddyAllocator, GPUMultAlloc) {
   }
 }
 
-#endif  // PADDLE_ONLY_CPU
+#endif
diff --git a/paddle/platform/device_context.cc b/paddle/platform/device_context.cc
index a9b6b79903..36450e9268 100644
--- a/paddle/platform/device_context.cc
+++ b/paddle/platform/device_context.cc
@@ -136,7 +136,7 @@ cudnnHandle_t CUDADeviceContext::cudnn_handle() const { return cudnn_handle_; }
 
 cudaStream_t CUDADeviceContext::stream() const { return stream_; }
 
-#endif  // PADDLE_ONLY_CPU
+#endif
 
 }  // namespace platform
 }  // namespace paddle
diff --git a/paddle/platform/enforce.h b/paddle/platform/enforce.h
index 15d8446cd8..cd906c3fa9 100644
--- a/paddle/platform/enforce.h
+++ b/paddle/platform/enforce.h
@@ -41,7 +41,7 @@ limitations under the License. */
 #include <thrust/system/cuda/error.h>
 #include <thrust/system_error.h>
 
-#endif  // PADDLE_ONLY_CPU
+#endif
 
 namespace paddle {
 namespace platform {
diff --git a/paddle/platform/gpu_info.h b/paddle/platform/gpu_info.h
index fb33db07bd..37665b97d7 100644
--- a/paddle/platform/gpu_info.h
+++ b/paddle/platform/gpu_info.h
@@ -63,4 +63,4 @@ void GpuMemcpyPeer(void *dst, int dst_device, const void *src, int src_device,
 }  // namespace platform
 }  // namespace paddle
 
-#endif  // PADDLE_ONLY_CPU
+#endif

From 36da82550af759fcfcaec571921851dd04bc4a3b Mon Sep 17 00:00:00 2001
From: chengduoZH <zhaochengduo@163.com>
Date: Tue, 10 Oct 2017 16:28:24 +0800
Subject: [PATCH 67/82] Add code comments

---
 paddle/operators/math/pooling.cc | 54 ++++++++++++++++++++++++++++++--
 paddle/operators/math/pooling.cu | 50 +++++++++++++++++++++++++++++
 2 files changed, 102 insertions(+), 2 deletions(-)

diff --git a/paddle/operators/math/pooling.cc b/paddle/operators/math/pooling.cc
index 5accde8b07..50cfb88bb5 100644
--- a/paddle/operators/math/pooling.cc
+++ b/paddle/operators/math/pooling.cc
@@ -18,6 +18,11 @@ namespace paddle {
 namespace operators {
 namespace math {
 
+/*
+ * All tensors are in NCHW format.
+ * Ksize, strides, and paddings have two elements, which represent height
+ * and width, respectively.
+ */
 template <typename PoolProcess, typename T>
 class Pool2dFunctor<platform::CPUPlace, PoolProcess, T> {
  public:
@@ -73,6 +78,11 @@ class Pool2dFunctor<platform::CPUPlace, PoolProcess, T> {
   }
 };
 
+/*
+ * All tensors are in NCHW format.
+ * Ksize, strides, and paddings have two elements, which represent height
+ * and width, respectively.
+ */
 template <typename PoolProcess, class T>
 class Pool2dGradFunctor<platform::CPUPlace, PoolProcess, T> {
  public:
@@ -135,6 +145,11 @@ class Pool2dGradFunctor<platform::CPUPlace, PoolProcess, T> {
   }
 };
 
+/*
+ * All tensors are in NCHW format.
+ * Ksize, strides, and paddings have two elements, which represent height
+ * and width, respectively.
+ */
 template <class T>
 class MaxPool2dGradFunctor<platform::CPUPlace, T> {
  public:
@@ -197,7 +212,7 @@ class MaxPool2dGradFunctor<platform::CPUPlace, T> {
 };
 
 template class MaxPool2dGradFunctor<platform::CPUPlace, float>;
-// template class MaxPool2dGradFunctor<platform::CPUPlace, double>;
+template class MaxPool2dGradFunctor<platform::CPUPlace, double>;
 
 template class Pool2dFunctor<platform::CPUPlace,
                              paddle::operators::math::MaxPool<float>, float>;
@@ -216,6 +231,11 @@ template class Pool2dGradFunctor<
 template class Pool2dGradFunctor<
     platform::CPUPlace, paddle::operators::math::AvgPoolGrad<double>, double>;
 
+/*
+ * All tensors are in NCDHW format.
+ * Ksize, strides, and paddings have three elements, which represent depth,
+ * height and width, respectively.
+ */
 template <typename PoolProcess, class T>
 class Pool3dFunctor<platform::CPUPlace, PoolProcess, T> {
  public:
@@ -286,6 +306,11 @@ class Pool3dFunctor<platform::CPUPlace, PoolProcess, T> {
   }
 };
 
+/*
+ * All tensors are in NCDHW format.
+ * Ksize, strides, and paddings have three elements, which represent depth,
+ * height and width, respectively.
+ */
 template <typename PoolProcess, class T>
 class Pool3dGradFunctor<platform::CPUPlace, PoolProcess, T> {
  public:
@@ -364,6 +389,11 @@ class Pool3dGradFunctor<platform::CPUPlace, PoolProcess, T> {
   }
 };
 
+/*
+ * All tensors are in NCDHW format.
+ * Ksize, strides, and paddings have three elements, which represent depth,
+ * height and width, respectively.
+ */
 template <class T>
 class MaxPool3dGradFunctor<platform::CPUPlace, T> {
  public:
@@ -440,7 +470,7 @@ class MaxPool3dGradFunctor<platform::CPUPlace, T> {
 };
 
 template class MaxPool3dGradFunctor<platform::CPUPlace, float>;
-// template class MaxPool3dGradFunctor<platform::CPUPlace, double>;
+template class MaxPool3dGradFunctor<platform::CPUPlace, double>;
 
 template class Pool3dFunctor<platform::CPUPlace,
                              paddle::operators::math::MaxPool<float>, float>;
@@ -459,6 +489,11 @@ template class Pool3dGradFunctor<
 template class Pool3dGradFunctor<
     platform::CPUPlace, paddle::operators::math::AvgPoolGrad<double>, double>;
 
+/*
+ * All tensors are in NCHW format.
+ * Ksize, strides, and paddings have two elements, which represent height
+ * and width, respectively.
+ */
 template <typename T>
 class MaxPool2dWithIndexFunctor<platform::CPUPlace, T> {
  public:
@@ -519,6 +554,11 @@ class MaxPool2dWithIndexFunctor<platform::CPUPlace, T> {
   }
 };
 
+/*
+ * All tensors are in NCHW format.
+ * Ksize, strides, and paddings have two elements, which represent height
+ * and width, respectively.
+ */
 template <typename T>
 class MaxPool2dWithIndexGradFunctor<platform::CPUPlace, T> {
  public:
@@ -563,6 +603,11 @@ template class MaxPool2dWithIndexGradFunctor<platform::CPUPlace, float>;
 template class MaxPool2dWithIndexFunctor<platform::CPUPlace, double>;
 template class MaxPool2dWithIndexGradFunctor<platform::CPUPlace, double>;
 
+/*
+ * All tensors are in NCDHW format.
+ * Ksize, strides, and paddings have three elements, which represent depth,
+ * height and width, respectively.
+ */
 template <typename T>
 class MaxPool3dWithIndexFunctor<platform::CPUPlace, T> {
  public:
@@ -637,6 +682,11 @@ class MaxPool3dWithIndexFunctor<platform::CPUPlace, T> {
   }
 };
 
+/*
+ * All tensors are in NCDHW format.
+ * Ksize, strides, and paddings have three elements, which represent depth,
+ * height and width, respectively.
+ */
 template <typename T>
 class MaxPool3dWithIndexGradFunctor<platform::CPUPlace, T> {
  public:
diff --git a/paddle/operators/math/pooling.cu b/paddle/operators/math/pooling.cu
index 4d50121de4..736327f4b7 100644
--- a/paddle/operators/math/pooling.cu
+++ b/paddle/operators/math/pooling.cu
@@ -149,6 +149,11 @@ __global__ void KernelMaxPool2DGrad(
   }
 }
 
+/*
+ * All tensors are in NCHW format.
+ * Ksize, strides, and paddings have two elements, which represent height
+ * and width, respectively.
+ */
 template <typename PoolProcess, typename T>
 class Pool2dFunctor<platform::GPUPlace, PoolProcess, T> {
  public:
@@ -190,6 +195,11 @@ class Pool2dFunctor<platform::GPUPlace, PoolProcess, T> {
   }
 };
 
+/*
+ * All tensors are in NCHW format.
+ * Ksize, strides, and paddings have two elements, which represent height
+ * and width, respectively.
+ */
 template <typename PoolProcess, typename T>
 class Pool2dGradFunctor<platform::GPUPlace, PoolProcess, T> {
  public:
@@ -234,6 +244,11 @@ class Pool2dGradFunctor<platform::GPUPlace, PoolProcess, T> {
   }
 };
 
+/*
+ * All tensors are in NCHW format.
+ * Ksize, strides, and paddings have two elements, which represent height
+ * and width, respectively.
+ */
 template <typename T>
 class MaxPool2dGradFunctor<platform::GPUPlace, T> {
  public:
@@ -456,6 +471,11 @@ __global__ void KernelMaxPool3DGrad(
   }
 }
 
+/*
+ * All tensors are in NCDHW format.
+ * Ksize, strides, and paddings have three elements, which represent depth,
+ * height and width, respectively.
+ */
 template <typename PoolProcess, class T>
 class Pool3dFunctor<platform::GPUPlace, PoolProcess, T> {
  public:
@@ -504,6 +524,11 @@ class Pool3dFunctor<platform::GPUPlace, PoolProcess, T> {
   }
 };
 
+/*
+ * All tensors are in NCDHW format.
+ * Ksize, strides, and paddings have three elements, which represent depth,
+ * height and width, respectively.
+ */
 template <typename PoolProcess, class T>
 class Pool3dGradFunctor<platform::GPUPlace, PoolProcess, T> {
  public:
@@ -556,6 +581,11 @@ class Pool3dGradFunctor<platform::GPUPlace, PoolProcess, T> {
   }
 };
 
+/*
+ * All tensors are in NCDHW format.
+ * Ksize, strides, and paddings have three elements, which represent depth,
+ * height and width, respectively.
+ */
 template <class T>
 class MaxPool3dGradFunctor<platform::GPUPlace, T> {
  public:
@@ -709,6 +739,11 @@ __global__ void KernelMaxPool2DWithIdxGrad(
   }
 }
 
+/*
+ * All tensors are in NCHW format.
+ * Ksize, strides, and paddings have two elements, which represent height
+ * and width, respectively.
+ */
 template <typename T>
 class MaxPool2dWithIndexFunctor<platform::GPUPlace, T> {
  public:
@@ -750,6 +785,11 @@ class MaxPool2dWithIndexFunctor<platform::GPUPlace, T> {
   }
 };
 
+/*
+ * All tensors are in NCHW format.
+ * Ksize, strides, and paddings have two elements, which represent height
+ * and width, respectively.
+ */
 template <typename T>
 class MaxPool2dWithIndexGradFunctor<platform::GPUPlace, T> {
  public:
@@ -903,6 +943,11 @@ __global__ void KernelMaxPool3DWithIdxGrad(
   }
 }
 
+/*
+ * All tensors are in NCDHW format.
+ * Ksize, strides, and paddings have three elements, which represent depth,
+ * height and width, respectively.
+ */
 template <typename T>
 class MaxPool3dWithIndexFunctor<platform::GPUPlace, T> {
  public:
@@ -951,6 +996,11 @@ class MaxPool3dWithIndexFunctor<platform::GPUPlace, T> {
   }
 };
 
+/*
+ * All tensors are in NCDHW format.
+ * Ksize, strides, and paddings have three elements, which represent depth,
+ * height and width, respectively.
+ */
 template <typename T>
 class MaxPool3dWithIndexGradFunctor<platform::GPUPlace, T> {
  public:

From f2e7cf21415fbdc0ae2f34b88b6cf307b37966f0 Mon Sep 17 00:00:00 2001
From: chengduoZH <zhaochengduo@163.com>
Date: Tue, 10 Oct 2017 22:08:33 +0800
Subject: [PATCH 68/82] fix InferShapeContextBase to InferShapeContext

---
 paddle/operators/pool_with_index_op.cc | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/paddle/operators/pool_with_index_op.cc b/paddle/operators/pool_with_index_op.cc
index ab933a3400..7b6afcfd1f 100644
--- a/paddle/operators/pool_with_index_op.cc
+++ b/paddle/operators/pool_with_index_op.cc
@@ -28,7 +28,7 @@ class MaxPoolWithIndexOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase *ctx) const override {
+  void InferShape(framework::InferShapeContext *ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("X"),
                    "X(Input) of Pooling should not be null.");
     PADDLE_ENFORCE(ctx->HasOutput("Out"),
@@ -73,7 +73,7 @@ class MaxPoolWithIndexOpGrad : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
  protected:
-  void InferShape(framework::InferShapeContextBase *ctx) const override {
+  void InferShape(framework::InferShapeContext *ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) must not be null.");
     PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")),
                    "Input(X@GRAD) should not be null.");

From a281b38393597e9c6342d365b3e0b7371194b97e Mon Sep 17 00:00:00 2001
From: Markus Kliegl <markus@coniri.com>
Date: Tue, 10 Oct 2017 10:53:02 -0700
Subject: [PATCH 69/82] Conv Shift Operator (#4591)

* conv_shift_op: initial implementation using Eigen

Limitations:
- both gradient outputs must be specified and are always computed
- explicit for loops => could be optimized in various ways
  (e.g., different memory layout)

* conv shift - gradient fixes

fix case when not all output gradients desired

* conv shift: minor cleanup

* conv shift - more minor cleanup

* conv shift: clean up & initial GPU implementation

* fix rebase issue
---
 paddle/operators/conv_shift_op.cc             | 206 ++++++++++++++++++
 paddle/operators/conv_shift_op.cu             | 194 +++++++++++++++++
 paddle/operators/conv_shift_op.h              |  33 +++
 .../v2/framework/tests/test_conv_shift_op.py  |  47 ++++
 4 files changed, 480 insertions(+)
 create mode 100644 paddle/operators/conv_shift_op.cc
 create mode 100644 paddle/operators/conv_shift_op.cu
 create mode 100644 paddle/operators/conv_shift_op.h
 create mode 100644 python/paddle/v2/framework/tests/test_conv_shift_op.py
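Before the diff itself, a minimal NumPy sketch of the circular convolution this operator computes; it mirrors the reference style of the accompanying Python test, and the helper name is illustrative:

```python
import numpy as np

def conv_shift_forward(x, y):
    """Circular convolution per batch row: x is (B, M), y is (B, N), N odd."""
    out = np.zeros_like(x)
    M, N = x.shape[1], y.shape[1]
    half = (N - 1) // 2
    for i in range(M):
        for j in range(N):
            # x's index wraps modulo M, centered on i via the half-width shift
            out[:, i] += x[:, (i + j - half) % M] * y[:, j]
    return out

x = np.random.rand(2, 8).astype("float32")
y = np.random.rand(2, 3).astype("float32")
print(conv_shift_forward(x, y).shape)  # (2, 8)
```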

diff --git a/paddle/operators/conv_shift_op.cc b/paddle/operators/conv_shift_op.cc
new file mode 100644
index 0000000000..e1e321ed5f
--- /dev/null
+++ b/paddle/operators/conv_shift_op.cc
@@ -0,0 +1,206 @@
+/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License. */
+
+#include "paddle/operators/conv_shift_op.h"
+#include "paddle/framework/eigen.h"
+
+namespace paddle {
+namespace operators {
+
+using framework::Tensor;
+template <typename T, int MajorType = Eigen::RowMajor,
+          typename IndexType = Eigen::DenseIndex>
+using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;
+
+class ConvShiftOp : public framework::OperatorWithKernel {
+ public:
+  using framework::OperatorWithKernel::OperatorWithKernel;
+
+ protected:
+  void InferShape(framework::InferShapeContext *ctx) const override {
+    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null.");
+    PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) should not be null.");
+    PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) should not be null.");
+
+    auto x_dims = ctx->GetInputDim("X");
+    auto y_dims = ctx->GetInputDim("Y");
+    PADDLE_ENFORCE_EQ(x_dims.size(), 2, "Input(X)'s rank should be 2.");
+    PADDLE_ENFORCE_EQ(y_dims.size(), 2, "Input(Y)'s rank should be 2.");
+    PADDLE_ENFORCE_EQ(x_dims[0], y_dims[0],
+                      "The 1st dimension of Input(X) and Input(Y) should "
+                      "be equal.");
+    PADDLE_ENFORCE_EQ(y_dims[1] % 2, 1,
+                      "The 2nd dimension of Input(Y) should be odd.");
+    PADDLE_ENFORCE_LE(y_dims[1], x_dims[1],
+                      "The 2nd dimension of Input(Y) should be less than or "
+                      "equal to the 2nd dimension of Input(X).");
+    ctx->SetOutputDim("Out", x_dims);
+    ctx->ShareLoD("X", /*->*/ "Out");
+  }
+};
+
+class ConvShiftGradOp : public framework::OperatorWithKernel {
+ public:
+  using framework::OperatorWithKernel::OperatorWithKernel;
+
+ protected:
+  void InferShape(framework::InferShapeContext *ctx) const override {
+    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null.");
+    PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) should not be null.");
+    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
+                   "Input(Out@GRAD) should not be null.");
+
+    auto x_grad_name = framework::GradVarName("X");
+    if (ctx->HasOutput(x_grad_name)) {
+      auto x_dims = ctx->GetInputDim("X");
+      ctx->SetOutputDim(x_grad_name, x_dims);
+    }
+
+    auto y_grad_name = framework::GradVarName("Y");
+    if (ctx->HasOutput(y_grad_name)) {
+      auto y_dims = ctx->GetInputDim("Y");
+      ctx->SetOutputDim(y_grad_name, y_dims);
+    }
+  }
+};
+
+class ConvShiftOpMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+  ConvShiftOpMaker(framework::OpProto *proto,
+                   framework::OpAttrChecker *op_checker)
+      : framework::OpProtoAndCheckerMaker(proto, op_checker) {
+    AddInput("X",
+             "(Tensor, default Tensor<float>), a 2-D tensor with shape B x M, "
+             "where B is the batch size and M is the data dimension.");
+    AddInput("Y",
+             "(Tensor, default Tensor<float>), a 2-D tensor with shape B x N, "
+             "where B is the batch size and N is the data dimension. N must "
+             "be odd.");
+    AddOutput("Out",
+              "(Tensor, default Tensor<float>), a 2-D tensor with shape B x M, "
+              "i.e., the same shape as X.");
+    AddComment(R"DOC(
+ConvShift Operator.
+
+A layer for circular convolution of two vectors,
+as used in the Neural Turing Machine: https://arxiv.org/abs/1410.5401
+
+The equation is:
+
+  \f[
+      Out[i] = \sum_{j=-(N-1)/2}^{(N-1)/2} X_{i+j} * Y_{j}
+  \f]
+
+where X's index is computed modulo M, and Y's index is computed modulo N.
+
+Both of the inputs `X` and `Y` can carry LoD (Level of Details) information.
+However, the output only shares the LoD information with input `X`.
+)DOC");
+  }
+};
+
+template <typename T>
+class ConvShiftKernel<platform::CPUPlace, T> : public framework::OpKernel<T> {
+ public:
+  void Compute(const framework::ExecutionContext &context) const override {
+    auto *X = context.Input<Tensor>("X");
+    auto *Y = context.Input<Tensor>("Y");
+    auto *Out = context.Output<Tensor>("Out");
+    Out->mutable_data<T>(context.GetPlace());
+
+    auto x = EigenMatrix<T>::From(*X);
+    auto y = EigenMatrix<T>::From(*Y);
+    auto out = EigenMatrix<T>::From(*Out);
+    out.setZero();
+
+    size_t batch_size = X->dims()[0];
+    size_t x_width = X->dims()[1];
+    size_t y_width = Y->dims()[1];
+    size_t y_half_width = (y_width - 1) / 2;
+
+    for (size_t k = 0; k < batch_size; ++k) {
+      for (size_t i = 0; i < x_width; ++i) {
+        for (size_t j = 0; j < y_width; ++j) {
+          int index = (i + j - y_half_width + x_width) % x_width;
+          out(k, i) += x(k, index) * y(k, j);
+        }
+      }
+    }
+  }
+};
+
+template <typename T>
+class ConvShiftGradKernel<platform::CPUPlace, T>
+    : public framework::OpKernel<T> {
+ public:
+  void Compute(const framework::ExecutionContext &context) const override {
+    auto *X = context.Input<Tensor>("X");
+    auto *Y = context.Input<Tensor>("Y");
+    auto *dOut = context.Input<Tensor>(framework::GradVarName("Out"));
+    auto *dX = context.Output<Tensor>(framework::GradVarName("X"));
+    auto *dY = context.Output<Tensor>(framework::GradVarName("Y"));
+
+    auto x = EigenMatrix<T>::From(*X);
+    auto y = EigenMatrix<T>::From(*Y);
+    auto dout = EigenMatrix<T>::From(*dOut);
+
+    auto x_dims = X->dims();
+    auto y_dims = Y->dims();
+    size_t batch_size = x_dims[0];
+    size_t x_width = x_dims[1];
+    size_t y_width = y_dims[1];
+    size_t y_half_width = (y_width - 1) / 2;
+
+    // The code below trades duplication for efficiency (keeping the if
+    // statement outside of the loops).
+    if (dX) {
+      dX->mutable_data<T>(context.GetPlace());
+      auto dx = EigenMatrix<T>::From(*dX);
+      dx.setZero();
+      for (size_t k = 0; k < batch_size; ++k) {
+        for (size_t i = 0; i < x_width; ++i) {
+          for (size_t j = 0; j < y_width; ++j) {
+            int index = (i + j - y_half_width + x_width) % x_width;
+            dx(k, index) += dout(k, i) * y(k, j);
+          }
+        }
+      }
+    }
+
+    if (dY) {
+      dY->mutable_data<T>(context.GetPlace());
+      auto dy = EigenMatrix<T>::From(*dY);
+      dy.setZero();
+      for (size_t k = 0; k < batch_size; ++k) {
+        for (size_t i = 0; i < x_width; ++i) {
+          for (size_t j = 0; j < y_width; ++j) {
+            int index = (i + j - y_half_width + x_width) % x_width;
+            dy(k, j) += x(k, index) * dout(k, i);
+          }
+        }
+      }
+    }
+  }
+};
+}  // namespace operators
+}  // namespace paddle
+
+namespace ops = paddle::operators;
+REGISTER_OP(conv_shift, ops::ConvShiftOp, ops::ConvShiftOpMaker,
+            conv_shift_grad, ops::ConvShiftGradOp);
+REGISTER_OP_CPU_KERNEL(conv_shift,
+                       ops::ConvShiftKernel<paddle::platform::CPUPlace, float>);
+REGISTER_OP_CPU_KERNEL(
+    conv_shift_grad,
+    ops::ConvShiftGradKernel<paddle::platform::CPUPlace, float>);
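
For reference, the circular convolution computed by the CPU kernel above can be written directly in NumPy. This is a minimal sketch, not part of the patch; `circular_conv_ref` is our name, and it mirrors the kernel's triple loop and index arithmetic.

```python
import numpy as np


def circular_conv_ref(x, y):
    """Circular convolution of x (B x M) with y (B x N); N odd, N <= M.

    out[k, i] accumulates x[k, (i + j - (N-1)/2) mod M] * y[k, j] over j,
    matching the Eigen-based CPU kernel above.
    """
    batch_size, x_width = x.shape
    y_width = y.shape[1]
    y_half_width = (y_width - 1) // 2
    out = np.zeros_like(x)
    for k in range(batch_size):
        for i in range(x_width):
            for j in range(y_width):
                index = (i + j - y_half_width) % x_width
                out[k, i] += x[k, index] * y[k, j]
    return out
```
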
diff --git a/paddle/operators/conv_shift_op.cu b/paddle/operators/conv_shift_op.cu
new file mode 100644
index 0000000000..145e966fe9
--- /dev/null
+++ b/paddle/operators/conv_shift_op.cu
@@ -0,0 +1,194 @@
+/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License. */
+
+#include "paddle/operators/conv_shift_op.h"
+#include "paddle/platform/cuda_helper.h"
+
+namespace paddle {
+namespace operators {
+
+using framework::Tensor;
+
+namespace {
+
+inline int div_up(int x, int y) { return (x + y - 1) / y; }
+
+// Some notes on the design:
+//
+// Each thread is responsible for computing a single output out[k, i].
+// Thread blocks are based on tiles of x with height 1 in the batch dimension.
+//
+// This design is based on the typical use case where the filter
+// y is fairly small. For large y, it would probably be more efficient
+// to also tile across y.
+template <typename T>
+__global__ void conv_shift_forward(const T *x, const T *y, T *out, int x_width,
+                                   int y_width, int y_half_width,
+                                   int batch_size) {
+  extern __shared__ T mem[];
+
+  int tx = threadIdx.x;
+  int i = blockIdx.x * blockDim.x + tx;  // global x index
+  int k = blockIdx.y;                    // batch index
+
+  // Check if we are in a boundary block with fewer x's to process than
+  // blockDim.x.
+  int num_x = (blockIdx.x == gridDim.x - 1 && x_width % blockDim.x != 0)
+                  ? (x_width % blockDim.x) : blockDim.x;
+
+  T *sx = mem;
+  T *sx_pad = &mem[num_x];
+  T *sy = &mem[blockDim.x + y_width];
+
+  // Collaboratively load y[k, :] and length-y padding of x into shared memory.
+  int pad_start = blockIdx.x * blockDim.x + num_x + x_width - y_half_width;
+  for (int j = tx; j < y_width; j += blockDim.x) {
+    sy[j] = y[k * y_width + j];
+    sx_pad[j] = x[k * x_width + (pad_start + j) % x_width];
+  }
+
+  // Load a cyclically shifted slice of x into shared memory.
+  if (tx < num_x) {
+    int load_i = (i - y_half_width + x_width) % x_width;
+    sx[tx] = x[k * x_width + load_i];
+  }
+  // All threads must reach the barrier; returning before it is undefined.
+  __syncthreads();
+  if (tx >= num_x) return;
+
+  // Compute dot product of sx[tx:tx + y_width] and sy.
+  T sum = 0;
+  for (int j = 0; j < y_width; ++j) {
+    sum += sx[tx + j] * sy[j];
+  }
+
+  // Save to out[k, i].
+  out[k * x_width + i] = sum;
+}
+
+// Compute x gradient - initial naive implementation with atomic add.
+template <typename T>
+__global__ void conv_shift_dx(const T *dout, const T *y, T *dx, int x_width,
+                              int y_width, int y_half_width, int batch_size) {
+  int i = blockIdx.x * blockDim.x + threadIdx.x;  // x index
+  int j = blockIdx.y;                             // y index
+  int k = blockIdx.z;                             // batch index
+
+  if (i < x_width) {
+    int index = (i + j - y_half_width + x_width) % x_width;
+    atomicAdd(&dx[k * x_width + index],
+              dout[k * x_width + i] * y[k * y_width + j]);
+  }
+}
+
+// Compute y gradient - initial naive implementation with atomic add.
+template <typename T>
+__global__ void conv_shift_dy(const T *x, const T *dout, T *dy, int x_width,
+                              int y_width, int y_half_width, int batch_size) {
+  int i = blockIdx.x * blockDim.x + threadIdx.x;  // x index
+  int j = blockIdx.y;                             // y index
+  int k = blockIdx.z;                             // batch index
+
+  if (i < x_width) {
+    int index = (i + j - y_half_width + x_width) % x_width;
+    atomicAdd(&dy[k * y_width + j],
+              x[k * x_width + index] * dout[k * x_width + i]);
+  }
+}
+}  // namespace
+
+template <typename T>
+class ConvShiftKernel<platform::GPUPlace, T> : public framework::OpKernel<T> {
+ public:
+  void Compute(const framework::ExecutionContext &context) const override {
+    const Tensor *X = context.Input<Tensor>("X");
+    const Tensor *Y = context.Input<Tensor>("Y");
+    Tensor *Out = context.Output<Tensor>("Out");
+    const T *x_data = X->data<T>();
+    const T *y_data = Y->data<T>();
+    T *out_data = Out->mutable_data<T>(context.GetPlace());
+
+    int batch_size = X->dims()[0];
+    int x_width = X->dims()[1];
+    int y_width = Y->dims()[1];
+    int y_half_width = (y_width - 1) / 2;
+
+    const int x_per_block = 256;
+    int num_x_blocks = div_up(x_width, x_per_block);
+    int mem_per_block = (x_per_block + 2 * y_width) * sizeof(T);
+
+    dim3 grid_dim(num_x_blocks, batch_size);
+
+    auto stream = reinterpret_cast<const platform::CUDADeviceContext &>(
+                      context.device_context())
+                      .stream();
+
+    conv_shift_forward<T><<<grid_dim, x_per_block, mem_per_block, stream>>>(
+        x_data, y_data, out_data, x_width, y_width, y_half_width, batch_size);
+  }
+};
+
+template <typename T>
+class ConvShiftGradKernel<platform::GPUPlace, T>
+    : public framework::OpKernel<T> {
+ public:
+  void Compute(const framework::ExecutionContext &context) const override {
+    const Tensor *X = context.Input<Tensor>("X");
+    const Tensor *Y = context.Input<Tensor>("Y");
+    const Tensor *dOut = context.Input<Tensor>(framework::GradVarName("Out"));
+    const T *x_data = X->data<T>();
+    const T *y_data = Y->data<T>();
+    const T *dout_data = dOut->data<T>();
+
+    Tensor *dX = context.Output<Tensor>(framework::GradVarName("X"));
+    Tensor *dY = context.Output<Tensor>(framework::GradVarName("Y"));
+
+    int batch_size = X->dims()[0];
+    int x_width = X->dims()[1];
+    int y_width = Y->dims()[1];
+    int y_half_width = (y_width - 1) / 2;
+
+    auto stream = reinterpret_cast<const platform::CUDADeviceContext &>(
+                      context.device_context())
+                      .stream();
+
+    const int x_per_block = 256;
+    int num_x_blocks = div_up(x_width, x_per_block);
+    dim3 grid_dim(num_x_blocks, y_width, batch_size);
+
+    if (dX) {
+      T *dx_data = dX->mutable_data<T>(context.GetPlace());
+      cudaMemsetAsync(dx_data, 0, dX->numel() * sizeof(T), stream);
+      conv_shift_dx<T><<<grid_dim, x_per_block, 0, stream>>>(
+          dout_data, y_data, dx_data, x_width, y_width, y_half_width,
+          batch_size);
+    }
+    if (dY) {
+      T *dy_data = dY->mutable_data<T>(context.GetPlace());
+      cudaMemsetAsync(dy_data, 0, dY->numel() * sizeof(T), stream);
+      conv_shift_dy<T><<<grid_dim, x_per_block, 0, stream>>>(
+          x_data, dout_data, dy_data, x_width, y_width, y_half_width,
+          batch_size);
+    }
+  }
+};
+}  // namespace operators
+}  // namespace paddle
+
+namespace ops = paddle::operators;
+REGISTER_OP_GPU_KERNEL(conv_shift,
+                       ops::ConvShiftKernel<paddle::platform::GPUPlace, float>);
+REGISTER_OP_GPU_KERNEL(
+    conv_shift_grad,
+    ops::ConvShiftGradKernel<paddle::platform::GPUPlace, float>);
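
The two gradient kernels above scatter contributions into dx and dy with atomicAdd, one thread per (i, j, k) triple. The same accumulation written serially in NumPy (a sketch, not part of the patch; `circular_conv_grad_ref` is our name):

```python
import numpy as np


def circular_conv_grad_ref(x, y, dout):
    """Serial equivalent of the conv_shift_dx / conv_shift_dy kernels."""
    y_half_width = (y.shape[1] - 1) // 2
    dx = np.zeros_like(x)
    dy = np.zeros_like(y)
    for k in range(x.shape[0]):          # blockIdx.z on the GPU
        for j in range(y.shape[1]):      # blockIdx.y on the GPU
            for i in range(x.shape[1]):  # one thread per i on the GPU
                index = (i + j - y_half_width) % x.shape[1]
                dx[k, index] += dout[k, i] * y[k, j]  # atomicAdd in conv_shift_dx
                dy[k, j] += x[k, index] * dout[k, i]  # atomicAdd in conv_shift_dy
    return dx, dy
```
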
diff --git a/paddle/operators/conv_shift_op.h b/paddle/operators/conv_shift_op.h
new file mode 100644
index 0000000000..5a160b0f16
--- /dev/null
+++ b/paddle/operators/conv_shift_op.h
@@ -0,0 +1,33 @@
+/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License. */
+
+#pragma once
+#include "paddle/framework/op_registry.h"
+
+namespace paddle {
+namespace operators {
+
+template <typename Place, typename T>
+class ConvShiftKernel : public framework::OpKernel<T> {
+ public:
+  void Compute(const framework::ExecutionContext &context) const override;
+};
+
+template <typename Place, typename T>
+class ConvShiftGradKernel : public framework::OpKernel<T> {
+ public:
+  void Compute(const framework::ExecutionContext &context) const override;
+};
+}  // namespace operators
+}  // namespace paddle
diff --git a/python/paddle/v2/framework/tests/test_conv_shift_op.py b/python/paddle/v2/framework/tests/test_conv_shift_op.py
new file mode 100644
index 0000000000..b9ab21a06a
--- /dev/null
+++ b/python/paddle/v2/framework/tests/test_conv_shift_op.py
@@ -0,0 +1,47 @@
+import unittest
+import numpy as np
+from op_test import OpTest
+
+
+def conv_shift_forward(x, y):
+    out = np.zeros_like(x)
+    M = x.shape[1]
+    N = y.shape[1]
+    y_half_width = (N - 1) / 2
+    for i in xrange(M):
+        for j in xrange(N):
+            out[:, i] += x[:, (i + j + M - y_half_width) % M] * y[:, j]
+    return out
+
+
+class TestConvShiftOp(OpTest):
+    def setUp(self):
+        self.op_type = "conv_shift"
+
+        batch_size = 4
+        x_dim = 17
+        y_dim = 3  # must be odd and <= x_dim
+        x = np.random.random((batch_size, x_dim)).astype("float32")
+        y = np.random.random((batch_size, y_dim)).astype("float32")
+        self.inputs = {'X': x, 'Y': y}
+
+        out = conv_shift_forward(x, y)
+        self.outputs = {'Out': out}
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad_normal(self):
+        self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.05)
+
+    def test_check_grad_ignore_x(self):
+        self.check_grad(
+            ['Y'], 'Out', max_relative_error=0.05, no_grad_set=set("X"))
+
+    def test_check_grad_ignore_y(self):
+        self.check_grad(
+            ['X'], 'Out', max_relative_error=0.05, no_grad_set=set('Y'))
+
+
+if __name__ == '__main__':
+    unittest.main()
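
Beyond OpTest's built-in gradient checker, the analytic x-gradient can be validated against central finite differences of the reference function above. A standalone sketch, not part of the patch; `analytic_dx` and the tolerances are our choices:

```python
import numpy as np


def conv_shift_forward(x, y):
    # Same reference as in the test, with integer division made explicit.
    out = np.zeros_like(x)
    M, N = x.shape[1], y.shape[1]
    half = (N - 1) // 2
    for i in range(M):
        for j in range(N):
            out[:, i] += x[:, (i + j + M - half) % M] * y[:, j]
    return out


def analytic_dx(x, y, dout):
    # dL/dx: scatter dout through the same circular index map.
    dx = np.zeros_like(x)
    M, N = x.shape[1], y.shape[1]
    half = (N - 1) // 2
    for i in range(M):
        for j in range(N):
            dx[:, (i + j + M - half) % M] += dout[:, i] * y[:, j]
    return dx


np.random.seed(0)
x = np.random.rand(4, 17)
y = np.random.rand(4, 3)
dout = np.random.rand(4, 17)

# Central difference of L = sum(dout * forward(x, y)) w.r.t. each x[k, i].
eps = 1e-6
num_dx = np.zeros_like(x)
for k in range(x.shape[0]):
    for i in range(x.shape[1]):
        xp, xm = x.copy(), x.copy()
        xp[k, i] += eps
        xm[k, i] -= eps
        num_dx[k, i] = np.sum(
            dout * (conv_shift_forward(xp, y) - conv_shift_forward(xm, y))
        ) / (2 * eps)

assert np.allclose(analytic_dx(x, y, dout), num_dx, atol=1e-4)
```
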

From 805639b16c5eb8af6f689a3f5a311d389a88df07 Mon Sep 17 00:00:00 2001
From: Yu Yang <yuyang18@baidu.com>
Date: Wed, 11 Oct 2017 01:59:48 +0800
Subject: [PATCH 70/82] Fix compile error in linux

---
 paddle/framework/CMakeLists.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt
index 3e0e0f5903..1bf80b3e58 100644
--- a/paddle/framework/CMakeLists.txt
+++ b/paddle/framework/CMakeLists.txt
@@ -19,7 +19,7 @@ cc_test(scope_test SRCS scope_test.cc DEPS scope)
 proto_library(framework_proto SRCS framework.proto)
 
 cc_library(attribute SRCS attribute.cc DEPS framework_proto)
-cc_library(proto_desc SRCS var_desc.cc op_desc.cc block_desc.cc program_desc.cc DEPS attribute)
+cc_library(proto_desc SRCS var_desc.cc op_desc.cc block_desc.cc program_desc.cc DEPS attribute ddim)
 cc_library(op_proto_maker SRCS op_proto_maker.cc DEPS framework_proto attribute)
 cc_test(op_proto_maker_test SRCS op_proto_maker_test.cc DEPS op_proto_maker)
 cc_library(op_info SRCS op_info.cc DEPS attribute framework_proto proto_desc)

From fb2ad4c9490925d49a2f0d9a641472137e308876 Mon Sep 17 00:00:00 2001
From: Yu Yang <yuyang18@baidu.com>
Date: Tue, 10 Oct 2017 13:10:58 -0700
Subject: [PATCH 71/82] Change PythonAPI `.proto` to `.desc`

---
 doc/design/python_api.md            | 12 ++++++------
 python/paddle/v2/framework/graph.py | 30 ++++++++++++++---------------
 2 files changed, 21 insertions(+), 21 deletions(-)

diff --git a/doc/design/python_api.md b/doc/design/python_api.md
index 6213da65c8..c4665e44fc 100644
--- a/doc/design/python_api.md
+++ b/doc/design/python_api.md
@@ -22,7 +22,7 @@ Whenever we create a block, we need to set its parent block to the current block
 ```python
 class Program(objects):
     def __init__(self):
-        self.proto = core.NewProgram() # a C++ ProgramDesc pointer.
+        self.desc = core.NewProgram() # a C++ ProgramDesc pointer.
         self.blocks = vector<Block>()
         self.blocks.append(Block(self, -1)) # the global block
         self.current_block = 0          # initialized to the global block
@@ -57,7 +57,7 @@ A [Block](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/block.m
 ```python
 class Block(objects):
     def __init__(self, program, parent_idx):
-        self.proto = core.NewBlock(program.proto)
+        self.desc = core.NewBlock(program.desc)
         self.program = program
         self.vars = map<string, Variable>()
         self.ops = vector<Operator>()
@@ -98,11 +98,11 @@ class Operator(object):
                 outputs,# dict<string, Variable>
                  attrs   # dict<string, Any>
                  ):
-        self.proto = core.NewOpDesc(block.proto, type, inputs, outputs, attrs)
-        core.infer_shape(self.proto, inputs, outputs)
+        self.desc = core.NewOpDesc(block.desc, type, inputs, outputs, attrs)
+        core.infer_shape(self.desc, inputs, outputs)
 
     def type(self):
-        return self.proto.type()
+        return self.desc.type()
 ```
 
 `Operator` creates the `OpDesc` message in C++ space, so that it can call the `InferShape` function, which is in C++.
@@ -124,7 +124,7 @@ class Variable(object):
             name = unique_name_generator()
         self.name = name
         self.block = block
-        self.proto = core.NewVarDesc(block.proto, name, shape, lod_level)
+        self.desc = core.NewVarDesc(block.desc, name, shape, lod_level)
         self.writer = None
 ```
 
diff --git a/python/paddle/v2/framework/graph.py b/python/paddle/v2/framework/graph.py
index 6f2a76a983..7fb72c3638 100644
--- a/python/paddle/v2/framework/graph.py
+++ b/python/paddle/v2/framework/graph.py
@@ -11,18 +11,18 @@ class Variable(object):
 
         if name is None:
             name = Variable._unique_var_name_()
-        self.proto = self.block.proto.new_var(name)
+        self.desc = self.block.desc.new_var(name)
 
         if shape is not None:
-            self.proto.set_shape(shape)
+            self.desc.set_shape(shape)
 
         if dtype is not None:
             # TODO(yuyang18): Convert dtype from numpy.dtype
-            self.proto.set_data_type(dtype)
+            self.desc.set_data_type(dtype)
 
         if lod_level is not None:
             # TODO(yuyang18): set_lod_level is not defined.
-            self.proto.set_lod_level(lod_level)
+            self.desc.set_lod_level(lod_level)
 
         self.block.vars[name] = self
         self.op = None
@@ -38,13 +38,13 @@ class Variable(object):
 class Operator(object):
     def __init__(self,
                  block,
-                 proto,
+                 desc,
                  type=None,
                  inputs=None,
                  outputs=None,
                  attrs=None):
         self.block = block
-        self.proto = proto
+        self.desc = desc
         if type is not None:
             # TODO.
             pass
@@ -63,31 +63,31 @@ class Operator(object):
 
 class Block(object):
     def __init__(self, program, idx):
-        self.proto = program.proto.block(idx)
+        self.desc = program.desc.block(idx)
         self.vars = dict()  # var_name --> var
         self.ops = collections.deque()  # operator list
         self.program = program
 
     @property
     def parent_idx(self):
-        return self.proto.parent
+        return self.desc.parent
 
     @property
     def idx(self):
-        return self.proto.id
+        return self.desc.id
 
     def create_var(self, *args, **kwargs):
         return Variable(self, *args, **kwargs)
 
     def append_op(self, *args, **kwargs):
-        op_proto = self.proto.append_op()
-        op = Operator(self, op_proto, *args, **kwargs)
+        op_desc = self.desc.append_op()
+        op = Operator(self, op_desc, *args, **kwargs)
         self.ops.append(op)
         return op
 
     def prepend_op(self, *args, **kwargs):
-        op_proto = self.proto.prepend_op()
-        op = Operator(self, op_proto, *args, **kwargs)
+        op_desc = self.desc.prepend_op()
+        op = Operator(self, op_desc, *args, **kwargs)
         self.ops.appendleft(op)
         return op
 
@@ -104,7 +104,7 @@ class Program(object):
     def __init__(self):
         assert not hasattr(self.__class__,
                            '_instance'), 'Do not call constructor directly!'
-        self.proto = core.ProgramDesc.instance()
+        self.desc = core.ProgramDesc.instance()
         self.blocks = [Block(self, 0)]
         self.current_block_idx = 0
 
@@ -116,7 +116,7 @@ class Program(object):
 
     def create_block(self):
         new_block_idx = len(self.blocks)
-        self.proto.append_block(self.current_block().proto)
+        self.desc.append_block(self.current_block().desc)
         self.current_block_idx = new_block_idx
         self.blocks.append(Block(self, self.current_block_idx))
         return self.current_block()
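
A usage sketch of the renamed attribute (not part of the patch): after this change every Python wrapper exposes its C++ descriptor as `.desc`. The `Program.instance()` accessor is an assumption on our part, since the diff shows only the guarded constructor.

```python
from paddle.v2.framework.graph import Program

# Program.instance() is assumed; graph.py above only shows that calling the
# constructor directly is forbidden once an instance exists.
prog = Program.instance()
block = prog.current_block()
var = block.create_var(name="x", shape=[13, 7])
op = block.append_op()

# Each wrapper now holds the underlying C++ descriptor as `.desc`
# (previously `.proto`).
for wrapped in (prog, block, var, op):
    assert hasattr(wrapped, "desc")
```
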

From ef4132051c0fc88394d75dc7c482a024fc70663f Mon Sep 17 00:00:00 2001
From: zchen0211 <chenzhuoyuan07@gmail.com>
Date: Tue, 10 Oct 2017 13:43:49 -0700
Subject: [PATCH 72/82] gan design with graph

---
 doc/design/gan_api.md   |   7 ++++++-
 doc/design/test.dot.png | Bin 0 -> 59401 bytes
 2 files changed, 6 insertions(+), 1 deletion(-)
 create mode 100644 doc/design/test.dot.png

diff --git a/doc/design/gan_api.md b/doc/design/gan_api.md
index 1a7d0df116..a1626e50d7 100644
--- a/doc/design/gan_api.md
+++ b/doc/design/gan_api.md
@@ -27,10 +27,15 @@ In our GAN design, we wrap it as a user-friendly easily customized python API to
 | Concat op (done)          | ?                 | N (Cond) |
 | Repmat op (done)          | ?                 | N (Cond) |
 
+<p align="center">
+<img src="./test.dot.png" width = "90%" align="center"/><br/>
+The overall running logic of the GAN. The black solid arrows indicate the forward pass; the green dashed arrows indicate the backward pass of generator training; the red dashed arrows indicate the backward pass of discriminator training. The backward pass along the green (red) arrows should only update the parameters in the green (red) boxes.
+</p>
+
 
 <p align="center">
 <img src="./dcgan.png" width = "90%" align="center"/><br/>
-Borrow this photo from the original DC-GAN paper.
+Photo borrowed from the original DC-GAN paper.
 </p>
 
 ## The Conditional-GAN might be a class. 
diff --git a/doc/design/test.dot.png b/doc/design/test.dot.png
new file mode 100644
index 0000000000000000000000000000000000000000..768e5cac4b14201f378896bb3bcde9e6ee540414
GIT binary patch
literal 59401
[59,401 bytes of base85-encoded binary data for doc/design/test.dot.png omitted]
z29CI%3~;C3IT_Xo+CDs+Yfzpbla@Ss>=+?0)ipE>;1lNItpjNf3njm>&<}GwP&N6J
znz5;g6@+p0fxdX&!TAtHgPJtCkhvgjrN@=7j;WT|wFHdW5K1iJ$hRduDm1;Q5v=#N
z*D&zxoMawFOH2a3&%icJ7AjEDqJhhc8K<D<%)@94YM`W5vijp+zgDAgMZZ!)Qt~>P
zfzS+Sr>e1$L7^x7_^kKi?Ciscx;HA}!mpHcCphBHouD{%9UUDo#kUYLApxGoPeon0
zaG?d?g)l}*zXpPBY{ykJG&epj>Pd!m<`3|2vy?oaxp%+kNl_7jm617r>R&#4_U!BE
zOUtIXMz95bAOfU9tRgHbs)8B?g_43YI4mk)no<;w9b>|sK_R}9ipP(0f=twqxv(&e
z!5IagPOE_cV}9{s1Uds`1xW|7pujTs#O=NQ%TgZhKkoUsE8}8=m}s%i{}nN5{#Ojg
z#89LC*1q}KQ<`;*o-&Tx*w#JBC}ie8d0`;RO0lomOdnS@aj|@Nmk+D2uKjpvsqC_X
zecsht$uP>3fhI{!1lP3(-KHrcBO@Rq=g99DVa%%BDJ9Ah6lUPCFMu7u<HqnPu=kjl
zLPriQOn>>iY<7%keq{5kpCuO+iiK7N@V+EuWR5QfGtxQ!vgt{tY`aAxDXAb^awFch
z&)CUYig5E1WSjvly3OZ*St=}o@JwG@B7mKMdf_eKqdhKNGL!T+&}7NKyi-RfCCQPQ
zU*TcN)8%eyMXW5k-iF=-sY;jiB_q*3Vmw$|sQcgF)*$T0$p}fGII^0JlV0jdOYzB5
zt`<dJVo4SxaPI#Y50qQq)^-PY%?~?cBY3X;PWZhvcfE4u{E7d5{r-;|5a{fc{eYq&
zlg<8J*D%txb^C^=XQMUl)`!UQy8)}w5nh)Q*}aB`HYYcEr9_b`@c`Y)%H9H>f8TPC
z?{S<WO3wPZwze`+`oh9-)v55F&mtRW8X6Rg;;;Jd4)e(mujQ(?GjfuVp<kI5Pf?<;
zs(Uaw5h?HU=GM1oTO}l^5}s0RthHgla*%&E)xFWUyzI+TOw4$O`S*$T+YuIi!34{D
z-rv5pBK2qD(%`{^J~fKc0+w7~@CqhI#oQ2@C=_n)d-2l0Uhll@!2E$VpDAaH)lzA3
z;y|U$rX%CU6KA<bN;?|wS?z9|k4X7({;vl=+^LyAT)HsS#Bgf4(QMZ_`A5tj48D6a
z@w?&ABy#Z4><9NcTXVHP`70XM|0G(rHVaoQ`5xK!MH(iXrZI&>y-uR$Wr%E7v)Odp
zUQw4n`y{MX-_S^gtwLdJbwPeEK39Cats`}Fj*c*8Z&7XGT14KkiP2u?&c8je<%=h}
z=JwL<oYau5A&U*JDJ3OipvkCPVuE(*h&8RYeSP_}`0(yKS8l9#Rh@1-;B1Y1O$fV+
zHZr_>qw3`ggA)I~PhQC!^u2fP2PiZ1d;Z%Ubokkxmzk5{7M)bs3k|<r9FhnGw{G1w
z+;>26C_XrZipZBylL`w96B!*;w69$B0@wxFi{_iyE~3{#tUz#e4$3lqI7)11Krzd$
zUc!Zp-UR~KVdbps>}ufcPG4&1lJs_m<v(G{3F~L(lA!QKXH_qaGhIcthe1bZ|Au28
z<n+xb3%^J_iX#fx7Ti?UUK)^h;5snf!Cq^_Lw10qgv7r|zTH<+Btg;TlCsg+nPJKG
zj;xS6gmzJHhQs9qH-t)}3bfwf9bh|!<yb0ZiUCU=$v{f_?2s?U;DkXkm)kq$P)YUb
zLZDX0wSPc`m@%$byGy^s07B#bY2TbEzjfag4VG72s`G$axw@Lm7ZBR$)zn@J88xBo
ztA@m(s9_yrN<N!}%yBEl0CuteEv8(wx*zV4z5A{cgdF;q9GUpLrp3i!{{EZR?hbQA
zcwy*7(!_$KZMbsS*?lHnVPxdv@+(!m?P@%W4Ib{cj6;(1*)Vm1nq%VS53VY$;c5H7
z|A8AwwvT>BeMgFd3Q>7h)!F6SvnA=hMlJ{Nik>-f@w?%UCN@<oD@zWzd|6Sm!gELo
zCmsw9Dhfzal<|?F*<ZgXZ9U9jS<!>Qe|vOD;mpORh|hMlK+uWyj>v3BE-Z%s+uHdM
z9u0t>1@5^5_zq->C@hL_`A|_lJ~9hMb0VjE@(mw*gX_4Tgsbbm0|#obL<`oi$UGt0
zVh;GUhB&^0iVPn!;;)74ya9I~YM}OhUr!Ib#jfUi$H9~#eNIvbfckI}B5{BRASMw8
z1qzTY+}s$LXanL$iud1@%lu9*E-2!J=dQ}EOkFyL9fSLmLP4pGj^JSk7Ez`mNQWMo
zo8re%D8fkrr15aekFL&60yhznH?m+N)!o^3&M&i|fP+}E5aI<Y{wk1&LD(b>N+%de
z6Vo2>K9vBS5jcyCS0eYh?3G+g(-miAO>Q&}RY;iC*lCf0fTKQ$G?Un(p#jFr5!{c7
z*r@46m@c3SNli<ubx%UW2UY;`L<5b9)CAGOeLglmZV9}RbmRoJlW@&nbxiViuK<k;
zL!zv(O~LK01LcLCiz^63j)P7zGoXL}>!yP6g4bM&E#rsG9~D9AwF_p4sp);thIr%3
zoI!^|l91fdw1dbrjG}{YvO`!n2r!`n{@hdT!nW<({UHZY1~z*DLaViuairc5zgk~{
zm1V_Xku2WEIvyS^Ag8oG00-c{Zg?{(srSL?aYMsaJYFL-hbT&*ON#+B9|j;!iHWBl
zmh4F^_5Lddtx7?0u@GfweEcYs%lA?KRHRAYf$Rfybt_m47(fz2X@M+-@R$GP<DW7x
z03#UW(<9)3q1mC1dXmT|0S>&oh+RwMDc~-bSI-qZe5eur%Ktio$OP=#zJk;y0HkQQ
zv~)Tujniq2iyzQ~A;L*mhz67IXl&eqj7bZueyGd#0D0Jf-wNOHV-%QTpMw5T7}s~+
zGJYx~B0>-%CL-2EVgbhB1LAmJv{ep=IZG2$(KM6g?x6{QE58-6Y4-j5zEj4Duu4lo
z^%q2l#dURls2er0e4a1g8_%X)!s$n0p1E<)Ih{+FF6I8ZhDJXl3k%q3#jDcXHUD}Q
z3^WxQgBm4zq5#x@!p7R#8_1Q_Q*}V!Geop*0Xwd-qfg9%_y2GKI&Q=M@hB*`Hm!w+
zJ!0bGRG@#viP^lao$zbpzqWF21dKLp5G80?-}5GPY~E?I(_s*VMI|gUI@NpN+>_j?
zsq1|bN9SF#d@>%1Wvg5wdMDMc0*9}99;j<<48qI!ioOfhEaiU`Sa4MSQDFDo;Tgc)
zNnu}lr*L-7R=EeLY=}?_qh5%brZ1ZI3nZ3p7^+1`Zvd2ka;r&TTm`7Ju+6>KVAl9>
z>$`8?zU5BTT(m=69vl$k-?&|u!+K(d7vfD!N<9p={V$}GKe*K#b8>QsymT9AnDDPl
z0nfRD|Hu8DoDeKiBXkgW#h3q6^bwqE%I2M(uI^z}ldremLFvj5Iv+A%JjJ@5lUq1l
z*=)p%?)5PPjy{IpAq6BKnk1e+Ct6YH-oO*H{WaF=Yx<@+9h>$)elL-g;$9Kv<MYKu
zMznaK?!0c+<%<@Ue+f9KB5en@M~FwPikceH9cscGncwT)24A27gujc{zYa>`KBs*%
zV_8N59MQmupaDCosW~jWDHDNuvuE#Y<vqxRP&jsuMjQxx4d2r+(#8e0QH^SB^OltA
zgZBah=?RFAI~;cLtC5#^s_Eq-cw!z>b!X=$f&~D!`+=>_2P0b4S|~(3)rJhvaQQMd
z6$yze>;rgVRfd8yfZUbk<xc=(uT+c@BIg>UMy|L&+R$qSV-f2sQZZ=r0QU|NtKp`G
zps;@YcnmP{WQq>N^G5_@TmM1df(((57@+0Rd&E$Np>??<c%$p*tf8k(M{Tc*(SjZK
zZi`;RMFae*5Y&!}G^u0DInjvyGR8DwzYO_3b56ejiMD0jZsCMeWQ5@Brjdv4n&Ojy
zRlt`X!|jnc2MTl6`<LjNd;p4T3rCMF+x02|qWIvQ-F@+OeIFwuBZ(%2FT6_&#QyW8
zUM<~Ir;mwi&HQlDmrqERv|0rbXfWY<Lq`nsr~X7TDXn8e33q@*2_R9;C$?f`3o*oZ
z6D&%C>jwno#U&-9VeG^`T<DX5w&gaUB%plu%f8k>WZxHu7f>{tIzIO}BO?G8<S`VI
z{K3lie|w=2wFT2#>hib&#Qu>sKmdqtTk3hTb#|Zp%i3C{($pjk!8BYBMZaqS!#og3
z&uuGnu^%r$OP#1V2}aqe?e4xGLbceOTRv!>OFhB_d;8A9V2HBmisj0@Ue+y|9sHKb
zhU#?xSMBzx(=Ogx3Gb)SE`;({%YDx&oJYX<x;8d8p^H~}#I964M7BC(06gshtRITo
z7tc3&n`rKuG&D3Ml42CdL$ZvohJUW&{EOYR11p75FTGI1EiT(lHotnhsHv&B9dy=3
znA-OA6M~--@|g2mb!9S2N^XNFj{yP}v_9kvBPtb`N}PQguedwB49G9SOX?FL^VKKN
z$0ZK&z=3|oe?eyjvd#b;6eM1O0NjX*0g5EpWho)xyTAF^>9sJ-iS`+^1sT;#r@=ac
z#@Y&Nn`kW|AwleE32K2B9Y!_K5gP~Sj=xMygk!)FaMs*5L0P3ZTZ1!a1h7?z_fJu=
z+5Zo#Px9#0!v$gd^c)M~3C=ljTfk1U!W5v_E=HJ#7DUW(A<y*-4qi<%A$-%ZZ{Or+
zBrpH`@#8S~wjkA;PB>2D;1G9lqGbacgAp_iV%-l7X<&KzwcImqNq1!&2GSzse39k=
z`6H+V5mWK-uta9RfQSy+QvkYxwZyT?^9LPfSg}yB?W%~CI{8t&TEx;CAQb>NdTq!!
zfK@(1O~w;&6QPhk@%V~V@+%C^L<B$tGV-d#6b)8<4bm*ceUtq|hDpmu^UX9LG6I4I
z7Zsc8>nnkba~}>Y!!HJ4cLA?~LN^nrXO!J(P0IllIA6IU2<&V4{1%+R-IyZzTOM>c
zjJ*VcJ&wdci!uiM<-Eg^6I!-JJ_ON|WAW?3C$<}LT$~*pxAEN{A3wiaN=g72$ME3n
zhuz5JA(d2-UzsOxGpquP9`UaMmHNShzZ0WF6BC*nHf&&t|0&n$0B3uKs<lFPsPWh-
zIF><+@=oh}N}O&06oeip{gvqfG&7~fVaAq*Oew3p2jW{c%29Q7n`tj=>(LFIbL4ua
z0PZom5xIKJ8X-y{n6-R-!}}j0Pyo;!19|E7-}&aN8uxJ95K20tZhP-u-JXzJXg+S*
z=a}--?iZ*xjz1OKIU+gdkT)fOYNDciIr#=y+PGkLY<(;~8R?}Be}mN&45o^OY3pi*
zG>6EG&T}@E=o4<EXm8o2f9jMLB?~ESqUYp0UC5@P4oE|kj5gkt0jhor=1pue`vss2
zg@`U{3Ms1w=Az<nqobe3gpY7@8mK}0L`f<+JOnN$tq;-VDl6k7_zxXAG#nD8_Gw!F
zo8;vWh#gQ-Xmw&Vy4dBhmg|%#un){5BRyV}tZla!0_3W?2+H`Y&d##xW)&-WAyDgJ
z$iTWq9n-Xk1~zW-o{R$zmD_!V-si576zG&4T;vZm0Vwf-p}<4WFXt~9u6dqvb%-Gf
z5(~y^Nf{Y63?}UCf=M+G1@-0W3H#9c%koY5V3yuK*n3iZQd4;;=IEFckQO6pLJ>H_
zU~UNAkO%gV0cfze*^Lq9%k3$c!zv5pjF9{AhonBi!K=ed(uG^Zk>qhlRGTHwpVxvO
zi-6^a#MC8O|L-!AMHSq%=<{`rNpJC2Y#p2F>F%Zw>tU8&Ec!J{6^K5Di(5+cKpaBj
zh*+ECKYdz@-VSC|1R*=c{WumJi6Z;|v~^Mo@Lmr7sC~o=OM8kBZoHiG2|%BFJFlQ)
zLm~763h6d7LEu6BFtID>K*d2PaHupgTYve}N8F4P_Mi6b-K&a7Mc{3SNi0y5%)?LH
z0uVZkNWY+UzKySG@&Bz7WZdLqJ@WAH3UD)mvEyE1SiRbIS`sK?2PP$^HIpk1G0h32
zh*eXO!NJ$z_W@aO_&IO6@u;{se<FE5cLx6)U6*?Fx*+5I{fjf%=<hW}m6rbUv+Evg
zV;eiZ7#I7y^cR?!ur|<L+SG@XH&&UC7qDqaSl9#bP!0g|{rc_OT1pvO?y+4tmHlU}
ztuy}=gDi5w)KuoS>uEs1<v_k)hib#kSg-e?o|&2Es#U9`U~M`om!XAwybVntBl$)p
z^}Ak&R+jnsc1uWXqmc6tE}Z}vIMfONFG(Q5D$^>IE*}PVDtfXciuj}8wHoN^roLEh
z2LWIqzQZC`?iFTn?$_=!uEZ^a0wpfc5_&|~F|5ryRbJL!!4rA4bD2Kpf}G&|fEZLL
zNLc<!H*UqAv<me=x@~J`_W_5fz4td{N@MU^$1LhY2vjMkaNmQMZ3A8PrJ+W4VAIX^
zC@TFiHReI0%Sq^Y*RNZrbL7Zs(bE-)$f0aOeInt6oT$y~bSW4sCvO6(TRDc|5G#tE
zrwx*3Zy(S^-)63~8}I&K0fagMC5i&7oJ*i93SL__9k=bPgupWf?Jf7KSKnj3AbPrq
zemhan;KEY_&P^0FAlZ;WZ30hOs2QIe9zi-6gDw)11bz7A5DSvmoR2eSO)HnHOdXcx
zJ?6MKw6`u({dw-sr>>@;!<KI$ZmiqZ!P0T;f_j_zkv4-*HXPD|PS?VF>dcRP54Q1K
z`<>(1y7lQnLHA=ighSM$SxuX!)_8rlyM+N{<^SArir*&E9h8Mx-&@)Qd2hM!E&F3I
zE7@lHzRlcJpHRBq5zvdv3?<3g{<6u?J%8w;&(MnOa?_EelY~h}#5z+FIroraBe`VP
zxZId<wIx8u@p6A)|7iDi)X*bzZ$fo871MGg@n<Ju*aI^u=^^c9?tHAw=RWu;eDlEb
z2;FmN5}~Q{5?zP|Ryv9~Orm5R`FMG$P?Qt5V`k{ednVQ6j@G;0Zr4KjhFl{MNWBFP
z48;d2P(Q-Fjz}ChSy?9Zi|$ijA~Xa)<arF!2J+8wi14zFRsK4y)hXkLR5ccCU|Xq8
z6-wC~h?)(xLn7ycoSe}$#aEo2IcC3nR77kt&sTbO!g(}5$wP8|s9fpGhK3kuMnR(s
znbJ|#PHWWxrjU(p)cpgy4#4H)=F#n_BoFC}#@tIY^*Y<WRdm;`U#FIT7}6$p4?FL-
zz8qI-6yN<fMSh+&{b<F?q8&=X4Wv%Sr%k|IavS(#A)%pzvNw;qS2THi{^6w9v#q|k
zAnpiKL|)3!@UY6!^cd7&EbrxyO>WVP4q5jg#qQF?yEPjm;*P2isc3t)ejuvo1y;42
zXwl-{|K~r$sv-`3s{(!)6USM+JQG{u!r#Ap`!)-Nfim5))YWpOn>TK3#j-50rZv2I
z^Ck_jeheHG?z-lspfMSxAvkFNPU=&V);ZkbQceTPr(!lHzwZ4GYn1u7cg;SRxyv!&
zGd6D~<~j4MH+b&@5gEsj?EU-p<?l#v))R^QpPi0pU`=Vk;NW~hfJEVOZZ+YXwj%M}
zcNj70GYv2V5hPoyJkGg+Ecz<QQb$DM-orF|f%Rp7$w9dYhqEu)_u8~5v~b?0OYYXx
z(6GT}=^0pkP$gZj6&*Vb2<gyCYCdx0q-1DBL<YDsXzFnPGyh?I3{c4iTevLU`|RxR
zslLi)SDW>EFhW7jAO$Zy>-Yf1g-z>@O=_fb?|1e?0iNx(ya*9+jqAP_aPdsX5hkx6
zmPVl%m9oHkL<gy~76|Q7ZPB^fH0GjZc>_=lp84id6mu$@r}JpRh&20?tCPgRM-`Y9
z79k66N%zf0j<4So9spXKCIaLcD=Q`R<*n~loJvD!4q~bnn1%Sapl+NH@0yq$hJ^(F
z9m#C)jywa6czv}e;l*_ywYc*qsz6x-yBK7o`dAIJVz?CPTy%qvoiF;A)S$Q`RWpju
zFX?6XZ`%}^n3P|YhT(z^YB^z)$?z>u2iW!4IrU*q2o!Vvim5vvjEs(|VertZ9S<7l
z;=A{2Ox7D{9MyYYDG<iK`ct8a=tN&-GQ5wEVabtc*yM}c2)G#?eVE`w&>lpi5&}Nl
zf{$HbJvI$AOc}a<0eBsOj??_-)L2@b{n>-=KZnw{NBLjbVsRGR=kA?5t&ls`n9*+$
zoD=%xr;w6%>1OP2|D(C>dT`pGw6cfOwk^h7q7^Dyp_D6CWTJ^Ul7i?7v}ik8ItiG6
znj)UH`W2C6VhPbMQbl9!+Ju$9o}PNPK@4WD?a9?eJ^X8f_UH3FxTx@xc}BVJuk(kR
zES#G&E>K^OXqD9J<h+4Oc@ZPy8mp_XQ*`iDpru!$B-&T~S4~7%I0Y@x_n`Y^%1XgJ
z^X^35LkdnHNYesXX=Ea8+<tmIXTLS#`>Eqa5#1*W+WScSkJiKY|44OpbsGjBP%Ds^
zldhL7JD*m@?&*wVIumX%cObvim^^V)Wm}8W0Yq8L`$oB6p}3U7Y$xnu%q)f47MNxl
z<Z><818&;Cfel{**tv*F;>3@O80>A{k5uPo`1_q>sKnYHnaLL9=cn}d_lp$G=G6Vk
zh^<NLwK)SusK*WTTOa8iwA?m7bP5BlFRqd9mngN?6FfF4p4w^yiO;bI1|r!I6cJr6
zr4e8sW}_S-O;mW1$APo@m6v|KEO<S_D6LT>9R>L*9o6V9lj4h-uyCP70iFPi5={!f
z*1VS+Lf{Ob3lmau=}&!d8or~q<8J2;S?9>mZ0^)pw$)1l%y^rGX$7=-B!ecHw&N*1
z*CCTBO>17JXZb2zH7)ele)W1?)&;W`86#ougXaG~&{o&LfR>VF_IxiS5MRc|7@(H#
z=;}%XLI7Ed5`HZe1u74aQGhQf{}VByqK2W48tfvFLIjdAZ=<7ws1|i{+bI-TS=mDS
zt_}D(B`}RzDz14*UFe7^p7QU2zm7Sk7R|$kQW670cb%@-0+vSNui2qjYf06M@~#22
zHbCY6WP$);h=H79ZfzZiElcJQx=MIx@{P*+$b9)DkdZHua`qU=eM?6)0>I$+MI<G?
z&a$o`hW|B*i3m7l+ks8XN=i=*#MFM3d$|3XC)OIojwD4Y+eT{B?SzE;2<lNs)6sGD
zMKnchBDzYXW#-5vh_fJKHUlhOIyyS0XA5%gL1k`<z=h|331uxAv!t~|^mw2hlTl(|
z7!&ZC_?~nYJ49kaJqvQ?k?-Lu>)e*Rby#s9+!u{qyv?(g;gYY`G<V26_kO(t{zk9#
z=#ezgDohJ)3ywC)6GRfBs;WwoDVWYt_3=v9HR%lB+@X=`Lw(V4P<GvqW7_l@8kA#i
z80BXX<4OQKQ^?5Pmh(l65sw-`aH}zODEEhNy#-jYgth>|);wz{RaJ~^;9CpU?Kh|W
zg!)p0`qI=C?j4A`I*ygz7W;}qK?d^@Oyk*&C&G)sX&}?GAfPRA{6dXlfhO2b^Dv`-
zf2?dRMVPx(cAvp`Ht@9%+qseRgD^I}r(jcNem)y`!$jOSjnl;sp((E8FS5Hv_J8*k
z;b)E0nA{Y;3J1L<FG4rBeIIPm9-S%=iQi(T{+P_cB#kNf{|}<zuZ0`wJZrE!9lDTp
zsWBgRBf)#0m$kzRL`~=y4xf&0XHfRTK%^=d@&epC3_aJe@6<x`Lrtoa%s%gjP6bCV
zp$`TX?ytA@0Hr(1F7Jfyc6Pm4(eJTVevp_6BZa7YP#dg523=8W+3T}?uHxEt&}tuD
zQ<aBB^|_{p(;q-wdw4g-JwY|CnzU9T8*_@_2g`&fBfpdYFj(rZVp@A@1o7KTD_5xB
zzn_orb&oa5qpXQ!JOsd-6Gu**drMaP`G*h|YzkLB&2F?)B#sf2))fD)EVt&F%79$~
z`C8yq{%uCwhMY5n8azMqncqZ(ok8Lit(Ca%N2l}zyf+jO->LXH0zrBvJq7Rww?Dhs
z6M}*Vjwv(T8}tw3!n6EMt1Yo+#&PT10&BV|w!Mc%#j>ql_Bu?dk}y}UDgMv@MM>-y
zKjHxzK|`_v88OJc>JK`L=Ze-&4}#f>29x{nGF}+?2eo*^%v^C2hunYCk`58-xeSdv
zzNod06f}oA4!F{o``0o9wDKrH9SpxXKuK-qXPGM1&eqo>q4GgrZD!pL3{K%A12IX^
zK;Z`or9AfcRYX&<a}PxXyip|Iz`e$S*pUAp+hS#Ya19}z;9rwJ0fDB^;^)ykqd^#5
zRYRFh14b7{L(>t&H?V#zxIgy!ZMDf=r86%<s>9tjyQg@&fB+dMf)SRZJ4C*a;{qla
z6qo7{hfqUjdG4}B&c^EIQWvpZ%ww%LVlzG`K2NH930-CSc7_}<O2&#@rlsq_qmDr1
zh$x~Fp3cx!=Ip2za&1zZytYV*i0Cd_v)B1?!CQ^~Kkg|Z=~u(U!|g6Pi|?G5hW!~B
zb3}BFY<NI+R{;M7lK`dod_QW&HWjl9CHCTiaMkxY;VS2yog=~K!9Y*wW`kiji#<BS
z%~314*LgIG<wA-d$E<GhluljVuF~U9T^^bmlV|BF(GLkhUJt%?{Py3Q6sKRAiNqDe
zx|}a7NcLVI`y5Vs1|o4h)Et2r_Csb7V(RV=GLg(^IyZnF;z$iat6BN;2k_lN-IXBl
zI;fR|V$upUyh6#6S!0^tHNJb}we52W63*gcVhuPkWaJ(pS%HjsF1S3vlHXNz{@2a!
zA`{UEhA)J7o&klq3Y<bRyPVK@A@=W=6$nuC{i`|Z*>tbng|mX^kWZFyzqCr#>$C$x
za|OPq31=@N;#}}Ob}whhh?~YsnUag7TPhCe)Q9ig6rRz^E%gI;1sL}6>;G7us>(RK
z?-y6(6FO4~I(_ps9u>9OKXSSn$smWam=_?AE3S#SVzOsIV7~8?q|n^D2j)B@yfhn;
zQl~5mxE7g^Bb%z7EQh%_L;`y3`&%Y-0hG?)OzPRexl&xJn#KBIH&GCP(@3nk!RRwb
z3GNwK$8fX%X~gZ==d044o(_pITW&3#g0=yWp5v7(4d{xKAx&5}T8-(-ua>>3CVQQ4
z9qvx6A6JBF@$S;mkr7KsoVX5Ml|t%Q&mpTX=DSrd#C@nK<E)=bo>Ap*4MSeghib?!
z04tm9f`x@2+#LKIzFQj{le+gm9Bqnup445z;^qNmzG3zq5Cd=Dz58v4@Wr_Ge*nl{
z2PX>%+zCr-5#{L<fipwBnQN>ypYC@(v_pjYe%ke+GYaX{1qDa#UVO@9GDzVmHIDIs
zdI&u)!eX+h_N6110?PaE)lDCpKB4yZ!|scYH?UU~kw~)Nn*9bleNMMOB~roG*0*|l
z^SYi!+I?76;#u1LE`+yqE2x=B-m_4xVYzM~tD44l*Yn-V8nnMuM%79<)o-=NJgdq{
zOG_X9Obo<+-+J{_Z)l|C?wsjM2~n{GpTK2v0|m}~WKrlFTLs_C=Fk;zeex*YLd5p>
zg?>%?!Yl)YO0V_jjj~=HLdi=ARR}M*#aPLJMW7fXvb$7=5Y3P++CXl<y~vj4(&V2-
zktpZ={ohPe&-vf(e#7X6w4P)`sF+6){Sy9*y3G1?7~DHf^JF_mirw)P-?T2QZ*^D@
zW_^Qo!9|R|$mkr*UPXPlz#5vZYV2(7XFKm$)%s(2?$vNr%Hj1F3!wBunQX}X{T*Qu
zKx4jb&z_m|Irz~rV)!;G=N+M(7mhpHnpbJ<r;Sr9c1tPG$mJ7N?x>iC^L`0Cqpz6v
zS?4KBK_UsOFV!{G<>Ugwsbc5g@Wr${d6&~Csvm<OK~%*=@$5jC8PApR0*@UY7FMD;
z`7r6ZBOP7&i-6}iM`f4&?;g|ERs%@&i>)JfN8C}EB~noU*<u(XGlj6zk*Ed}ewi^m
zfV@DthBC}~6O;Z^RJ@@JI)lb_mMkBqxdYrjchL;Kl=q;HJ<0lFd9O~j<{?~U+DQkm
zmmhMyJPXSzLc#1A7vm#D&L@`jj3gyQ+DJ&7xkHUyehASRKxz}av_G-7cs_ngqRzf4
z@xATs-6r<w8E4QeRZvb0?}oY&wiYFv7eAdZ0#}5G@M_V>Gnx*S<U-s<{ok5o{5T*X
zjwdT;uf30$*-G)Qn5<t1j?jVaK|`_TpCgilzfg)W=?9(t8?P=PA!%{<GgmFUiZkc8
z#(Udc54m4GaVAzUUHYuZ7%~M)$q~|aLl38R=VO*^T-Bz<tnl`1@#tfo5!qGhqRt|n
zJN#%(ZNRd_i3gS5=?QRu34}@ElFD0<93d}BJ?URg^e<3XjX_Q5zP@_R&iaF+kFE!V
zch4Rc6Bj)q;Vt*xq)73o2J?R^00s(SaR9jWdH;0{+Gaia_7^Sy%n3najYRz0^6he9
zq`W(Kehc@I#H~vFqTmSN3CrlK;nyursZQU)0q5DU%JEWksOFoSeSkV0!DHyI^xleA
zExC(Pe@Dl}_(CFcqo81VhmZeSq5C40EZCl)b{xij0LPROz&G;EttdeQ0cD`&o8<1|
zb`NkyKZph1!^>wqq+ls1;W_V!Xq0+-yV$Bd8mL2O9K8<5)n1*f7e2S1H#rt_d+~+{
z#~Ifxq0|yZBD81}Odu+{eB=`pdjR$UTLq#I4Bn;V$B#xbo5Xi+hcFf^PnB|M!Q#q^
zU~>v~69Os~=@G#}Vrh@4C<}QS=i&CJYVrSv3&04aK@gfPJyUm3Gr@#90M6T~n8rnf
zFXRz6&Y}S`2qAHxG9T5lcl4e+8msuI*;Ao+A_^;51rf3Ft2ZCN?O_g7j_$5MAUa61
zwT-Iy?m0g(aB1+Bhy;(2R#2rKPHQEjR8U4L!w*Ojy-m2EZ2?QfFlp^Ab%EyZilS2y
z26;xI=w0$7DiN_Dh(s`ZBwQQF>jChSToo%$<n_eg5KGfD){Q&`hEhH-b^)cCmYzOP
zq?|P%TV!?X-SfFW?NG-PC;s;xVtTsRE7fR66XKGezdxbGg8d3n8V$_QA1uMYO0F+8
zKGY;n6=^9FEk|NF_`;1HeJM(chZ%&3MCS-trnz<Z+rK~&9J))`8U4xlH{6Rad=Xv{
znDSwNNTx%O&5gc0q<A+l9Ml}BeVJ|Sct4oRfS~2cC@gTld9h5GuS!Vr;BmK1EUZoA
z3ZO><TKVTEliYkiP0x7-)qMo{M_<Yu_bHSasz@ivq(Q=fmg1ku;p4-^A#<dTM7>B9
z<quEnL1UkH+cuvW#j`ki_&8LPHP5Fic6PeYjjQGvk=bE5r!<5VfNS&%#{ZFe;U8Pt
z?FAnn(XxXiQiT!~Rk;M>_6r08o4$%eeP1M{!-V%rW*dlHY2#@&5Lpi$R0P0LEBJha
zNKAy(Wb7D#*M|*4S5kgiK7xXexS@ljU=Au!iRZ#@K;Ny9f8c9RbMxun2;wWH>A$#N
zAa``dMA;8LMAFuBn<}5{|Mo2u*a+TLH8?|}KRoDAs^ihVz!sS_On%PHz!Qm+PzE8L
z^$dL3M!~b56>4*g?>=#8?syscMHs<H<~NZ>5FwZmW#N}E)vh-8catf<cupK_H6x=9
zq~atx4786u0~-vi$OrMeN;Z{FRk0>E*jChL<;TQuyuByO+1uOOVk1Qa5>O$4?mW+`
z*#=5bC=eNQwD3DhU4)3EK1$c0s3tU@3^k=18v9%AYjEXS?~II&Bz|a?Rl!Fz2(0~7
z02H2qhW2o+(YSYDJ>-wdP#Q|WcgqvL6ZiitpYsdf?#ddR_ooVbrxn=B&6_v3Ve&8*
zi07{UXb>pXq=3P?ky#R?{{;xF&ed2$nhzk$m8!!aQ8z~$oMaDCj_t-43dP7sa}4W+
zYF!~#P;;I&Ta^$2@291anRIyNgMhY?s2Qxq8URqDrkJ@Om)xN$sbTTjmCK<JX#&w{
zf>2Y1tib|f%x~w$0`?*5U<qky6)dyk+$0in%N#0PG~f(T6cCK?6%x5J&Z6KB#2Flb
zgsB=G=g(5i+NiD_A&G`g(*j@Xg4$mFv%5+I{i#4}dw+>Ng!T}bn^A+BSi+`d8=QUG
zwq27mizp@|HDNjH>rk1uLLc7M(b13UvJSjv(B2wd_k}g$;&qV|{gB!GH^TA5p47;0
z`Nc7Q6nQ{8xQ7o2J}N+-h|y<#_t9BFD{trA41~?oKv=9GAVQ)*Z~en?6}tsuFKEnT
z?YW$w`$Pa}F)B4fyeFn3C_Nt^V(_!*4WiXbYmI`xH_Awqn-X+8#u&}M$cEj5fxQsq
zR%#JjB-OpamL>S>`sKKtyLJgs+8!KP8^M>G2v3Gsz$Z9TpeEjjo}d-pGqWF89vq2n
z?CH6On{M`_p#D>a3P&!T1JX||YFDAm0Fa*(j&iXUgng7I(#FYX4%CC@r`vjZf3UJI
z?opyq-J)XTgukF;|9ZA?WjA`NWHvHT8~>D)ZCZ=wIWpDSZz$=n%))Zd`i%p-Ae9T!
zEoP*udEgUjiOT_#BBPxN^0fckjqF}p@3%ZU2d2at+IK6?;$NqsZAt1->uX?~^`o_*
z0A?62yOdU&U(cw4BnlD11Mm@IR|MI1p-n3_m=@`eysm;*L1G+yQ);0^z*|tkL4@S;
zlt<s)EeGA$kOwGZ=kUmR2Ej0bhC(dSdhR@0+>L-l8o=nCI}VMNvFV5Zklkq@hZa}{
z<|IyYs}_+7isR6fSx@8|h>Sz#tO=k7NHRM+nPvdqp2)#bE#>C7rK3YbI%td804gq9
zf@>b4uqsTawGh*ll`*h^zu?J})>(NFN<owH7`zr29J~{l4y_L%0YUybTbFV6JctJZ
zZMnxIpZ@knpgUQ1X*E*d=!@gff9Q+1gVV4beG*Bq9(wG-x`j|e+;Oytn~Wf$_&j{L
z3*;HH$pAmiqv?0~?$cAQ$x+B&YH_pS+Q!`y#CB{_8m%G7PGNhs-|jEkRuou|%~@ak
z1U)072vrr6zA%yliVwhf8VdaEe2_y7LPDdA-5!1GR;I_h$IrEG@Z&pQ$oN=L*tfej
zjda_YADROlWB}~e#G+(~A}C-aws>@%bzp0DXcfO3`>ddVnu42#13gw4ac)JFXf5|}
zz0gxO8$P!>_-MK!kW5%x&a?Wn^P<X-Jf*Wt|JQtfyb{wF?ls1bPaV?wc1sF_mnn`A
zftMGX?N#)ym?+2&#hpl@5P=YMG6OhlL?GLCUb+ISBhxD+fagY#P1H)2Z0G8WT^S~|
z12LhM38zu^_&rws`SjQ0G0_@R6XUhsf0s^^p#rcn{ZHPAaR88kkPa+C&jHRvLH$HU
zW=!0Ao`-6LS^)x~GH*oMM0|5$X=%f`63`I!k(Ok`&^9OtlZsTpXH!-NwLlU0Ek+=`
zl<7Ur1ARWN50KtB%ZaZE-3Jubf^ZFu^dQS2u?D7`(nf!}-WP=t|ENCrmDXKdU4^zc
zh$sn3C>O-X=)7LXHc!QgBqBr9HZ0+)GYwZrkpqK2Dhk1b5f0KyO1L=1PFA6aC2S|6
zf4wd1Dh7a+7>4yc0a4taNG8z*MA=DR$REIk4lkxO2^_dEGfE?TZ<7gH>yJ*B>fLep
zuzBM1Q+nJ@YyVR_K2e$lE3yiO9`O)bmJy4lc>IQ>@LSR6IOE`U<M1W6bJr%pJM9N*
zge0t`eagab5LWzT<~;e4Uod^2a^zy?c-Lrl!DUjBr<A4&++0{pkV1d@eI@C%-b#JL
z!mwR1T=fQYTx@7p?|QN)PB4Kx$MSCcE2X5A*yo_Xj5H@lBHCHOcnDLg?;T#^9H3WC
zt^0*&1DI@c$8%TlzElZ5!H{U#%-pm4az>LvPqqbk6{mgcBI-z@o5BQV#5K>r|74Iv
zU`YxO1cMRTEvj|MOVJv|6pfdfgIj={EEY0TU>ZZbmXX2}4>X}~@9+GS1(ZrqEyS6p
zN24_2g852Aoo@9iFxPQ~A&$!+3{-=<0b5#D`26;)5@^UL$Ql_UhbJSOq3?@CyN__p
zbFk6SSVgzst*h|xUaGaztiAXEXWNc^UbHW$D6mo@t^m#K?OT-nkZqG$Z2!i;+I<r1
zan=gRX4Dm!D-a#F+*}?S92&|*U;tz-_d%3KW$AA&CSi0O8<FViAO#F1GH61C%6!Sa
zTRK$OD;5kks+%8woGY;c_)mx`@P{GwA5xSzpN}X>n?c8jW=S~uOBh$T3bA9DRPU(j
ztodDQqtyj)*)gnHqXvqKNzr+K+^HuW=<VAs5m`OxRMSneQ-oNLidhAaGGOuS0Ic!n
z#-3?p(SE33h`9l#c>AId0wGPX?A;kfC*%VF`RNJdi_gV65TCA|SGvq;4)@h30qmoT
z&>i+K9R0*uCh9%ZQtWJOJ_zVla0COh#5(!@>#rd4FfcPKBV;?m%=Hh56R_J-#K~P?
z4bL9I7fIIxMSEAt#?nt!S0~pG_xJzvZ-9p{q0o><I7HUwuW#-^{x&3guzCjoIpRTy
zlqm%#kYM!~OX~|pAnmGEih{8}kf73#1T|fMkL5;Dx$3o^fm2Z-<3-;-fI)<A1#xyn
z7c~NKD0r&uh~u@$PWR`ri(fLy+klQ>J$g$zm0s6i8c_v-8t!Tp&<XILZMQ=2chc)?
z1__p$o;VSFu4$Q<$C7HpHkx*qmzl7@ga=44UJ@w%X^?%BvB5Cih44kl^Y8K;M31Ag
z8ciVfe1XRz8Haw-%#2J30RPVlbZr7ipz$1uRPO0;IJ3C2?*G-+dxvw`{&C|UDpWEn
zBa&1^*$p$htg?j=DkEi&5VE2&N(xCK6*3x<na~hINW)&)TZ-_!&hGE;_x$s89QSeD
z_Z^>cUDtV@@AqrHr`ae_R$ju{Pi(mh=i-BJyw(j@qIEClZc|((*<e<z^iVIudFiT%
z&g9|*d(z#=_lP6$^i+dAqY+#RhE-o#R^bKoheMnstdRB&aS|v`;1+NrX9S2!HGn(6
z8Dgy{;SVtQE$6zguq&0qWN78=!^d=LZUgt{9%`45e)E3i$YI^n<NWux3u(j<Qw|6O
ztTX96VK}=+_$YLl0Id6E7H)6f-iGuA96$sC!5@MrVjD-jNDxRu7vZV6jDKdhg)2)G
zB&*G}+{QUR1a8#(dY@*Go2J&DrDnIq{jgk>4>rb7{cHH#2LmBF-zFl$2q=Z*H(?VZ
z=?bz)+)=PUcL%brivQwYoZZ}lAy-1J`u>l&BKlJZ<$F+g5vMH~?}fPyCtmw=ljQ*%
z0=h!9&g3p#<2key$eJ3&rr!+U3rkf;@{$oj6gsVLYSM_GTqE>Br_NaQ@BHiXzUsis
zv`nn4ynV`Mt*|Yt!PH1hl@tY(NTli^33T4megB3iBb^h}*jU2HoA}8vfRzY~hpBcw
zBoXcR00S2-F|tIc%zMRYq-Sk2{WXu)85r1hA))oe<5OF!0<e-~k?LhoZW?`fVw}5`
zd}73qY2B0OMge&kpV-RYzWQ51jMb~{iG82GeURHjZ}&|vSNqI%R?obqvv-1&R{Kp-
zAzPySiAni%e_%xQ*?SmBe-$xPHfXr<a9%zkeAzVn9iII>&oR^XUbJc+d_&K2pH(Hn
z(+cio8$J(iI&iIR+aVKZzj+tG5vppRaT@9|5{uf_#`V8gR4B9X2{FNnn_w(ByAhHT
z3dq8&uEIbl_0PEh$4{llcuEf>T%!M~6z^8;!+V|L=~>=UTK?gB?!`fDBvW9jh&vit
z{h!-&jI!|PUP7xbKjRHrUh?dhYv%E%;ssd@UD;}qd@W~H^(Fgd=+9qVniHCUa=HRw
zY955uWV-~hSTqzZ4uNc^-^_;5(ab4+uCVUwcK`N$$aqs)9M%MgWgS|W;ieWrJ3-JU
zGQc%K$)=v)Gs9pVh%#K~_6A;Ytw`R)zVkLVtME+0@bpIbURE{8*sig$4FFp5O_#pE
zcgQy-XBloP&6OxChpkqOm+oOK4>zS8pzDRG2Q-ClkJ=c)jPJupEJ}pR^mnXTjyJ8X
zZ0J679Ng(EOTq?kEMi@H<A-%J0;A=r6Zh}+3c6i=il7BJ2bR9u^q}s-wve8Cgd^<2
z8r_{<Xdj@dK_AnxRerV4Jk5S58q9ul_7a)bhmwbEyohrwbvH=?J3<Ud=m3T!ti!A<
zlq9R$(T|{HS~v#$ba5p~hW3S%Wnx$TJ=%AXS<b9Ds6lkYh9`3KWgO!Ao}G#)ONF~~
zA6nB(pKm<(A;0FE5xM13K?n8s{8hr{na%HVaEg`VJ0T4WD#lAd4C~(1oDwHS_Qz%g
zIc0pd>;pSh;@U=<Gx&bnCv16j2{Uc4=*$k#u77)DXL)Txxv?cxN=o^PQ-5waVLU<O
z;F7#UYeL)&Sj4LY_A+)24u%5&IMR7tf_`S*p2886J=RY*YFg1V@10psM`7Th*}O-f
z{h}$ALp~?*^z+~I3nJ=k9Ce?~buMD;&3ywwMHPbnlCbIUWUKhwOm)&;_azJ0mwF%P
z{M<#MJ@R(8Y-qq}!B7!PkzLAl2l6D~eqX*V?39H1j-UwGc!v4(!mK{38&tP9HZp@M
z>yx?Gs-euu)&0SXJxNnbD+Ev%){_fx{?=Vix4v?`n^ci3Sq&-U^+dr@H+7yKtNddG
ziY56@7+=ARM9~`uD(PYdZ}^@$Ga%8jCUn;Zd*>k5POFl^VM7JR&{jzB!a*8uK{D=p
zh#I>FkC%3SU{`yha=R^p4y0*=Wtx?hLiARjvqxi^#EZRF-IZA-WMojZ7S$c5HITrm
zc&PdOm*$L{GYvJP6~*b(j-@8Ld3k!$=oIO`E2Wm<x74hkYvbwR<?6cMx?e~;omzfr
z<MD!=7fI!A`As!KAbHW4zs0-fp%MbZU{rqzui})dG5%jn8A9T^Wo{ZLM*o?-#Mo(t
zY-&uPR&bY`9LX(@7mITkfUOEZ+F|4>0@orlQ{-qriqf;;$$%mb%qZXB)l3Po*a@b<
zR|iiH>L_H>9K9#Jcel=3!p}m;MDiHI!|AZeSiW`<r?2GDt^0TG``mTQJ+G%1m}7p4
zW`hP}cFrm5&Q~sc9X=y_NoX~G8yt(3ikf?MJG8bCQ21TuJ%M53@Q=QaUiItOCVwAj
zK2gIxEOrKjV1eDug-M`N^aM)_icyT?C3qHo$I;(E&c_t}6omVc2)rVhUw|`O-A>Y1
zvR@fg8^_icB%-)Y4YZ*ej1VypTj2BJ0Z{A1joKn<7YH~PNsl9s6piY+iB|{M8^NMT
z&w=U&ppK~4rU>!Ev-zg5Wez=3eEP-hwQKrEZOA!Dh!=L1_as;d$mE-UoQW8H<pJGY
zUjO)wT$d6prXXi#F1Gzbg4DaT1>0RrL+6&slMIg}+#JC`vaBF75=ly`620afF&>nO
zm{e3_s}D>hhwgE#*mbSy$hfq+_L?w8+8j&TU!%LtIrP^8x?{)KGsrHS2WmOs`&1-1
zzVbk0WE+*0g@LxpcFKKTPS++yEv+FtISJNR-pt<WW3$sF{~DK*0k#5gF;A;LiMHGn
zh%H9hxx6%QSyt(-ww=X_-sV+>;Yy^_52GTn3`3!9;w+QXna->#At*0M9+@}REuoT(
z8n0Vris|l}CqNe9Oux%h*Hm^?aF54~_&ywY&vVJEbMM~2n>g1UH!*w_0pOVo#3aF$
zzyP6s^f=rtMz1dyasCrGB~j8o2;B!FDl@tg?wwFq+0L1AeR<1P@zr@xsRT_TkW|Br
z3rw&MqJ01$HX@}EW)*VeCf__zn{99u9$Hg1YfyjpdEa=C-cXf)S2-22!=g+^v1^TI
z&4CG@6b}@2p#Q@&3$s+SqVs<lm%Dcr8WRwj&C>4@!T1n8MBp5c{7H_m3=WlqHf}C1
z6beG|7rdyg_&u$eXr{Q=fi5Ca6Eg1eJv5?usLK(`ILW=aQj})bM=7ta05-#!nHiF<
zffDxwMsKd)yhkBTEsR@Hb6bXwwaA{;ncj}(T3YnT+M&XB@OovLVDy=$TJ)9Sx6n;b
zZ2PF0ZNvw(9sNQDfb1C1DIk1s>cg1>Ne6*?h@ceUyQTVCnBa%bH}M?~)Ueprfoy14
zP@-^+h^7U835BQy8}=}^RQ5u)O`C`mo}icjIN|X`=>@8OTnzg&W!CFuW?5e!CI9TI
z&$nv>PoJUYJA5n0yfZ&bFQWAEV>%v`Fa8)LOYq#zi}TzML`JS!#dE4|{C9rglqx!v
z(`W|>%fdTJWqV^wGIK~0*L|zX&mS=fO#V*E${*Uvd0JOD%`Cl_XN|iDR{s3tA&3-V
zyisx!tB@Q`z38?02<veE*y=otS}d`rsm7W*{)lF;xv8}D%GiOEZ-Q3Zw{Ep4>TUkG
zF8oTSf%PHeU&Fylhq)^8@$6XGVHd&#Zpctjdt1X?nA>hL)ZMK+6;_Bo_~z;-rfcjZ
z1C7iQ0nE_)FLMA9a6h+H9UB;{ADY-=CwUw?ZxRgxg$J4l<8coOdj0H(UFg+O%p$Pu
z@8?BaGd!+(Q)+m{<)KIE>Pz-+12@knCLUm<TT3PQdv`C0J=o}(fHV+H31{W{k=S*W
zqCvcwed9=^M*PS?IvYJ0Q}4p~BKjJpz(89ka3)`i*}R8?L@0rDUtjaEi{7tbJC+9M
z2$K;Iir<A^kZ#?&OzS@SN&{PL@Dx#K+K{E9C>-$1G|x19U}xdIirsco&2)DkyN4`4
z3l<62cw~=Z)92d-y?9VP5(<fE-7rrGU6(E+&TRd=5yFKi3%~%q&NEd@Yh;%#JbPbp
z_4bAh?aN+}*p;VimamA^`Aw;FZWCb~?BwzO`#XQ^XMevHRPtayXb2X8x(RWfM$x%}
zR`J6c?$z8BKR-WFm!2v%;T+C|iXOugZ&_t8G5TpZ2?*1`s8Odc%(=mNUO3OSUx<eV
zfkDVnAp@EZ!bA+M>xVD7_+;l(>o!9==r>@Uib+wJSV_-WDU}qwb5A3Eorcr#=QQ4v
zU2oTCoc`ExO;+wwOV*|OLjgmTgjfOpUyeQ#y${)AX&e82O>5Ss8UmKXzx2B+nDKs#
zLw8@_?7LOTfZC_J-mE%hYnuik89Zs;TkxfC$Jm@*kBSoX=s{TS8ylx0$L>~2{Vt~!
z{}4FqjsbI;hx_Y=isic|EKMEmdrjPq8h3MbC86a&^l3@X7Rg~^XlV$><)$d0;PH+M
zv7$e0kbgS)5#AhrIke|#=yi!8Eu4-_@y31tJsNSs0WjYVmJ8-_kr$1$<%X1ck+7T9
zr<~p0@letdL=5(b`-FA`5r@i&igMmrnJFlbvR)(h*R8!PX1sj2Z+|M5k){R&EMZHX
zn=#LV&4{QR!PR`l4zVylk$cZcZ$n&cY%ml{oA8cm1_m1`ge?TO{}$*X>1W(R=qvZ?
z@_XHPZ$`YMEty44j5kQmL?|{rx(+;m<dh^M=Fr6G7MwRfC?i9V>HlTqAYTrA7Zkm?
z;)E(e8vBEX4~b)zWL6Tq0$;bbRtx**eUvhTzda8>S#;!%Iug~Dy~P7xQ{$0M-}L80
zOMf+3eLBlTa)oAA!s$SbzJ*PdNZ1G{g+S7MU0!)UQeaLA@d-i!8zt$k@m1DPNWuW5
z*TI;fho_9NsO~T@{|x5{<J+N!(qk5Uh8})kEd}2ECZr44L)b{nGJ<g$6Bp68-bR6`
z27NX!3=ToNKfJ9AwU(8Oplm;UIQ8w-h#RprEfy%Ch%EzjIkC}5az=L8<g5jkOlBMr
znJwip21!AtJA}PKM0P(W^Wp@|07kJo5Sxh-r2m9~QpV!uMl+FZqzME%4T8@bCxrRo
z1SH~QRP;J7E>GX|0^v$S>)kX;v%yYO;hb51Dzv`W(MWavZi8Hu7z2QZMgs)13AD>%
zi&Rs_V}@~V+lZe5_^Td<^4*p5ec5&nK~u3xDcdj!2qwdafbn5m6@UtX0qsF0leQ}(
zMz&yaGEp2rV`4C}KA440t09yQa9=RtYQ;jnPrfkRW0u}YMp@~EmqCZ~XL0ehyL)ws
zse^+cbRW~Rvwj0!DY8zFb1{kR2z2YskB;$Rc&P!vZasmG?!da_)IN!Z?fl)hm6=%)
zbRqF0;OTZpl4_Z<Pd0k11%x_~IcY$2;ivcitX&}MGaC+Z`-g{Cf=A%?fGIQ*V}0P^
z`GCAY#AXS=IXQ8WSQr13{l_3X0T{Xt&^(N887vhE>%MU^))z$uz~xBn>+lX}PO2|m
zh~_!;5K4{^ywqbnl^eOZw1`}Nc-Us#BK-f;0%-i36C$fi#C`aVt{xs_@)e*;n#C$I
zz!l#TlEcHqFqM>qfB_v0>ic<7;+j02cp+HsSfCJCjQ_bh9%%rC)&LYh#^HeOe*lW3
zOqt~clDTXj9vS|UtqC6k1Q<LW)~NZmL#>Otwq9#Z|IeQ@n^`MMBD!uYLI<ucd^;B2
zB#2RFxT7qHlK>f_=x!SWsa72i3WxwihpTM>shaIW<yXh&3`}wpx3c_rr19_!la{M-
zRtxAT6?|ZHwyLJtw<cm{g<-?T$#)nEBEr~|Rd%;Gv58aZV6m&G=rln3@ta{R@_+6X
zNS7ZMtS?3z6Le%;FzyO5Oad^Tg-3pxo8b`|9SyAH5kZ}#Tgo4yTu(&a;J2?|V_@<n
zw+?=0mBlcNmwF$Oqk$rrc(FEa-uw>4?1uNgu2AJ*UiJ=gsPG{IEvpm@$IdbR>(?&m
zrG0)+@^*H!t9%<s!D?lpI6(21|3lW`WkM!y?&?6GA#fNN-5F9<JK=q790eJ%^AN9H
zX9{~WTHD7U(1{9PaL=AT8Lm&Va+wJ0YKi=uRs5_K>vYUvYAA%6$0Bx-m=`cC39`&8
zTlSJa*yrKwOoZX_VwrMd7WlUYT(Asi{pxEEgDRWkPJ9Cko2;983KrN4=WeAUJTRRg
zX}FxK<q)-j9+adY_=RqxL-0G9d5zcTlesB)!GC82w5MP|b#XLwrmTI3{SsH54E#gp
zhhh$$btYrSjYV|TO5n&GO|~&dLaK;?dOJRv$6#Y4wgAOo8cfoal6!rt6G5X`2TQ10
z30u!svSS|fPAo3{5~4i)+(iz;m_Pypv0rg_rrgv-i3n>Bq4(Uu3f3oUBfcZs^#@6y
zfSIcSLRkHjpQTTtqInmxGXb?XB9oMOa*F1b;NxL6Xy2ih<gGpqUXK|=B1SwO_yD`)
z#F<RBjnWM4Y#3wt@|`-9F<S^+k2X>$4TgR}&zWuVJPa_sCNa&!{MCrYV0QBg&eiJn
zcJqe0Gtf#@+qcKT5z~aC$RpjmQw9DaJpR?tGM^Gp=vZuV$cK=8mjgf^Y*PBLhu8th
zN;<L8Fsr4=esAH4qkzfCvu>PoqS4n<tM24vp9Od;Xu4AXo)4Ce!7E3tfQZh9_J^=o
z0MOroz25<R#M}4pjWfso-}Lh)X~=$=DAF!HAsyeFFaH$URrKmr402c??|1%Uc_CIt
zcW8|!FvrZ{#?>Mt##lfgCGD}6Bz=E55|a$&1#lt3xw&1KoQ}JvSx@inkSQc065ZsV
zF#NWwdI{tbcE*EddnYri;APi9o8LDyWRekGg7}pbG-4xsRmSMsS+x_yL!0D3W<B{*
zh&`G_8en4x4GCd?@a2N)4SbM#HJ+cJWZNeIU)aN7P~R4SP*Z9J07i!6S+XC6^^QUQ
zosc*OA0*AtJ>~<!{PQ5m^l_uWrFuk=gvZtQ_3m+E{HowhXF(fiIFsh5q#>h9>FPQO
zO(-v=8Mawi%V4fL({L~(rVXx8O)|-Gz#j)3rV(QJt8Rappdc0jISo<IILV&7VioRv
zWSqonlrD}JBki`R3g%j};s_2f{)?pQ;Jx`Mpnz5JFKKvoT=bOtARfn+p<m|tH59A{
z>r7Q^*qxsNVJI^mF!QN3pj@zr+@=|J9NU4^nj4Gw7c!+8)DLVfcaA2@X83C1c*9Pf
zD0@MiJeSC}&_<)3J2rm-n;}?WdoXT3e)*gDU)*Ek)1o;xzIE$X+$CHO?G?qlNl712
z1%q8x#zs#i`r*4XZ}jSWD;5V}M*#~F{&+Sq63cm?Q^yIf029}H;2*dT`6m&b4?F^}
zfMxo!#$$Leo}t0yCTM+#J(mAia>=vKpP@J}kSvMFlaaA}qfi}rd$EdBlYQq;pME<O
z)qG%yHXD$1OSYLYd3($bq`D7S!tKjpNJjr1iII^c0|*ITyz!?D2mVaV!TC-mo2eK`
zGmdq4E4mi``%D4Sx<JtrDP(XP#+nWq>B_Bmz%Xn4Yf%#ZDRAXQ2SavAODE!$6oJea
z&73@|k-cl6zfUv|kU3zinud023XUjDL=D4HJNbF#4urz1D8SyNMwI_cj3;w3NlYcA
zIZ$GRKVP2hhPq1)5(xj_-L$~nQ$gjzR}%Uj#mexqJQ_nkM39rQ%fS8`A=Nrj?3w*p
zY>!0=I}V_Cs8xm9X>NnNzf6!y#N|RZFOP7htAKV;FwbyvR<dTK$h?5|(8%akO7R*B
z8S@1s9lt%0zH*?twstjzNKe1K2foS7$$1647-3joks&k@s&c83=b30MLcq>}iCaZt
zFC1EPmfrnDdq7PAj|58O5|(+%thRnyW69HOIOxj($0Ll?@NMvcKuEIon7*JRa4a32
z0y>_&b3gWj$C<)s;|u2ZLXC&O|1CvkfTC>Rg|=&YxZw<@#Z4~Pq>V0GJ2@dK1&}tu
z>QKFqS!skIhBeae+q0d}hCM~d05&w|F9D}Pnkl5~S>d)(DA`8qDce$w<@tB-UQ6lv
z`SUViMbYPAH}oA4BjXxy7+e0BM^#D$o2caF+!nFSh5ky<X5cO$UQ(5CSy0^&ijuf6
z@NnapW@2J0|G|Gsjh!B9Ay8R}6ibMGS#twnK#dXw@2jdvXsik<E0m80-S*+nm_5|h
zBmAi^hN4MFv5zSPu}%ITK`biZp+G!sC9XXA5`L3!XbEo*<5n<;<uu@9G!f~wHU445
zhXOGN-h2{X8Z>OH1h^Zav{BxEog%O#_3iujpzu707D1Bm2|2icuy7cmQ=xf;u$827
zfR!h$9R}-^;em$kTrtCdWP%kPpxRDoDJw)`LN@5Svf%=8FQbeILn%f`7qp)h$#T9V
zsP{uZ&B{bAT|x@~A7fNW*u{J-UwZr+HNmd&Y+T6%p3TPyZXzQCfkQsRiEZrg$NI#H
zr1QHUK<k9E0FwisSlEb2pjX5$+5KQ_+Q+1@11E|;7VgiBD>$D)uk2~BcP1%dFeBoi
z<A*_!_;t+@6HEfHzI^#23AGyzR57xLKvwz~Bq<5MfhKh`W_c|qY%um3Z{tP07HD{0
zWQ6-b`{qs?TJoe5{kk4(Ar=>Szr?7P{m*v$#8FhA7h4)Oyw!ogg3k`-tkF)_yAICi
z;#`Zd-{sVq%;5cBeH<!J()x=UzR~Q?!v%6Jx}H&Du}{!<VPq)3@dQcQLW<!BH^Z|P
z%omj2L=D@_sw&MUX6bSL0UI}kxMea99kq1WxJ3+^+XvbNzNV`aIu=6G(wwMA(D%rY
zz*MMf_yh!wupQEuf{zIS416S!86XduqV3zZDdg37axi!GawGjc27*F6oOz^#0nxa*
z-JoTrW%~%KRus)phrEY!k@WaDif~r`(V9CftYId)kI{)=Z0zgPU{nkiH3L^jI5)PB
zYe0W0yaeKI#l$G!*YUEI>UZad`~LLOWoKuTSU03a)q2@!?W;5+1?en+z}QEReoFj!
zcQz67j~HkfZ08y`4E^ct{jY9UifLu0-~0dj&!6_vaRhhm#n@{9Js#NMQ53FSSiL<5
zj>M}lFyV-XORpntrv&tJp!NHDdzIlv-8cOE6{4uttzF9tXsX>MWV9|yY@<c2409ut
zfg}eFLgcqz0yG{F>XK&e*rBDH6FLXyGVSSdeyyW>gCgpy<fclQ4)QBd@FN_o5ohAr
zD$Wjga57=Xgah=cT1f{HytVwI?#U7sg|hqhL_<8M22=Ij$FtN~Xza)jnMOO~UZ`U`
zl7!gWVM*HZa9U#n)Ihj8N4)&nK(3R>w}}S9J%WN)u#0I!T2!Q0jNOVxDk{dx;;<-Q
z&pi_>IS4{3qp@4fXvZ25_^bxEvi#C)6X!nbfV<DMWpGAsZcE|K)q9zdg&MqwbJ9_t
zjZZR2V_ZcA#b`Rd^L0o$v38>;oM(|7eI0rV6@<%@=%-2UsFQN$;|k%J6+7j5EuOuk
zZ{iM7(qXG6Lm@~)As}Berw>}v-2huiwzBC<$B5L_?Pzgv0K`n>t%luxJ)}d}PTt+v
zsfkh~UC6%J@tZi(y*Ok^(7PiP%~(!Bn79t5*A&16g599|Bl+4t5^CsJJ12y5-2j-A
z(WJneNF%GNZwc>36P6Gej*V7l5X(OI>C8w!z>K{RQ*8pv+X!)?M}cLrXC&%&{5M?N
zP1&-#83svjr~j_wLC6qr0hTtG5c(PEprhj|>259X+HIqyr6u<htyVOo&A%B&((z-Y
z04CZz1Z>Qb8Q2I#?zCFl0pvgtn+a)riV)zW$uskpW;fUYw28TXey3rKjMdB7K~!@_
zGYB{s%4Q4`+xUimUL1*%B!+|7ouM7Wmhuq)tg}|}n;SM<KtBQ8Htc_iR_6?M)1rF7
zYQpnPx*7Z_O!mU}#Jj*M{kv9rx~HdS-TV7SSsrQb|F=5FUbN}_RjUvc$?8%qbR$q?
zkvs`iJSCv$>@gyOw3eKXr7ujg9|CD7;TZeQeZNVG0ZO(P=4z-Q?PA!ggpqOwNRq@U
zAQ0`AM!s%bl^+}nrKO6Z*O;dCrj0F0U^psa8>mx(jt3eDsuD9XL}Kp4&1=#80=<vS
z^!s}p$`zt4hhztZ1)r=e7g~74u26+B@<icorGYhy(OBbk(KLpHgp!zVBvw~py}_bY
zBUq4t_$fFXwjDgkj@?-ipc4s2$LU*%T0vo`Fb!*j>~*kSQsFL;@gXR2gNuuO<7`f{
z^D=w524GMCCwc88%wRwml^RYeNzM&GD!PV-5Vph!nmGj$FA|guJskikJX|{PVWU1n
zZPANk_K2pYGH(C4`b76|P?ip(!K!GDdM*-Z1>rZ*!vI{ITzaM&8cuWwen>BzoSCU4
zy5-ZS(_7^s3B?}$?`V{i+=#_S0>UXLfi4r1GT<|@(;qjI+`3X;i58=L_3?TkK|utm
zQ<L;`kTu8&r}0L&4m|}1TL*&|dTbzwXY0+^uk3g=$f2>^)*#=ZyQe4UavD$1Y+h5m
zpO^Tu%@hxrOSAM$MF<l)aKsZf3t0LVL`i;H)Y8<f1e-|`J6BN%!wwTSiZj=a1C^86
z-*?T6<1s&8<|owwvTofWFD9C9x~TH4zm6!byJl6Z7}C-SE4UbZlTeDEM58kJJzoem
zQLTLA>_f4)w4Y<Pk-_i6odR#{lV~WXwzK*^HLh=Lp~s0u#_iw)A~PsiWGdN@<R2YJ
z5kMWNv2&hI<vvp=+I`xUMza(96yY;_c=YZSgou)iGLX%V(!hitJJcLmog-K+M{gw>
zCZ?f7z4t}9GM8`{b<zE+WtC4ni4GQ@3toUO^f<&tLOxeyrP_F|Gu>sKAeQ#mktajq
zj%O5n&CEK4c`|swLQV)Dh73k?l)f_7;a=O>mQ2_HuC9|4+4AjGkWhWw!=QC5Zv(D}
z?6??q{2b^cLS#bgSP6WTWU{{WOtSJe7>o5itv1H*OdZ0;#;nf!Hu|?NPl}-sTGK&v
z?j)40?#@bXsW0P|Y1zet%jQN$KkCKBUVfl*eTUK6jgz9B$HE^Ze|{l&LsF2Y&dMS>
zPB2pKtg>U0LvKa|XTt-V7YnI@`dX}4>GzAKUfTXuO{Lj@xBI?OjpFgI7dsAd%}3f^
zvNiE6l$W?xDepWH8u0g3;k}<l6W%y{A}|y<yM`rI*=^AIwM#Dz)LCbL&2#s%-yL_8
zNxiPNTzfAg-6(RJO9|3BY{h^U;B2hJz_~LcH$s>WdcSJLkmK&|#?cAQopEZM6TaC|
z(!ZYO=BIy?Q`P<Y?DTrv9gtIGR&2(z$Ur6~ZU*jgl8(wSZ+)fkV+*at0h=>?TPBJf
zn{?UfX-Gc=j0PJPIO2>l{6eY+*MjZ`<W<%@)Quitf8UV$He{%*b<@UAZppn|w$!Z6
z0!=x8^L|M3@hK<X2(f#_<ulN6!+9l0Wa~kp2^IbmE~>#{t67=_8gq^mWH9}zOtB^J
z4}0!`5eUSXCC;31P7yMnDi4+(rsEB<n8hT7qNckp*Pb739MidYQ4U1wDRg-78wZgn
zy=<eiBhNyZ7`q#~J8mQ_?mlYu?wZUvr7uRAj@R*w1l8)}X|<l(McScx#~4cY=$o1S
z7<Kk~+rly$c$)tMk7t(W#?R*jLzZqh2jIP|xBQq)>vi<m+5S-J>!HgQx9kyk=<(<Q
zo5h|}DH@yd1K@qfl)|f6+vuIrcdOIM^fz>$7f<MWXDRV_(y<BJ9eWLyiYTesXV*d&
zd#|6@=jDo4u5YV5^PDU2FC9(rCA1<geIKsxm-%5pXLS)5U)(L=;L!d}6)sY<FTR`-
zudZF9dVs%sKhRKZyZ_GV0e?2B9|qx3Z&E7FBTpx&ab5^M@PXelJ<}05V>q_v*rX&a
zpwVazF=LLkLQ%^7RhEl#OZav3(PxXjb9k-rsJAJV?eJTv+P9aDoT)8%_pE6}5bv34
z<2v~0G;lgGPO}33+A-%=>wp?ITo0A^_*&yVJ)ei2UthGlTw2$bPF|+2j3q0fna<1C
z>=yZEt3T8(T_3eKQNz_`Do}rQNQqTjVUf`budMVp=HiomFJd^mbClnEzHEy27c@+l
zCx6ja*Iii(4$IJHuwm-ZUq-pX6g|UX@^V$d2Wy{c;SNJ@kC{-e5Yv&M8w6JZ^20ug
z$w;4|audB%2gP9!=!+jkeMZ-x<GmTvbWMNsHy!=kFA_jTeU@kc{NZ#!G(l^Z)~?w8
z5p3)}rCaYFwawBq+lvS;;ImNIXj)sRO72|CJKil?BMuVeDOX18*RMyEl~<{yO+}q-
zwUfl?iQ4nQ=H%N&*H~4w*VJ4^G1xmg`nNpS#nbc5bYJzk<8eMf1=tUq=K@f;Q(it9
zGQXD#e#d!3dR^n4X~}>xBwaF=oR{RpA?8$hr5RP4B3fOjHPPzT?59==KkWM0nFsJ8
zrY@!EKU(bM|1#tb=CZ)WkP*>X90*>T@Fh<$)82laZiYNK^cO#M08i}h?yl>2EgP|8
z|1;g}>2b_7tb@hk&$oL2cbGRa{_2b<JE1b6F~IO4jH;=M_Mhd;YUT}bmStcY$N3BE
z>9QDS<UsOSk&;QJ=kFJTM1CjLw+M`F1w;7&!!u&5t6<_dg6wtYtRL=hek4hIfQO*}
zrlvsG=!h2qOa~LrY_hVn)-G0TEp=OSgVy;?>SoDaMFoi|bc!}Me7j;JAdZSc5^X4|
zcLVZGvIj)#3s-cIz%A!9dp{@V)AV=!V{&AeMxxvoJ2i>;H;y)U9Om{(N|Iy{bS8f9
zSzx|o#ru&UZAb~#qRLs7Uc^_{wRL}qY{IHC*?QF-HwBH&Ud|d$iFq`hovfqIBgYk<
zf2+m*zw;~JwkHM`5UOxZ@BRDsiGj-99GT|%3R338LrX))82wHG<RJVr3?7EQ$9PrV
z_vUqPWoJJE3WkJj@bd$SY#kNTihuQHLrX=wouoLX!Cl_^{hWA$C_6od7tW;AkFMvf
zsytZx&T9TS%4w7oYPj8Vb8{yOT|=>NA{3MgY4=Y@IlqDCqu_)^ZWy5_pGR{%&*HH3
zGt96BatfZV{prq%rTDrtpxcPU4OzsQB?U~WHZJj7GBK<P62vf6^bnXsxI6%i8jOOq
zc!FJH^bl2!oB2>&)73Kb1r{bI_`8>Y&YT2t0wHt9{PmhebTr!#QF!UO3j~L$0;sJJ
zlBkB06@d}r5Z~72n$b0Oe>?qZOQaeg#A+z9K2%rpV#F7o3<{|qAzI$UgKQ-r{HvgX
z2UIgPJ6i<^3GU&d;}!b3t!D4`v(w{%(lIenKYe;9Qr7(z#|p?mAT)f$n~*91zXuD@
zYH)5K0Z$=L4fdZ-{oLwP)}Y8x2>yy%>Yreky>02Aa1VCs&rf!Q*zGG`JCD#*JPqoa
zntXs*i;ARyv~xhckA4#Ch82}3=9ILY&B)DtbpL+&^wpjC+z<(GRJ8)o84?yIjzW%P
zM;c}IM+f}nLvKMjzozJ$<2x`3Y!=>^P;!%c25FZqBb$}nxyGd1U^bLORfZ5mpi%H5
zYT}7NO^X+MnKgDkD>D;bnJ$3de|vdWx;j^u4}dR2oq9b!{s>k8t|)SMLLtz?>pJ{d
zuSCb&{AHXxKWJnsP<W$st;F_%VSgn22F2uTx%QjycSwdJegJ#jAGk<|VCO@NfdzLH
z)X3*A{cCx-P86^83Vu_Lev2ILKBdV(ji>OltXsD(HyoxNQ^YVhRQ$wZmr_)eh)+N`
zFmwaw%oCX^6u-%~ZJ=2NlKc#WzIfD7Ay*;qMipqOqscA9=GIoX$u32xJ<5?nh6ds!
zBwZ}iN^~vX_OxwMB?GdaoG84pu;2qYsse2!s>`gsAMN$eJWl{Ji00>@*Eo?zoq`sA
z`sOtzrG5MV-90pvQWvIo5VlHKI7d6#)!wQ0j09}Hh;+FEI~B16*qgE_Jx;A-dp?iE
zaY(B$QH^Xr&}BYWrjcp;zGfQ65TxY9;43p@ei<eZqmXzhtW9V2^Sk3fM?-@G{udm?
z1xdoQd6iFOQh}EdUB~U)Z1CW9gKT~2p{_(H)3U4EAJe1!r><HOZwM5=qth|NFHy%=
zAOeFdE~kLSEkL~r{{8r=Ip^2#V*`jjoZIRKazU!=k63d{3;7J_Veryc0OVf^tJ7K8
zKH(e(P`?AH0pdH+{P5Pv)HO6WS7GfQK>#uzUoh~XA4NNOL%5%sz7o1Z;*Str&yU9%
zu)!Bjr{0EZYJ4P(>d?v`Mlf{w%JPdMA$bg0a(iQe<e=RFGIZ;t6EC#HWGLLxM+9Zv
z`{U(tx<+1;oo%l%;u@BOPf!(d$ap^4*OB@b#=?Wp76CR(&epuJ2hc4jsLNRRH$bDB
z-R29)V>L3lbzv;O6$0@c81ezYCjYY2Q^<S5fJn?>Xy$aSM7{HPo0Qq@*{?v3-&w9K
z7!zwZ1b5jr%t|lZGV9xNPZU<*JG{fS`{B81l!7kQ7H7^&;WSkwYGh<)TB93zDXdCo
zMQgw~k7KiEWTYn7e@-a>vQg-rJ9n}*?U(@xfhmTrqc~fWS?McPOXU)F@{-xlnx8K^
ztby{j56Ly%a2t`=Y5bJoGp%Z3!hsgP1Qd${!}>O#7jW5*5fMz&5OI}}qkUvALaJ3U
zK>?nOw8jrO(mBw3Oq9$ppxR#5g<HM{%s;di;AKAXEKkzT`p8M&9ZzX(MqHnOyEHce
z$#F3rX4<Uh<+_%dnp?J@L3I<3;C+`3H>&EwfSZX8bvJT`vBiA^5WPv^&s=Um`{+|j
ze}ZkYE`EldCYS}&1fQ4}e{n6vB!ac{^ajw)rVY;dsOoOqDJrTn^d<X__5M4XRCRA+
z(WXCnGJ3tWdB1g4_~xZrrSK@#meUE(o{6JF#zup0S`J&9&c?UmTaw`=qd<q5%{dRZ
zKB*BgAg^*d5jJ_mI1hkLBm-$Fr54c^xwe=9fkjVJ$!ivtDswWj0k-;`$&N)X0Ih?%
zilPDBB}N=(blnFHHq7Jf0w0k5tV13_j2l&Xp>_GavW0Z^c&}d~my2)f0Nh@s0C1c_
zxKWTuWUGRQ6$7<`0VVR~W@tZZvJz!~BL0%8!sRjNI6gbJ7*Mlm19=p3)!~DV2L=J@
zJ85b0@p_RDelhD^=|%Yw^sy7(?a*tNyGnmXh&N_e0H{bptTZ&Tb=VQ7Kb&;{r&!Ru
z3;jT<!>dk)yI6JmF0Uy+CGG+wn;c{~zhNgN%2WU#MZOgUwu1#L+h2`gLIDN)HB}xW
z+i|oJUnUX7qjq~@QM!OjZ)9pJ*ex95ExC_v(y5~)!+pHXBqBQ6*zGuO*l9dD8sHP+
zV29~Pqizi1D-MTLItpErUW`H!4D6YhaQfp1k)j4R%{Y9nnpVtFJkBBI`oLO}6SsLI
z<|p^#VZk$VrP0m+i70WnbaK;)iLf1PL|7o{z~ph^o<1&HD!;i~9~n&iVq&^=R#Kj$
z?05zRQ4@HTjE{}gq2`DONyYb`m}JQ24%(VR2na|VZ)10z0#J4XJKeE>1$`99(s!fa
z)H`0}xQ}xK2A=Uk8UauR3D#tWECy6x7~HpyK#{1MF=in9<wKDWXZp;`zDPy@q|$@s
z2M?froEQ!SENPth5Ws14eG%8Tx8DtaEG9d)u`Wa*hIS)z29eX}FrpX2G?p~Xor%Dx
zhoEA}N6U;Xgqa!VLc*bdvVMdk*5Jsh#<e_#>(yu{8T~i!QDnvVt-E)%eSD;GWaoM8
z#@>D3=T&i0(K8@+@_#3lQ41Mz%w*-3QJ)0fjwZs?qQ0(fC-R)Yh+!@A9UAqpyzu;+
z7J6C$UnGF;rSM_wKu@0aJL&7Qk~zFBEpf1kLe^!ruLJGI@Ae7*!|1P2z94O&vDnKA
zJXr#W9Q+Y9?2w0zEdp}XJMdabz>@xN|6FAxb4KLQp?iSzS+;Dc0T%pZYBRSIv{X1W
zY{7oul>7LlpN@Al#E|jB(IZEo{#*l`d?aelb;=t~bm9-ic>{JUqosa!J+CxF)<abs
zX26E~vkN*Z+S^Op$EP~BqKUvVJBlX{#Yy1mVy0A+C&}z21~Dfm3LBfgsp&^2|Cudk
zU#GVdS0lRqbl9YzR6{VZt=a7zA$bx;Qri~KBZJ^1DgcCoi3cqEIh;NFqvzb%XQ+`h
zAe;h#(u>jtlDKdGPAZ33G&X>gv5<hHva+x!A-Th>G~XO!2z%m;Ev%6ESMq1z4*s$U
zkql<HcZTHYpW8f65h?q<9h);^PsEUZf>Yr1W=05pV{qc$NKXEQvdIhP1A()nO5sNK
zP0qC5JrM-dvjIH^IUG3=ZxJkW|D7L28}A4S2%N+@i-Z($>_^~X(O(sKm?+WFHVkw#
z6ue|0l2;&ZgeXM@lQ_^!2G;47h1S<t8`1>W#q1B#q)c?2i(`Ef!o(e`U9q$uwJfYU
zL<v|gH_t^L(14X?5{g!8;Z*b4z*aorplcr;vJ$pK0f+j`4OV7Ix9bo|;Ao!ARPp9`
zendv$r+bI$K}nJ=1l<oLa%2P$cE_gSz4VU#82mSi7*ia#=p8Yjzy{%>Bn&B1&1Pge
zgoz>3Jaq9KCjI(hk`4)e=o=Uyha(2{RN-_28FQ_L;hs@&ssL(H2>)yGoDql;tUs10
zncu6R5SXpGQOTV$Cf*h>h_~aCb`M8E1}&Y7mzOQ@2OzMQ0MAN*Y`YUyidWjSX{tV8
zITkv`D4s*GymO%X?z{c^?@oy`wI@;JdoJF-%v2$tzv#S@va&q?`W8i7Q?nO2B9$nb
z@BQuLN98SnF(}FY3-;JRLFgJUxl{m^Ak)I|NMke@)a<v3TAl`u5wC$iKkwP%{-J_K
z3vf!x&y0@udm_GgJAN3w#?uf-5a$42SCxPJ#sRvIL@I%bK+Ey__wNQgS0t?s3irCY
z4cIx+e$oU}1>%ev9v(*A>3z&}M&>wb9b&L0;|bxdAT!XhwLxx7Zr;yi-%s5ITQ@6x
zWh`cxUu+ziBfSZVRiMgRxBxi6Zv{Xoi&CY+aladI_2H3`Mu+@c66zk(+(>Fcv(gK_
zE6FtR_m`b()(1n~0C!}vchA~F4#5z+RxaVTP$s3Xzooo*Lzuqs;{0G@I8tFw$IGR|
k-xYAqJcLP!&SQzErPP`I*?D~?3jV089#zRvIvMnT0I@OVP5=M^

literal 0
HcmV?d00001


From c4721f5ab15a841e4d5f5e4791e49de34d333228 Mon Sep 17 00:00:00 2001
From: zchen0211 <chenzhuoyuan07@gmail.com>
Date: Tue, 10 Oct 2017 13:46:09 -0700
Subject: [PATCH 73/82] gan design with graph

---
 doc/design/gan_api.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/doc/design/gan_api.md b/doc/design/gan_api.md
index a1626e50d7..f99d1e6540 100644
--- a/doc/design/gan_api.md
+++ b/doc/design/gan_api.md
@@ -28,8 +28,8 @@ In our GAN design, we wrap it as a user-friendly easily customized python API to
 | Repmat op (done)          | ?                 | N (Cond) |
 
 <p align="center">
-<img src="./test.dot.png" width = "90%" align="center"/><br/>
-The overall running logic of GAN. The black solid arrows indicate the forward pass; the green dashed arrows indicate the backward pass of generator training; the red dashed arrows indicate the backward pass of the discriminator training. The BP pass of the green (red) arrow should only update the parameters in the green (red) boxes.
+<img src="./test.dot.png" width = "50%" align="center"/><br/>
+The overall running logic of GAN. The black solid arrows indicate the forward pass; the green dashed arrows indicate the backward pass of generator training; the red dashed arrows indicate the backward pass of discriminator training. The BP pass along the green (red) arrows should update only the parameters in the green (red) boxes. The diamonds indicate the data providers.
 </p>
 
 

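To make the colored passes in the caption concrete, here is a minimal NumPy sketch (not part of these patches; the toy generator/discriminator and all names are illustrative) of the update rule described above: the red pass computes d_loss and updates only the discriminator parameters, while the green pass computes g_loss and updates only the generator parameters, back-propagating through a frozen discriminator.

    # Toy 1-D GAN illustrating the parameter-partitioned updates from the
    # design doc: d_loss touches only (d_w, d_b), g_loss only (g_w, g_b).
    import numpy as np

    rng = np.random.default_rng(0)
    lr = 0.05

    # Generator: G(z) = g_w * z + g_b.  Discriminator logit: d_w * x + d_b.
    g_w, g_b = rng.normal(), rng.normal()
    d_w, d_b = rng.normal(), rng.normal()

    def sigmoid(x):
        return 1.0 / (1.0 + np.exp(-x))

    for step in range(2000):
        z = rng.normal()            # noise input (the `z` diamond)
        real = rng.normal(loc=3.0)  # real data (the `img` diamond)

        # Red pass: d_loss = -log D(real) - log(1 - D(G(z))); update D only.
        fake = g_w * z + g_b
        p_real = sigmoid(d_w * real + d_b)  # D_t branch, label1 = 1
        p_fake = sigmoid(d_w * fake + d_b)  # D_f branch, label0 = 0
        # For the logistic loss, d(loss)/d(logit) = p - label.
        d_w -= lr * ((p_real - 1.0) * real + p_fake * fake)
        d_b -= lr * ((p_real - 1.0) + p_fake)

        # Green pass: g_loss = -log D(G(z)); update G only, D stays frozen.
        fake = g_w * z + g_b
        p_fake = sigmoid(d_w * fake + d_b)  # label2 = 1: G wants "real"
        dfake = (p_fake - 1.0) * d_w        # chain rule through frozen D
        g_w -= lr * dfake * z
        g_b -= lr * dfake

    # With E[z] = 0, the mean of G's output is g_b; it typically drifts
    # toward the real-data mean of 3.0.
    print("generator output mean:", g_b)
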
From ea56018907d89ac541bb1fa0e184159142a160d2 Mon Sep 17 00:00:00 2001
From: zchen0211 <chenzhuoyuan07@gmail.com>
Date: Tue, 10 Oct 2017 13:50:06 -0700
Subject: [PATCH 74/82] gan design with graph

---
 doc/design/gan_api.md   |   2 +-
 doc/design/test.dot     |  35 +++++++++++++++++++++++++++++++++++
 doc/design/test.dot.png | Bin 59401 -> 58935 bytes
 3 files changed, 36 insertions(+), 1 deletion(-)
 create mode 100644 doc/design/test.dot

diff --git a/doc/design/gan_api.md b/doc/design/gan_api.md
index f99d1e6540..2fb30432cb 100644
--- a/doc/design/gan_api.md
+++ b/doc/design/gan_api.md
@@ -29,7 +29,7 @@ In our GAN design, we wrap it as a user-friendly easily customized python API to
 
 <p align="center">
 <img src="./test.dot.png" width = "50%" align="center"/><br/>
-The overall running logic of GAN. The black solid arrows indicate the forward pass; the green dashed arrows indicate the backward pass of generator training; the red dashed arrows indicate the backward pass of discriminator training. The BP pass along the green (red) arrows should update only the parameters in the green (red) boxes. The diamonds indicate the data providers.
+The overall running logic of GAN. The black solid arrows indicate the forward pass; the green dashed arrows indicate the backward pass of generator training; the red dashed arrows indicate the backward pass of discriminator training. The BP pass along the green (red) arrows should update only the parameters in the green (red) boxes. The diamonds indicate the data providers. d\_loss and g\_loss, marked in red and green respectively, are the two targets we would like to run.
 </p>
 
 
diff --git a/doc/design/test.dot b/doc/design/test.dot
new file mode 100644
index 0000000000..62c69b8fc8
--- /dev/null
+++ b/doc/design/test.dot
@@ -0,0 +1,35 @@
+
+digraph Test {
+    z -> generator -> G_img;
+    G_img -> discriminator -> D_f -> d_loss_f;
+    label0 -> d_loss_f -> d_loss;
+
+    img -> discriminator -> D_t -> d_loss_t;
+    label1 -> d_loss_t -> d_loss;
+
+    d_loss -> d_loss_t[color=red, style=dashed];
+    d_loss -> d_loss_f[color=red, style=dashed];
+    d_loss_t -> D_t[color=red, style=dashed];
+    d_loss_f -> D_f[color=red, style=dashed];
+    D_t -> discriminator[color=red, style=dashed];
+    D_f -> discriminator[color=red, style=dashed];
+
+    D_f -> g_loss;
+    label2 -> g_loss;
+
+    g_loss -> D_f[color=green, style=dashed];
+    D_f -> discriminator[color=green, style=dashed];
+    discriminator -> G_img[color=green, style=dashed];
+    G_img -> generator[color=green, style=dashed];
+
+    discriminator [color=red, shape=box];
+    generator [color=green, shape=box];
+    z [shape=diamond];
+    img [shape=diamond];
+    label0 [shape=diamond];
+    label1 [shape=diamond];
+    label2 [shape=diamond];
+
+    d_loss [color=red];
+    g_loss [color=green];
+}
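The test.dot.png referenced above can be regenerated from this graph with Graphviz, e.g. `dot -Tpng doc/design/test.dot -o doc/design/test.dot.png`. A Python equivalent is sketched below; it assumes the third-party `graphviz` package and the Graphviz binaries are installed.

    # Sketch: render test.dot to test.dot.png via the `graphviz` bindings.
    from graphviz import Source

    src = Source.from_file('doc/design/test.dot')
    src.render(format='png', cleanup=True)  # writes doc/design/test.dot.png
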
diff --git a/doc/design/test.dot.png b/doc/design/test.dot.png
index 768e5cac4b14201f378896bb3bcde9e6ee540414..4e121a40b9f7b2232d7cdda315bad15926446f55 100644
GIT binary patch
delta 41475
[base85-encoded binary delta payload omitted]
z{m7y9!WP)Xx4|aK1qO^pe?1$}L@acj6!6x)w|kTXp$fxP_~Y!O7wIX7Vp1O444B!a
zB^8(}h5$kSpbx<E<oI{bkc1DLbc!?YO;q1yXTM+lo(cK|zp!v_kWVxhCud(R<Vwn)
zW`D|0X&h7<(p4)*h0;uIU|*~!LfZ#+|CZlLpM1Dq6$_d!+uGZ(4WMbj`y!!orS=bn
zIcXn!!c|@US?0Q;vb|c<*{?UGc#=2peb!VvlZ(FAOKcK}iN4Qc5Ybx!Gu$vQfljr8
z%8E|by=`c#*GM07R8hGA)U*c_-btf|GTQ+T7~$1B3aRFSaTd_R6(JgZ1Je)g22{NG
zftT!-fXE9*$7>oRXfUHJw@*gOwRytAL#DV<znKX(#Ngl{m6V6z|6<7qkJaPH%v@YV
zATrK#fByT~WnKd&1VJVIG`NDcOHe`4Zr)`H@}MjfK8?515#ZsWVF6UC0ks7{Wdwvt
zZUOGfSN|N$p;H-xKewukjaOK=`2Q>5pI5Qnm9ozQXZF2O1;m$e5GW;JU&@1MT<Ue;
zOm$Y)y#yM-#)Rh@>REw#Z3w9cxK<b=I~zOd@A!s?PpGrH*{@cl#=*gXrd6H{f5ZYi
z7^Sn1ekS|Dz((W5MEyMr_ZYb0KOi3ZXJ<3RbY2Y5=e7DHVkwXQiQG-F<_tes5g>3s
z7a<J<Np4CiJ_{U71{md&K1bXz0}u)utQm5*;Q9I0)Ofz}PKU@4CVj-R;!#o_{I2lG
zTsjG6x1dVtf4E=ZQI9&1oSdAsC%dCNNPYi)yFdAZKK^6;3ry64$}64vdV1P`vDp|H
zWQ9uHk9O@LOM$4TC&JevFo9VBBkF-mr~m#vLCpe4SPj_H!cSo{>hH+B*tCLq3!iCk
z+l0hiF7Kzi?+-_P<iHt6OEbv7V>z4d7DjFD?V+@^v>_q+pFYLHFa|VFbyuXbTun=h
z<hG(>FH9Gq8Z_wLi%WWQRmHx(7(_+wxH3Hs(mVGI4Yj5%Qwt?09HCqgU=pND%k&}h
zf|UVT;`}o|kzWuBfa(3ake668>4m&Nu0u(KhTzkzkyI0yf5ggw*o{#o$TK2hRR|ox
z|Aw&bPP$;F2{7Cy)$t+FXMf^j0)v8L<uWrMmS1!_gMdF#QJ4=x-e`l_iG7@Nt11|j
zFWlYpPZiPVv52Ft`nr|#85maRpsB(;FCqZqkV4O?Zn#k;bDH)tD2NP`0$zRV*RLeL
z;ti)>@4t`FoDkEA$78g9lhgcw2ml#o3A{(vs5(B_G9Zh13j*^X3v2Dqlt=K4K0moS
zUSiqvA8qj*_wr7-a&h2icXvsbcC5HYJ~6#?8b-3>cMBX40?fs0lF1<<;7==oB~o~W
zasHZd%USJFzlKt<^|HQ3O1<YEPlZii5GFb=FYiHRlqY``IVlvMkiil&O;nwPX#S9g
zN&k*GC{&D?*=c0@7Q{W6L3F|0k?`Doa&BGMzt(LHlIdn<lWo$HT_BwJ!iY+~$4sG}
z-X)1lk6HZaH=S;mn=KuPCB5=@AFYko;9+25PtSAIqI7dEEjIS<@-Eyy1_L_)3ln!k
zy%_{|RC`0kS};q8LH;FN(<#bg%BZD9K7Q0fMltYJ5#ccH`Z6#uv$mENxM)ytFdk~#
zOJY&-);LX{g9+H_sVQIZ+2l1f*8;!wOE&ypgMyO55UE*`oapSlh=JMid&|2=pnzaN
zx$e#Q)|dca+kgUOh#dyN{1j@$H8#qk&i$=h$Suti>Ho{_xj2-Gz5X@Kgbs#=Fc|QH
zbv;KxWG^aP0Vf9o!?Bc@DVei_OSE)!Pxsf&(U7Tg742Ja3DH|hX(-OgIUGF=E-mGQ
zQu-9KU@+oC0M)7W8$Ch<(xn>A4?{=9B7HiDSgNH8BNtI#y|ICnp=OOqr+M7D1(W!M
zfaCx<F|x3{8(afL@d0=^uGeH-mvli2`vY{o!XYX$G6<;lgx6N5qRchd?>~PkC@Y5)
zQr(l2I|s!dO+&z9N*#6P{g%-bBoX|QUgLim=iG^0gHx|`p3lMUd(UM%?YwRx|Nktf
zzb}*Cy!rncP9}B}-qX#2g4Vwn2HNwWzZUT57rcMZ0#tr!sALjdgO9vfyDO>Q%uy*T
zGoW2(r=&!;TWv$Y_)$c_!s2{X$oIxZUyK&S2Udnl1CXDco$xIWU%x`^k-F;37ZQx#
zK>$3j6vz>x-^Buf6?lI&SJFY?aW*hXwGGB8l#5E+^!g7z5BaacY*LYMD-RbJjLxwd
zRXIN_JCVA!_kQ`3tp@c_Fl6_XS+b-&cb(9|4-*aNPW9FE=R1nn+}89x`&c#7kc?Xa
zxae#LjJp4VF;R5xQV@`kxbrN2``jkw=a{rKH9`t5txmBI_sW8xy@JgZ0#3CGNZjS%
zYa78NxcA{=p6;n3*?C#mxw&Bb_JMYMr=)iC)sp($i@kz@mjoSK)RK_?CF8#TB^vy_
zd>AZ7XY}z9V%q$n$%caTo#SG&hIB~&fA8yy1E)X<a1B5=1#maAVJ7A-tQR2cUf^@+
zzzPMIp)3@zk}F$K7TUYJqtN+#$Y~405gte#Dmy$uc2_013N25t_uU9@M@RZ*Fpv=L
z`d_j8O8qfuI6A*6Hosl{54Ady72584a<wM3DR$W6a)d||PieNlZ&sFdNJ3V~xnA=h
zkd3+N*{brpMeOnG?ydEF<3|(cz1*I6{glX=!-H>w(F9*5o0M7c7r=(16Sg`ycyV_N
zSRfpHQkV?Uidv_LLP<mClAzgBiIc87M`>VU=es%9XY)$4vnxD89x0>>Z_p8tCP+zZ
zFvVzr0HU0!^$YImfVfx@zF>l5>*S_X<T?~_JzIlymsJ=!a@z8ryZx(`+wbMPGRI<B
z_cK<WH!^0$<0>q%B0l3^7@L==auT_*Y3cl)&g@6xU><<dAmhXYwhJs~3uGgRz_~qH
zRZdW8bwiRBS>YGpv)_h~=7sntrV*f@)7hyvTY`%Fix+wY-k|WY9L`SEi2N!b=XK**
zpCmLM@O$=_&Fo0e!(9JaZ0s`$9hoB{UV#~j6iOJGqEg8rLmd+sWq{p{MoLdM&@b@$
zBcY(p37QeT5+ts4AQFik$BJn3xX@<acgSeQ(xcupz<#ZkJK5hz08uzf9X>zVv&=K6
zUZ0APC{})<>qe#iYsz0Wvl*8LBl2DCm`Ld;-oWlu$%%!)L5FS_G>v%)!5+5aQ)^Iq
z*O#J7V7P%N>4#WR(a(zB*;Bj|5IpoiB2cKXB{lid=*IGA7ip!e7U|$L*oUvZNH5uP
zfG>i9sc`>shStj7ggfv(LGX1rGc)q=8887_n1jGtykok;u6%1~VI-BF%ZlSlkHi|u
zK!H5Ww7LoYxlCQiDf00t^JEOwR{(~}1-h}7m2R(lj<;$8?6P{bt06`K;R#h4l@bTY
zjoi3_+$ab6qQ2$9=4OcaGltD=mcx1a`fSo9`_X5fxb8**ofVJcUIVmJPv-SYvTDog
z6{!Jvvgl{3$zt^jX-Ijd!W0z9|NF!Nj7%Nh8Ehv_-L|i#X0g-f+!e8SE0MIOENbB4
zMQheaGO(iejr799lve;9;A@aVaEX4cARlJ)7ZS9;|6ul5lVU1}x8AX>&n|d={~G<b
z%HA1#8bND@1;U?jsgE8#Du;M1TxojFUDA_fvue`CFHaTj$A7(2_C<7@G?18BVS3t2
zsig?40(3SSM!d)|-gS_KSpy~<c$tz?2bgdK49EjsgGv_-7=#52F$(~XqM%kjI3(>&
z=+Y4D1;QB#>eIQus0?6C+E<k1z{-*Bc5>)?m+dkL3T$8G<xS^ceEXdi8_a_ij0Ica
z2SSyrsuHuG-kgysnl}|%TvTXks@C>eo5(WxYjB<VVoxNUf`bEmdw_!7(2|3tY2p7V
zVc=>ZFirj)xC_JR%Eh?@XClZIz&oRH1Fv9xM!4?^+kq@FEnT2~Ry?$gcbN|vsp#wE
zyF{bN5*?<JcDE?h=OiKDMrV9EUGjBE@X{-btN~k_xsC+%OQ-;OWWbby(Z>f^10`k+
z9f6+;IT#uJAV^amZ3H>Rz(6EWPnA)K2ZSMW=bO0}cVlB6Vt5r<()w&&zqM7^WYe@?
zqRL2hp;$yB4Lmj#DlIF`<;TgsZOnvzFq@a)X!3;b#zDWp1itbKzN04oPD&8lQ{$^(
zaeCVjSY^^F8D52Z09$7pCfxq(0VnKI*GU$5>Hfa+ZtAslMedAGnkCIH4-FM>pktTJ
zMUoXM3&VU_84Tka?)v%)eYvITCUe_EW=IS`Z2*S9AW3L?Gz{O0ahD?j@?Uz7gj7sT
z)0_UVn{-;j$S1f=iWMn&mjP7V*n84(gJc1pZu*_Kwz3M-;Xi@#5qIDGhpHxG0h|WU
z9M8!i`dYHj_IZ1b)|Q{opHrx>uO82R!uqHxUzY=2HGtRDd=@WH8R9qDOq8VItHuiT
ziYNfY=_o7X!*?U;b0mV+bm8K~913nD5{&IgIXioT%1IM_o8o%=cyDhksyzNTb@^DE
z!cSIdk&SgF?3Ix^jLG(dUsd4YL8{kk$^?3bORKB@B4?e*r<+{;CKBLXjklV+xR|7^
zjF57h{#0DN@(iqeG=~lIQQFql)(ImXdKNy%L2=FL<JMNTwuMLl(5Rxjk`@g6v!*rA
z;R~?lGI3!A)H<x%PaX<Vj}sf@#O7<#GZ7MwcQ>^K<v-ROr#|dNQbU=Ts|tqi-OW<%
z55BZ4Oa=KV?IL{z=PDjz1A{CA@(`bcKxM1t(m)Fc9q7V4!z5}>!$nCtgLL+U-hwy1
z%zIywM@Yp9Vm^lD7~i{R4)q01xQoy!vp^|_L?EuCIN`mI!BR{lSjaw2t#`KeeWLsI
zME7g)zqTTar?ozG|4&WV0hVLm_U|+(X&?=yDG5nR(a@H(Bt=R~Noa|tuF%#%QfX02
zi!!28Q7IbQNtC7%8ib<t{r;Z!`;O!Nj`w+9>b|e*KhE>FP6^4@9;;_N`bM)AAAhLb
z>)EW8yel9mXk3L>@Pz4@%&h{WX(bUM#kH@SmdeOXPJUGG*3}QP@>g-4Sa-|xYsyF;
z?ePiv$ER~=ojTdPzHad^XV)g`3?i$Cdm|Vt#&)zdLt0Mm)1CaghN%NsP1V@LkRnp+
z(Y5^3hQe|m#lLe2jJphsj5eTR2JS6dF*4<_Gzm1ae+><#nLqBkbD~A??S9LKh5$AJ
z537vOfa|OKQYjBBH#b!;1);dF*QN$vm1w3U=KA^>u-$3_<EeVjJivtI-8<Ti8v*H}
zYclRwPbDjQ;TFqFlxth>>O#4^XpBnjJQbLho=yZVLn9-D5Lpna`tCep=&ZrR6CK3B
zz(4`o@Vvv1m>?ah!uhZbHDKe%Lz)*x4B1>rmfKn_bMar>l(FD>1R#j{uIQm7Z*N>{
zycAMzh5&S^NX_FZG}p&nLMkAjRHzCmdYEoPlW*5;JQJ@$$EehYD{3I_>g~1tM~;}y
zWJ6+9ffJOD24pwn312j;>^Y=Fcp=y6fos3|X3LV*sEvYw)b-kIvp$~XUbpgwkvg-B
z*OWl`3~1BD<N1Lgk5t8D=x7j!MdM21lX=K@a^ho|V^4`3t~APZVa}Tbu&iWGyttYL
zvcq7yTWgCIcOLwbjXJ$$g8(#L=sMQouOSA517Q+1sKE%MAS5o%gbQ+5ar5xbZx<y+
z*<$s^`68K6qh9*^JlC$>Nsed_(Wj>|?%{5YB$YWamBJ21@J1pqR1_PgxIjX3hUgm9
zTNJs{6=!U6TBvVtUk#D#rQ0I8oT1B*ICT-CMS!Q5fKA<a$B2{w@J76}lA)tvS7*Ti
z*x1?{4?h7tOUM}r!h*vavvBY(0}V0rv(0du=ePw_IWOM4iJT~imxu1iP_VCk<M{jI
ztaepQ6kagP?lF|-e>Wx^KOxIEH#gN39NLftv9}d@M#wpFg<ldX5F^U%CMOYQHp16(
zC8g1GbwlFgQ_No82S0B_j1IQH?`WUuJyW~taIR)Y+2HWzESzFQK7#f|1I;t1pddRE
zV9Kj(C82$_Y-p45eoa-a+^oi8Jj0fuy$iVyCp#a*)Jg+b*uisEfoMiyYkdjD@&oTF
z2Q)NLLfpW&V1YcC`_Wym@Z$K(vqepw5^~0xnlG16jWr<w@6eM|hvl@z{J<6j;mHvd
z`q9!*P*w009oUQOz;<16+}j&AGel`k!HQJ+!#`5{65cG4MtAVUN-LlLyBcCDEr^)l
zyN*C!S7|j%Rv6!rk)taP^%s1B;!&B1vC;VfLUJnn8`LYBZNdT$ipUJ^nHLUhG+uxr
z+6G_XbyL$TC=HwQt#)Ijv;O<yhYsp2AZ%?Z04!=fLWyi#T-X7)5O*F72jq`i<KN)N
zl~)bLn%}DdaC{l($Qn>Ls2o=TVwqvPyp02|Vq0|gVfT?}R8{3r!OA>cFD^a+qu%7t
zpF<lZ`-`YW6_n`FxPmtydBXb0;f?KCn}~?Mk?x(`oOC}wSL|)0PBr$Zu3Smmz1x!c
zq9hb1-edfJ`rjc%PKP1i>&Fl?4u-=OTSeFZ=acQV@~mVJdH1?3TxR=)Ga~_#H@?Wp
z$=z$IEqmP82`vi;=wAG~$EUikO%(7%6`bH@J6y%bB4NaR76!y_U3tE}xjWWm*VZaa
zNwMJ>Q(6uV;*f{^)4A!ezQS(MI9G6KrEdG4^W_?xPg{qMR-c}b+g2R6X6_4=?P$?L
z0RP>1cM=X4PlN>;G-i+LQVR$5P)v#&Wt%W9)8U6=A`sY;AKJ1REa$@&`Fz@wZ8E@K
zI~KD>@!=~)L`0<B1~lUnsDysJiT`=MI`k$*J#TdLYOJ5WDeb-$av(tc#M0m~_MR(F
z%F;HhEEw7X?HShV-G$~`?i?(Y*mR7j&T&Gz0#756Di9SD8||Kpi~1vejo?GM%hxGG
zxX~x4^`fpSKF0S%#a3vxpoe<!vT@mNs`!iw(L4k-y<Owj$(A*sy*5(Y&SR;*5?O;a
z5jKp^<GIOGRIaf*RVl7lb4;P~>V&J4lHJ<kR3&d?t83FR@PDpoF*vE6#ITqsH9V9O
zZqiU~p;Q4;l#R7gttECOTK385=@9IOfx$s|DUZI;9_KgK@H}^yvY6=|=Bwbzv?rlu
z-v?ib!m&uyebC<iVGHAws5maLZG-0u54T4{QmnRu#S>kRmZC#|WUHEJLesRCV_7rk
zO+FvB{dn7amwTLAce<9{?G{JPgl;g7+W=Y+uLn-yV`0e~xhvQ=R>KnBCR3Sanrr2o
zN#$^$jIl*CL<G<%kRfn}HjtCKg56HRaGd|`nd7xRs?NPke&;XvPUp1iL1PRT`ij-7
zpXx66hncMct{23!L9xjtGLu*3?LIv{l6PXqh_jHEmZ}B+m1|3~OjJ#8+^_NNDN$wo
za`p21VnAb}z@dpfZ<uOIB?>s*>uq~kpEIPMeW|o%kd9sAmg<)F0mapBho7yPX`%vN
zMW5ZPq<F;BxAVTeU}uS(>!@*YBA15yZlDB+gGIfvB?;Ip_#<LBzQeGapW}?XOQ1IH
zqV4f8krtuW07-38oVNDf$fvRyo>eg{hIg5F7IRaG=7BPR+_yyNhftq_myIT264#y`
zGOL%4l|8v;GLzoiL=vP9i`=I6p_KUeP=n-MBs762-Jdx3gk!%?39J!y$H``#ZSq*u
zGxM7HQ>I;Kea>^~6!RnRL;VfQzx@LYx{5f^;g7&OiwH#95KrB;kC&X888j0N-Ex%%
zZ3!2D&+y2|atBR_EFy8c5NAw0j36*a189Hac_@ODh&M8As%XbY;(9m4(bF$AEvmhE
zaS5cdCff?+pPirG0ij?du-3Sb6tZMwT|V-|{DYeP)ol|=t)~kqoGRG6#TIOZ=5Hv=
zr4VaK*M54@kWjKa)hz7KIU2yMKwwUE7k%Y$F2hkS1bzTGqU0H?6BYRgxVwdegE+IM
zSq!#p)ANRRC^0_XHIb-c`dOI0y;pc1Idbb@E{lBJ)vFhfwZkJ+V0J(<HZ(Glkro&d
z0%yW%nkyKvpWnKuRq*mHhQOlI56N9fS()<P2hwth<k7IpMeR{Cwc+Zu35`n^m3FoO
z{XXR0m+Tq9;{0WQwu#r@pBwoFis9k>KA=VJ$LA%NLrE$NIcp*DQA1iAIXu_z$uuW@
zD&E|V;?!z*Vkh7u?RIE-t6z1UYIWKqw|;7<7GV{~MW><RN07tspbt1+5VS}n!9vJ1
z;hM9}iNt3@B#EP8o|(oQU3Qa$U*D>ZG%g-lv-i$317kG=h|*qky`GOTRH;B!ax`9j
z^ZZjykqZgD9~x>*1ow-1K8V6yA%Ep~oYoHKyxffk)S?A<Gf^+}-J?T8UqGFA)cdp!
z&I(wtz>TI>%sRxI5$mE;*5NIl8a%sxQuQMA^qV>~wY5pGS;bac74IJmIPSKZ+2q5&
zH~=b)X)M*|sMcha?zX1llegf}*SJ!Dc0DuF^L*-W=#+Vp2sDY!n;B@hb%kJ!2ymwU
zRv|rT(FCwP^mU@35+J8%+U?+if(Ob<pTVQ7D8K*>lj0_oS}0btvyPAB^c(J1D&C_>
z<F_2LZIk)*??Q&w0*3rO2>2jJ8OkhhsRLMaxAHdou~=6GGH2DfT*6HWs?6xFfk4r-
zXH`8tlmJ{6dr+YCLkB7-m@4DKmj4KpE0JvHmp=E14o3bZWURrsPk!EeS#2Gh&02AM
zUE|g)ceogMs`EsK&HxnT;N}j&wMPWbD0HfbeK9d{ISpd`Cn}>p-rl#tRl0h3U|9>`
z*SpqyJi422>z4IGaa6sHy*;Dq(>dv9k2R$O&>W;8kN`~<HbMJh0z88dKw6ah@d8HJ
zV{U$PVY;ECgA3@P7SQaLhxYqmt>sivQL)!1A<M+mD0Edvb#aTkcucLTrjW>EN3C=G
zHUF<AXjwvJvuL~#k0#{Z<#Rrm>c2wHn!EOMeIdL0&b37}!xxX+amh$yvIbfbU|~#=
z+VlXVA3L@zf$F0IJa}{7;q7Q!OuP*&M6O=%3NrsFkZpc%4SD*IA){hM(n%=D>{H!E
zoXAQoc=(W%u&D1s@n-B=(zHMqbVN69*}S=#!tDy-jx7SUkyDV<`qer<=Nd;@_7lnH
z1pX%>g~?+gTtO&r|7Z47L%Iom1wg$h@F3|p3jXE;dz=z*D$eR~BKlhds6lHH8Wp9D
zPec1`Wd{0*Q1DJpb!Q&4PVL4)3G6gP{kE#9DTNqZ>HJ$Qj-(GDTJ7iazt^HKKvE-g
z02Hi&>D1!F-xI6shynCpg@y!x3-n8`+`P#G-7xv=L$9xX{yP`8pS6k}u3KUpglFZp
zzZ`=oKUoaeTEyK3o|7apk>C!<poDK;^*+eSnMS3h6+m8q+jc1$8%Ol->gwtot5?7D
zzAY2%%|(75HO~c8QHtY8Ph=#DK|I0-@DULE8~F5>#59Iu8cR8OV$w@XllUe9hXtc<
zR!wiQhKBc_(e=KiVtJLF6VLurwzwp|Whtj*n@FDODWSNq->YW6586051cS8i#x6o0
z>fuPkZvo@B_hBa>VZ7K$NO|HS3V6Ts=UeU?3L*~lyMV}nOQ9mg^nK1d#d1d$%rygX
z@T4K^k61ri%6vVM?n68oXhcqh^=>+ZWEC+n2GmiNI4X#X7+oYq9&eHRCzjs+ZF0!@
z@Ev8AOCl=2`B>r|CF=zj5OQ|5l2j~0wL${o7a}f+g-qNgu0KETNApQcVYtPjp#`q;
zbd2Ccpv6{!;>&K0=hr^z7c&6G9)#3AYgp4rgrj!48mxSvN&3NmRd_n4y~Q_5+D)wk
z6;_2s)pBR=s&Bo|Q&GW+5)ZbiHDgrYvR)gL*T|%9DLiSGcjPG>i6(7KQN9d6GeUh@
z;9-cDv1cK)EppUtn{TPjbUt5LU)SrR{SI9Jm3MXQL~wyT4r1iDwX*}<H1TLBbv6pF
zRbo!~Cn7__JrQZ+h5Ch5^Kjzzx6-pA#MC?L&Oo@4*v%mje9o-vlz7m|YMYp>=971B
z@YIp3#0!MfllV<MzP??7jffhwMhCY}XU@Upw4a|WtMBksONZ?2wJC)d>I1>Ki54tT
zo^lB;E}pd8As)#mR|%YgY%A>f0X%qQH}KHmqDIyhO%<U}O;=5lP$!b_O#H0mqJb->
zOP19TTp8&R5wc)C*;jFD&yEB3o5|^{?Bm+%!5Pk=>~4tc9JI|O{045(BJ;m~PxxKD
zkv&Ec?!Wi^@wuguAG*5w0nWdKTY{6HzX7h7*SOiwsB9UWSbQ$p&8qiVbQ%mY4|qS6
zK1Yh9!gBL)U;0h=cz*6K-j1%QZoU-GW>8}CL|}mLS##JgR!x&I_(+B)aXIAS4!=n}
z=-i32(Z<H+9R;P$N%kc+@$w=U+8kwphT?ugZla`~8cT^@+#j~V3{*ZOMjl;RPR4W8
zInYa`hcDD|ty!~@=G0xZZO_$%jd&2XK-)|txq!U4;LmQ{wd=k0m|>E_Rk)(F3ksU8
zPs_+%?pq#0SCXB8&dLVvCch9{S&9fVVPL@8D0=_*RO=mPvV!pA5nV^91audD5)Z>q
z;(rKzEtLK%ZtuQ*8XzPeAbky?y{@fG?Yfa}L;v~OGaj@0WgU2UZeQ9r;^ih!7-??W
zxig{dauaY~9^xUM9g}bTYaNl;7u`kgF*R;$u|<?JdA_)y=+t>X5{VveZ};FmlQ3D}
znicS)^F`KJZ&HQqAe^+Hux%kQ5`Gy~EZ6U_sC@Z)_tndneF%&skt;CZkO(O3#tP&@
z7fzSeVpsz)ibBfU2Qd;cwA1=<UUjC9I!OTkAS;3b_r`WS>i+U=+7Z2nZ~r*ap2fKZ
z61xfMh#Kf=vT(B}73LdfXAyD>R>@YULQTz|SSDBW#B(Q;6gguP&NS85-o_Q(iUiP~
z(<tr_;Q7>oMT6}XQUv&*arEAupxU$cUr$L{sF{^qwaN@f{4!dyf*W<Q&7WuRWv0E;
zEnKwixn|Liep5K{2pS_u%{}n{f;ij{LLFkUy6fS~kVZQ*e;P{RnSsk&iR))>ws(<<
zhVLwlFcDxq!KS&@2ph=mP5M<b8~76HD1gs81_p?z)D8p<cHvj|8FzQ6!QLF6g=mD4
zqIvoF+$iqhsNVwB!*beIz1toW9|dTxh~lS_s>}$8rytY|;3er%X#C9ob9S0Jvas-M
z&@_`Yt5DidV^@;zioB7dPn|6ZSO4fYx=T8mVT#f=uMZh698E4QTa#m+Ja|AyBidB7
zO5`vFoYxHD+o_LK>7U4H<}lq1b(D25ryz@-Qy9hKwxdS#OBby--cVoOFTj2%|ERLT
z*Erb|tqe3GdAan|a+f+)KGvGO9G4{Rg_+fPM%d8_$&M7+0NhgC5Kq+128tn0f-f?-
zZKG^W=y&B&t95wonOF;F+7whDpNy+Wq~;_2MR{vkKi(IHX<$(pVYaAak{(lVX+uzi
zq)J2RJ<_o4P(JXWF5;w>fG{flP}kJ70s4jh+l!vSHBeBHDHlJZ6Cq|hJK7oq!71Lo
z={@u72?={OHL-4qT~c6|hu7fwPv22)FHuF7!zX{rz#dAVaL9<dw8b-xHMhBQ4>ERG
z3{|qY95sIx0_h2oT*4oXjp1smK`}|L24Z0ZCGyV`S#tKIUh*z37`}tG)4lB!@m~mN
zKu3e1s0ydTehZ5Tn2GGr;6f8D0(0mp;-Ms-QUAH3g+o*gv`8f94xC{{MFlY#og44|
zQ#1Q<2v7$})1mdWHv=z@BzD^24?ndb%&)`&!|44Wj#Fj-H=}u^84#fX%A?B3$bJ8h
zd7nPF^M>7e(*c#h!>PV#Es5nHcJ5!le0lmwT+~PZ+0*zd)Dn>6LyG*`ftuUhxj!U5
zyRv5-F~^-&Bpe4gz+eUyrum~L|BIBo(Dis(z7-@-8AZ=Sbg4HT&t-+5HWz32zGyIx
zJwZNg`NH3mto#ZbxaaP@T?m@FuwP=^c^t?j4jG}^hy;iT4YM6;9ChN2`cgb1LU9+`
zyc_!*{2OM<H00zV%~lgi0C+WNB*csOF(H3aLqgXKTL|YitV{@;a5|A7WuPW1Tj*&x
zmY_BbL1hoUM`dk^N9G&>Uh%R>j&t5hu><yLI^(w88S*rGX})wE>5-%chpP|#I&}+`
z`QH^fdcXccmWe<FRZ`J`LhuWz;-SN@gBJ?aT{>io6GnBXm1+hpOY1#(KK(!*r?8C7
zN*b|wBIR?EfBzfYGzeQE*}T&qec7Kc{LS@;pVA~q;+pU14fh9;AFE$Npn?)#8_jGp
z$_O-yl%6y<Ph<yd+0ww%$<pXOKP}*ov<HJ|#bZuu_RooTy??)?x#PP-(I5mcI0kJ{
zK*uY3O5vqeb?w!VvxH#DywE9#1Omh28NdBF8*+=ZwO13II5!$SASzwR2U3*0FCvU!
z01kiocWP_pdeEixQIMS?o0pqFw2*T_htoh>{FPen=tjSEKCs&ZqzXsq3gki1<0^Xq
z$wSL&@m1ltH;JWzK*Zz+10zM|`CiI>brsJ|bQEB05#cK@I?abRsTBVe1-L6%Ou}v8
z^8=uKiSXjCyHu8Y3DOn;qO=s)Xz(j}{mUIgc?7jGce#dLL=t|Oi6jx%{qKjRB_$<I
zA`)y6h1xPUwi@|fKYBRXh_D7(CVn9s6p#!^bn(sbl0t^bO0Wl5BS}vJ@h%by%QcQU
z(eP?j<>f@F8!ZC|l&YV9cezthbng5vm}8Keu7d~~d_3X=gp%nN7BdO_zbiKoWRTl2
zfVV^t5eQzQZ`~3?)zgQI3<_9H^ch3$`%Px8BLW?-J&TNtbR6y$91tG1Q%QND;OS?6
z<OmQwUQ+F%ra&EbWb<L_Ap;6pSnx$jrUkl}NdfT@2-{teu0m)Da3>_O6-n}8haWlg
zqbz%eXaMv(uZR(oL#x(`23sW&k%b|_p67`To9;r5$eOX1b)K$ES2Vw{9%D(+&8Ymf
zwOqAg;<=sCxgG0vdoRJn0j!jz&?Qk?IH<I!v@7QKv_Viny&W7Hx{QEXXw@Cx=J68C
z28n<_(Uu8|s5DK}m}@0R=WAu|Vw%uQr47>o69xnsw>VWXS-;(^Wbu%JJKv%L^(&&v
z$UG1*0LV7EjV_HCRuRV?0HPB5$drlCR^g$p)6}>n3Fm}CoaD$vMrr}yLnViJ&}e)g
zB4y69dalyjFVnDGGp4&*c4UFUP$+2ixh%T+ZU`?@eE<!f8)wjtyvV8wBX&#qdEbWA
z-GUIDBNkT;6*3SQBJ3pYi-7x2&^Vv&8yuuZo*L5GUQ|_4^vG!)^h>yfdz;iwK=vUd
zB5R9`iMVg|JZi1HGl43Mvsa_dCNB!aT?in!+jt+Ra)LOOTEsj7)q@X+o{+Bl#GLYi
zX~&g{*JfI{&tBZw8xY6s`dy@NY`!(5<G$ruh)jqnnMT7}ok0gc0rQ6kwe7C!nKJ~O
zdjKVhOzcLC`&a=94b~Q()jIek|L)e;*Z(yuD7G?eVL~!i*Gld2&k6L_MEC_W4w08)
zrV2D_Xw}p+jOm&WzRoktUx%2lW*Dy_G+Ti_PXjk4DLuNLUa=_>xrx%@HKi=F#IdFF
zmcE0?DmGVX$Ha^341P1hlY`L!<X*I91SE8wOuYsRHTp?wFn_520xYCLaKmF4Jb+(l
zeh}i=U!tR}`K>RL09TO5g{wm{mZht!OGw1A(0u-SGX6$v?EUtKh#1<uc{2%?{hy1r
znyO~RW6e5nmkrwqvk=$`EYr~ud(XipBHYnGacAP0>1lbd7{f`1-yFjh57l(U0xr=P
z_{zgHds5H`C(0Z+F2agn)BJpz+u?*ZllY_U*SVZH5r%#LaCr>t0j)IMaodD56PHvR
zFN*txRNQkZ_x@xFiiRRS0D?4ttF1!wuzP;Lu6Rs8;$4C+UE-uFZFts&i5Gu(|6W91
zp4<GM=_L1~zqbW@g(r!e2>RX{lxTIU`FvF#?8`95O1Ht4(;*;I)y|W)cO37n*{Kk~
z_L^@$if?{{p__D{dg7PN@A;3}0pk1TzlZbFDLl%?X`K!Gh1C5(rf4sKs-fd!f+N41
z{d7kT7mV8MxQ5@qN{Ejq7Z#7KqtU^1JDy=VO~kP(`@B3OCKsT~=r^~m`Oe<rRi%33
zRfv3RhE||+-Kx8$xujdr(5jPe*c2*RyiaT;F1|^Da5vu>4(fBy+IK~qGoQF$v_0xJ
zEG#H^f#$q_VBklqeGnKG!lc9CbMkd4Z?HErmO6TsLr0FRRUyG_i)VgsZoZ!^RgtQ=
zG(zA=zc@-TY$V~hdMk<3o8sypIJSsrXw^$MoD3B&rZ2hnh{=#x^BVvDdhx-9{`7|>
z+)|W@lU*m_#{d@~!I5->z$bPu-5OI%Oz>%F*n*>yY~tfM4!Y+bxEs}X*WS60sr+S}
zgnA3~N2@Qdq1*02L-`pPLciEkXT=G=lq6u0+#)<hQE6!o@PLfBHnI18`xb%r@m=%P
z?W|KQn~T>TQW|)`LtVCE;oLH{oMj2yI)>q<&M}pHv_kVp-t7+m8r<iL^8|E&O!OD>
zn}Uq988~ga9@<MIKwiWr*ygCZ?ALBOOLuOc?*$%rs+bL~gpZzYtJ0W0s0JdI=(&;S
zNrL6#q+F74FB6C#cma=yaj;F@UX$9ME~<+cSe0MMck@x5knKr-*e;!cGM7|AI0&H)
zpMO8}W~E}4$!^XSTj_9Y-^pN}wP+3Ta{O7V3}^)fZ2&N9+?R)cIPlP|v~+K3XxMzf
z(#Wy(DjRnZk{>SLKg!5z<aHVsyw^ywC*dJUAO^hQ^V&7iw$%C(SPWoZzR=GX8A|i{
zDShb7#iScu59mL)Gaq`A*Q)O*%SSFEz>QUi2%N;uxQ()!iS?QdkH*T5q-RjY(0a6a
zn^?Xj`E8-^JVS+sxL%A=`;aFa%QExxfb|<b%bgcSobJY%VFC*wz;PG!69EKBGgf{|
zqDnegJZETpU;u`gV5n-!HM}#t<CpH9`7Xc&lVb!w^UG>FL|{g&v&?ot>P;lHyJ+YJ
zasFU8$+_NQt#VZ}skM}fOUJ2vu{>)R{q`9)x*~T*9vKFs+zT-q&aP*aB!OH!2$iw0
zwB$r$A}-LiI}Czt6e!W0oum-3Yu@Q;7ju&Z0j;u*w-g_jTnjr-9vPGd#37V~*-h)u
z1#|MYtoKB*ap%sRNr7P3;$k{Q&n0`JmqbNz`TGFYL6&>YbJr4JG$2D<Iq=;qR(3pQ
zrIB!V)DwOo!l&ryVtvffYmo(t04a(fe<-iEyIQkWnQ*s6b}K6>F<4zxjX=<NAg&>d
zBw@tW2!1Q#b(mDKykj#Bb__WFcj#TpOF_5wjt-nU>CCvLEL3s8gFUl}mt1M@-sx0`
z#uI<c_TGs9ANSZqkn*H%B=;A%4#A~t`~k@n;aTi>f?d+|#{Y35M+dzUI0A+E__=p%
zl3XHg&_1ScuhM4`$P!6i!0xI<4O!sSk;83ZV9TRjs_CiLfi4#zv=<XSo7C6ru%_3x
zX7EhDlR;M%OxKxD|LokTX^qK04Y<c=BLM%TNQDT#>;k<#PcY?@wzinQvNY>vpcw7N
z5wFXMY*I(tBj0Y{e&MrY-}=I~dOPUlNn1qT+}gGE*bF=y_ltW1dqon_Wn8&B>MQkZ
zSUii?mVe)J#A9uY`4j9qnqVr6JPLAHEYKBPBqi>vXu;!L0=zY$VKT+?PVLL&T|N#e
zQV}xOt}qySsXKp8xGzXCeDpOmo7tqqU+^H?brn<%WX(Wq8-QFcDE+@gSK9H^D1<pT
z3eW((MSwfF6e^V6myl|)SA8X4+U@?ez~%}b(Mui<+lFWujbK(1AwjA)tW918)nJ=I
zo$I30cm<;J@}1I*FDX`&rsPLM<HM$-FUt`%1{41}JWEtU_F>63P8`T6MhGu0z$VjS
zZk~?5L3!~KRkU|J7KrsEnSTkqjYq{#0Y5z>Kd4Ds?~PF1(D=|01LOo14bN6B`|v=3
ze%qA^qe-;9B;|$F9OO1YvCa%hBgWTNOY6z4`jn(N`TTMY_*7zs0XTa9XjwRnwCOfg
zUj)?l31$1(Zr#j~<8`Ws5Qu&=t~eElfF4yrp1B@tRp^bt(9j8?x_$RdMafVhDGP#K
z03#1a-ivB(hq_dp2Dqigg?VxaAslc{S2tj^?d&s3pIu;yr1u3cK(7QK0$Udd8nv=^
zuh5?PZLC$x_eXSJ4i9ezO+Y446&3NI-9s&g5@-=(q7}4Y#UmX??(PBa3+YDpIlO=>
zfm77uWG6LWXdFgTrU1nY6A>I~tri!ip9`NqY{gpDE`gR1*P9NtZ(rMnGhjF(wWlfv
zQC*e)ju09eqPdi1<SG@P_w5>*Z`!SDEp#Ojf$?2WP6s6}BKJlbPXGlUJFentqye+?
zC{;<@iiZoQ;Nr!*{hrAyh62YXneVaJ180x&GaIC-g+=G+2yCXp2XwY}n_}Mqqm4#y
zK|!0;kHP@^oAp4j#5F}4fq&5M`iwQ|Zrz$6F5hrad3xxWbv27>I^AP)%QP!c(tnQM
z@^tBW|99{e9d{gyTyBeF0pw5w7luRx3CMZfUmbXAvQW9uzW@k<Ft@+0vuMJwW-Y#J
z7w3#DjNUD|vyj*Ih&r2?Io!xtSsB3MU*|P9vkg<tmSq@+5r`76r1I~bO6dq|R1MD%
z&WE%A{3xRaN`RwMnNk%i8HBUGMYgoO81H$po!gI&rupJynd;&KSGgA-z-V%Kp-3YA
zE~-W@(c?Td@nMqqM0gebsKJX%VNa*P0q=9)ePLDeKmzBktImN74=-&E^=Ol&U%G@^
zv>aD~aU~i@A&Isp*j4lGOLQXtev(vi{V^Hr$?^ho?erECU-wScOA?y5H<c_*)bu>N
zu$((!QR<D~_q}{VTiHK%vg5lIj*TF$uQHH19upUU0*63ZMpZ$_ACMnl!()e=kc|eL
zBxYt*F6TDOovj==Xi<GYDM)b6LmozZm^_9zaz`>1`EnmBpNnNLMjuDUaG`A=lDR{W
zYaV&tD6Ll<w&TcVtMbx@YE9f|K%EbsWIgV-=52xY0ojh80w*r)*~{F<`(n#^y?rT!
zU@QTz*&1@Xr-TbDpYV+kX%N*3TiU-bYP+hf9*uxCM#Rnl>$%<GM9Q-o`V`Dg`Hm1w
zLM%a<hOXCZDT!yeV&}pRZz+Ha1x3ZGuC70kW69NrXDX0i91y|Um+0b={$nKesNgcz
z_E<jDf3q3sv(PLP7>i&_cst+GUO&>1V<oYjv}QnDDWW#<Uqo}>%QUq)72?*dM$8ZC
zD+nd083;0M5KEW<!)PH1evNA!F6y%8;N1bav=l4WjWM+-zliLfhyj3|?E$nzdbM~e
zF_E68)OUd+&{}2q>$TsR`in<|MdQ8>_arA@IMEisyFH+@-8T%J+p~%C%lMe!thjJ7
zZ*P##4#r^83ZkzBkRbxu3a#(!S3*r#BZ{fu$&=xo>cRatvSJi(tv!puEX?HL!cz+l
z=n}31WS_I5^PotNi!RCNx&L|0b>aq8jxf3q$f==UKuR!OtZooOGtkCB&n$v$JQCBh
zZ(kL@gBzL;H0(m#x39uUO7PiTrEPR4RgPTJxTfJXt8ID9`%I5VgvI^Y?Lw+?&9gtf
zy)6?KMMUzBmhmA#PYvAQgY(qvIsDqEe9te{d;)Ix4nbD1w%+P++C~zQ6J!`^1#W}d
zq)5P_hhw3V(64|-QbZkiqORy<+_}??yKN6TGf^5DID-J#G{_YXz@!U;@6xn4WDZ1)
zt`BuK(p{Gg3(2#~mwX=6XW-bWxa#~oCAVK!xA=X1V`C;v9RHJ1U(rtA=G7oat}dA6
zjB)8Nqsg6qfBm}ddS(B)nV?q=$B!Stq+l|?mSzM~#ola_Rv^1xp-DDHqB%qNJDf&e
zUlo-_&g-@MPf)GvKr!S2z)ccCMZdk^S5*hADfh;yIe=oKYtOXb_ZMyOMtS+^$fn4s
zC~@5FIcQ#!RnC|2rO8PFK*mecz|Sy&(B|QjCz;5c-*skKulNJjoDgPJVul$s$hr_2
z-2sPboZG>#-6-9mAxmoHi)6S9VdBqWfUquFW()%6Ub~jq9Pq%Xy|D9<sbl}_jc(@m
zZa}EI7dHdu2+p*&te<ki3%_F`S^MF`vxW(E?~g`S83hGb@Qz;h_NHP;BHTXFAeVZ|
zd^-cKZrA$-WnwgP2{4%q?n?3mi0h|UxXcta-+7D$1y!I0^cPL*?$NtP)=|nnnxHbR
z+oZYfv^bL3Kj4St_mh#l_=u|o1zje~F<ey*L-T-qk@3aPpYH}Zvw!d2u!>n*9x3J>
z#m7N9LB@vhyv6_*7*g9)r)R#~{T+%ieEsreAQ^%NHxaQP15WyH$nUD4|BN~maq2)c
z3Ei*R>C|_i8cEy&L|9~CG<3Dd!~>K*Q0fxAjX_O(ddi>g_5X_r0QtFldTxL;;5`y1
zdizak@hq}QMnkM_Hi&yT*s48sE}9}Az#;q&8aZMtz){z~B@<K&5zqJc@24CjB}f)u
zJkPK`LtHlkV)fjMlkoc`kRmbR%Br5*(TcOB7GDERaMQ}UePZI`#J-6_;d%mFzir0>
z`!XjVol~ij2{+ZGiQ0`w<1w`uc2>p{h34fNGDVOgko6?z@-YdsU;eO#cs3n<&7g|v
zZ3BhX0|abP@v*!QhG;`P@ALKEQ`U1faW<@bIchC4j0u}ZrcIJTBjr{7?YK0u0sWZZ
zz&LuBZ)P=?5~^+y-1h)s(h=4PDQci&Z}UDgU07N%Cg-1Z1(FJAHnaP^49Bs?#0`>k
zFdP#TE~#2ig$@ynKn1UkHI@i-IP(5F(mbZf09tIj6Rj^l?_iI0&U6CON+$&13QtaZ
zd)e#I<rB5%F(891e2Acj6|XyCRDq}y2sE-Uk;sKFy1gmN>CKxrztTPI%>I@DRXV*o
z)qv-Fzg+PmFn+u!>I9k(zu<~Qyf-o+{5+<NfR{AFP>WlzZ5o5=t8dtYVYZV3=7pOi
zBqYvQ-nOX<COf7C>eAlZ+q9#e4yAx(!O1BLlV`a)M<DhZ8J~<Ng;8IzO3$ys`56pd
z!)LrnaQ8f&`Boe7+_4SF_%McIk<e}HO4m;#IukitN$(!2ieNr2tK6w~M%SMS9WXYA
zjmQ`W+dIBVcOU;P&(U#WS)#s#c4M1%VBC;Dw3p=Jqt?3+J<d1+l=PUhvjK>Me#PtV
z8>OT&aVqd3R>gmM7z*~!U{($~lW}B^Pg0#ye-XjC>1SzN$M@_g|J*N#G|YsR#0Zzn
zudlODPj!i8mF$gB_<pS$jK3WQsmXrU!Glh3#|^zim6nA7%XVCapil)!&;W=?I0Um=
z$4(ZS1C{4`ldwI+=l8e0cqSRZ#&|X_VDVf9OL^<-8;C8an6{Elcpi6wIDFpsy&Ozj
z8cOUK2rsaPFHHl`B5$ZY=inwXFb};~D=;mEVu>8n++S_x*el>NB*er5p_Vu`Sss7$
z_WKxLbfO*E2SR|f9-DlE=Z9tF*sMw=YNy}cy*L+xd*JiErytY6AL~+uWf<^|AC0x3
z1$s>uJz98F)DDx+*W=?c%nxQ$rju<PDf#iA;cLMegzVa?!_+7Jam|wDHvn*%bfhyM
zeVdORiLl2OXc13f>;rlFG)Ew-2fuvPH$HFx_rsS)RxgK~O=K{rKkuONSnWLamDp8E
zN8!kp$()x+d)Jndk{TKtv&@bz#f6@U2ANmnrP&uCuH^D+Q>n{+{I(dWg$bDM_0Ky5
z>(ITaa4}Djo~9a(kw&Et6IgL4z_i(q??xaiosf&mgyi(Wbs5~*8}#ChG9Auis#W37
zrqCmW8>W#Lx*s$QrEqfbY#3Jb3Wi(8seC{uC9k_mlEE`c)8BW&N0foi4+y6y4fL@n
zwjL<tqxamF2^XjCJQKW5$;)Wi|DNeSEM*+*2S1fdN@6cVS&|J!p^MV#aGOvsopIDC
zJIcTbAyl{IC7FDOvG-WMrsKbVQ?PPF6||m)d_E15S%IJmcVmqq6qL{+<jn8>_GoFO
z(zBM({1x&|i>xjmOZTGQyo->5W@tXLv7_yW)Aq&8BL7n#wLiHvFcjVcxsCI7$T!E%
zfqW*0+<xuPJY7oMKh>R_J)dHlyB=O-%n-Xp(OI3YW*!@<UUr~x1Lo9h#aG0~%v`*M
zw*mImW%Aq#;f))^XeRaVU4^3=QWuXtN0cELI4#YRlL!jl(GD@#{Cr03d9XnqP~$*!
zo%$N*B{YHy`qoH}#zM*>F$d@2R||ud;BTFODmO1LF-ooGrnrw6beeOi|GD$uCjgRe
z;CZKfbhjgr@+@2$OfH=q*xSrB0cD@-k5oXMoPgen9D4ZOWVmI8Sgvssl^?g|h&Ip=
zA(0-X$8c#Qa*}+g`7TYezL!A1qK1kz!%FMdTqpE7N4q8-L9B6hg8FDF7}5-mi;N4b
zt*Ozb)}7MN?s!?6#NUwzB_8Z=IpfZBG$MR3Gz`JjyLJ9~7)F=UKnkQjQo+>^LCkV|
zY?3NZGB4q6QTW=#vG6OO65hHM6SpVn@%h06vjPgQ90b$+ixNbn4<S-5o>Tq;U;>El
z3#uE_Q69pd5;_<}QMSig3*xu}+QtZo#}6m-JB*G4#>&CVTkGk_YIvuk=Qb3D&^hEh
zJ3>R~)~nmb+0A&*NB>eJgNR13!Q<yikT@I_g!c!});~TTg;}m>(=R+y$)(5?UdNU+
zMjQ(N09SarUHqa@C(~X)A_Ex#8JKePZ)SFj<HvVz@#uiwliBS#0~KbM?%WY0{tmQI
zBy1J@%LDuRH87h@3h-5>I~k5+TVT9S*0?jF!Lj3Rqpd{TjA`Mt)w+yc?mc_<P>p|K
ziy{&GD5;1hmuMZ9v9s^NG!0VD<Bm?pEYC*^n?w<d16BdW5{-s(oJQj;`>_+6Ia&au
znFRkrO$4}SZ^A1K+7`gZIDfhq3JWb{b3#dbWcW9jm4h>9d5jUBLdH829kJUWRYQnP
zB-UNVcfltYl8^{}YWO-r+GO^z9`1El=nxFvOX=%^E3kKdj+w~hy<xBSV~2931WO3)
zkGp=IN4(jL%u7bkfU)9lPVCNrl!8n<La1y%_9=#?Dyz&*Y7h{D2w$LVX~r?`6qZ4?
z*ZT+KvQ@=gV$=vkWQcx_zB%%<rKP3tRh+>72B>a@MDh+jSVE>dz>|U^4QkaeqK$-J
zmK-i1_TBm`8PO(;#Lh3^W;c?I1#%U?pdS!Qj0u<yk%l<bZb~C11~e#1ya%33l1nMn
ziv^et$ttjGwYzuWqNhV(Ude_(W_gFm^~_}T<m_+*FG6juEvO%kUZ+f4Td0h6a9*Yt
zp6oPr`ZItk;paZ#^(&c)wH=-73O?U#9AejSgKan+*I8fpi{#T_3+$Yjg^ODjb^Cmi
zE7XKUnSuNFJ~@h<kQJJ|n@S<YH}qX3U%&@o1ljW>@f`1wh<|Q^b;Y=vCk?^52%k2>
zq5;Qr6>GI@abfoFCH13kCm=b>#)io+<v5b8RgS24M|7Hbr=$-EccgUZqw%19#!Kkv
zDZGXD2&3ew46Nnw38SA!&o(n1F|<^EcR^6^;H0ra9;m`3=VOxjW|4I%^FHTJ%f`W`
z0wLJ49~K=qT{TmT3ex}bXk6te8_rq1hY<FDzq^iOHs{6M+>>WLx9X4(3uHT^(ZS4^
zd@cYY=<{rm^n@bn1yTnvPB$$&mC8=Se8N1d=j(`u0q_yvB*;I--q}BkST^}R%J|SB
zoptN}eS4J>?j*?m!ku%z{BgR9%T@IMYggG80QjVK{qg&^)Peb{ms{89y{Y=JR}a5U
zG(pdy`C+lGiII^izETF*9Ta65Fbe&vqN%fAT#ZM1;*}`=+bI8n0t3-q#?7!rZj&ip
z?}?jjqI8N4gBs$xb{uUZs<Kn3Pw&i)KAN1eTjl<LH9dp3!0nRt8#WNzKM1^Y{K5ye
zc<n=d5S;;0h^+gNj#{ef%UC>!SoBNSr7%QP#ENhK*LYB*^B`FVa$jbO5Mqk5H68nT
z-YxLko2YE5QgKg_Gz<_ys0Z(xq@6JnMmZfQv0t~hUssv`d7xaQzdbioxaj>RAqFVr
ztlLajCs1K)YWn{ECY+$BjW#=*^Q4!kh)5RBHCbRkeBXWcA-simjPa$JrFFS4H7$iA
zQtrCYHN~srOLD`)m|--p$l9ZT*Fh$h@&Vx7|GcrY#ZlG+I{`DJTLcB=;lm9N4ee8W
z%yKz0GJ_O!Wo2dM?asjY3;FlFmd+QAA2PU;m;>c=8qRgTo@*r#F#kmaxDJ$rcT@e%
z)3v|lA1FC-mqPl~MMc@6gSzr(QMQ{}(WjjLVhiU;ZE`57Q`?agKvMjn(wP*9QiGy)
zKMaOMYv}*y6T-kb^N!aGEOUFNtoSjCGsbD_pILDNuK>XfW*3Y?OHUBuBBa9f+8or~
zzn_!bFGR_W<BYKFu9T~EuMP7JOtHh0C#N(yRW@zfXVrR+=pYOY^MA+Vej%gI;ptk5
z+5!M^6(ENYU_8W-hc0-m!9Fl-!ZWUOC#IVrME(k-IHc80%g(=TntK8p5M`Y{P;D|!
zwe#JdUqAjr1xPBSp+2fl6(K9_z5Y-z5r;d5>tYDb^B<ZS#%^osr#AVaWJ7tY>H3OO
zrs3<dUB#<LhjYkXkEKP-z&Al?VC_u_yxqsOdNnyziQrExu6`5+S2|1p<y9+7QMg~i
zy3K};!vmL8oXYT@6~^dS(2!C`|IF0sAPXnMcrOHD_NG%PZFuw8gR`X|6VHI+&JoVt
zp@9K)xW|zGt%<go`(@}1UJWiP!K=7_Nj-{F`9qd5n`CS_3Ia$su}*GdAcMQ|1lkF4
z>{Em@+P}SuUfE26snGnMU`G8XcOoRE0A-*m(+&Dy%OlLG@4oe*g@qnqxU{2XKIox@
zf#ctVZj1~b?s#$(1oQ#CTD;+CoXP66*Y)+6*Pvx)7gWi+!ShlIlSQEHC$cx(FqyiA
z`(t1FO%3Z`DpQk=;P`I*qtgiG`z4%Aws62AQ~RO`C8>o&f>2z<NI;ejj`m!?xkI=%
zv2ufPIKl8C0;LlY<Q<!D(Gf2Mp73qlrdM<sJ<pq80rwC@_!Z=Oy?b<=9YdD)f7;R3
z+Dhiy0G}Oz%9}*;qyHn5S#T9CX--z;!;})-?Sm8=4B{ojB}uGe5JQ^oWD4e7xqrj-
z@r=872Z3rRoEc^%3<}tJunw4M^y0&ZwGhp?y1A*LNQ9ta1vFs@4FmH}2m(MnB*`0u
zJ{h@$CD0x7v8v;b+3j_=nAVLp@&hZ|L}qrNOHB8sriT5VpV}|{50xVUNw{tb;5Eh6
z2cF05_e?JXQzCaIN|F#%eE|2D(q?eM68SokV~9_igtYr=I7HlN5t|-C3N9I|M8A}c
zB)7EOQllrz6Zm#e{HlQfMbU{h1uFszCcMD$LRBGo7K?+(KdSb#ySabft5>4Pym<T<
z<?8C%H)|heipl_G+XIk8eqbYwBvvO)6JH2!lL1f>O#jAOpuBZGcWxLkO@CsG__Q(Z
zP@-7+NG;B@?DiC58sK27;EdAIlGL-QQwyw_02xWxX<c1Pm)!oCi$>-c)_msub-cCp
z#F`Bo?y~HD%CKG`EQS`Kcg$A)+**Z_n}iUUoSM2660(#K&bG(noow4{=OOl?8D$Tx
z3zAr!X0$l5ShdKz`TosY;9d@7>X88v4sUJ-K~%Iupi;Uc%c)+6;l5<8@)AzzppCjl
z)RHZai!N?bh@Sc!(HP9u_8BLf&C5_uqQHX7LK@?l;8ly~3yJKu>Zh^YVp#u|N_@dk
z|DfCY0dFq4*;RxBw~!(y-o9_FRjbnfMOIvljXfBY5*K;OqUMv#)9hQ7)TE%8Xx9~C
zz7=5i$&6dzwWM07usUD8joQu{9idWe=FslX`byQyjq~bi<GE156`VKs@734Wf-;v#
zU5K3pHx=f74JvL9&Tfg=Rg8eNs>a6ql}(pb8HTnN^WVP6vb%5TkZ3PQKkvT*k)54r
zb$H=LsX-e?s97E!Z}+D9dJVD)=St}*1sa#h*0OvW<GTw22UqC-{Lk@?K7%k}5Ek;>
ziJ*ZZ|GqfZ<7?7>cH!0ckp@@Jvl0wy88TXRJ2q|2O4gma81GeU$#qY^y4v~K4f8yo
zhR4fqrex_UXAIkq6+I~y`+T@t&TNJJPLVHZMTwzV_te}TT<u@U+TiC;QK56Cq9C*X
z{P6dSesyMed8+qU!L2HO!?O#19s=W#v;Sh7=69dDAn5GVd*PDTfJ@P-HseG0OiA_+
zP-ct{03zp99vQ%q=H@Xk0ZjG8SmyqxC*_|Z<~C|Rc$zExK$7iNU*0;SEp;B7%+4id
z-npZ5;2Ou9>-Ue3Q!ALKEx(nF=5^DvmwvAaxbQ2lGfnfUNKdAE5xZqmcYR$Q#QkT=
z%12m_LwUS>>-dnt`_>!ob<Cm>1|F4y)cckwHCA_nmN!OPT8tR0V2$$&6<!EH6g!y&
z3(FfE18eX-RwNV)ViWMs^wGcIjN;JdsXuJ6Ot?{kdUQC(`G9bD%>6ifgrI*B82{+i
z$)srXY}HU~PtKMgk3^RKa<T5O&0z+4$JVPoPSVf6^TA}<^xd!a3HbBcp1OYrn^+wj
zP4-X4B_`Ixi@Ve3rvx@iOs1G{_x1Z#J<d<-mr}o6KXql|$+TV<3tZdw>tRIT?ATTO
z-k;Q-&h4HdRl-Y!IxCzCWiWgVjDE|p<B2T7Nrs*MQroAGkk9vXUuT>7vmX3o;uZ3b
z`kl2St_sJ7{a5V#U=n&mt+W4r^+P?bgQc%8^N$p1A1pS2`4DKx2DI(~Tai~KxqB|K
zd81{>vo(TQ?+YH>D0HSuQ#=wWmX?Q~cXdZ<i|Wcb_nuuEwa>12l#0dIZjl-Ndx95G
z{HZ53W6WoEcIy}Q{vgkz#eH-aB5X_f1jggo$6TK#JR|?|VX>KQN6ec3ap!uO*3*)K
zw1nc$+T<Jd<Y~Dj1o{f(WHa5QiuOIt<`~c&fAF?h)jaSS?{+Hpkd}PO36t30|Nh{X
zo)BLIqNUeJ+YPhNU10NQ&XOF{A%Dlg<I&~=`}eEiUPA3cQVPg_U@f?E{W`HE;?yUa
z{gA2(v;fR(Z0F9_Mv9Qf=v;k^@584yJwH?=gySmu={Ng{N&Hz)RF8B1JhdyPHn7^Z
z;?$03IqljX0Axds1G{%D!WK^Gu2i?!BVR9rJ0MTlI3F3ab#=O@c6>Nk;VjLAV?Xr+
z6Z!7K&T-C_Jt%}Q*<x^X)VnPo(-OsGWF9i-wi@8fN83i+Q!ouhLDMMv*YA*Up#P|N
zHysW}O6%;|U-}atPVGSVce6m`G^8aQNY%8l5yo<PNOqM$3ClEwa9+W2IMDAl*cA50
zVLJwMo*2k+MuM8%w+~Mzf~U#TYcVQe@eDw3-|PC@StcPA0{H2LPhny!4NFZwS%NAG
z1Z1D$^JODcp65E0b35v0zEgu?k&)|RF~#jVUNp#dw_P}}S+FN)76@nYNQ$pC4{K-7
zS`|O9vR5Cfr4|}{3nj*O;qC_s!tt!95gH!cwM<fH6Mui{xD0VJLM}x<EL$f0Mc=dE
zS(YyWU-)c@$Gfp}Xr8_*9~BOIe7DT>QwKtT?a{%Q9Hb7~JpJ$o?2k9Pr!EX4B+rvi
zU%si>DjOdd47m}m`q}rW=IZgyzAC?^F*V1e<Q>a!_;t|K6s!|FPH@kTmDgGvUpmUl
zPz!!YE0oyN$1KdJ=egap=x4<!Y9S}V%SgaK1`KZnUp?>kX#l(y+}~VB4^gUEF#&Y+
zV()@DBGc+!mqOX=aN5h%%xpV!UrdqdXN!Q<PGLgb$DddAFIBw24zI1RZ~LYk#IYsX
zxk_19_Bz02RwjDw^s6WcI(%6b&q=xOf8_eK>lFCAPd7r&eHc5JXS!*XxL(nx?X&93
z+%}bTDSXq$jJ`MHJ=8PiuWPk~To^}o8g9(EZnQ*oV}^;_iB6r}b2LY-O*rakOI+J6
zE-*M5H(gu54_d-<r+tBI-4ysC^n8d43st79w~Mf5eOe;B$#RK4o>#BM6O(R!eY{Km
zt;XSZcHgqPR?Hsv8Pwmft0ne+Acwkq38(rp<<Dvow|R;du2S`DYa1HwK%yp}9GR#m
ze%+WSQXB6o0lL2?r72cC{1rJzmSKC_7kwyjyK?E<@!flz?oTBxy(KWVtayOs;nJ3h
z_6w>@n1yIoekb-Fp}lWr+A0-vmvP_NJJY<?#yb1o#O_(wJEeSgr#{=xxfkpbtIKCv
zpAGv&yz;uXfteEOEAbgSrwA$XCp=oSv8~kUTf3(kwkys5Y-`OE3v`ry$~?XKp^iwH
z^0$84Qi;1sjUJg>&yOBB^D9$+VMej+28-ltu}8m`)^kR=CJjogW}lMQ;HlKn{vl>)
zTS@GZO|VJ^?7J}TssPpbF--Ts9I!A*ni1sx^(R$=_qgW~8-B&iwXJ)FF6hZHi(PT8
z<TXBX_V%Aso=5byZM;(uS2h<z{SaKx@O{6^Un(hZoY~}{DPoSR!E%O$hudOZVd%-*
z4*TW89ywh>&sMDc(A8-E_F;(f%wQ8H7+zqGux422{DXILXXAt9u~Q5F+e3PH>OJcp
zqe9p5zRBD1s3`H7rDY0-`p8ZFq9YF;UW@f>cy*{i`esY%i{+Xe7h2w=zH1P<T$uOC
za4u}6Zm3AOyw|0rUN1T_n1wy+KJ`0S*ZsIzf8mmuS5TaHf`Ny-{b7UsYK%v^>SS)X
zU5nA=tX5y6`+B*s?8rk`Gl?CK5{er#F0wehwV*uR?{2C)Va&!ac14xnEon$>dwhk+
z8n>lCu19dFuazRtK2$jP6915Q%7&;Saqqf}Vz)kPfv;Yd<R@QTivANXap%O|Wi4L%
z<?&@#BY)mzk2P@*)$pskyG7<5T?CK!1@6}(ms=tnR>t7R$kbU_-sGr!BiGeXc1_nd
zw0}8OE_CD2KxkD=qM$?1%G1Xc+Y+L7FMP9JI_;L4yKkj>QMNeq^ks{o&bgbcGI&E_
zFXq{@oh6HhWTM=Lu2l8Tteqd7_;M|f!};}SzyF2htPav^Hh(*|fn9j1QfEezyZAP&
z8=IwyJatB9abkh4+YMGsTg?QyR@RC}&wq;>rjmWKGH-6w6HYj0taE#VvZduUj><@d
zE!|w<%zD;>vkEWhO4@pOUD8kPIHkF=min_bWM^Le@x>sH#{S25?cY915xe%TW(V_c
zrXQ7;PoJuaNEp@I)3W(ySP+Nvi;evY%Dq!QZ+?jx?i$>}w;6~rs`iI4$(hBuAMyKc
zFS?fcb2I;NlaE=PXrSvVgTpue2s|I8EmdfAEB*e~XNGAO<I)d-MS}K(8JfM_P*ZuF
z-TU<Pjz@o%Q~j*RCo5icd|D=abLX3?d8Lt2ZTYta!H#K#{N}6Bh`^#<WLY#4W5`{?
zVW{+cWct3;o@Ff!C(1WJ3YKG7`P${ry|<Lc$uvL^_oZ&DU0fz|)6w)nT}Q?;;qHLp
z{_>dQ2>U#b)Lj(}h|cMC3*LTTN{jq`pC4K(e~w+LcvhPGsemStK|UZZtN1q#JK$e^
z<M8Z@zXtHGRe-!|k5xK6azh9N2EkV#6f+E{APr+xq0qR5-1_&0)X7)A6J;U)Zs6V@
zDL2-QxHZJ%nRtiA$qF1<c6+<7MsJP_7fWoedm+E+#*FrGu1(il98FLI=x)rovH5Ts
z?brL0`5nHSReR9v;NRt$<{}U6An@ayveeYne2_5*;W@ruZM$E0BNrcE4m#xPRsBZz
z2M_`jxSg-1cMJD)q<@h+xJW(XNV9M8I4qv($I9bq=({C^(HiuyM26GxDgd`keL#I9
z`8)v$0HvLR2k>qA8Xlh4a2}Fzy!&o!rh8%g!1}=<VgzCfxdd;@qjU2Mrqhq#USOMh
zX=uQUuOxUg*mq-w*WlRoU%zcAw9?l_;F6nJWY-QsUTv1!)<xH%bIkVmRWzLOOD4gm
zAmH!_iUl$uMPkdA>dwxmjlSmF?P|h1HrnCH(X6+kKbMot5+u2Qy*XR%b>q5z*Q_R$
zSv~0avLUs#gq~Tu45`o<*_89q&aL`k0|KA8ZUK~~^8en%C=JFe;Pmf1!P4AA9kTis
z0wvqg>$-eRxe=V(FR5LvV1Eili{q{{>>|qolzu$sUqi$7kJtwVuSp>YvrhGvCqsKQ
z(BD$j6u7midF!IKYZnBBbZDHHTK881SPUecCc<~sF)s><s#!k-?@Q7s8e{xkO*NCL
zl(#=#=gw1W)I$?n9*1RLWSRSZoN-gAuy(Eu9nlODi9d{V8xY(AZ3Vh?Ol?0|wxqnY
zGzp68W<;%{4%mbFK;UhO8VTOwHn^8)4vk5*V0w{`<bvb>4hRU0h8zIqh=H0Q%qYn1
zk}(kc$j3HgPA!jH^vq(?Hy+%K+7j5z{YKGbkIYIzKOG^pdYdj#FHOqR{HQa{%GPmk
zyZ{2k3prh~9j=#PNNyeh#i0>$03zUUw8)71mO|^N^s`wbUQfKiVLQOciVw4`znPgX
zT>!cR88WhuC~@ev@&J%S<JOJ!|D+(uF>PboGOMIVnOg&!m%gbwkFz$&OreT*SL^<m
zZL!AaLa)Lz2u@?Ug?T1w5h9QU!G4tcua9h?@Z&Tt#jepW#(^Afl7<CxViNQe-+$*6
zJh5XKvCHtp*g%6`n4h30K3+cSoZ>|dFauoJ#>0Ac!R>C@vHP*we6#Of@edc3tQHPj
z+pN@G_I>#5(UDau!Xf<nHFKcK>4=>g+pjPm#)Io1bx1VVb>p)OqaS;er(-?)aG+xs
zGi`fKPNwdF15f7eW!~K5z1pR;tI1z4o=@&07K#^C7EmXPQy_#tmiZ#nN)NdXAV90@
z>-VMCCEjJ93HZ~nA=zWyu+6{2k={VjVM5{6=sv6AZP8D?Kk&M=h}^kaW6d<B{bqe2
z$J46prN{D*D>qFkojhFck=Z%FVnDL%*pxzaTk3+?wa}0!v!%Ey8?sMuYOwB}q~AIv
zqcS7=L7DoPn11SEn9#-h9-sD=U$^E~Z@=8O%8v6g3XNJl@o1%m*Mi9y)G@&k!c#3#
z(j0W`+6n!W|GP?d3eB3=AF6S`u|3*b@AxkJ>vGi}N;33M*)G%gyl40ChuqZMzx!l`
z+}<Mk(z;x+YdS0U26AdM@#_D2>wG)jHsup_ICEQMoL89GHAjuLW^wBST@UE58K>Ft
zXfW|IaB5P2@uOufFMaYbcHK{0M>9(og;Kl@+i|PQ>c#{szOd}BUiTY^`^iHZo+VR^
zR(&s8+B1qQ#1mNUC1T=VN(Z|o4Qg`qe|Z?lv8#EgpUt}R<)%{gHHS4hD$Xdqs#5Bv
zY}FTf=Mvo_(l;n?%#1fN?s~y=H_3W^pkso?%JDeHm9Kr?p1HV6sZvzWL56*jCOEZi
z&F1@?_0OF9hD%zMM@{grYt$LPYO^~rjp<5XzWl&18f+KO{JX1qqe>tBlLAJ?bK){r
zH|p64(XTBFx{#YRu-a;p@#|qjsp7y@jk#2*uGm!`>7o)P;XnhqT-WD$fh!jpQvXCW
zDb(#>!hCGA9q%gzCYdXy3L}#zwn(f_ICf2QrPX(?S33<j)HGMUnNl)bCVXFN|FV`P
zvh}XdTQ?ef=d!x0x@6DJaCQ}?8aq6POT2o9J|(i>xWZ38{KLIy%kKNG{^*a|yP^Vr
zRy?O{lm>ZU(4Nk}(OA}>a`SwV)hIQWze9DpAVlyFr=ijxuIFj&=Ra$09UoLt$90q}
zM<q>e@D~Z;e^@IXqZjqo!Zh!3rX1`^8oYHJ!Oe*uc`RqI26Ln_p23$f-O<J0uXFmf
z!<7m~DOz9HhGbL+yH={w)EHV)em}mw!jp2}z{H9@t1@H#DvE0pE#tbe!Oo!7#+sar
ztFIWU9N)QbV+IZXJNplzvXl2!e)e;&t5|;CMR{|etDv*MxV&ve<><{#A^iA0p=s}*
z*Gr_w+wv>S1X*R>TCyth{r0_Y;=fmf1l-J39%B{>^(~I$Q2(qgKRzJ!M;Z-d-smo=
z?jR2JqM4nZ!hsx(!u?Wse`5ds{}A0AW^ye<JNR~yF8Qi#)vxHTUK;*yua<%4eRb=L
F{|CSBH?#l%

delta 41969
[base85-encoded binary delta payload omitted: not human-readable]
z6&za5+(T8=9T7K`OCHBgMQut^uomw}=|IcdN9a26J{1yKD^qcFAAiY;5Yh(E4web=
zE%`2cS1P3UX0lZT&O07^@QO-_M-)vEVif1JY9X71hb8euKryk|z{u4P`{Je5Wo#0!
zcmBw{5*XS#r7kJ8b)Ph)`oSbuLr0Hd`Ojv+Ov9P04v^R9!vGuDwl>4F4h{f(uA<&X
zeLgfFR}45+NJ3&L)KdnBEJ?9KB!Sn$pC2BfS1io5M{i-F3#?&ua*e#EUvskqpr{j+
z5UsB$LD0P9XhwSu`JpeIIDR=fqnmvKxI~l0saGrUkU}4#j)wxu)#bp+QG^PjK@9@f
z0lxU|B^UQQ06aROOrUz_tv%%tdMNER<AiM~@uY|(qnsX^)CnhV_2>$Z;n!lPR}0;X
zij57zO(N=PoW4Y<OFWQpzR}dsDO}k12@XLJ0l>DXAdtw=ayotdXuO?QN<svFTwHv*
z`>ua1E*=Xpr{R&p-bzP^CFEGl$!*=bl6)WM-j-*3V*Xc5upC~7VDK|-Be&2^Av!zo
zbX|Pn;dEmJ#B!1EuyzWZO*b6OL7E%r=BY@zTY8Ke=s6OtshSw#=0bQ8u{l@Qd>oQv
z3DS&geXX>$n?bOd?&a;%{*sX7;4=}+ACbPGgHum#ECro~9#azmMKWMT5zNL1smbBH
zT(NsM*8kMlIud4-VSEj&r!clB;!cDj35k?MvIBQN2mw;S-ch115PLs~V8qpF>u*Kn
zf|x5Gq|87qy@ZJ=NW7ReFjah6<8Ax&@iXZ2k0M3+!)M7u26$e|KxPvOlz%_~QQAUO
z3kw{B(uKhXb4b-v?MRHdW|E<eS{1dI6NyADa_0b<MZ5253hN_rRziO(Q{O0Cy9k)U
zu{Dolc>p~~1ssz_zSw85MP<8<nEi$Mi#+1s?P2}%Q@uQE4wS#ZE^4q78ffAMfGsZ+
z!g3*8sNvyZqQQqoyy4d$u4JA-CRC-b{*JFuo#|v~vuD=cL6`!NS>`xhtKL)ZwNYcr
z*f)z2akaegMB0W8nwpyCs3(bGlK9!59Fv1EKxqAXp9u|XtU$aTTFvspkrIiom!=1`
zGmNdsPa{W^s0whZb`?*zlRn~~5Zk?I!v^A{M}}M}`d2jV(%8a_up!uXl&Ex2T`~&S
z#kR?$8i`+Q7N|E8Uk%Gu7;sc8B$Pv<OibM8+KWPf(LQ-2dh!0GWhNYhB*`5z2XknO
z^1S}Y0w`~U#{{2ylz)SvU+{)}hR$<}8`FO*m}vTgd?aL;`$+M0=g`nKpeMLpWe^{2
z9rIj@{K*yga$SgWCLGK7!~}AhxQPZ4mR+x)x6L$sTWir~r<m4bD$_>_Kr(q<zAS@~
zV!|E~wHom<c6F7x*(S-NHznT`3w)2U@fy;O60ZliXRn|)Mpop7gxzG`=Z}=KCcLpL
zsd$`y;|3>Hdq<hOy}iA`R-Sk;;B$bIdWKcM8QP+2KyAz+0r;HnA{xK#>{T?j<7hMV
zpZ2^@JbX03YR4Nlp4HB%@<=fVj;$1lr@?RqBm%+k3Oali89I7*lvcxWsR?VNH1fc_
zkbaT$cmA}0sE8YDNXFmNMk;wvBjA^tH*c;N7hjHRByijCM=<2tq_x4Nvx0<^&}LAV
zRk|J3Q`i6;=3=Qj^x5^1MkhF~YDTr<Il3maJJ7r;LI|FDwWv+{GpwoFaS#?JB_~Is
zn8t1I1`v+g&4>$E0jLu_&Gga{na$cVdX&XGHy+35s2hkm6vCWRlo1y8;(z1PqW}#N
zTe7r*f)=i{)ATS3cFQz6oH`I6(bTXj;WH%gJhVnX9)zX%9cHOA(A8h$Sq`B2c7hul
z-cSpCE{8qa46V212L;eUB=^CU<LVL>=L6{2N&b$sZNqv*4K=U7tZEweawpU$NXYet
zh;)G|^xNmpooGEPA(@7XuF7r4^(vghE~;Cb9@Q^~IW@^8hPUpT?H5J^2rPuZS7}pb
z4r<3yWJf1~i~$8GF+BlG^(a6XD>#)<I563a9cJXPz%~qlbbkw#&JEpA7{D0gd{gZG
zB>n`AX_gAJzeRiSlKshz5eOznkBMGXdg<o=<)&TA*JF{tFA8hdo-jU{y%a>a1gc#<
zA1$$E%SKxBgMD0K8`2Yy3lRl`1q%ye$0QgQ8j*xE^>N|BzNo6UwmUfXrao>mq@FRC
zIPn}*Qg~)j!HE6?z<*jOddUhH0?@Q59K8oN;Wc!F28|~bLo|JHtP*D|pO*1?{KZm^
z0qf@rvY=yan;&xk*$KFRfBpVh^R(?{2Wn_3)uz|et^Ef9ZjhD+6&MTZ*9-_0_e-e)
zxU#~Xp$I;uIOLbwzJ#h3IH)ut`KCoyV+uc>48oIisWz9PU+V-Hp$3^5j=uYiwv#XD
z0Xd2iPX!<p5}*Xj_;cGvdWbbrGQB+@%^<rSB3vrqb-<0#!g_?u)6BEuHs4NncGLx$
zczOg>y@DykRxr>=TwB|%%vo7%O@uuL_3qTuU%+k{I;JdPqzdNjp@?qGD8F{D%;j0v
zK{(iKex)0U55i%s4=@L?GY1D5$N(Ln_|6~uHS25hfAsVm1es<FSh-*hp?=tqwGoxQ
zAKSa8CWcN3=RAGdIJJv{OciPy&+g07At4gLeU|tTH4!|gQ<V>`?IB&**qnYO{Ml~`
zTi%HR*JUV-BhL-OWMU}Q0#$?vs1+ILCfd$I%?mGvl+%y*{9a%wXg*J#Y=H)cJZZqF
zW<VBRxc$t`?Op`RnF<_pIK^?!P{HiF!wTS&V0J|um*Sa4kX<x{qD|YJ`veOmwiCM2
z6^3G{7HB>I^BHJ}+w(zD(G91LCZ74o*w{y&by<Z6Z1L+7McaKWXdEJL@B3QMn5#@f
zQt};|*@WDC@LkX#acuDXX6?vc@7O4HJL*MF4n2*+vBL>|7Kx#P*e)82J>4AI3Z_D*
zmxbu0XaMs>0_O~C0EZCT4;izA^Gu88iZQ!abn&wt%`#KjKNKr}_UkH|6CCKpFHN^;
zP$j=|Qm^5Jq|t~I33i<CSaZae+ib5;g6p%rU34S9Ikww`{fROyJXA;2LN945NM`Q#
zPzFuU-~Gj3P7WNo5ve!wYp{Y^oI6RzA|Q43KSw178o+Bp*joa&L;V{8qZJ*Qlo4B)
zfi{I+4Njy2$ZFaX@X^H-6b?`4LDy)AIwckPyTQTsEooX%$7yOpo1hUx#l&EY!`toc
zM+6WMu*3(jaLs(eJE9wcFg7PtPcJ3dvPbgGinRQyfTit4&%@X-N&qL=Qk!qzzCE||
zBlag$raZ7cgS5SZhdmJsleiku^{folo_KSSv_D87q@xl17~4W}ULFs(<cU&rxY87a
zQ^a-~r|c#P#)w3jwLOIjJ%HFWK_8+gCHLena7D+W5q*Li{+al}Aa;k>1k8NqiTp#i
z9Glkt`tpnk2Nl<U2Fs_rry#T}MH5IujpnyYM$$Zg!^1d~`z88lNXyaCIrh_+haqO|
ztcs+BQJ6Kfj9B=0W0rvXP{vF0JClg4A93R0;H;}Mb#NKh5XfgJh2|GQ7X%0x{qTyA
zw6TdbE15+$g=+i3GsX_Gd&^U~=uPqbX_mKRs(0VLA5{nuO<(=JaO`_lh(faclzB&e
zj{7^4c4Flub{&9e>poM3Zc-hI(i=8ijZ}V=ZoMPz$K7iunghLGCJ%ihhD_qo>gW)}
z4(Ap0pYxKKI!Q-@O`%VjPrDMuDp*xa19_|8%?EI0_Bjg;2{t4j8kIeXXA>RzaBEx{
zs4Ky~u-`n>iO`D<*^qzjd~7?V#!|_19eetA><49#e;`Gd70JYHj2qu%69A5dSDqQ)
zjXV$?UD7Q+VmAt*Z38C@dZE@$talq7*C=Z{UclMzbG8r&4>}s+ok%P}Jx2?lray{s
z3v4$eFyiT%Pm*oU`vX&#8*H4;+1hKluJ{@ia_!m*f)k*QNrDC&ZD)YFq_pu7JdMQ9
z2k+rE;=m@Fsz-16WfiW8c}GDHM|bn%*y%hgkOf5Ug0LFW8=_{Z`Lb`ff+?Jn;8#LH
zN+l?|Fk%nsCCxoLJ{w`Tipaeiar7~>vF(A*g)+&t55N(7!jb80vn#RnLz*@EUU`HF
z_t8*m0e=RHo*IbD{H3aG6veeaIvbL&aC_TXU$jV&w{0q@JEd_R^#Mo$CIS!Rg>fZF
zepp+z0ZX!D(%xYZ0iYdPum9VZ#<8S{TX1TK`ozJ`?t{&}6j^1^r`X&}dN%+;5Hkyl
zCbn-UB+veZ_5v|r8d$-9SVL3$@JUh*LH@lZZ*BgkQjg))y`7!^G9(Z>Oq4i;Mu)-M
z{N2sXDCv5Xcb07iZby<bQAFLxLL|`s3x_YhP#7+u7#TG-MUA0sWgum1ilLn4+O4W(
zTwXyV5myIuhngTo0>44xCqcl50Xc>Gl>>Wv1xi-M3=Sz*lZ-W>5?_OmI{3=_GLkDw
zVTZ%ftpz;-{;ZF^mh_d$P(OH6{_$hSLQcP*7ZR|fTeF^_#pUgzloUz?X(S=IQSb~T
z!*hQ0g~M8;1jAn=>a{pO4WH!?Wxe+h2z4O`GEUBh^@OP@nQ;I?p%tXy1hD{X9ggzu
zS#Kzdl%~NrHw|LVE2ssC9eDO^oZpRC2EiIkuBH5~>J0J?#|t$c9!hnZzb18XbZ&%)
zO1KmH7Re|VFKWXA(g^xxW_I@Kf~F8KLLBM|NP8izA3`lqu)vPsLe2|NoZ0|@W>}*v
zXu%J}H?Gf&^ptd_QyUGan0)w{S=*)W{?tSL(w|@4^BmS%e*f<Dcji3l(Gai>kOY)9
zX-na5yB@}m{Ar|n0N(CZo{inGp%n=bIE@HMf<FX!#FmaCl3<mDW5SaWgnuUZB`b>5
z<f~2fT!z<H`K&th`99+o7hSzA^UW@E+hFf1?Qe{r`_~M*_WMB3zFta-4QLC=l)`RA
z0v>iE0Z7g6EOi?ivGV`IXq;SJ{2+Wn1^f1otU9_^=<vUz79$Q{l;ULg#EDnl{A7^;
zsDMxroi({p*9CV80D#kmg7u3LOlV2_kOpN;Acqc%o4Sm$=1V224%Qvf`aAuqw5Qr9
zh>4wx!QG=|(gHg)r41h?F>=z>P&JV{hy>)hkM}IY=|y5h!KnSZ_3kEqG!U>RqV8el
zT>(`^+dV+W1@mm2A)3=}(K;#V>rH-7qq7E5wn0K-1@RKq)@lOPB&n%~QfW7htDYRm
z5+ENL0cm>oRQXX<?wO$a>MZQ+s=vI9u*lmG-}C-UmC6<t+b@T*^iOZ#x|rQ`=C-fK
z60cD@BwUm}IaWH}>l0Fa=H3l>br61Kg?=1QXwV7CAd`$YSOM7=f0?v(ql>#BJdl%>
zQ<(rKE$Eu{_&~VvK)=>+fXtxn<{kV-fR<gVH5bLjMIu~VTlxMc!WAknd`3*>;wLZ+
z&T@qI1OU`HURQ1;k@V+mpTqm&1A@idZ(U*etP$r@?I9FNyLhp*y|}b0GV5|baGNpk
zTf~@+VFMr9GL6&m{9ZvPuR7rlR$u<i$LpqXr{csp4?A<$BzRIMC$$XadzD$vU7nwk
z7=bFg9Jp&XMB8NN1S4565H1VdZHL#y%AY^e#zgr7yCQA>odJ*sC$`uxrDW1~%}(&s
zOQFvokQ8$Ix^8J$)r($CHChhc8ZLEPgOIFVs8D>*IV&p$JXf$*l}X-9uLh4xb^iLb
z5@1ZO$^6&1_PHkHWW$YRyc%X<FJQrT<sRk?@zY9y&t3|9z?kpysFe-Gx&lUh(LyxG
zzhddGE&KF|8?9-ky(>#aQDFa#IV?|i{P4k4pKw+B_-$L=d}FIm5i|j3!u(gO?<mEv
zJ)~sqT^o3bT52HThQ0#Y9CSGC0;)?qrWv<6GJ5}6>f|OhZ3smW*@KZeS?qc&5rhh{
zI-xTdkh2W5Fw>B)ZbPF2!hH4spw_v?1ZAe@jwj<g>+dnW2~Bt6!nqBa9JW8Hn=hgf
zCzx|8q$CN3P6hO>S3cZ$=0TC)^Gpq0cjaX}{nuOuNz=6EH<>uoO7X3bQwCk+6+nx1
zWi_W{iI4vA@t2t;!Ztj8BAU^yL(QqeGj_Mu<z0~)6S;bDvX5!S>l-4awYN%-oTN)k
zEM0W$%?c)5DX1S@lsD*Eh<^fWnL%tTTW5cNa5$wpySF1!j%8x`mi(b(TP&Zh(zRgW
z*gCO-nZ_!}xMqu3+hr3v`&?cUDj<5#%a<s-aqxXGJ$M;oeC`{Gt7{UZm_%d;-?fN~
zWv^3myDy)=qS*Z??}ttr)85yUB?EoNvxn8OB6$>@HzL#m=KF<MNyh|~e6ti_2JAqC
z!iNGa-t9B0Zfk7h0K?X!eEpNoZeG5wDmR`4T|GU20Ag5H=fMkn+T<AXdF`7~P5GiF
z5JyJFi~n@lC-~hW_W(g{N$3=&aBv{|^v3oI=7|0qo~KXu$+avE*tF8l$(O6cqNsoH
zuo_#yCn)-YUv^O5>yYI87COn!{-ebmRXo~HG-F#sn8CL;oJ_W`Pzx9EII};ZNw(09
z;f^wcgt1Y<GE{sRIYDBu;-C)no}W6>oOW0@TwRtW`9M;<i`&JEj0X!0-W1a-i=Nc2
zpK2BSF2vV)|I;=J{S<oD`Bg_>W<F0Sb;)h2k=VYS;zr~C8gHMC;s_9gas3s%jAN3{
z@P7$vh_LHYzIh}*{LknWwhjwKYL`AXP;-?Z9m*<=lZm$PLq-<xwmryx1o}nhzo;<1
zlV;(@(*c1UI91{PYiV*aQ6ua=c{|Me52|G_?!PCwb@RbxgxVeLqAExzL~t-OcAJy0
z&dah?tTm|nR|k09amhM&=#WpQ=@rJ6I&2x4rz|`2&I`AD3~eP@+xT_RG8UTJuGMXj
z@Ir&(6(n>*QnD1L)wkgczkjdx_JEEQW!=L<CqN8l*yhf&i&aRU;A}yKnwOtXKrb&U
zNXWt4JKDnptrnaCp~&GPVPpV2KDn5)RPbEw*B-_u=qshZUK@O)HoO)Q&}-lW;~~)Q
z!M$21Z|ei-7jcwBkJOFp`H3e85FG)mNHc<Z2f&ZC-s%w9{4=>Gu!0UeQh)l<<&|^F
zJAGSuikGB|VS9N?qK<%Jmi?nyWLPS;8*Fm>$B1N|FVbW8b>iUT-X<YVze!)b?Yv3A
z)B<^)VatSL#1D9v1=LDX2?>?b*WE*gebbQWiUMuP_K~{*TO7(aU9a3bth7&mX&@U@
z=1HdCKR27MHCzUSk7pa@zys-qQ(x^`m`&Bm&9B(r7}`o_VP>SSxq)_Hh}XGET~BYo
zRz;30&z-}4->=DWlG}|d$_n2C=5al(d@tQ{Q!J_wwdlh9^vRM6ckK<F7A#hI<%btT
z9lseD$fO>YSTD;tI-cglrRk4ygCwQ7WBLB$pNtzLEz)ERHcb<_3&5vW(Aag&jph8q
z5ksC2hu#WaaqHN+_3vul<wuVl&I<ub%_6AENAfj+6+%#XVNaI~i=j%$x%d3EczHWt
zv?U1AOzpgN@5DaM4ZMkw3+n`Amt;Ss6F(M$+#C*EVuFSc^#owEAxcp#(87Q3P2p)E
zJL}<L+_kiF(y0E<v!3DahXyLWJ4@+^3m4ThN?=PoaQ5u@sCcUAeY_u@o}Hwdl%D>}
zw$QaH{|G^>xlejsAy^>7kO<^+A$N3bVCq`UTdn+jd?*|w;x4(-TZmq?WM>@XUk-{1
z{O>o%Zv5n12v4(7uOm5flz&ZyG~=dsif)}0_u+|&36k=G!uLe6$LbYpwx}g*2lA`y
zu21!_l-jZ+&E3INPmcxJKXlj@UM(&F)4z~rqQ_DZ9D`<iWc@qc3}az{^XMbW0eeS)
zfdNT`DgX{3$U-QB2N9Vn!1|N>mcfHRaIQ&skB^So`gSB*!{`!*%S4nhuw<x(&06sU
zawqZRbFW@al=cMg1QZHmDXKA0{lhZYs%evMFVaeSdT6<47(T?-`dFW)7v2+-Y1)yS
zeki2)@ndE|)HB`-e{2Z!OLbhH=6@hHbj>1;S8LV3(+p=;!KutfTL4{V?g^S38e8si
z_$Tn)x2X8=4l~YFZx=28VH%xga(2;8KlI|vk*>bQKR*NsY(@BTRN};wq{3J)eLXJ3
zGFUXKIve98i|iPyv9gXnV%%zKqNKF=YrA=w?_!$(C16(2-TZEO@YOUU%U#IrhH;h|
zLt^6Mc(A|&F9nL-P^O_Iw}hQBtL@l8SC_$9U_QF!n@gUUEVY&QF+Ls-?1ssEVJ#2?
z*R%80Q9e<I0r4%i@<*W|CmAJBiJ-YSGJHXf#V{je6IyvxxCoQ``(f_EA!;)o+CyX7
zc*%u<M=AT}?Ogh9o{Nv)&c?iqPJCu_HwZ!4_}KwP5a0<%=Zc}I<rUJtLTNq22+~Fn
zN?!^$3&>XY{I~*^8g?Ha8%NMAp9{I&2YsbzKFptA3GztaFXujx40s8{9gykYi9V5e
z`SLW&9u}&?$OfA|O&St8<hrO!Qol{IO)~mK^6!;zwjFC`zVq1Ce<8<DDp=={9pg&+
zb8X^of+#5o??jYyn2>~q%zzC?xZ#};$$XRvV2xg7n`kFD^6bn%b6<VQhK7}G3vN*C
zm8R&HE=tvTjqT%IFU8j1A?W^hhRXf*qqo-rB76`gj0CMfDTUlo<M1pWi?~4@*J^&6
zmzS6H`R|q7l9{}-<=+pVc+Iu*3Y(XXqnIQk%pi4!lDsROrX{nTdnE)pkzj;4mrmHe
zcfh?21#Q*GEPS+cNp)+WMfB>kOv3yt4CZ9vtx!zx6WKyFvefA~9(~5>KHB+uMXL3?
z_Uk)UuC%0IsozBnR1hu(0)kRBq3DXpcFWZG?~7V;HPsMo9Zsp4PCvH$E%seKJ(F)3
z?gE(~>nvk9Wn+^JRT>;{?(6WiV<W82tU#3sVs(d-Qh8(JSm>`i6^b*ZOq6Vu&!j8n
z=jrb0t(PcN?HoC2Vt?OlBsOf=#o3wUt%CtzBKcpSKkHdr8vJm@X=*5e+{64WSoRp@
zTHk$ycZWR>-Fq@RVIpq}W=6{E9_$Mcw-MtV;PnmQ#^79+dfv!XdYD!(6?n7yl#{C~
zg-4T#0B3Mu+$V%32s{*1bhL9$yV-rSX)AQ1e#dNGJT4?08~eUcS&1HSv78NkR_Zh-
zb|s>l1i$kcJH_nuNY*{aLo1`BqWnnQC*D%q$Y>Rf(2URqyao(O8XXsZmWr(gqHgzH
zn~_0j<4PfYml%AI@QE;Q7Fc8O6p}-eOwdDnWAO5v=?-OOf@A+rW)ITuz>+~JjEhd#
z6-3EDc=(VQbxE`)`VaDjYisqelio*d(?4@@&yzU^{;<7aof+#c;Ct#kvg#TCuxtLW
z4wpv<RU(xoF|ioT3|93u+_8-r$F~#c3fZX&oo?A4iXdGHX9@-adnReV@nx3MNX7wV
z-F}!6h)0bOu&%I9{|H_iD7^6yYLQ7WAcyb+%V@|3XhOV!9i)=PlOrCjGjbU{Z7hmT
zZAiZ<A$TZ!H&?x`3$Wa&5<=UsXHU}WYautHYFf-tQxWS2*mh#qk>?F<x5``wBAU!?
zBJy3@V~odw{C5C*gw)P$7)6ZpFcl!i5+7_T8Zagh)@pYTO9elgj0|I4z}3kRk={5V
zDGKW$Q9TP&d%_N$KmW9>8#q`pTJxr#j4N%W)y^KzO@fFx5<OPOOe-{~#CZVdG#pTw
zm5*&Ycc}JmJZqR$x1KmA0M#DC)WJI{o-bO@BK|5$BXRxl<HzA@d<fVdc3CmV7+BFZ
zRIuwivtj-W)+js89t0?bY0!q?B04MvP)oq!!Hx?S1%Ckf%J8b4bWbo&Pa#|mdY?aY
zbFW-otBXwR?ZqJ;8K0c=>T^rn>G(JcL)P{}72o{mfFS0y8UY$VB(};Gh?t7@d+9)t
znH~WS4t3CsQ16ooFs=xFE!pjnF*!-O&LUZY%zy*>3qQa22mO~jJtl*pov(UmA-)%u
z5;##qc@^N;x?Y7uL&SNB!K^uQnV2R2^A$uOz5$T?L7;w^L(^X@71(vdJjxSQ29W4b
zoQyCh=#K82zZ5RG>mk%2{&=a!cr;h>@#!J|>OC?ZtcJ~k|5r?)^KatGP6MJA#J6<5
zaDhzp0;tJ2S4pPT;(J2GxQBSN5)u$`a1fISe_WQkuF5Q%4~{$vFa_4+f6kFdIzX&-
z02PqwK9CDM04Y+koAWsm!)zYz9sHO<>B1@j5r<&ACF;NpPywUbt=C)H`{T#N8m@|>
zkj@)(5Sj0jjE#Z|3X+xy{xCCQRY2M)df!GLI)<aZR0yCvTyP@@-)tW4&O16~bSx`g
zfb-iUorkB{^_-8Sw}8Xa#3x4It7VcAGZHZ=i5LK!gVY{qkqFCEQqk4c#4SsA5Q~0a
z;=u;!M`l=mh5j?iq}+fg`ti#Zg=lbm_YR9kUnL$*pv#l6=#TTWJ|Y9N0jE47(3Db3
z=_6F}@km7c^7(TF%*EvH!AY$-7ijk4&^yF=pfo17uT^W-yrDqqu6*n140R6%g>RIF
z${)(ok_wq%Y$4;ne{X^m+hb-_sH2NV^GjbMRxKyZ9s;`DZ##`%+)Bd@Ug8711QrJ4
z+XGtKC)^JVqedgPAtgwlI}&-C(fB?F|4u~y;#;=#DD%DFsgi~ev6j#e>4iDeC#=_B
z_G1J79D-(1bHpEk0Z@>7R@(3s{lT6OJ83*TmX}L*H>QKPYrr*2g?O;Ob`Kb~QU3Tc
z80L1mm?mPKJ$LHTD8NJ00b+;Gv04R@A81fX5QA61dS;~7lTqJzjUMHj;+MQdLLg{e
z3ii|h=R`;1vNzZ|apB2GLxhj0XEIx+v9;frL({DRYR%!;dX7+tA9)q*H@-8Cz!pb*
z2kL%|m>jIA@+zhSk*HV=^C)Er+b>+SaT=6R6t4a}GDp2!rFOvjKoSkHaq)K~-qc0e
z2n)~$3cR2z=*0TF`p6N<aQ;SeFyI_(fWFo+@kjBKuyCQ-j5HwgjfhVr-k*Z0dH996
zjM_G8C%Eq$1|P_QQ5QoO9(Vxd<i!z9x1QDvh;0ya2Xh@e?qXUJ_#u5{Wf~3qhDJ2w
z*t0;O`ntrkf3A%xXhg>_xn>c^Y;{|kX~WcM$Stbv+M;2+X+oWJA;rBz6HX>9f@-Lw
zPs!eDpKGztg-U&sJ%AwWR)+AP*aGiLII-$*dP{-b*8CIu0j7~B{z&GDM$eOab>^cz
zW?;LZ7f%G#-e3F+);xMOM2R*~Z4Y7N00FQO=YTyJjMs199!dN4|0l0kD?v}pPE$Jn
z-M{^7bI=coO$B**5y*sreZcAC$xBhn1_MiV0Zyh3HZGAGI)Y^cHq#C(O5XFg1M%X}
zo&!4)pPJf)sqwghy7d%`yM3C3lvH?=_pQO#oz?RYJ7bgFaeV7&S|zOXI)DLs1_q9$
zh8H0UCJ{Z_kZ|P@^z~f&w`2pFRNtjP`IwLGnxraVg9z~V=Xvn)lGY7;l6q~yAMbaz
zjR4=UgM*>Il|ue1y&6EJ!^7!2-$@=i0BJdOD}D;rOU8kF9NYawr@^fm;%0${y%0iT
zD3{znbdMLaX2owhiCaMznlL@41VvV2XQw%Yrb4u4xOHJ<gOTfW!w&z5R#;JW$+*fs
zZya|RxQJ1=2E8TX;#dmgY(#<Mxby5)i(vP?!zAUSc#aw_L^^LlB}~0!;Sr!<_%})U
z!J7-yz#nVjUy|_voj<g@2g-C@8<qvZ-vfTyaOhNi3cUS;3Pj!UfJ0cj0p)`oR6EVE
z{n+#+)!dlFzmU<?po?IpxxN4H&Qwo5oORgL<9A+?B@d?uH9f44Mm+1+^d;<zppfmr
z)OmR2t`>cMk6T!e@xZWvfB^0ku897k`kjP?D%8dxW_M%Lr<1Pw>Le7t<lds$c9>p3
zTLeFzj1R@KR&Z=T;YJ;Ez6ZjBf0uUx(F?(Z0KZw9Cs!QCCF42jzuN@O5Jhacq6h94
z<#hZAz~O=XOQf$1{mL~CIH+nTQ+{f+=bW|m>w&Q5?ek0-0I*v!jvpcKkLieX_kmlu
zeB1*a>c0arG?auAAzDl*?$qJFKO<8x-jktinnp@&zq-2Ao%8>FC`!oyn0q{pOr_JJ
zG&TE;4OA8{z^HrV_nbVuTA<nscKB~nQi{h*sRPL`m@q%1ld-9<w?{e~2s^;8nua!d
z8qO<Be+|URJNjYqHWbJVG(dEUL%aXWyC?HMNth-iVi0o#KU<jWf>KNyA`0)BE+)YA
zNuYu4sodFsw<uo*7gW(TdLccX%$f#B*a+FziNcE+A7r+e74hISdV>m9qKz>Y+<y?k
zG!gF$RlX_$qpkt|K^Z;4&s)KjnyCC7Vn$=*n8d=RG%~*oz&n1sFJ*Cib#3hu8qu?U
zd<!I$gO~TJ_dGRASQ%Jx2)Bf4U2*7H8afVtuzFzd7)bht{in?NH$Tuf(9=LZfknB3
z<z6(YZ<yRz^fUu(cq!0(#HJp8?YG?r@~$l=&zT8w%gn5XUMTzQw{75l#_-{U{Y2eR
z_hDkex`N}ttE^zdrfi!Yt~`xlc%ut7$v@{T9UYO{0sx#~dZ=f}95})*!;NYCB}W88
zv!{qTz}DvUk+KHYrj~NZ0(Xu^%P?L+Tc319Rdn;_Wwg#8KY|FKiVg_-qGz8BnK^;8
z`Q*20l&VAoiUME7We%&IlK87MnTk7s5KJw?fI+=O$W7w(z$1=ho1LA#^qc4@Z5|fL
zjzEAR?Ce(5i<%oqCu)>?`L?o>MBHkk*g{Qd)MXc(!*OBXz7TKv%K_-&(F9<QLR6FY
zJ1~-JcrXyATY-yDzJ=Fl8B-hu5Ew2oKhV<2fYBI3Qfh0w1Bu%Of)Kp>C_Fo8<`~5I
z8==_Qy&;k&Hm~^Q>sR04Z2J~*5=9EFxtOG6AYo#mqJ&(Qq=JCdCv6`F1(o2bhNxXV
z)rf?Z6>O*5K)5UmBzHoG=)6d+yhM!Cs4D_do)Mx3&1v~v6;F~ZT-D3CI8w`eD^c{%
z5fo0?&pb}PxbXYuF>D_>!y3on6Mc*{DKZohfaN0`;79ELSe`hMaBlMhsHRXRU^e0t
zGb<@M^pV&vyB_=+_c)f{j?={u>-Wdy#W`x^6)2f4ZS_thpA7a#9DAa$L=vO0DT0kj
z4%f$zALSvQ!}%&hHWO%7AA?^dSv*j>uECJ9g<C6+xDB@oA@&O_z7W#QJ)op>WeO;I
z(t&pV5WFX5mxMk>XqEhr+xLlsw4o68w)5wy^GkdH9G~gWT&kaPC=x`|<*Ak<UO`C{
zX@1*2d)QZ)GkHrNF4OJG#wBtth)gXqQxNx@9U91OJV6q`5VTn3a`;R+$0d!jutBRy
zEse?e^h11qK;n%dBASeXM}-|YY!*QV6M|@ggX-GIK{E*@C0-OLZz=frNVqFhI>KUN
zd%1TRD#9{_5C~xsBMl%3{nCc@>(#PrF0SS1?B+*yeFPMWwm1PvQ3Hl@O`B26M9YSu
zmh?R+r6DnS3oRtU0B|nh<ou&IwMSCtxU>SBqo~ZU&rkhOL73I_T_vI7*hZ-9fQZW7
z#G+#&BGmBvc-@M9ccuq>{&X{EWMq)cIb>MXy4mU}R2(N&>LdWes7H@}$bEZrCLS7+
z2&f@!rW#ic{ORsSJ&$r;BjOVW%hvzvet+7{yw<OCE2edOZ@GZY9);qv*(Dn?;eotH
z^5<}5!xq>cEg}c`9drRb-QBz4fmIls$wNZc@@30}0ARJ9_y1WJCbP;cN|~b(QbQ8a
z28r`)H!;Qw5CD@#@4&A4n<EEzOr_Z|XZ~K!T!ymhv;67`<#zHba0no^tPv;VnM&Sv
zSbfr90fp=IDWzT1PV{pvzv;iHOG-mF9y`(y$E(9$edqBceL9*x@>3?^PPiTWurJ9&
z$ZbEbbaGE}V*_MGxITN`yjsD{lRUbS2JtQ8;#aYq=|iJbaHtU57NcTVgoW82X}q3m
z8WyxKG+D;KVw%yZH6W#$(uPr5b$+smSHY6HlcTSUGkZ;IqEObM7pdte%?o%(9SpgJ
z<$ZOAH8oM8rr=vgLLQ1W96sVSiM;DbNHR1Lg-i0NM)||cRZNG~f-xC)?Bew}o}!+S
z+eDa$U7Jj{AQ_5)hshv92vj!%kD*8)w8;yHkffvyXn=7TM2uuFf!%)vG)UN2-rNw;
zMQxHIVOQwzMHaDRYj={AdIxBwv8sGAZXT+!F`x|u-$7?ca>2jds$u5p7?I3!ftra-
zVFf5fx?C;8ldyL*VIh&B;An#S=SHYkSx+a1dI5KAg)nP1DB?!Q6)(IzS$HuN^*sI?
zmhq;Hod&5!39rZhE*C`X5r6~E*7N=>6H31Ohc#1NTHx(l&&0$;?kT#kaEPX7Scg*Z
zW27u5T0aDROp@8($Z_tlSk?|?K^>b4X^E%;q)O=uPW)xu3_1ZlWR{ngHN2Qnhf+5Z
z5uWh`G8P6h*aBr%m5ENvBKMNSfe@!PglO1X9^#+nmYQBugNJ9)XaM64{GXt$vqqbl
zP)%S>!4L?ID5^|@{JU=WqIeg$v}bCS#=m3!#PYZIjngkAyZ*nb9rf7ibJr~VHRY>|
z_0Xq4-9=I@wD6>W+Oxx;3(|M;Iut)Q$#@80o@8_=oc(%}76B}6D~#O`R@z4JR7xU*
z57;G1bU;>Hj83jWbfp(O5yi#o(%0F?4vil<IZPqy90jr!G_L^DeT>Alh>aPFGuOf9
zWoV6o_J^i<{XGi(3(>7Z`h!YCc;`+&wD^dCp$lXa48sje2Gtayv()XfNd!qVCHdqC
z+OEX1gB`3!{N*i#V!=bPe#Z_TY}D#NrbwPU4r8hU#e>>FelnH{+4W%eq{EFMgHTZT
z`V|&>Mq8Ql2yt9+rZDP&m%Mla21y_yOB=_QJnu?i8Jz<I2!LV(N1X_{7zy}>JP)W9
zp0I=PfTLtXvC)lFcdxGQZaf0R`$oD3ebW!J88L)YI`w=cXA7!wBC7!kIXa)C6%b66
z3|<IW9G#e`Ac|;f>y%Hb(3xVp{&!Hy%df(kBhltGb3oC=9u1UD#`@hV6027%B-dh`
zyKlH&LR=gn@bo149sCXg&Kcd&%tMladES2Dk{(k=;&{NyK7Z!H%Ry<4Rksu&6J6iG
z`vxToexJ;4it}=lU9cJxBx82wb2KF|S_DTv;lKdBuS4MF`#C*b-3suaBwCb#MhJQs
z$WgC3w{Nc)&G@=$S{4uc(GoAkc5r;lce$}Mb}@&Q3jE%yzWh36QLFCX(t&9$GVp3b
zhHj1)rvGcM1k9;=xkoZ~MaAep!@MS=nS2qkGP?vu+Nlj(o==a|H@2|g{0aioh~tQi
z+T>KO;MtqIe;7ppy^oH_G_&S?_5i>EOpA@jPbjFv)b?=yovTPYCFyBksvVSoA3vt-
zYxXY56t7lcv5*dw(K$%J^?C4aKFLn{g8SD>DxO><@>+Z_m<>A7{Sd1P`Cy?H+QV5+
z%s~fzIon=^n)^o|O?~+}EqxcJ<KQXtKOwmXVjA6_EEQ4q_gc@iq%B1f_;MAgbzky)
zCF)xr`YvC5O-1@|yc~FxItvntP@s@cRsc#R0kSVHCRn%|{fhFm*8U~xMDNed&9P7D
zb@+^dVB%p3G_L*V>`7o;-R;G!Vo$cK<2&beESMVae|IQ4D(Hb`<VNE&t45`H4+KBB
z`{B9x4S8|KIt#P#Xz@_(GrJuU?7LG#cpDyAJ)ceTG1Q~DuCZ*BPP($;v$kflkx<ut
zi5m5zpD(xX;+qb&xngtdV!o=}^$Jy|kpSv%UjDrw1tab_i9#?2IirR%X}3$i)2s8{
zj8J-=`900w&2wkiML8)_d!hDTYKn2_IG+ZTc-WPJJ-|y@hjDtRhi>?@?{Lrigt^XL
zU5!7f5nYjJZQc>jj4-9&PqT7UzNly!e9p06fjb0Bi%iB%%?b5k*WhR6A0|nztkafP
z^WU{FnQgZ^Exc}|(4onIhlP=JN`P^&bAez^Ex|7&UErHFs6wh{%|nCmA)dDlNw56}
zN<OV#_1@)fH=hkXSF>1C=HG1Uo4l~_?)V%2wvYHc`r2<eE&56c?2s7I6g_cX%P(*V
zXR}yi=H8d7?7u4#ZO8|JEqMD7)MgxVr%yN~N|=t7`Y8=E3;CN(Vwyui)1C9zpY3n_
zb@1|K6>zkt(EY*b>`S8lGK|j*<@mF+bv1Og-?%lmdB4S*>&n9vttVnPvyj7SIl3iB
zlWQ;P7w89MA7CxsVtD-cx1UaKuUj~O`dEur30_RUxaz}MasT-nP88nEa^1Jl<ZcHK
zk+z2#pLYeB#n>T}@xr4A+-6%&CF-osrC`0sAjWH0<LIhVHt%Cr?rrEgCwr^s%}Kex
zqYh2b0NLqqmQ!JhlR4M@=ei@$84B@*@2+pHJN=B$=Pxs(-xV||=X<Imw<&)!Vz#)9
z>o4m<?HJg$y8OK2<nxcGWUFiE=^o(k-u5+A+ibgS-RI4%_{}IdtSqs@G}QW*Ht!|B
z?Ny>DQ_>s&NCsn<j!H<-19XkH5fk*d%GDKJ^QiRmyIX=IO@HRhbx+~Ng2P@XR<yy}
zrDfMvGIYB3<(r(QMRB}ol9hA+BWvJ!GQ7uMiq`&e`eaGfaN~+-zQq?C{{H>Lpwp|%
zwn4>pttsSX>Pk4%Z#6T!c^;1;Uu{WM?R?}<yJOn8zU(je<?Yho(pQ^fbEELm8h;}$
zKT&ud!P}L&`|ZUSP2t|+hf`F^U$oYBRTP8yI&9V7Ft+P2n@WG;p~HjZ^(y^#)aK~n
zE<?|cNmtI$=8@bT<ZA*N#Eyz-Pw%0blfGRC38OCvk8cG%#*xqP=4{CZCf|CS_W$jb
zqAbYySDNwTo8xxrTY8)HHbwOgVUzbL7PzzDCjHRyt%wE#mP>MmEiIGeMV1K-cS+aC
zf;V}}m-^}R=e@gkGiWD|g_(b{mB#>$+H-!U<oku!Sd_Qb)LcV>*!}b8-_oq}7cZV3
z@2Ng}G};5G0nhexd;k<hR8{ZZLI3w+*6XN{f46g-6BC)ehCoiXqI2@RIMN&|t~R4`
zQ%55V1t{9Unr-wN!F!w^I|%}3#3-pm!$)%+q8|rb!Fj$sf0zx4U272rHR4GgWA?4R
z2fJ9akq|KVz8%10S65eE`>UNH8@J^cWQ-4EY-3$dH5K*eOTG6Sj7}NOJ1xyaNKmL0
zFi#1ScPhiZCxz3Sh5Vg%vT_gOIELYML54T9@9r6?qEU@!Z|8iaW)kXK#C{2Y+kAi_
zAW_woFpBI&ZoE_aH&@t85|l20xM2CBt;XEwfENLp2d~fM-Q7xho4ByW)KLOizRSHv
z4bpdBLs^L-d+JtJ!keN(ptA}?aBcvqdL!~(GWw+J^A`^yI$XtL@_uIK`|+=a2UN&V
zka(4kw%T%WWe!%i?WYvv<w-;dS{ARjoG^TH;SEUuIJAk{VWnIe&*LiUTDv}mHeqcY
z6S(Gjk-B&qEzefo8eX|+w51(fKb?k-sy+Ksi?cxFGv4=m1g;?@=eq9O6cl7Y1Fs29
zzL*D*bo{RQfdgz_rvQTx@){<R!=49(G~ae-cM0(D>;-E>LO(=B0a<?fDWeycSGKaH
zyv<f#7NhQh1iqe?y(P`VQcEQ$P9)a<Tp?6hv7`2l#q={&*{CtJam%NsrcUHL2Ve_D
zd?_8$`k(&f{Q`QBh65LgW`x*$7QXgb`r4CThf{3;vVz}hdn!^sA6Iu8EFJOFAw4;*
z=p}oS)%mz}X_!C-O=BPmeW=$ViS7fSy^6xnKiONhjo|}ynJ%US(M{J%OlLXS*<mi9
z2ZCb`aAgT2WBYVu%^Z52^+-{?^6We$j7efBw-Ds0je{1c6|xZZ)@2=MZtVJEowqJj
z8(3pCL|s+Y)k2tEhNpu@YD&nVw+JHc35);(H1<Gq#;D23O5jZJEf*XuH_ZBU{LMBV
z793Xxj~&}*Z7qU`c(1u%FUg1_G=s$bkR%3E2`8{^P<4<Ao<h_cd`2CHS=Fa3!K|S;
z{2eymJHk2{d(!*XJvg~PJlW`Pt5CRX8u_kxBKGO(3ImNTC{O}0zgCHI6+^R$<->(~
z6eCw!&ZK5#J-UCtbo`nKJ~`CKtF$ZtiTVcy%A&X<375v{z2VefVKg1Ib4v@pIJ^P%
zz-{J!1(i6da1hGbGPFj+mG75QD}ss?QK%uO5s($!kGgm&Q0U^tUZnpzm!6hZMO9UG
z0we$1Ex6d(vAD1uG#jed$hf$@SO>VM$N>s~*b6Ug|7V2;vrzNLVe$~6#VJQUj)JxV
z`wYezk}Munn3JXYWnXWToJafscD+Bat`5LHh~5M1?k32T4<CD%3GtmMT;}FiR{G*B
z^^F|%9>sS7F^}Pgxda3j8ul>#Qpcz}bOOZEmsn5`kB>m8Gjs`OP2<_i)xYd)UCGD*
zj{P)5&v@QYH&-HlM+?BJ!?Bpd&7VHGjCQI+F;a?rG&C0GkcV-OYcRKb+0wdNiwypH
zaw7l6?5qa`(5f6gCQ8oqt?%p%Pd~c_2=V*(?>pRviOT8}r1s-Cud{0?Z2NZy(QPVT
z8sA7bE=kEu{kzU~j<u)d;4enL%vCtIi08o0gj3^jQXTiRX=J5CrG;s6WFLYV^zm*+
z<<_rD$KgIfXio&bGsS_~Y#6$X%HoBjKC{J-uMU0f4Gn7eU+^x!Opu(+u6UxH1Xzt|
zLt<mO;rZ(VPy6D+J`HB&mQ7vWn2F^*cI_lFpg=kNb39`31?u~9Byo^6=19%012*>Z
z-;bY~a(V?XI8f_7S)W|MMksbte`K0lTF8e$cZ1iq0D%8Oav!sW%@f{X0Q(yOD<EeS
zZIMu&a$Q4nb0yaBUSu{43;O{x`c|+}$e;hINuI=2lE{SQeNjBzz!g67I(9c)*A^xj
zS-Tehuz}di-K{z&<)6)(uCg@>U=JE0phr}UxuXzN%4Fo-{zn9n-TLjtQRYUWV<N4u
zFxnfYhxgDau2sJHM4>(DHLQ#~AW{Olb~i)!(iXtH;H`qN0o(usdpuUyKby<=Sj?qg
zxu2ju--u}?z;tqh9G^la6bQ&-0>eSaYvt;lhFcYn$4-8Z*-9OMb8>O^2=SFez?V_O
zq4C@$t-dwuM1DEG#T#6{Zx>hpq@hK%!2$F_GFc1B9;s<*mS|I6NNO=#Fd4B;Lwx^z
zXs9O3drBfV$T;Bk?b{i;wj6+sKr=%dQkbF3q4AlnrD7hNdeP(u-4B=TmqO6ngB%^I
z3%(}uLXGcJJ;t?;9b1bQzi4i_(4KWgtH*O^bgD#4(=<RlY~-jP+KOy#Elh!c?<2Xf
z3P=1}G$kWN6Rar089H&>w?H`nxdy15_ks(fOp_ieN_U16KQ$vxP|Q_{pFrxk91qj1
zRtWK3r;?JgN@jKFZo-SK5OjEzmH~{vY1msgV|*7jyLW)CSF8P*%A(qSKBe{Evbk%3
zpJ8G2<HWqgC#HoTor^I{VHpdH5#+eZ{Zk%V2CGD*r4J5#%(!j2?e=OdgPT~pDNmmK
zjQrHR&9X9h&3vs!aF|w$^{t#7S$SS+C3YNq*HW0^4z7AFyY4QWYcyy(GkB*F7eER}
z3;?VckB3Vi+0%WXA<6t*TCrKUS(XjvSYYv!K=qp0`HIZc4B)a}r!%4uOu%HnPgC6p
z78c{R$C<l!7_FSfK?eRHBd1*zVUeq}grJz2S)|sH&L3~~TPmn9_8_2e1~p*mG(xL_
zc_Nz@th1OD<_FACI4f2EQImx<&lA}f?B(Yl^A6*~W3K_No7|U8BNrYP>^OiFP}Y-%
z7oV>i!RD9K-xOa~9YT{k;@$==c&W3}2c(Z<1cxxSb7un5upzyz!{#_%b;cfK<jdww
zXcUs{^Ez1XVDTvgEiFAIdmgwqIo5EV!@x-Nu7F4iJj-9&^uJu(koOBi7HHVI>9UbG
zkAseQL5b!b<=hjq;#pjMV-pkcE=hlPc?IrK$M&LB*WuP<A(+2=#N{Y%nl&CD9S{)F
zaNYExkv{^yEE-2v3d-MO-5Bd4?(=<Q#PN?8WRE)7Kcn%%YCd6B<k7W?&JQf*d2y>(
zVP<qM9vD0~R~v1O5Y7^fi>ERk9}i!{Dx?{b=1dir?&;%_`O=%44V~O5H&GcGgE|Yv
zi$8hrAc~_%a4Q=A^{Wm=NF1mw;kU$#LpFMl-ZVm4Ks6bQjdl!p+70Y^2dG&?l*~$Z
z!r%-%THv6-yAre9grKef?t<WLGBFo(xi9rAC=kpN)ieerWW0DN<?qCj7UW4GodKxJ
zcdSB~4h^Gaa5`XVqXK}QPN(yutiGMyX4rBuzp|BUHVmn`tB{0<AVK?~L;mc)5(fFx
z5M%Wa><#&BX`%VBQR9rbwTmk0MSf!q&aY}*%>%e%jkfaPf3qKjmJi3=xufslp@b7W
zo4T+W`}lp2yuyNl93XqDe@AztJUYB~B0Z~w-W-Z0bQdOO^>uY32$=$rh9xPy>*s}&
zm!5smL&pnvjKt%;klcf<=t)kmqoE-e89dz55)E%EBx1)E+R=y1w2gT0LF<Kj25|?C
zg>H^u*KUFPp>W2FA8uELYc2#L*W2)Q$-xl+Z!2XmRz5AYYu7zc0G#XA)c`ntGPZ_a
z1F|ffAU5DjaOS=H*vl;R)BiAA)&9MEAt_x7SbZpL%6ZHkK6qkE#o+@^EVZS6a)po*
zYx+YioNU06doy0Pm$$VQQ*Faz?E>g7u-f+H@k6=dv!swc>DZIIJS3_yGcycZoS})y
zJ4f${b!T3sv=JjEP=ORUvY>lIl(Eh6*p2?#a>k15=guJo!5kF<Qp;qi1uy$EXTE(u
z`|AT_O*+tE0b=P!tpkDGmw(5WeUuZ^Kl~Z%36LunC#MD?M2;8dnqt!8_vj;L7KkJ)
z`qOtCf7ygsh~u#${@I3S*9_C7N@g}-pGE?U41!p2B3Q3sgOE4^2k?!%ci*Fsa)Z4=
z?95M%U}L)`CnoprAq4!h0;>mc9XTIk2<W-(_BX0v)olqeF>@TdNNbVB&IAq@t=G#7
z$26Fk8v43eU%p@^N?HJOgkwc6lq_&hR<7~I*-x*qLX-#yj3FkZdl_jz8_o5^pPfHS
zzkGfhDqL8Ch_bL=WtxvXrPSgA$!jY%bFBGbWFvcPhjS0bjJ$>o5Y(Y&bAgK*y6`&W
z2s)VFWiKx~nj4au|Nh>tdXSl9FF{`f#T^-HgnhDUa4U;LFGd^wMB*w=Uo@7Ov0#N5
zRFcmWs%<qi?$6GeW*RVe7Nd>5Fd2xXPxSQlk)slmhbnPYt(BurUvFW(XY7|Gh6)wV
zf?7Om1k8kN085ojELKzV$<SS;;Y#}@+e-E0{rHqzgHfYFns?sK%?1Dkz}hQ-a^=9$
z-3~0qE3IBVR!=QNK^Yk)xC@T^wJ6GaVqg6gkvm;$j^h8~Tx<|~xoYm5(;}6)xG?=H
zhNiEp+l@q%3e?i~{`QEXAeX~Pm%HAxcGycn1|OL}Uk==Y%t6D0jiG1I%EuBhJ|k=?
zZhe1#+_S+gL}88A;FO}*ab}^f@%ZX(_+d05PyHRhy91|XDE;zfEoefL@&xX}+ne&9
znVD(8!$qRxAfT_STZv5+%_*ZFoexf|!NEZ!v);!fYs96a`XR<%GJz5P4Kn{7n;eA9
z<o^A*>-oOxrNA1E&uoRP({qhOQ>1A@1q%pT50?PPc?=nLiQ1*yVVetJ_`#u}Mthsw
z7`c5Hl=u-FgEpocGFlR^;_bb2s@V|yb^{EVciq1)%U>(*Z~KW)vNeER<MWK7n~*>I
xM<E<0?5umsO__!G4vKbCXHEUtHJC5V3$_$H@#LH{WT)ZJKCS(lnHuK4{|Br*kpKVy


From 1bb2308ff39594e081d0a1a5f6ae10752a105f57 Mon Sep 17 00:00:00 2001
From: zchen0211 <chenzhuoyuan07@gmail.com>
Date: Tue, 10 Oct 2017 13:51:40 -0700
Subject: [PATCH 75/82] gan design with graph

---
 doc/design/gan_api.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/doc/design/gan_api.md b/doc/design/gan_api.md
index 2fb30432cb..f9bf5939f4 100644
--- a/doc/design/gan_api.md
+++ b/doc/design/gan_api.md
@@ -29,13 +29,13 @@ In our GAN design, we wrap it as a user-friendly easily customized python API to
 
 <p align="center">
 <img src="./test.dot.png" width = "50%" align="center"/><br/>
-The overall running logic of GAN. The black solid arrows indicate the forward pass; the green dashed arrows indicate the backward pass of generator training; the red dashed arrows indicate the backward pass of the discriminator training. The BP pass of the green (red) arrow should only update the parameters in the green (red) boxes. The diamonds indicate the data providers. d\_loss and g\_loss mared in red and green are the two targets we would like to run.
+Figure 1. The overall running logic of GAN. The black solid arrows indicate the forward pass; the green dashed arrows indicate the backward pass of generator training; the red dashed arrows indicate the backward pass of the discriminator training. The BP pass of the green (red) arrow should only update the parameters in the green (red) boxes. The diamonds indicate the data providers. d\_loss and g\_loss marked in red and green are the two targets we would like to run.
 </p>
 
 
 <p align="center">
 <img src="./dcgan.png" width = "90%" align="center"/><br/>
-Photo borrowed from the original DC-GAN paper.
+Figure 2. Photo borrowed from the original DC-GAN paper.
 </p>
 
 ## The Conditional-GAN might be a class. 

From f185af8d7bbe45cf8029a5c3727e27a5d001c984 Mon Sep 17 00:00:00 2001
From: Yu Yang <yuyang18@baidu.com>
Date: Tue, 10 Oct 2017 13:55:01 -0700
Subject: [PATCH 76/82] Complete parameter

---
 python/paddle/v2/framework/graph.py           | 50 ++++++++++++++++++-
 .../v2/framework/tests/test_parameter.py      | 27 ++++++++++
 2 files changed, 75 insertions(+), 2 deletions(-)
 create mode 100644 python/paddle/v2/framework/tests/test_parameter.py

diff --git a/python/paddle/v2/framework/graph.py b/python/paddle/v2/framework/graph.py
index ba14885462..0f0a2847e5 100644
--- a/python/paddle/v2/framework/graph.py
+++ b/python/paddle/v2/framework/graph.py
@@ -1,13 +1,19 @@
 import paddle.v2.framework.core as core
 import collections
 import numpy as np
+import copy
 
 __all__ = ['Block', 'Variable', 'Program', 'Operator']
 
 
 class Variable(object):
-    def __init__(self, block, name=None, shape=None, dtype=None,
-                 lod_level=None):
+    def __init__(self,
+                 block,
+                 name=None,
+                 shape=None,
+                 dtype=None,
+                 lod_level=None,
+                 **kwargs):
         self.block = block
 
         if name is None:
@@ -144,6 +150,10 @@ class Block(object):
     def create_var(self, *args, **kwargs):
         return Variable(self, *args, **kwargs)
 
+    def create_parameter(self, *args, **kwargs):
+        global_block = self.program.global_block()
+        return Parameter(global_block, *args, **kwargs)
+
     def append_op(self, *args, **kwargs):
         op_desc = self.desc.append_op()
         op = Operator(self, op_desc, *args, **kwargs)
@@ -190,5 +200,41 @@ class Program(object):
         self.current_block_idx = self.current_block().parent_idx
 
 
+class Parameter(Variable):
+    def __init__(self, block, shape, dtype, **kwargs):
+        if shape is None or dtype is None:
+            raise ValueError("Parameter must set shape and dtype")
+        if len(shape) == 0:
+            raise ValueError("Parameter shape cannot be empty")
+
+        for each in shape:
+            if each < 0:
+                raise ValueError("Parameter shape should not depend on "
+                                 "batch size (no negative dimensions)")
+
+        Variable.__init__(self, block, shape=shape, dtype=dtype, **kwargs)
+        self.trainable = kwargs.get('trainable', True)
+        self.init_attr = kwargs.get('initialize_attr', {
+            'type': 'uniform_random',
+            'min': -1.0,
+            'max': 1.0
+        })
+
+        self.optimize_attr = kwargs.get('optimize_attr', {'learning_rate': 1.0})
+        self._append_initialize_ops_()
+
+    def _append_initialize_ops_(self):
+        attr = copy.deepcopy(self.init_attr)
+        op_type = attr.pop('type', None)
+        block = self.block
+        assert isinstance(block, Block)
+        shape = self.shape
+        attr['dims'] = shape
+        attr['data_type'] = int(self.data_type)
+        op = block.prepend_op(
+            type=op_type, inputs=None, outputs={'Out': [self]}, attrs=attr)
+        self.op = op
+
+
 # program is a global instance.
 g_program = Program.instance()
diff --git a/python/paddle/v2/framework/tests/test_parameter.py b/python/paddle/v2/framework/tests/test_parameter.py
new file mode 100644
index 0000000000..3b5d38f257
--- /dev/null
+++ b/python/paddle/v2/framework/tests/test_parameter.py
@@ -0,0 +1,27 @@
+import unittest
+from paddle.v2.framework.graph import g_program
+import paddle.v2.framework.core as core
+
+
+class TestParameter(unittest.TestCase):
+    def test_param(self):
+        b = g_program.create_block()
+        param = b.create_parameter(
+            name='fc.w',
+            shape=[784, 100],
+            dtype='float32',
+            initialize_attr={
+                'type': 'uniform_random',
+                'seed': 13,
+                'min': -5.0,
+                'max': 5.0
+            })
+        self.assertIsNotNone(param)
+        self.assertEqual('fc.w', param.name)
+        self.assertEqual((784, 100), param.shape)
+        self.assertEqual(core.DataType.FP32, param.data_type)
+        self.assertEqual(0, param.block.idx)
+
+
+if __name__ == '__main__':
+    unittest.main()

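For reference, a minimal usage sketch of the new parameter API, in the spirit of the test above. This is a sketch under the assumption that the package imports as in test_parameter.py; it relies only on what this patch defines (create_parameter, the trainable and optimize_attr defaults, and the initializer op stored on self.op):

    from paddle.v2.framework.graph import g_program

    # Without an explicit initialize_attr, Parameter falls back to the
    # default uniform_random initializer over [-1.0, 1.0].
    block = g_program.create_block()
    w = block.create_parameter(name='embedding.w',
                               shape=[1024, 64],
                               dtype='float32')

    print(w.trainable)       # True, the default set in Parameter.__init__
    print(w.optimize_attr)   # {'learning_rate': 1.0}
    print(w.op is not None)  # True: the prepended initialization op

Note that create_parameter always places the parameter on the program's global block (it calls self.program.global_block()), regardless of which block it is invoked on.
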
From e31cfcd2715feb16a5961020b9ae19c3e3013123 Mon Sep 17 00:00:00 2001
From: zchen0211 <chenzhuoyuan07@gmail.com>
Date: Tue, 10 Oct 2017 14:06:29 -0700
Subject: [PATCH 77/82] gan

---
 doc/design/gan_api.md | 22 +---------------------
 1 file changed, 1 insertion(+), 21 deletions(-)

diff --git a/doc/design/gan_api.md b/doc/design/gan_api.md
index f9bf5939f4..5764112f3c 100644
--- a/doc/design/gan_api.md
+++ b/doc/design/gan_api.md
@@ -6,32 +6,12 @@ It applies several important concepts in machine learning system design, includi
 
 In our GAN design, we wrap it as a user-friendly easily customized python API to design different models. We take the conditional DC-GAN (Unsupervised Representation Learning with Deep Convolutional Generative Adversarial Networks [https://arxiv.org/abs/1511.06434]) as an example due to its good performance on image generation.
 
-| important building blocks | People in Charge  | Required |
-|---------------------------|-------------------|----------|
-| convolution 2d (done)     | Chengduo          | Y        |
-| cudnn conv 2d (missing)   | Chengduo          | N        |
-| deconv 2d (missing)       | Zhuoyuan, Zhihong | Y        |
-| cudnn deconv 2d (missing) | Zhuoyuan, Zhihong | N        |
-| batch norm (missing)      | Zhuoyuan, Jiayi   | Y        |
-| cudnn batch norm (missing)| Zhuoyuan, Jiayi   | N        |
-| max-pooling (done)        | ?                 | Y        |
-| cudnn-max-pool (missing)  | Chengduo          | Y        |
-| fc (done)                 | ?                 | Y        |
-| softmax loss (done)       | ?                 | Y        |
-| reshape op (done)         | ?                 | Y        |
-| Dependency Engine (done)  | Jiayi             | Y *      |
-| Python API (done)         | Longfei, Jiayi    | Y *      |
-| Executor (done)           | Tony              | Y *      |
-| Multi optimizer (woking)  | Longfei           | Y *      |
-| Optimizer with any para   | ?                 | Y *      |
-| Concat op (done)          | ?                 | N (Cond) |
-| Repmat op (done)          | ?                 | N (Cond) |
-
 <p align="center">
 <img src="./test.dot.png" width = "50%" align="center"/><br/>
 Figure 1. The overall running logic of GAN. The black solid arrows indicate the forward pass; the green dashed arrows indicate the backward pass of generator training; the red dashed arrows indicate the backward pass of the discriminator training. The BP pass of the green (red) arrow should only update the parameters in the green (red) boxes. The diamonds indicate the data providers. d\_loss and g\_loss marked in red and green are the two targets we would like to run.
 </p>
 
+The operators, layers and functions required or optional to build a GAN demo are summarized in https://github.com/PaddlePaddle/Paddle/issues/4563.
 
 <p align="center">
 <img src="./dcgan.png" width = "90%" align="center"/><br/>

From 16b2f963b8a45175d2caaf1fcdc02df4cca8e6d3 Mon Sep 17 00:00:00 2001
From: zchen0211 <chenzhuoyuan07@gmail.com>
Date: Tue, 10 Oct 2017 14:08:45 -0700
Subject: [PATCH 78/82] gan

---
 doc/design/gan_api.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/design/gan_api.md b/doc/design/gan_api.md
index 5764112f3c..fb41df8615 100644
--- a/doc/design/gan_api.md
+++ b/doc/design/gan_api.md
@@ -7,7 +7,7 @@ It applies several important concepts in machine learning system design, includi
 In our GAN design, we wrap it as a user-friendly easily customized python API to design different models. We take the conditional DC-GAN (Unsupervised Representation Learning with Deep Convolutional Generative Adversarial Networks [https://arxiv.org/abs/1511.06434]) as an example due to its good performance on image generation.
 
 <p align="center">
-<img src="./test.dot.png" width = "50%" align="center"/><br/>
+<img src="./test.dot.png" width = "35%" align="center"/><br/>
 Figure 1. The overall running logic of GAN. The black solid arrows indicate the forward pass; the green dashed arrows indicate the backward pass of generator training; the red dashed arrows indicate the backward pass of the discriminator training. The BP pass of the green (red) arrow should only update the parameters in the green (red) boxes. The diamonds indicate the data providers. d\_loss and g\_loss marked in red and green are the two targets we would like to run.
 </p>
 

From 843ed8e3207583a8009526cbdac43e31e917de66 Mon Sep 17 00:00:00 2001
From: Yan Chunwei <yanchunwei@outlook.com>
Date: Tue, 10 Oct 2017 18:08:33 -0400
Subject: [PATCH 79/82] dynamic recurrent op forward c++ implementation (#4597)

---
 cmake/configure.cmake                         |   4 +
 paddle/framework/operator.h                   |   6 +-
 paddle/framework/tensor_array.h               |   4 +-
 paddle/operators/CMakeLists.txt               |   1 +
 paddle/operators/dynamic_recurrent_op.cc      | 276 ++++++++++++++++++
 paddle/operators/dynamic_recurrent_op.h       | 158 ++++++++++
 paddle/operators/dynamic_recurrent_op_test.cc | 222 ++++++++++++++
 7 files changed, 666 insertions(+), 5 deletions(-)
 create mode 100644 paddle/operators/dynamic_recurrent_op.cc
 create mode 100644 paddle/operators/dynamic_recurrent_op.h
 create mode 100644 paddle/operators/dynamic_recurrent_op_test.cc

diff --git a/cmake/configure.cmake b/cmake/configure.cmake
index c1c93e17fd..db8f5ab045 100644
--- a/cmake/configure.cmake
+++ b/cmake/configure.cmake
@@ -24,6 +24,10 @@ if(WITH_DOUBLE)
     add_definitions(-DPADDLE_TYPE_DOUBLE)
 endif(WITH_DOUBLE)
 
+if(WITH_TESTING)
+    add_definitions(-DPADDLE_WITH_TESTING)
+endif(WITH_TESTING)
+
 if(NOT WITH_TIMER)
     add_definitions(-DPADDLE_DISABLE_TIMER)
 endif(NOT WITH_TIMER)
diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h
index 1e9ace9987..15f80b5720 100644
--- a/paddle/framework/operator.h
+++ b/paddle/framework/operator.h
@@ -142,9 +142,9 @@ class OperatorBase {
 // Macro for defining a clone method.
 // If you are writing a kernel operator, `Clone` will be defined when you
 // register it, i.e. the `Clone` method does not need to be defined by yourself.
-#define DEFINE_OP_CLONE_METHOD(cls)                       \
-  std::unique_ptr<OperatorBase> Clone() const final {     \
-    return std::unique_ptr<OperatorBase>(new cls(*this)); \
+#define DEFINE_OP_CLONE_METHOD(cls)                                            \
+  std::unique_ptr<::paddle::framework::OperatorBase> Clone() const final {     \
+    return std::unique_ptr<::paddle::framework::OperatorBase>(new cls(*this)); \
   }
 
 // Macro for defining a default constructor for Operator.
diff --git a/paddle/framework/tensor_array.h b/paddle/framework/tensor_array.h
index 94a14c2df4..293da04997 100644
--- a/paddle/framework/tensor_array.h
+++ b/paddle/framework/tensor_array.h
@@ -87,12 +87,12 @@ class TensorArray {
   LoDTensor Stack() const;
 
   /*
-   * Unpacks the given division of a rank-`R` tensor into rank-`(R-1)` tensors.
+   * Unstacks the given division of a rank-`R` tensor into rank-`(R-1)` tensors.
    */
   void Unstack(const LoDTensor &source) const;
 
   /*
-   * Unpacks the given division of a rank-`R` tensor into rank-`(R-1)` tensors,
+   * Unstacks the given division of a rank-`R` tensor into rank-`(R-1)` tensors,
    * with memory of tensors shared.
    */
   void UnstackShared(const LoDTensor &source) const;
diff --git a/paddle/operators/CMakeLists.txt b/paddle/operators/CMakeLists.txt
index d132c1813e..7dae8fe2f9 100644
--- a/paddle/operators/CMakeLists.txt
+++ b/paddle/operators/CMakeLists.txt
@@ -133,3 +133,4 @@ cc_test(gather_test SRCS gather_test.cc DEPS tensor)
 cc_test(net_op_test SRCS net_op_test.cc DEPS net_op)
 cc_test(scatter_test SRCS scatter_test.cc DEPS tensor)
 cc_test(strided_memcpy_test SRCS strided_memcpy_test.cc DEPS tensor paddle_memory)
+cc_test(dynamic_recurrent_op_test SRCS dynamic_recurrent_op_test.cc DEPS dynamic_recurrent_op recurrent_op tensor_array)
diff --git a/paddle/operators/dynamic_recurrent_op.cc b/paddle/operators/dynamic_recurrent_op.cc
new file mode 100644
index 0000000000..b919aef8fb
--- /dev/null
+++ b/paddle/operators/dynamic_recurrent_op.cc
@@ -0,0 +1,276 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License. */
+
+#include "paddle/operators/dynamic_recurrent_op.h"
+
+#include "paddle/framework/op_registry.h"
+
+namespace paddle {
+namespace operators {
+
+using framework::Scope;
+using framework::TensorArray;
+using framework::LoDTensor;
+using framework::Variable;
+
+namespace detail {
+
+inline void CreateVariables(Scope& scope,
+                            const std::vector<std::string>& var_names) {
+  for (const auto& name : var_names) {
+    scope.NewVar(name);
+  }
+}
+
+}  // namespace detail
+
+class DynamicRecurrentOpProtoAndCheckerMaker
+    : public framework::OpProtoAndCheckerMaker {
+ public:
+  DynamicRecurrentOpProtoAndCheckerMaker(framework::OpProto* proto,
+                                         framework::OpAttrChecker* op_checker)
+      : OpProtoAndCheckerMaker(proto, op_checker) {
+    const auto& name = DynamicRecurrentOp::kArgName;
+    // inputs and outputs stored in proto
+    AddInput(name.inlinks,
+             "the inputs that need to be segmented for each step.")
+        .AsDuplicable();
+    AddInput(name.boot_memories, "variables to initialize memories.")
+        .AsDuplicable();
+
+    AddOutput(name.outlinks,
+              "the outputs that need to be concatenated for all steps.")
+        .AsDuplicable();
+    AddOutput(name.step_scopes, "step scopes");
+
+    // Attributes stored in AttributeMap
+    AddAttr<std::vector<std::string>>(name.pre_memories,
+                                      "names of pre-memories");
+    AddAttr<std::vector<std::string>>(name.memories, "names of memories");
+
+    AddComment("This is an RNN operator for variable-length sequences.");
+  }
+};
+
+void DynamicRecurrentOp::Run(const Scope& scope,
+                             const platform::DeviceContext& dev_ctx) const {
+  cache_.Init(kArgName, *this, scope, &arg_);
+  SplitInputs();
+  CreateScopes();
+  WriteStepInputs();
+  InitStates();
+
+  // run the stepnet at every time step
+  for (size_t step = 0; step < cache_.num_steps; step++) {
+    auto& step_scope = cache_.GetScope(step);
+    stepnet_->Run(step_scope, dev_ctx);
+  }
+
+  WriteStepOutputs();
+  ConcatOutputs();
+}
+
+void DynamicRecurrentOp::SplitInputs() const {
+  // TODO(superjom) make level a config
+  // TODO(superjom) check all the inputs have the same LoD
+  int level = 0;
+  const auto& inlinks = cache_.inlinks;
+  for (const auto& item : inlinks) {
+    const auto& var = item.second;
+    const auto& tensor = var->Get<LoDTensor>();
+    TensorArray& ta = step_inputs_[item.first];
+    dy_seq_metas_[item.first] =
+        ta.Unpack(tensor, level, true /*length_descend*/);
+
+    if (cache_.num_steps) {
+      PADDLE_ENFORCE_EQ(ta.size(), cache_.num_steps,
+                        "inputs should have the same steps");
+    } else {
+      cache_.num_steps = ta.size();
+    }
+  }
+}
+
+void DynamicRecurrentOp::WriteStepInputs() const {
+  for (const auto& item : cache_.inlinks) {
+    auto ta_it = step_inputs_.find(item.first);
+    PADDLE_ENFORCE(ta_it != step_inputs_.end(),
+                   "step_inputs_ not compatible with memory set");
+    TensorArray& ta = ta_it->second;
+    for (size_t step = 0; step < ta.size(); step++) {
+      auto tensor = ta.Read(step);
+      auto& step_scope = cache_.GetScope(step);
+      Variable* var = step_scope.FindVar(item.first);
+      if (var == nullptr) {
+        var = step_scope.NewVar(item.first);
+      }
+      var->GetMutable<LoDTensor>()->ShareDataWith<value_type>(tensor);
+    }
+  }
+}
+
+void DynamicRecurrentOp::WriteStepOutputs() const {
+  for (size_t step = 0; step < cache_.scopes->size(); step++) {
+    auto& scope = cache_.GetScope(step);
+    for (auto& item : step_outputs_) {
+      auto* var = scope.FindVar(item.first);
+      if (var == nullptr) {
+        var = scope.NewVar(item.first);
+      }
+      auto* tensor = var->GetMutable<LoDTensor>();
+      item.second.WriteShared(step, *tensor);
+    }
+  }
+}
+
+void DynamicRecurrentOp::CreateScopes() const {
+  PADDLE_ENFORCE_GT(cache_.num_steps, 0);
+  // resize scopes
+  size_t num_scopes_need_create = cache_.num_steps - cache_.scopes->size();
+  for (size_t i = 0; i < num_scopes_need_create; i++) {
+    cache_.scopes->emplace_back(&cache_.scope->NewScope());
+  }
+
+  // init temporary inputs
+  PADDLE_ENFORCE_NOT_NULL(stepnet_, "stepnet should be set first");
+  std::vector<std::string> memories;
+  std::vector<std::string> pre_memories;
+  std::transform(arg_.memories.begin(), arg_.memories.end(),
+                 std::back_inserter(memories),
+                 [](const rnn::MemoryAttr& m) { return m.var; });
+  std::transform(arg_.memories.begin(), arg_.memories.end(),
+                 std::back_inserter(pre_memories),
+                 [](const rnn::MemoryAttr& m) { return m.pre_var; });
+
+  for (size_t step = 0; step < cache_.num_steps; step++) {
+    auto& scope = cache_.GetScope(step);
+    detail::CreateVariables(scope, arg_.inlinks);
+    detail::CreateVariables(scope, arg_.outlinks);
+    detail::CreateVariables(scope, memories);
+    detail::CreateVariables(scope, pre_memories);
+  }
+}
+
+void DynamicRecurrentOp::ConcatOutputs() const {
+  // TODO(superjom) transform this to a config
+  int level = 0;
+  // TODO(superjom) pass in some lod
+  // just a placeholder
+  framework::LoD lod;
+  for (auto& item : step_outputs_) {
+    auto tensor = item.second.Pack(level, dy_seq_metas_[item.first], lod);
+    auto& output = cache_.outlinks[item.first]->Get<LoDTensor>();
+    const_cast<LoDTensor*>(&output)->ShareDataWith<value_type>(tensor);
+  }
+}
+
+void DynamicRecurrentOp::InitStates() const {
+  // init the first state
+  // TODO(superjom) prepare for the scenario where the boot state does not exist
+  for (auto memory : arg_.memories) {
+    auto* boot_state_var = cache_.scope->FindVar(memory.boot_var);
+    PADDLE_ENFORCE_NOT_NULL(boot_state_var);
+    auto& boot_state = boot_state_var->Get<LoDTensor>();
+    const auto& dims = boot_state.dims();
+
+    for (size_t step = 0; step < cache_.num_steps; step++) {
+      auto& cur_scope = cache_.GetScope(step);
+      // link pre-state to boot_state
+      // init state and pre-state
+      auto* pre_state = cur_scope.FindVar(memory.pre_var);
+      PADDLE_ENFORCE_NOT_NULL(pre_state);
+      pre_state->GetMutable<LoDTensor>();
+
+      auto* state = cur_scope.FindVar(memory.var);
+      PADDLE_ENFORCE_NOT_NULL(state);
+      state->GetMutable<LoDTensor>()->Resize(dims);
+      state->GetMutable<LoDTensor>()->mutable_data<value_type>(
+          platform::CPUPlace());
+
+      if (step == 0) {
+        auto* pre_state_tensor = pre_state->GetMutable<LoDTensor>();
+        pre_state_tensor->Resize(boot_state.dims());
+        pre_state_tensor->ShareDataWith<value_type>(boot_state);
+      } else {
+        auto& pre_scope = cache_.GetScope(step - 1);
+        auto* state_pre = pre_scope.FindVar(memory.var);
+        PADDLE_ENFORCE_NOT_NULL(state_pre);
+        pre_state->GetMutable<LoDTensor>()->ShareDataWith<value_type>(
+            *state_pre->GetMutable<LoDTensor>());
+      }
+    }
+  }
+}
+
+void DynamicRecurrentOp::ArgCache::Init(
+    const rnn::ArgumentName& name, const paddle::framework::OperatorBase& op,
+    const paddle::framework::Scope& scope, rnn::Argument* arg) {
+  this->scope = &scope;
+  InitArgument(name, op, arg);
+  CacheScopes(scope, *arg);
+  CacheInlinks(scope, arg->inlinks);
+  CacheOutlinks(scope, arg->outlinks);
+}
+
+void DynamicRecurrentOp::ArgCache::InitArgument(const rnn::ArgumentName& name,
+                                                const OperatorBase& op,
+                                                rnn::Argument* arg) {
+  rnn::InitArgument(name, arg, op, false /*is_grad*/);
+}
+
+void DynamicRecurrentOp::ArgCache::CacheScopes(const Scope& scope,
+                                               const rnn::Argument& arg) {
+  auto scopes_var = scope.FindVar(arg.step_scopes);
+  PADDLE_ENFORCE(scopes_var != nullptr,
+                 "the step_scopes output argument [%s] should be created first "
+                 "by framework.",
+                 arg.step_scopes);
+  this->scopes = scopes_var->GetMutable<std::vector<Scope*>>();
+}
+
+void DynamicRecurrentOp::ArgCache::CacheInlinks(
+    const Scope& scope, const std::vector<std::string>& names) {
+  for (auto name : names) {
+    auto* var = GetVariable(scope, name);
+    inlinks[name] = var;
+  }
+}
+
+void DynamicRecurrentOp::ArgCache::CacheOutlinks(
+    const Scope& scope, const std::vector<std::string>& names) {
+  for (auto name : names) {
+    auto* var = GetVariable(scope, name);
+    outlinks[name] = var;
+  }
+}
+
+Variable* DynamicRecurrentOp::ArgCache::GetVariable(const Scope& scope,
+                                                    const std::string& name) {
+  auto* var = scope.FindVar(name);
+  PADDLE_ENFORCE_NOT_NULL(var, "variable [%s] does not exist in the scope", name);
+  return var;
+}
+
+const rnn::ArgumentName DynamicRecurrentOp::kArgName{
+    "step_net", "step_scopes",  "inlinks",      "outlinks",
+    "memories", "pre_memories", "boot_memories"};
+
+void DynamicRecurrentGradientOp::Run(
+    const Scope& scope, const platform::DeviceContext& dev_ctx) const {}
+
+}  // namespace operators
+}  // namespace paddle
+
+REGISTER_OP_WITHOUT_GRADIENT(
+    dynamic_recurrent, paddle::operators::DynamicRecurrentOp,
+    paddle::operators::DynamicRecurrentOpProtoAndCheckerMaker);
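
To make the Run() control flow above concrete, here is a small plain-Python sketch of the same forward pass: unpack variable-length sequences into per-step batches (longest sequences first, mirroring TensorArray::Unpack with length_descend = true), run the step net once per step over a shrinking batch, and collect the per-step outputs for concatenation. The names are illustrative only, not part of the C++ API; with the LoD offsets [0, 4, 7, 9, 10] used in the test file below, the per-step batch sizes come out as 4, 3, 2, 1.

    def dynamic_rnn_forward(sequences, step_fn, init_state):
        # Sort longest-first so that step t contains exactly the sequences
        # still alive at t (what Unpack(..., length_descend=true) arranges).
        seqs = sorted(sequences, key=len, reverse=True)
        num_steps = len(seqs[0]) if seqs else 0

        state, step_outputs = init_state, []
        for t in range(num_steps):
            batch = [s[t] for s in seqs if len(s) > t]  # shrinking batch
            out, state = step_fn(batch, state)
            step_outputs.append(out)
        # ConcatOutputs() would pack step_outputs back into one LoDTensor.
        return step_outputs

    # Four sequences of lengths 4, 3, 2, 1 (LoD offsets [0, 4, 7, 9, 10]).
    seqs = [[1, 2, 3, 4], [5, 6, 7], [8, 9], [10]]
    outs = dynamic_rnn_forward(seqs, lambda batch, st: (len(batch), st), 0)
    print(outs)  # [4, 3, 2, 1] -- matching the SplitInputs test below
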
diff --git a/paddle/operators/dynamic_recurrent_op.h b/paddle/operators/dynamic_recurrent_op.h
new file mode 100644
index 0000000000..6a2970f27f
--- /dev/null
+++ b/paddle/operators/dynamic_recurrent_op.h
@@ -0,0 +1,158 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License. */
+
+#pragma once
+
+#ifdef PADDLE_WITH_TESTING
+#include "gtest/gtest.h"
+#endif
+
+#include "paddle/framework/lod_tensor.h"
+#include "paddle/framework/operator.h"
+#include "paddle/framework/tensor_array.h"
+#include "paddle/framework/variable.h"
+#include "paddle/operators/rnn/recurrent_op_utils.h"
+
+namespace paddle {
+namespace operators {
+
+class DynamicRecurrentOp : public framework::OperatorBase {
+ public:
+  static const rnn::ArgumentName kArgName;
+  using value_type = float;
+
+  DynamicRecurrentOp(const std::string& type,
+                     const framework::VariableNameMap& inputs,
+                     const framework::VariableNameMap& outputs,
+                     const framework::AttributeMap& attrs)
+      : OperatorBase(type, inputs, outputs, attrs) {}
+
+  DynamicRecurrentOp(const DynamicRecurrentOp& o)
+      : framework::OperatorBase(
+            static_cast<const framework::OperatorBase&>(o)) {
+    // TODO(yuyang18): Implement copy ctor well.
+    PADDLE_THROW("Not implemented");
+  }
+
+  void Run(const framework::Scope& scope,
+           const platform::DeviceContext& dev_ctx) const override;
+
+  /*
+   * Split the inputs (LoDTensors) into segments for each time step.
+   */
+  void SplitInputs() const;
+
+  /*
+   * Create step-scopes to store temporary outputs at each time step.
+   */
+  void CreateScopes() const;
+
+  /*
+   * Link TensorArray steps to the corresponding variables located in
+   * step-scopes.
+   */
+  void WriteStepInputs() const;
+
+  /*
+   * Write output of each step to the corresponding TensorArray.
+   */
+  void WriteStepOutputs() const;
+
+  /*
+   * Initialize the states; each state will have a corresponding pre-state
+   * that shares memory with the state in the previous time step. The
+   * pre-state in the first time step will be initialized with a zero tensor,
+   * or with a tensor from the parent scope if one is provided.
+   */
+  void InitStates() const;
+
+  /*
+   * Concatenate the outputs of all time steps into a single LoDTensor.
+   */
+  void ConcatOutputs() const;
+
+  /*
+   * Set a stepnet that is created according to a RecurrentOp's stepnet.
+   */
+  void SetStepNet(std::unique_ptr<OperatorBase> net) {
+    PADDLE_ENFORCE_NOT_NULL(net);
+    stepnet_ = std::move(net);
+  }
+  const OperatorBase& GetStepNet() const { return *stepnet_; }
+
+ protected:
+  struct ArgCache {
+    framework::Scope const* scope;
+    std::vector<framework::Scope*>* scopes;
+    std::map<std::string, framework::Variable*> inlinks;
+    std::map<std::string, framework::Variable*> outlinks;
+
+    size_t num_steps{0};
+
+    void Init(const rnn::ArgumentName& name, const OperatorBase& op,
+              const framework::Scope& scope, rnn::Argument* arg);
+
+    framework::Scope& GetScope(size_t index) {
+      PADDLE_ENFORCE_LT(index, num_steps);
+      return *scopes->at(index);
+    }
+
+   private:
+    void InitArgument(const rnn::ArgumentName& name, const OperatorBase& op,
+                      rnn::Argument* arg);
+    void CacheScopes(const framework::Scope& scope, const rnn::Argument& arg);
+    void CacheInlinks(const framework::Scope& scope,
+                      const std::vector<std::string>& names);
+    void CacheOutlinks(const framework::Scope& scope,
+                       const std::vector<std::string>& names);
+    framework::Variable* GetVariable(const framework::Scope& scope,
+                                     const std::string& name);
+  };
+
+ private:
+  std::unique_ptr<OperatorBase> stepnet_;
+  mutable framework::TensorArray states_;
+  mutable std::map<std::string, framework::TensorArray> step_inputs_;
+  mutable std::map<std::string, framework::TensorArray> step_outputs_;
+  mutable std::map<std::string, std::vector<framework::DySeqMeta>>
+      dy_seq_metas_;
+  mutable rnn::Argument arg_;
+  mutable ArgCache cache_;
+
+#ifdef PADDLE_WITH_TESTING
+  friend class DynamicRecurrentOpTestHelper;
+  FRIEND_TEST(DynamicRecurrentOpTestHelper, SplitInputs);
+  FRIEND_TEST(DynamicRecurrentOpTestHelper, CreateCache);
+  FRIEND_TEST(DynamicRecurrentOpTestHelper, CreateScopes);
+  FRIEND_TEST(DynamicRecurrentOpTestHelper, WriteStepInputs);
+  FRIEND_TEST(DynamicRecurrentOpTestHelper, WriteStepOutputs);
+  FRIEND_TEST(DynamicRecurrentOpTestHelper, InitStates);
+  FRIEND_TEST(DynamicRecurrentOpTestHelper, ConcatOutputs);
+#endif
+};
+
+class DynamicRecurrentGradientOp : public framework::OperatorBase {
+ public:
+  DynamicRecurrentGradientOp(const std::string& type,
+                             const framework::VariableNameMap& inputs,
+                             const framework::VariableNameMap& outputs,
+                             const framework::AttributeMap& attrs)
+      : OperatorBase(type, inputs, outputs, attrs) {}
+
+  void Run(const framework::Scope& scope,
+           const platform::DeviceContext& dev_ctx) const override;
+};
+
+}  // namespace operators
+}  // namespace paddle
diff --git a/paddle/operators/dynamic_recurrent_op_test.cc b/paddle/operators/dynamic_recurrent_op_test.cc
new file mode 100644
index 0000000000..675a7890f3
--- /dev/null
+++ b/paddle/operators/dynamic_recurrent_op_test.cc
@@ -0,0 +1,222 @@
+#include "paddle/operators/dynamic_recurrent_op.h"
+
+#include <gtest/gtest.h>
+
+#include "paddle/framework/ddim.h"
+#include "paddle/framework/lod_tensor.h"
+#include "paddle/framework/op_desc.h"
+#include "paddle/framework/op_registry.h"
+#include "paddle/operators/net_op.h"
+
+namespace paddle {
+namespace operators {
+
+using framework::Scope;
+using framework::TensorArray;
+using framework::LoDTensor;
+using framework::Variable;
+
+class TestOp : public framework::OperatorBase {
+ public:
+  using framework::OperatorBase::OperatorBase;
+  DEFINE_OP_CLONE_METHOD(TestOp);
+  void Run(const Scope& scope,
+           const platform::DeviceContext& dev_ctx) const override {}
+};
+
+void OpDescNewVar(const std::string& param_name,
+                  std::initializer_list<const char*> arguments,
+                  paddle::framework::OpDesc::Var* var) {
+  var->set_parameter(param_name);
+  for (auto& arg_name : arguments) {
+    var->add_arguments(arg_name);
+  }
+}
+
+// create a LoD tensor in scope with specific dims
+LoDTensor* CreateVar(Scope& scope, std::string name, framework::DDim dims,
+                     const platform::Place& place) {
+  auto* var = scope.NewVar(name);
+  auto* tensor = var->GetMutable<LoDTensor>();
+  tensor->Resize(dims);
+  tensor->mutable_data<float>(place);
+  return tensor;
+}
+
+class DynamicRecurrentOpTestHelper : public ::testing::Test {
+ protected:
+  const rnn::ArgumentName argname = DynamicRecurrentOp::kArgName;
+
+  virtual void SetUp() override {
+    CreateGlobalVariables();
+
+    auto op_desc = CreateOpDesc();
+    op = paddle::framework::OpRegistry::CreateOp(op_desc);
+    dop = dynamic_cast<DynamicRecurrentOp*>(op.get());
+    InitCacheManually();
+    InitStepNet();
+  }
+
+  framework::OpDesc CreateOpDesc() {
+    // create op
+    paddle::framework::OpDesc op_desc;
+    op_desc.set_type("dynamic_recurrent");
+
+    OpDescNewVar(argname.inlinks, {"in0"}, op_desc.add_inputs());
+    OpDescNewVar(argname.boot_memories, {"boot_mem"}, op_desc.add_inputs());
+    OpDescNewVar(argname.step_scopes, {"step_scopes"}, op_desc.add_outputs());
+    OpDescNewVar(argname.outlinks, {"out0"}, op_desc.add_outputs());
+
+    // set pre-memories
+    auto pre_memories = op_desc.mutable_attrs()->Add();
+    pre_memories->set_name(argname.pre_memories);
+    pre_memories->set_type(paddle::framework::AttrType::STRINGS);
+    auto pre_memories_item = pre_memories->add_strings();
+    *pre_memories_item = "mem@pre";
+
+    // set memories
+    auto memories = op_desc.mutable_attrs()->Add();
+    memories->set_name(argname.memories);
+    memories->set_type(paddle::framework::AttrType::STRINGS);
+    auto memories_item = memories->add_strings();
+    *memories_item = "mem";
+    return op_desc;
+  }
+
+  void CreateGlobalVariables() {
+    platform::CPUPlace place;
+    scope.NewVar("step_scopes");
+    CreateVar(scope, "boot_mem", framework::make_ddim({10, 20}), place);
+    CreateVar(scope, "out0", framework::make_ddim({10, 20}), place);
+    auto* in0 = CreateVar(scope, "in0", framework::make_ddim({10, 8}), place);
+    // 10 instances in 4 sequences, of lengths 4, 3, 2, and 1 respectively.
+    framework::LoD in0_lod(1);
+    for (int x : std::vector<int>{0, 4, 7, 9, 10}) {
+      in0_lod[0].push_back(x);
+    }
+    in0->set_lod(in0_lod);
+    in0->Resize(framework::make_ddim({10, 8}));
+    // set the content, each sentence content is seqid.batchid
+    // the seqid starts from 0
+    int start = 0;
+    for (size_t seqid = 0; seqid < in0_lod[0].size() - 1; seqid++) {
+      for (size_t batchid = 0;
+           batchid < in0_lod[0][seqid + 1] - in0_lod[0][seqid]; batchid++) {
+        float v = seqid + batchid * 0.1;
+
+        for (size_t dim = 0; dim < 8; dim++) {
+          in0->data<float>()[start * 8 + dim] = v;
+        }
+        start++;
+      }
+    }
+  }
+
+  void InitCacheManually() {
+    dop->cache_.Init(DynamicRecurrentOp::kArgName, *dop, scope, &dop->arg_);
+  }
+
+  void InitStepNet() {
+    std::unique_ptr<framework::OperatorBase> stepnet{new NetOp};
+    dynamic_cast<NetOp*>(stepnet.get())
+        ->AppendOp(std::unique_ptr<TestOp>(new TestOp(
+            "test", {{"inlinks", {"in0"}}, {"boot_memories", {"boot_mem"}}},
+            {{"outlinks", {"out0"}}, {"step_scopes", {"step_scopes"}}}, {})));
+    dop->SetStepNet(std::move(stepnet));
+  }
+
+ protected:
+  DynamicRecurrentOp* dop;
+  std::unique_ptr<framework::OperatorBase> op;
+  paddle::platform::CPUDeviceContext device_context;
+  paddle::framework::Scope scope;
+};
+
+TEST_F(DynamicRecurrentOpTestHelper, CreateCache) {
+  const rnn::Argument& arg = dop->arg_;
+  ASSERT_EQ(arg.inlinks.size(), 1UL);
+  ASSERT_EQ(arg.outlinks.size(), 1UL);
+}
+
+TEST_F(DynamicRecurrentOpTestHelper, SplitInputs) {
+  dop->SplitInputs();
+  auto& in0_ta = dop->step_inputs_["in0"];
+  ASSERT_EQ(in0_ta.size(), 4UL);
+
+  const auto& batch0 = in0_ta.Read(0);
+  const auto& batch1 = in0_ta.Read(1);
+  const auto& batch2 = in0_ta.Read(2);
+  const auto& batch3 = in0_ta.Read(3);
+  EXPECT_EQ(batch0.dims()[0], 4);
+  EXPECT_EQ(batch1.dims()[0], 3);
+  EXPECT_EQ(batch2.dims()[0], 2);
+  EXPECT_EQ(batch3.dims()[0], 1);
+}
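
The expected per-step batch sizes follow directly from the level-0 LoD above. As a minimal sketch in plain Python (illustrative only, not the Paddle API), offsets `[0, 4, 7, 9, 10]` describe four sequences of lengths 4, 3, 2, and 1, and step `t` batches the `t`-th element of every sequence that is long enough:

```python
# Hypothetical helper, for illustration only: step t batches the t-th
# element of every sequence that is at least t+1 elements long.
def step_batch_sizes(lod0):
    lengths = [e - s for s, e in zip(lod0, lod0[1:])]
    return [sum(1 for n in lengths if n > t) for t in range(max(lengths))]

assert step_batch_sizes([0, 4, 7, 9, 10]) == [4, 3, 2, 1]
```
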
+
+TEST_F(DynamicRecurrentOpTestHelper, CreateScopes) {
+  dop->SplitInputs();
+  dop->CreateScopes();
+  ASSERT_EQ(dop->cache_.num_steps, 4UL);
+  ASSERT_EQ(dop->cache_.scopes->size(), 4UL);
+}
+
+TEST_F(DynamicRecurrentOpTestHelper, WriteStepInputs) {
+  dop->SplitInputs();
+  dop->CreateScopes();
+  dop->WriteStepInputs();
+
+  for (size_t step = 0; step < dop->cache_.num_steps; step++) {
+    auto& scope = dop->cache_.GetScope(step);
+    for (auto name : std::vector<std::string>({"in0"})) {
+      ASSERT_TRUE(scope.FindVar(name) != nullptr);
+    }
+  }
+}
+
+TEST_F(DynamicRecurrentOpTestHelper, WriteStepOutputs) {
+  dop->SplitInputs();
+  dop->CreateScopes();
+  dop->WriteStepInputs();
+  dop->WriteStepOutputs();
+
+  for (size_t step = 0; step < dop->cache_.num_steps; step++) {
+    auto& scope = dop->cache_.GetScope(step);
+    for (auto name : std::vector<std::string>({"out0"})) {
+      ASSERT_TRUE(scope.FindVar(name));
+    }
+  }
+}
+
+TEST_F(DynamicRecurrentOpTestHelper, ConcatOutputs) {
+  // Let's leave this test to the Python unittest.
+}
+
+TEST_F(DynamicRecurrentOpTestHelper, InitStates) {
+  dop->SplitInputs();
+  dop->CreateScopes();
+  dop->WriteStepInputs();
+  dop->WriteStepOutputs();
+  dop->InitStates();
+
+  for (size_t step = 0; step < dop->cache_.num_steps; step++) {
+    auto& scope = dop->cache_.GetScope(step);
+    auto state = scope.FindVar("mem");
+    ASSERT_TRUE(state != nullptr);
+
+    auto* pre_state = scope.FindVar("mem@pre");
+    ASSERT_TRUE(pre_state != nullptr);
+
+    auto* boot_state = scope.FindVar("boot_mem");
+    ASSERT_TRUE(boot_state != nullptr);
+
+    if (step == 0) {
+      // check pre_state is a reference of boot_state
+      ASSERT_EQ(boot_state->Get<LoDTensor>().data<float>(),
+                pre_state->Get<LoDTensor>().data<float>());
+    }
+  }
+}
+
+}  // namespace operators
+}  // namespace paddle

From d1479d930eb2067ae08b63f3ad41706cc7265dc8 Mon Sep 17 00:00:00 2001
From: Abhinav Arora <abhinavarora28@gmail.com>
Date: Tue, 10 Oct 2017 16:57:50 -0700
Subject: [PATCH 80/82] Fixing errors in the refactorization doc (#4680)

---
 doc/design/refactorization.md | 45 ++++++++++++++++++-----------------
 1 file changed, 23 insertions(+), 22 deletions(-)

diff --git a/doc/design/refactorization.md b/doc/design/refactorization.md
index 629422e774..ec51aa1a0e 100644
--- a/doc/design/refactorization.md
+++ b/doc/design/refactorization.md
@@ -17,22 +17,22 @@ The goals of refactoring include:
 
 1. A graph is composed of *variables* and *operators*.
 
-1. The description of graphs must be capable of being serialized/deserialized, so that:
+1. The description of graphs must be serializable/deserializable, so that:
 
-   1. It can to be sent to the cloud for distributed execution, and
+   1. It can be sent to the cloud for distributed execution, and
    1. It can be sent to clients for mobile or enterprise deployment.
 
-1. The Python program does the following steps
+1. The Python program does two things:
 
-   1. *compilation*: run a Python program to generate a protobuf message representation of the graph and send it to
+   1. *Compilation* runs a Python program to generate a protobuf message representation of the graph and sends it to
       1. the C++ library `libpaddle.so` for local execution,
       1. the master process of a distributed training job for training, or
       1. the server process of a Kubernetes serving job for distributed serving.
-   1. *execution*: execute the graph by constructing instances of class [`Variable`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/variable.h#L24) and [`OperatorBase`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/operator.h#L70), according to the protobuf message.
+   1. *Execution* executes the graph by constructing instances of class [`Variable`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/variable.h#L24) and [`OperatorBase`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/operator.h#L70), according to the protobuf message.
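
To make the two phases concrete, here is a schematic sketch in Python (JSON stands in for the protobuf message; none of these names are Paddle's real API):

```python
import json

# Phase 1, compilation: build a serializable description of the graph.
program = json.dumps({
    "ops": [{"type": "mul", "inputs": ["x", "w"], "outputs": ["y"]}],
})

# Phase 2, execution: realize variables in a scope and run each operator.
def run(serialized, feed):
    scope = dict(feed)
    for op in json.loads(serialized)["ops"]:
        if op["type"] == "mul":
            a, b = (scope[n] for n in op["inputs"])
            scope[op["outputs"][0]] = a * b
    return scope

print(run(program, {"x": 3.0, "w": 2.0})["y"])  # 6.0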
 
 ## Description and Realization of Computation Graph
 
-At compile time, the Python program generates a protobuf message representation of the graph, or the description of the graph.
+At compile time, the Python program generates a protobuf message representation of the graph, or a description of the graph.
 
 At runtime, the C++ program realizes the graph and runs it.
 
@@ -42,11 +42,11 @@ At runtime, the C++ program realizes the graph and runs it.
 |Operation|[OpDesc](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/framework.proto#L35)|[Operator](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/operator.h#L64)|
 |Block|BlockDesc|Block|
 
-The word *graph* is interchangeable with *block* in this document.  A graph represents computation steps and local variables similar to a C++/Java program block, or a pair of parentheses(`{` and `}`).
+The word *graph* is interchangeable with *block* in this document.  A graph consists of computation steps and local variables, similar to a C++/Java program block, or a pair of braces (`{` and `}`).
 
 ## Compilation and Execution
 
-1. Run an application Python program to describe the graph.  In particular, the Python application program does the following:
+1. Run a Python program to describe the graph.  In particular, the program does the following:
 
    1. Create `VarDesc` to represent local/intermediate variables,
    1. Create operators and set attributes,
@@ -54,10 +54,10 @@ The word *graph* is interchangeable with *block* in this document.  A graph repr
    1. Infer the type and the shape of variables,
    1. Plan memory-reuse for variables,
    1. Generate the backward graph
-   1. Optimize the computation graph.
-   1. Potentially, split the graph for distributed training.
+   1. Add optimization operators to the computation graph.
+   1. Optionally, split the graph for distributed training.
 
-1. The invocation of `train` or [`infer`](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/inference.py#L108) methods in the application Python program does the following:
+1. The invocation of `train` or [`infer`](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/inference.py#L108) methods in the Python program does the following:
 
    1. Create a new Scope instance in the [scope hierarchy](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/scope.md) for each run of a block,
       1. realize local variables defined in the BlockDesc message in the new scope,
@@ -107,8 +107,8 @@ Compile Time -> IR -> Runtime
 ![class_diagram](http://api.paddlepaddle.org/graphviz?dot=https://gist.githubusercontent.com/reyoung/53df507f6749762675dff3e7ce53372f/raw/dd598e8f1976f5759f58af5e5ef94738a6b2e661/op.dot)
 
 * `Operator` is the fundamental building block of the user interface.
-    * Operator stores input/output variable names, and attributes.
-    * The `InferShape` interface is used to infer the shape of the output variable shapes based on the shapes of the input variables.
+    * Operator stores input/output variable names and attributes.
+    * The `InferShape` interface is used to infer the shape of the output variables based on the shapes of the input variables.
     * Use `Run` to compute the `output` variables from the `input` variables.
 
 ---
@@ -139,7 +139,7 @@ Compile Time -> IR -> Runtime
     * Limit the number of `tensor.device(dev) = ` in your code.
 * `thrust::transform` and `std::transform`.
     * `thrust` has the same API as C++ standard library. Using `transform`, one can quickly implement customized element-wise kernels.
-    * `thrust` also has more complex APIs, like `scan`, `reduce`, `reduce_by_key`.
+    * `thrust` also supports more complex APIs, like `scan`, `reduce`, and `reduce_by_key`.
 * Hand-writing `GPUKernel` and `CPU` code
     * Do not write in header (`.h`) files. CPU Kernel should be in cpp source (`.cc`) and GPU kernels should be in cuda (`.cu`) files. (GCC cannot compile GPU code.)
 ---
@@ -185,10 +185,10 @@ Make sure the registration process is executed and linked.
 1. Write an Op class and its gradient Op class, if required.
 2. Write an Op maker class. In the constructor of this class, describe the inputs, outputs and attributes of the operator.
 3. Invoke the macro `REGISTER_OP`. This macro will
-	1. Call maker class to complete the `proto` and the `checker`
+	1. Call the maker class to complete `proto` and `checker`
 	2. Using the completed `proto` and `checker`, it will add a new key-value pair to the `OpInfoMap`
 
-4. Invoke the `USE` macro in which the Op is used, to make sure that it is linked.
+4. Invoke the `USE` macro in the file where the Op is used, to make sure that it is linked.
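
As a toy sketch of the registration idea (illustrative names, not Paddle's actual macros), `REGISTER_OP` amounts to filling a global `OpInfoMap` keyed by op type:

```python
# Hypothetical stand-in for the OpInfoMap populated by REGISTER_OP.
op_info_map = {}

def register_op(op_type, maker):
    proto, checker = maker()          # the maker completes proto and checker
    op_info_map[op_type] = {"proto": proto, "checker": checker}

register_op("my_op", lambda: ({"inputs": ["X"], "outputs": ["Y"]}, None))
assert "my_op" in op_info_map         # linked code can now look the op up
```
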
 
 ---
 # Backward Module (1/2)
@@ -199,13 +199,14 @@ Make sure the registration process is executed and linked.
 ---
 # Backward Module (2/2)
 ### Build Backward Network
-- **Input**: graph of forward operators
-- **Output**: graph of backward operators
+- **Input**: a graph of forward operators
+- **Output**: a graph of backward operators
 - **Corner cases in construction**
 	- Shared Variables => insert an `Add` operator to combine gradients (see the sketch after this list)
 	- No Gradient => insert a `fill_zero_grad` operator
 	- Recursive NetOp => call `Backward` recursively
 	- RNN Op => recursively call `Backward` on stepnet
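
The "Shared Variables" case above can be sketched as follows (toy Python, illustrative only): when one variable feeds several forward operators, the inserted `Add` accumulates the gradients flowing back from each consumer:

```python
# Toy model of inserting an `Add` to combine gradients of a shared variable.
grads = {}

def backprop_into(name, g):
    # emulates the inserted Add operator accumulating into name@GRAD
    grads[name] = grads.get(name, 0.0) + g

backprop_into("x", 0.25)   # gradient from the first consumer of x
backprop_into("x", 0.50)   # gradient from the second consumer of x
assert grads["x"] == 0.75
```
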
 
 
 ---
@@ -215,10 +216,10 @@ Make sure the registration process is executed and linked.
 	* Only dims and data pointers are stored in `Tensor`.
 	* All operations on `Tensor` are written in `Operator` or global functions.
 	* Variable length Tensor design [LoDTensor](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/lod_tensor.md)
-* `Variable` instances are the inputs and the outputs of an operator. Not just `Tensor`.
+* `Variable` instances are the inputs and the outputs of an operator, not just `Tensor`.
 	* `step_scopes` in RNN is a variable and not a tensor.
-* `Scope` is where variables are stores.
-	* map<string `variable_name`, Variable>
+* `Scope` is where variables are stored.
+	* map<string `var_name`, Variable>
 	* `Scope` has a hierarchical structure. The local scope can get variables from its parent scope.
 
 ---
@@ -246,7 +247,7 @@ Make sure the registration process is executed and linked.
 ---
 # Control the migration quality
 - Compare the performance of migrated models with old ones.
-- Follow the google C++ style
+- Follow the Google C++ Style Guide.
 - Build the automatic workflow of generating Python/C++ documentations.
   - The documentation of layers and ops should be written inside the code.
   - Take the documentation quality into account when submitting pull requests.

From 0a74fed181bea14e6b797e59261631556401d29b Mon Sep 17 00:00:00 2001
From: kavyasrinet <kavyasrinet@baidu.com>
Date: Tue, 10 Oct 2017 17:08:01 -0700
Subject: [PATCH 81/82] Correcting few mistakes in the block doc (#4681)

---
 doc/design/block.md | 74 ++++++++++++++++++++++-----------------------
 1 file changed, 36 insertions(+), 38 deletions(-)

diff --git a/doc/design/block.md b/doc/design/block.md
index 4d5dd4ba95..9c812732d6 100644
--- a/doc/design/block.md
+++ b/doc/design/block.md
@@ -5,12 +5,12 @@
 Both deep learning systems and programming languages help users describe computation procedures.  These systems use various representations of computation:
 
 - Caffe, Torch, and Paddle: sequences of layers.
-- TensorFlow, Caffe2, Mxnet: graphs of operators.
+- TensorFlow, Caffe2, Mxnet: graph of operators.
 - PaddlePaddle: nested blocks, like C++ and Java programs.
 
 ## Block in Programming Languages and Deep Learning
 
-In programming languages, a block is a pair of curly braces that includes local variables definitions and a sequence of instructions, or operators.
+In programming languages, a block is a pair of curly braces that includes local variable definitions and a sequence of instructions or operators.
 
 Blocks work with control flow structures like `if`, `else`, and `for`, which have equivalents in deep learning:
 
@@ -24,14 +24,14 @@ A key difference is that a C++ program describes a one pass computation, whereas
 
 ## Stack Frames and the Scope Hierarchy
 
-The existence of the backward makes the execution of a block of traditional programs and PaddlePaddle different to each other:
+The existence of the backward pass makes the execution of a block of PaddlePaddle different from traditional programs:
 
-| programming languages | PaddlePaddle                  |
-|-----------------------|-------------------------------|
-| stack                 | scope hierarchy               |
-| stack frame           | scope                         |
-| push at entering block| push at entering block        |
-| pop at leaving block  | destroy at minibatch completes|
+| programming languages | PaddlePaddle                    |
+|-----------------------|---------------------------------|
+| stack                 | scope hierarchy                 |
+| stack frame           | scope                           |
+| push at entering block| push at entering block          |
+| pop at leaving block  | destroy when minibatch completes|
 
 1. In traditional programs:
 
@@ -42,9 +42,9 @@ The existence of the backward makes the execution of a block of traditional prog
 1. In PaddlePaddle
 
    - When the execution enters a block, PaddlePaddle adds a new scope, where it realizes variables.
-   - PaddlePaddle doesn't pop a scope after the execution of the block because variables therein are to be used by the backward pass.  So it has a stack forest known as a *scope hierarchy*.
+   - PaddlePaddle doesn't pop a scope after the execution of the block because variables therein are used by the backward pass.  So it has a stack forest known as a *scope hierarchy*.
    - The height of the highest tree is the maximum depth of nested blocks.
-   - After the process of a minibatch, PaddlePaddle destroys the scope hierarchy.
+   - After the processing of a minibatch, PaddlePaddle destroys the scope hierarchy.
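
The scope hierarchy can be sketched in a few lines of plain Python (hypothetical names, not the real `Scope` class): each scope keeps a map of variables plus a parent pointer, and lookups fall through to the parent:

```python
class Scope:
    def __init__(self, parent=None):
        self.parent = parent
        self.vars = {}

    def new_var(self, name, value=None):
        self.vars[name] = value
        return value

    def find_var(self, name):
        if name in self.vars:
            return self.vars[name]
        return self.parent.find_var(name) if self.parent else None

root = Scope()
root.new_var("w", 1.0)
step = Scope(parent=root)         # pushed when execution enters a block
assert step.find_var("w") == 1.0  # lookup falls through to the parent
# unlike a stack frame, `step` is kept until the minibatch completes
```
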
 
 ## Use Blocks in C++ and PaddlePaddle Programs
 
@@ -94,14 +94,14 @@ with ie.false_block():
 o1, o2 = ie(cond)
 ```
 
-In both examples, the left branch computes `x+y` and `softmax(x+y)`, the right branch computes `x+1` and `fc(x)`.
+In both examples, the left branch computes `x+y` and `softmax(x+y)`, while the right branch computes `fc(x)` and `x+1`.
 
-A difference is that variables in the C++ program contain scalar values, whereas those in the PaddlePaddle programs are mini-batches of instances.  The `ie.input(true, 0)` invocation returns instances in the 0-th input, `x`, that corresponds to true values in `cond` as the local variable `x`, where `ie.input(false, 0)` returns instances corresponding to false values.
+The difference is that variables in the C++ program contain scalar values, whereas those in the PaddlePaddle programs are mini-batches of instances.
 
 
 ### Blocks with `for` and `RNNOp`
 
-The following RNN model from the [RNN design doc](./rnn.md)
+The following is an RNN model in PaddlePaddle from the [RNN design doc](./rnn.md):
 
 ```python
 x = sequence([10, 20, 30]) # shape=[None, 1]
@@ -112,9 +112,9 @@ U = var(0.375, param=true) # shape=[1]
 rnn = pd.rnn()
 with rnn.step():
   h = rnn.memory(init = m)
-  hh = rnn.previous_memory(h)
+  h_prev = rnn.previous_memory(h)
   a = layer.fc(W, x)
-  b = layer.fc(U, hh)  
+  b = layer.fc(U, h_prev)  
   s = pd.add(a, b)
   act = pd.sigmoid(s)
   rnn.update_memory(h, act)
@@ -147,9 +147,9 @@ for (int i = 1; i <= sizeof(x)/sizeof(x[0]); ++i) {
 
 ## Compilation and Execution
 
-Like TensorFlow programs, a PaddlePaddle program is written in Python.  The first part describes a neural network as a protobuf message, and the rest part executes the message for training or inference.
+Like TensorFlow, a PaddlePaddle program is written in Python. The first part describes a neural network as a protobuf message, and the rest executes the message for training or inference.
 
-The generation of this protobuf message is like what a compiler generates a binary executable file.  The execution of the message that the OS executes the binary file.
+The generation of this protobuf message is similar to how a compiler generates a binary executable file. The execution of the message is similar to how the OS executes the binary file.
 
 ## The "Binary Executable File Format"
 
@@ -186,8 +186,8 @@ Also, the RNN operator in above example is serialized into a protobuf message of
 
 ```
 OpDesc {
-  inputs = {0} // the index of x
-  outputs = {5, 3} // indices of act and hidden_out
+  inputs = {0} // the index of x in vars of BlockDesc above
+  outputs = {5, 3} // indices of act and hidden_out in vars of BlockDesc above
   attrs {
     "memories" : {1} // the index of h
     "step_net" : <above step net>
@@ -203,14 +203,14 @@ This `OpDesc` value is in the `ops` field of the `BlockDesc` value representing
 During the generation of the Protobuf message, the Block should store VarDesc (the Protobuf message which describes Variable) and OpDesc (the Protobuf message which describes Operator).
 
 VarDesc in a block should have its own name scope to prevent local variables from affecting the parent block's name scope.
-Child block's name scopes should inherit the parent's so that OpDesc in child block can reference a VarDesc that stored in parent block. For example
+A child block's name scope should inherit the parent's so that an OpDesc in the child block can reference a VarDesc stored in the parent block. For example:
 
 ```python
-a = pd.Varaible(shape=[20, 20])
+a = pd.Variable(shape=[20, 20])
 b = pd.fc(a, params=["fc.w", "fc.b"])
 
 rnn = pd.create_rnn()
-with rnn.stepnet()
+with rnn.stepnet():
     x = a.as_step_input()
     # reuse fc's parameter
     fc_without_b = pd.get_variable("fc.w")
@@ -218,17 +218,17 @@ with rnn.stepnet()
 
 out = rnn()
 ```
-the method `pd.get_variable` can help retrieve a Variable by a name, a Variable may store in a parent block, but might be retrieved in a child block, so block should have a variable scope that supports inheritance.
+The method `pd.get_variable` can help retrieve a Variable by name. The Variable may be stored in a parent block but retrieved in a child block, so a block should have a variable scope that supports inheritance.
 
 In compiler design, the symbol table is a data structure created and maintained by compilers to store information about the occurrence of various entities such as variable names, function names, classes, etc.
 
 To store the definition of variables and operators, we define a C++ class `SymbolTable`, like the one used in compilers.
 
-`SymbolTable` can do the following stuff:
+`SymbolTable` can do the following:
 
 - store the definitions (some names and attributes) of variables and operators,
-- to verify if a variable was declared,
-- to make it possible to implement type checking (offer Protobuf message pointers to `InferShape` handlers).
+- verify if a variable was declared,
+- make it possible to implement type checking (offer Protobuf message pointers to `InferShape` handlers).
 
 
 ```c++
@@ -240,19 +240,18 @@ class SymbolTable {
 
   OpDesc* NewOp(const string& name="");
 
-  // TODO determine whether name is generated by python or C++
-  // currently assume that a unique name will be generated by C++ if the
-  // argument name left default.
+  // TODO determine whether name is generated by python or C++.
+  // Currently assume that a unique name will be generated by C++ if the
+  // argument name is left default.
   VarDesc* NewVar(const string& name="");
 
-  // find a VarDesc by name, if recursive true, find parent's SymbolTable
+  // find a VarDesc by name; if recursive is true, search the parent's SymbolTable
   // recursively.
   // this interface is introduced to support InferShape, find protobuf messages
   // of variables and operators, pass pointers into InferShape.
-  // operator
   //
   // NOTE maybe some C++ classes such as VarDescBuilder and OpDescBuilder should
-  // be proposed and embedded into pybind to enable python operate on C++ pointers.
+  // be proposed and embedded into pybind to enable python operation on C++ pointers.
   VarDesc* FindVar(const string& name, bool recursive=true);
 
   OpDesc* FindOp(const string& name);
@@ -270,7 +269,7 @@ class SymbolTable {
 After all the description of variables and operators is added into SymbolTable,
 the block has enough information to run.
 
-The `Block` class takes a `BlockDesc` as input, and provide `Run` and `InferShape` functions.
+The `Block` class takes a `BlockDesc` as input, and provides `Run` and `InferShape` functions.
 
 
 ```c++
@@ -302,7 +301,7 @@ public:
   void CreateVariables(const framework::Scope& scope);
   void CreateOperators();
 
-  // some other necessary interfaces of NetOp are list below
+  // some other necessary interfaces of NetOp are listed below
   // ...
 
 private:
@@ -316,15 +315,14 @@ private:
 Block inherits from OperatorBase, which has a Run method.
 Block's Run method will run its operators sequentially.
 
-There is another important interface called `Eval`, which take some arguments called targets, and generate a minimal graph which takes targets as the end points and creates a new Block,
-after `Run`, `Eval` will get the latest value and return the targets.
+There is another important interface called `Eval`, which takes some arguments called targets and generates a minimal graph which treats targets as the end points and creates a new Block. After `Run`, `Eval` will get the latest value and return the targets.
 
 The definition of Eval is as follows:
 
 ```c++
 // clean a block description by targets using the corresponding dependency graph.
 // return a new BlockDesc with minimal number of operators.
-// NOTE not return a Block but the block's description so that this can be distributed
+// NOTE: The return type is not a Block but the block's description so that this can be distributed
 // to a cluster.
 BlockDesc Prune(const BlockDesc& desc, vector<string> targets);
 

From 6604d7cda295cc7978ff227a91a0128a497f14be Mon Sep 17 00:00:00 2001
From: Siddharth Goyal <vi.siddharth78@gmail.com>
Date: Tue, 10 Oct 2017 17:57:02 -0700
Subject: [PATCH 82/82] Add logsigmoid (numerically stable) and softshrink
 (#4663)

* Add numerically-stable logsigmoid activation

* Add softshrink operator

* Adjust relative tolerance for grad-check

* Address review comments
---
 paddle/operators/activation_op.cc             | 35 ++++++++++
 paddle/operators/activation_op.h              | 70 ++++++++++++++++++-
 .../v2/framework/tests/test_activation_op.py  | 35 ++++++++++
 3 files changed, 139 insertions(+), 1 deletion(-)

diff --git a/paddle/operators/activation_op.cc b/paddle/operators/activation_op.cc
index 92db629079..a6bb738af3 100644
--- a/paddle/operators/activation_op.cc
+++ b/paddle/operators/activation_op.cc
@@ -49,6 +49,18 @@ class SigmoidOpMaker : public framework::OpProtoAndCheckerMaker {
   }
 };
 
+class LogSigmoidOpMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+  LogSigmoidOpMaker(framework::OpProto *proto,
+                    framework::OpAttrChecker *op_checker)
+      : OpProtoAndCheckerMaker(proto, op_checker) {
+    AddInput("X", "Input of LogSigmoid operator");
+    AddOutput("Y", "Output of LogSigmoid operator");
+    AddComment(
+        "Logsigmoid activation operator, logsigmoid = log (1 / (1 + exp(-x)))");
+  }
+};
+
 class ExpOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   ExpOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
@@ -85,6 +97,23 @@ class LeakyReluOpMaker : public framework::OpProtoAndCheckerMaker {
   }
 };
 
+template <typename AttrType>
+class SoftShrinkOpMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+  SoftShrinkOpMaker(framework::OpProto *proto,
+                    framework::OpAttrChecker *op_checker)
+      : OpProtoAndCheckerMaker(proto, op_checker) {
+    AddInput("X", "Input of Softshrink operator");
+    AddOutput("Y", "Output of Softshrink operator");
+    AddComment(
+        "Softshrink activation operator, "
+        "softshrink = x - lambda, if x > lambda;"
+        " x + lambda, if x < lambda; 0 otherwise");
+    AddAttr<AttrType>("lambda", "non-negative offset")
+        .SetDefault(static_cast<AttrType>(0.5f));
+  }
+};
+
 class TanhOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   TanhOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
@@ -271,6 +300,9 @@ namespace ops = paddle::operators;
 REGISTER_OP(sigmoid, ops::ActivationOp, ops::SigmoidOpMaker, sigmoid_grad,
             ops::ActivationOpGrad);
 
+REGISTER_OP(logsigmoid, ops::ActivationOp, ops::LogSigmoidOpMaker,
+            logsigmoid_grad, ops::ActivationOpGrad);
+
 REGISTER_OP(exp, ops::ActivationOp, ops::ExpOpMaker, exp_grad,
             ops::ActivationOpGrad);
 
@@ -283,6 +315,9 @@ REGISTER_OP(tanh, ops::ActivationOp, ops::TanhOpMaker, tanh_grad,
 REGISTER_OP(tanh_shrink, ops::ActivationOp, ops::TanhShrinkOpMaker,
             tanh_shrink_grad, ops::ActivationOpGrad);
 
+REGISTER_OP(softshrink, ops::ActivationOp, ops::SoftShrinkOpMaker<float>,
+            softshrink_grad, ops::ActivationOpGrad);
+
 REGISTER_OP(sqrt, ops::ActivationOp, ops::SqrtOpMaker, sqrt_grad,
             ops::ActivationOpGrad);
 
diff --git a/paddle/operators/activation_op.h b/paddle/operators/activation_op.h
index 123f0c4dbc..70d5a62052 100644
--- a/paddle/operators/activation_op.h
+++ b/paddle/operators/activation_op.h
@@ -95,6 +95,41 @@ struct SigmoidGradFunctor : public BaseActivationFunctor<T> {
   }
 };
 
+// Originally: logsigmoid(x) = -log (1 + exp(-x))
+// For numerical stability, we can use the log-sum-exp trick:
+// https://hips.seas.harvard.edu/blog/2013/01/09/computing-log-sum-exp/
+// We can rewrite the above equation as:
+// y = -log( exp(0) + exp(-x)) [since exp(0) = 1]
+//   = -log( exp(max(-x, 0) - max(-x, 0)) + exp(-x + max(-x, 0) - max(-x, 0)))
+//   = -log( exp(max(-x, 0)) * exp(-max(-x, 0)) + exp(max(-x, 0)) * exp(-x -
+//           max(-x, 0)))
+//   = -log( exp(max(-x, 0)) * (exp(-max(-x, 0)) + exp(-x - max(-x, 0))))
+//   = -log(exp(max(-x, 0))) - log(exp(-max(-x, 0)) + exp(-x - max(-x, 0)))
+//
+// Hence, logsigmoid(x) = - (max(-x, 0) + log(exp(-max(-x, 0))
+// + exp(-x - max(-x, 0))))
+template <typename T>
+struct LogSigmoidFunctor : public BaseActivationFunctor<T> {
+  template <typename Device, typename X, typename Y>
+  void operator()(Device d, X x, Y y) const {
+    auto temp = (-x).cwiseMax(static_cast<T>(0));  // temp = max(-x, 0)
+    y.device(d) = -temp - (((-temp).exp() + (-x - temp).exp()).log());
+  }
+};
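
As a quick NumPy check (illustrative and independent of the operator code), the naive formula overflows for large negative inputs while the stabilized one stays finite:

```python
import numpy as np

def logsigmoid_naive(x):
    return -np.log(1.0 + np.exp(-x))   # exp(-x) overflows for x << 0

def logsigmoid_stable(x):
    m = np.maximum(-x, 0.0)            # m = max(-x, 0)
    return -m - np.log(np.exp(-m) + np.exp(-x - m))

x = np.array([-1000.0, 0.0, 10.0])
print(logsigmoid_naive(x))   # [-inf, -0.693..., -4.54e-05] (overflow warning)
print(logsigmoid_stable(x))  # [-1000.0, -0.693..., -4.54e-05]
```
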
+
+// Originally: f' = exp(-x) / (1 + exp(-x))
+// For numerical stability: f' = exp(-x - max(-x, 0)) / (exp(-max(-x, 0)) +
+// exp(-x - max(-x, 0)))
+template <typename T>
+struct LogSigmoidGradFunctor : public BaseActivationFunctor<T> {
+  template <typename Device, typename X, typename Y, typename dY, typename dX>
+  void operator()(Device d, X x, Y y, dY dy, dX dx) const {
+    auto temp = (-x).cwiseMax(static_cast<T>(0));  // temp = max(-x, 0)
+    dx.device(d) =
+        dy * ((-x - temp).exp() / ((-temp).exp() + (-x - temp).exp()));
+  }
+};
+
 // exp(x) = e^x
 template <typename T>
 struct ExpFunctor : public BaseActivationFunctor<T> {
@@ -164,6 +199,37 @@ struct TanhShrinkGradFunctor : public BaseActivationFunctor<T> {
   }
 };
 
+// softshrink(x) = x - lambda, if x > lambda; x + lambda, if x < -lambda; 0
+// otherwise
+template <typename T>
+struct SoftShrinkFunctor : public BaseActivationFunctor<T> {
+  float lambda;
+  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
+    return {{"lambda", &lambda}};
+  }
+
+  template <typename Device, typename X, typename Y>
+  void operator()(Device d, X x, Y y) const {
+    auto temp1 = (x > lambda).template cast<T>().eval();
+    auto temp2 = (x < -lambda).template cast<T>().eval();
+    y.device(d) = temp1 * (x - lambda) + temp2 * (x + lambda);
+  }
+};
+
+template <typename T>
+struct SoftShrinkGradFunctor : public BaseActivationFunctor<T> {
+  float lambda;
+  typename BaseActivationFunctor<T>::AttrPair GetAttrs() {
+    return {{"lambda", &lambda}};
+  }
+  template <typename Device, typename X, typename Y, typename dY, typename dX>
+  void operator()(Device d, X x, Y y, dY dy, dX dx) const {
+    auto temp1 = (x > lambda).template cast<T>().eval();
+    auto temp2 = (x < -lambda).template cast<T>().eval();
+    dx.device(d) = dy * (temp1 + temp2).template cast<T>();
+  }
+};
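
For reference, a NumPy sketch of the same forward/backward pair (hedged, mirroring the functors above rather than the operator's actual kernels; `lam` stands in for the `lambda` attribute):

```python
import numpy as np

def softshrink(x, lam=0.5):
    # x - lam above the threshold, x + lam below -lam, zero in between
    return np.where(x > lam, x - lam, np.where(x < -lam, x + lam, 0.0))

def softshrink_grad(x, lam=0.5):
    # derivative is 1 outside [-lam, lam] and 0 inside
    return ((x > lam) | (x < -lam)).astype(x.dtype)

x = np.array([-1.0, -0.2, 0.0, 0.2, 1.0])
print(softshrink(x))       # [-0.5  0.   0.   0.   0.5]
print(softshrink_grad(x))  # [1. 0. 0. 0. 1.]
```
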
+
 // sqrt(x) = x^(1/2)
 template <typename T>
 struct SqrtFunctor : public BaseActivationFunctor<T> {
@@ -471,9 +537,11 @@ struct STanhGradFunctor : public BaseActivationFunctor<T> {
 
 #define FOR_EACH_KERNEL_FUNCTOR(__macro)                          \
   __macro(sigmoid, SigmoidFunctor, SigmoidGradFunctor);           \
+  __macro(logsigmoid, LogSigmoidFunctor, LogSigmoidGradFunctor);  \
   __macro(exp, ExpFunctor, ExpGradFunctor);                       \
   __macro(relu, ReluFunctor, ReluGradFunctor);                    \
   __macro(tanh, TanhFunctor, TanhGradFunctor);                    \
+  __macro(softshrink, SoftShrinkFunctor, SoftShrinkGradFunctor);  \
   __macro(sqrt, SqrtFunctor, SqrtGradFunctor);                    \
   __macro(abs, AbsFunctor, AbsGradFunctor);                       \
   __macro(reciprocal, ReciprocalFunctor, ReciprocalGradFunctor);  \
@@ -484,7 +552,7 @@ struct STanhGradFunctor : public BaseActivationFunctor<T> {
   __macro(pow, PowFunctor, PowGradFunctor);                       \
   __macro(stanh, STanhFunctor, STanhGradFunctor);                 \
   __macro(softsign, SoftsignFunctor, SoftsignGradFunctor);        \
-  __macro(leaky_relu, LeakyReluFunctor, LeakyReluGradFunctor);    \
   __macro(relu6, Relu6Functor, Relu6GradFunctor);                 \
+  __macro(leaky_relu, LeakyReluFunctor, LeakyReluGradFunctor);    \
   __macro(tanh_shrink, TanhShrinkFunctor, TanhShrinkGradFunctor); \
   __macro(elu, ELUFunctor, ELUGradFunctor)
diff --git a/python/paddle/v2/framework/tests/test_activation_op.py b/python/paddle/v2/framework/tests/test_activation_op.py
index 4528ed555d..9157e00f6e 100644
--- a/python/paddle/v2/framework/tests/test_activation_op.py
+++ b/python/paddle/v2/framework/tests/test_activation_op.py
@@ -33,6 +33,21 @@ class TestSigmoid(OpTest):
         self.check_grad(['X'], 'Y', max_relative_error=0.008)
 
 
+class TestLogSigmoid(OpTest):
+    def setUp(self):
+        self.op_type = "logsigmoid"
+        self.inputs = {
+            'X': np.random.uniform(-1, 1, [11, 17]).astype("float32")
+        }
+        self.outputs = {'Y': np.log(1 / (1 + np.exp(-self.inputs['X'])))}
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Y', max_relative_error=0.008)
+
+
 class TestTanh(OpTest):
     def setUp(self):
         self.op_type = "tanh"
@@ -63,6 +78,26 @@ class TestTanhShrink(OpTest):
         self.check_grad(['X'], 'Y', max_relative_error=0.008)
 
 
+class TestSoftShrink(OpTest):
+    def setUp(self):
+        self.op_type = "softshrink"
+        lambda_val = 0.1
+        self.attrs = {'lambda': lambda_val}
+        self.inputs = {
+            'X': np.random.uniform(0.25, 10, [4, 4]).astype("float32")
+        }
+        y = np.copy(self.inputs['X'])
+        y = (y < -lambda_val) * (y + lambda_val) + (y > lambda_val) * (
+            y - lambda_val)
+        self.outputs = {'Y': y}
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Y', max_relative_error=0.007)
+
+
 class TestSqrt(OpTest):
     def setUp(self):
         self.op_type = "sqrt"