From b63e1c6d8a3e44b68263399f9720165703deccfd Mon Sep 17 00:00:00 2001 From: dongzhihong Date: Mon, 14 Aug 2017 11:49:21 +0800 Subject: [PATCH 001/115] "op name" --- paddle/operators/name_convention.md | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 paddle/operators/name_convention.md diff --git a/paddle/operators/name_convention.md b/paddle/operators/name_convention.md new file mode 100644 index 0000000000..da5bcb7485 --- /dev/null +++ b/paddle/operators/name_convention.md @@ -0,0 +1,11 @@ +## Operator Name Convention + +To make the operator document itself more clear. we recommend operator names observe the listing conventions. + +### Input/Output names + +Variable name is uppercase. e.g. `X`, `Y` + +Tensor name is lowercase. e.g. `tensor` + +if only have one output, use `Out` From e9eee6f78559d6318e554b7b5ab021b271d8ddb6 Mon Sep 17 00:00:00 2001 From: dongzhihong Date: Tue, 15 Aug 2017 09:57:40 +0800 Subject: [PATCH 002/115] "polish words" --- paddle/operators/name_convention.md | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/paddle/operators/name_convention.md b/paddle/operators/name_convention.md index da5bcb7485..2260bf5660 100644 --- a/paddle/operators/name_convention.md +++ b/paddle/operators/name_convention.md @@ -4,8 +4,12 @@ To make the operator document itself more clear. we recommend operator names obs ### Input/Output names -Variable name is uppercase. e.g. `X`, `Y` +* Variable name is prefer uppercase. e.g. `X`, `Y`. But when the variable is tensor, its name should lowercase. e.g. `matrix`, to discriminate with otherone. -Tensor name is lowercase. e.g. `tensor` +* element wise operator, math operator or similar op, please obey common name convention. if the operator only have one output, use `Out`. -if only have one output, use `Out` +* we prefer more meaningful input/output name. + +### Best Practice +e.g. `rowwise_add`, inputs : `X`, `Y`, outputs : `Out` +e.g. `cosine` , inputs : `X`, `axis`, outputs : `Out` From 26cec83901dc443a60aef911c1ad2baf882eb474 Mon Sep 17 00:00:00 2001 From: wanghaoshuang Date: Wed, 30 Aug 2017 19:54:14 +0800 Subject: [PATCH 003/115] Add pad op --- paddle/operators/CMakeLists.txt | 1 + paddle/operators/pad_op.cc | 77 ++++++++++++++++++ paddle/operators/pad_op.cu | 21 +++++ paddle/operators/pad_op.h | 81 +++++++++++++++++++ paddle/pybind/CMakeLists.txt | 3 +- paddle/pybind/pybind.cc | 1 + .../paddle/v2/framework/tests/test_pad_op.py | 32 ++++++++ 7 files changed, 215 insertions(+), 1 deletion(-) create mode 100644 paddle/operators/pad_op.cc create mode 100644 paddle/operators/pad_op.cu create mode 100644 paddle/operators/pad_op.h create mode 100644 python/paddle/v2/framework/tests/test_pad_op.py diff --git a/paddle/operators/CMakeLists.txt b/paddle/operators/CMakeLists.txt index f466dbc79a..1a759133e1 100644 --- a/paddle/operators/CMakeLists.txt +++ b/paddle/operators/CMakeLists.txt @@ -72,3 +72,4 @@ op_library(uniform_random_op SRCS uniform_random_op.cc uniform_random_op.cu) op_library(lookup_table_op SRCS lookup_table_op.cc lookup_table_op.cu) op_library(scale_op SRCS scale_op.cc scale_op.cu DEPS net_op) op_library(minus_op SRCS minus_op.cc minus_op.cu DEPS scale_op) +op_library(pad_op SRCS pad_op.cc pad_op.cu) diff --git a/paddle/operators/pad_op.cc b/paddle/operators/pad_op.cc new file mode 100644 index 0000000000..f96d61669b --- /dev/null +++ b/paddle/operators/pad_op.cc @@ -0,0 +1,77 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#include "paddle/operators/pad_op.h" + +namespace paddle { +namespace operators { + +using framework::Tensor; + +class PadOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(const framework::InferShapeContext &ctx) const override { + auto dim0 = ctx.Input("X")->dims(); + auto dim1 = ctx.Output("Out")->dims(); + auto paddings = GetAttr>>("paddings"); + for (int i = 0; i < dim0.size(); ++i) { + dim1[i] = dim0[i] + paddings[i][0] + paddings[i][1]; + } + ctx.Output("Out")->Resize(dim1); + } +}; + +class MulOpMaker : public framework::OpProtoAndCheckerMaker { + public: + MulOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "The input of pad op"); + AddOutput("Out", "The output of pad op"); + AddComment(R"DOC( +Pad Operator. +)DOC"); + AddAttr>>( + "paddings", "The padding rules for each dimension"); + AddAttr("pad_value", "The value to be padded into tensor") + .SetDefault(0.0f); + } +}; + +class PadOpGrad : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(const framework::InferShapeContext &ctx) const override { + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) should not be null"); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")), + "Input(Out@GRAD) should not be null"); + auto x_dims = ctx.Input("X")->dims(); + auto *x_grad = ctx.Output(framework::GradVarName("X")); + + x_grad->Resize(x_dims); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP(pad, ops::PadOp, ops::PadOpMaker, pad_grad, ops::PadOpGrad); +REGISTER_OP_CPU_KERNEL(pad, ops::PadKernel); +REGISTER_OP_CPU_KERNEL(pad_grad, + ops::PadGradKernel); diff --git a/paddle/operators/pad_op.cu b/paddle/operators/pad_op.cu new file mode 100644 index 0000000000..555a7dba23 --- /dev/null +++ b/paddle/operators/pad_op.cu @@ -0,0 +1,21 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#define EIGEN_USE_GPU +#include "paddle/operators/pad_op.h" + +namespace ops = paddle::operators; +REGISTER_OP_GPU_KERNEL(pad, ops::PadKernel); +REGISTER_OP_GPU_KERNEL(pad_grad, + ops::PadGradKernel); diff --git a/paddle/operators/pad_op.h b/paddle/operators/pad_op.h new file mode 100644 index 0000000000..6a743bd31c --- /dev/null +++ b/paddle/operators/pad_op.h @@ -0,0 +1,81 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#pragma once + +#include "paddle/operators/math/math_function.h" + +#include "paddle/framework/eigen.h" +#include "paddle/framework/op_registry.h" + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; + +template +using EigenTensor = framework::EigenTensor; + +template +class PadKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + auto paddings = + context.op_.GetAttr>>("paddings"); + T pad_value = context.op_.GetAttr("pad_value"); + + auto* X = context.Input("X"); + auto* Out = context.Output("Out"); + Out->mutable_data(context.GetPlace()); + auto dims = X->dims(); + + // Eigen::TensorMap> X_tensor = EigenTensor::From(*X); + // Eigen::TensorMap> + // Out_tensor = EigenTensor::From(*Out); + EigenTensor::ConstType X_tensor = + EigenTensor::From(*X); + EigenTensor::Type Out_tensor = + EigenTensor::From(*Out); + Out_tensor = X_tensor.pad(paddings, pad_value); + } +}; + +template +class PadGradKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + std::vector> paddings = + context.op_.GetAttr>>("paddings"); + for (int i = 0; i < paddings.size(); ++i) { + paddings[0].first = -paddings[0].first; + paddings[1].second = -paddings[1].second; + } + auto* dOut = ctx.Input(framework::GradVarName("Out")); + auto dims = dOut->dims(); + + auto* dX = ctx.Output(framework::GradVarName("X")); + dX->mutable_data(ctx.GetPlace()); + + EigenTensor::Type dX_tensor = + EigenTensor::From(*dX); + EigenTensor::ConstType dOut_tensor = + EigenTensor::From(*dOut); + dX_tensor = dOut_tensor.pad(paddings, 0); + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/pybind/CMakeLists.txt b/paddle/pybind/CMakeLists.txt index abb9c248ee..17ef1e8291 100644 --- a/paddle/pybind/CMakeLists.txt +++ b/paddle/pybind/CMakeLists.txt @@ -17,5 +17,6 @@ cc_library(paddle_pybind SHARED fill_zeros_like_op lookup_table_op scale_op - minus_op) + minus_op + pad_op) endif(WITH_PYTHON) diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc index 8fa8be2cef..0176eb7a88 100644 --- a/paddle/pybind/pybind.cc +++ b/paddle/pybind/pybind.cc @@ -47,6 +47,7 @@ USE_OP(scale); USE_OP_ITSELF(identity); USE_OP(minus); USE_CPU_ONLY_OP(gather); +USE_OP(pad); namespace paddle { namespace framework { diff --git a/python/paddle/v2/framework/tests/test_pad_op.py b/python/paddle/v2/framework/tests/test_pad_op.py new file mode 100644 index 0000000000..89ac7e7e1d --- /dev/null +++ b/python/paddle/v2/framework/tests/test_pad_op.py @@ -0,0 +1,32 @@ +import unittest +import numpy as np +from gradient_checker import GradientChecker, create_op +from op_test_util import OpTestMeta + + +class TestPadOp(unittest.TestCase): + __metaclass__ = OpTestMeta + + def setUp(self): + self.type = "pad" + self.inputs = {'X': np.random.random((16, 16)).astype("float32"), } + self.attrs['paddings'] = ((0, 1), (2, 3)) + self.attrs['pad_value'] = 0 + self.outputs = { + 'Out': np.pad(self.inputs['X'], + self.attrs['paddings'], + mode='constant', + constant_value=0) + } + + +class PadGradOpTest(GradientChecker): + def test_pad(self): + op = Operator("pad", paddings=((0, 1), (2, 3)), pad_value=0) + inputs = {'X': np.random.random((16, 16)).astype("float32"), } + + self.check_grad(op, inputs, set(["X"]), "Out", max_relative_error=0.5) + + +if __name__ == '__main__': + unittest.main() From 7683e35816f448351e4a4037b5b4c6f55e34835d Mon Sep 17 00:00:00 2001 From: zchen0211 Date: Sun, 3 Sep 2017 23:17:43 +0000 Subject: [PATCH 004/115] cond op --- paddle/operators/cond_op.cc | 56 +++++++++++++++ paddle/operators/cond_op.h | 131 ++++++++++++++++++++++++++++++++++++ 2 files changed, 187 insertions(+) create mode 100644 paddle/operators/cond_op.cc create mode 100644 paddle/operators/cond_op.h diff --git a/paddle/operators/cond_op.cc b/paddle/operators/cond_op.cc new file mode 100644 index 0000000000..be5e0e6a5b --- /dev/null +++ b/paddle/operators/cond_op.cc @@ -0,0 +1,56 @@ +#include "paddle/operators/switch_op.h" + +namespace paddle { +namespace operators { + +void CondOp::InferShape(const std::shared_ptr& scope) const { + // Create two Nets + // Create two scopes + for (int i = 0; i < 2; ++i) + sub_scope.push_back(scope.NewScope()); + + for (int i = 0; i < 2; ++i) + sub_net_op_[i].InferShape(sub_scope[i]); + + for (int i = 0; i < 2; ++i) + tensor_index = new Tensor(); + + for (int i = 0; i < 2; ++i) + _index.push_back(vector()); + + for (int i = 0; i < 2; ++i) + { + // for (auto& input : net_op_[i]->Inputs()) { + for (auto& input : GetAttr>("True_inputs")) { + auto var_name = input.second; + // Create a new tensor in sub-scope for input-type tensor + sub_scope[i]->NewVar(var_name)->GetMutable(); + } + } +} + +class CondOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker { +public: + CondOpProtoAndCheckerMaker(OpProto *proto, OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("Cond", "The condition, which is a bool vector"); + AddInput("Xs", "Inputs of Subnets"); + AddAttr>("sub_inputs", "Inputs of the Whole Op, net op and so forth"); + AddAttr>("sub_outputs", "True Outputs needs merge"); + AddOutput("Outs", "The output of cond op"); + + AddComment(R"DOC( +Sample dependent Cond Operator: +The equation is: Out[i] = subnet_t[i], if Cond[i] == true +Out[i] = subnet_t[i], if Cond[i] == false +)DOC"); + } +}; + +} // namespace operators +} // namespace paddle + +REGISTER_OP_WITHOUT_GRADIENT(cond_op, + paddle::operators::CondOp, + paddle::operators::CondOpProtoAndCheckerMaker); + diff --git a/paddle/operators/cond_op.h b/paddle/operators/cond_op.h new file mode 100644 index 0000000000..e9ae41b191 --- /dev/null +++ b/paddle/operators/cond_op.h @@ -0,0 +1,131 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once +#include "glog/logging.h" +#include "paddle/framework/eigen.h" +#include "paddle/framework/operator.h" +#include "paddle/framework/ddim.h" +#include "paddle/operators/gather.h" +#include + +namespace paddle { +namespace operators { + +using namespace paddle::framework; + +template +class CondOp final : public OperatorBase { +public: + /** + * InferShape must be called before Run. + */ + void InferShape(const std::shared_ptr& scope) const override; + + // Set True Block + void set_truenet(std::unique_ptr net) { + sub_net_op_[0] = std::move(net); + } + + // Set False Block + void set_falsenet(std::unique_ptr net) { + sub_net_op_[1] = std::move(net); + } + + virtual void Run(const std::shared_ptr& scope, + const platform::DeviceContext& dev_ctx) const override { + auto* cond = context.Input("Cond"); + // Step 1: get the true/false index at runtime + // _index[0]: vector, contains all index for cond[i] == true + // _index[1]: vector, contains all index for cond[i] == false + for(int i = 0; i < 2; ++i) + _index[i].clear(); + for(int i = 0; i < cond->dims()[0]; ++i) { + if (cond->data()[i]) + _index[0].push_back(i); + else + _index[1].push_back(i); + } + // put _index[0] and _index[1] into two tensors + // tensor_index[0] and tensor_index[1] + framework::DDim dim_ = paddle::framework::make_ddim({0}); + for(int i = 0; i < 2; ++i) { + dim_[0] = _index[i].size(); + int* tmp_ = _index[i]->mutable_data(dim_, CPUPlace()); + tensor_index[i]->Resize(dim_); + memcpy(tmp_, index_[i], dim_[0] * sizeof(int)); + } + + + // Step 2: collect data by calling gather + for (int i = 0; i < 2; ++i) { + // i= 0/i for True and False branches respectively + for (auto& input : GetAttr>("sub_inputs")) { + auto var_name = input.second; + // find Tensor + Tensor* Tensor_parent = scope.FindVar(var_name)->GetMutable(); + Tensor* Tensor_child = sub_scope_[i].FindVar(var_name)->GetMutable(); + Gather(dev_ctx.GetPlace(), tensor_parent, tensor_index[i], tensor_child); + } + } + + // Step 3: run + for (int i = 0; i < 2; ++i) + sub_net_op_[i]->Run(sub_scope_[i], dev_ctx); + + // Step 4: merge output results + for (int i = 0; i < 2; ++i) { + // i= 0/i for True and False branches respectively + for (auto& output : GetAttr>("sub_outputs")) { + auto var_name = output.second; + // find Tensor + Tensor* Tensor_parent = scope.FindVar(var_name)->GetMutable(); + Tensor* Tensor_child = sub_scope_[i].FindVar(var_name)->GetMutable(); + ScatterUpdate(dev_ctx.GetPlace(), tensor_child, tensor_index[i], tensor_parent); + } + } + } + +private: + // sub_scope_[0]: true scope + // sub_scope_[1]: false scope + std::vector sub_scope_; + + // sub_net_op_[0]: subnet_t + // sub_net_op_[1]: subnet_f + std::vector> sub_net_op_; + + // tensor_index[0]: True_index tensor + // tensor_index[1]: False_index; + std::vector tensor_index; + + // _index[0]: True_index; + // _index[1]: False_index; + vector > _index; +}; + +/* +class CondGradientOp final : public OperatorBase { +public: + void Init() override; + + virtual void InferShape(const std::shared_ptr& scope) const override; + + virtual void Run(const std::shared_ptr& scope, + const platform::DeviceContext& dev_ctx) const override; +};*/ + +} // namespace operators +} // namespace paddle + From adfef243d2d83e90fe59488864486f6db9449cc3 Mon Sep 17 00:00:00 2001 From: Zhuoyuan Date: Tue, 5 Sep 2017 23:41:48 +0000 Subject: [PATCH 005/115] tensor element size support --- paddle/framework/tensor.h | 11 ++- paddle/framework/tensor_impl.h | 2 +- paddle/framework/tensor_test.cc | 2 + paddle/operators/cond_op.cc | 56 -------------- paddle/operators/cond_op.h | 131 -------------------------------- 5 files changed, 13 insertions(+), 189 deletions(-) delete mode 100644 paddle/operators/cond_op.cc delete mode 100644 paddle/operators/cond_op.h diff --git a/paddle/framework/tensor.h b/paddle/framework/tensor.h index 643f875491..657d3e6628 100644 --- a/paddle/framework/tensor.h +++ b/paddle/framework/tensor.h @@ -75,6 +75,9 @@ class Tensor { template inline T* mutable_data(DDim dims, platform::Place place); + /*! Size of a single element in data() */ + inline size_t element_size() { return holder_->element_size(); } + /*! Return the dimensions of the memory block. */ inline const DDim& dims() const; @@ -123,6 +126,7 @@ class Tensor { virtual ~Placeholder() {} virtual void* ptr() const = 0; virtual size_t size() const = 0; + virtual size_t element_size() const = 0; virtual std::type_index type() const = 0; virtual platform::Place place() const = 0; }; @@ -133,7 +137,8 @@ class Tensor { : ptr_(static_cast(memory::Alloc(place, size)), memory::PODDeleter(place)), place_(place), - size_(size) { + size_(size), + element_size_(sizeof(T)) { PADDLE_ENFORCE_NOT_NULL(ptr_, "Insufficient %s memory to allocation.", (is_cpu_place(place_) ? "CPU" : "GPU")); } @@ -142,6 +147,7 @@ class Tensor { virtual platform::Place place() const { return place_; } virtual void* ptr() const { return static_cast(ptr_.get()); } virtual std::type_index type() const { return std::type_index(typeid(T)); } + virtual size_t element_size() const { return element_size_; } /*! the pointer of memory block. */ std::unique_ptr> ptr_; @@ -151,6 +157,9 @@ class Tensor { /*! the size of memory block. */ size_t size_; + + /*! the size of a single element */ + size_t element_size_; }; /*! holds the memory block if allocated. */ diff --git a/paddle/framework/tensor_impl.h b/paddle/framework/tensor_impl.h index 7893e233b7..6a989a31cc 100644 --- a/paddle/framework/tensor_impl.h +++ b/paddle/framework/tensor_impl.h @@ -22,7 +22,7 @@ namespace framework { template inline void Tensor::check_memory_size() const { PADDLE_ENFORCE_NOT_NULL( - holder_, "Tenosr holds no memory. Call Tensor::mutable_data first."); + holder_, "Tensor holds no memory. Call Tensor::mutable_data first."); PADDLE_ENFORCE_GE( holder_->size(), product(dims_) * sizeof(T) + offset_, "Tensor's dims_ is out of bound. Call Tensor::mutable_data " diff --git a/paddle/framework/tensor_test.cc b/paddle/framework/tensor_test.cc index 7db38d5cae..da0a4d6363 100644 --- a/paddle/framework/tensor_test.cc +++ b/paddle/framework/tensor_test.cc @@ -59,6 +59,8 @@ TEST(Tensor, MutableData) { // initialization p1 = src_tensor.mutable_data(make_ddim({1, 2, 3}), CPUPlace()); EXPECT_NE(p1, nullptr); + // check tensor type + EXPECT_EQ(src_tensor.element_size(), sizeof(float)); // set src_tensor a new dim with large size // momery is supposed to be re-allocated p2 = src_tensor.mutable_data(make_ddim({3, 4}), CPUPlace()); diff --git a/paddle/operators/cond_op.cc b/paddle/operators/cond_op.cc deleted file mode 100644 index be5e0e6a5b..0000000000 --- a/paddle/operators/cond_op.cc +++ /dev/null @@ -1,56 +0,0 @@ -#include "paddle/operators/switch_op.h" - -namespace paddle { -namespace operators { - -void CondOp::InferShape(const std::shared_ptr& scope) const { - // Create two Nets - // Create two scopes - for (int i = 0; i < 2; ++i) - sub_scope.push_back(scope.NewScope()); - - for (int i = 0; i < 2; ++i) - sub_net_op_[i].InferShape(sub_scope[i]); - - for (int i = 0; i < 2; ++i) - tensor_index = new Tensor(); - - for (int i = 0; i < 2; ++i) - _index.push_back(vector()); - - for (int i = 0; i < 2; ++i) - { - // for (auto& input : net_op_[i]->Inputs()) { - for (auto& input : GetAttr>("True_inputs")) { - auto var_name = input.second; - // Create a new tensor in sub-scope for input-type tensor - sub_scope[i]->NewVar(var_name)->GetMutable(); - } - } -} - -class CondOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker { -public: - CondOpProtoAndCheckerMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("Cond", "The condition, which is a bool vector"); - AddInput("Xs", "Inputs of Subnets"); - AddAttr>("sub_inputs", "Inputs of the Whole Op, net op and so forth"); - AddAttr>("sub_outputs", "True Outputs needs merge"); - AddOutput("Outs", "The output of cond op"); - - AddComment(R"DOC( -Sample dependent Cond Operator: -The equation is: Out[i] = subnet_t[i], if Cond[i] == true -Out[i] = subnet_t[i], if Cond[i] == false -)DOC"); - } -}; - -} // namespace operators -} // namespace paddle - -REGISTER_OP_WITHOUT_GRADIENT(cond_op, - paddle::operators::CondOp, - paddle::operators::CondOpProtoAndCheckerMaker); - diff --git a/paddle/operators/cond_op.h b/paddle/operators/cond_op.h deleted file mode 100644 index e9ae41b191..0000000000 --- a/paddle/operators/cond_op.h +++ /dev/null @@ -1,131 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once -#include "glog/logging.h" -#include "paddle/framework/eigen.h" -#include "paddle/framework/operator.h" -#include "paddle/framework/ddim.h" -#include "paddle/operators/gather.h" -#include - -namespace paddle { -namespace operators { - -using namespace paddle::framework; - -template -class CondOp final : public OperatorBase { -public: - /** - * InferShape must be called before Run. - */ - void InferShape(const std::shared_ptr& scope) const override; - - // Set True Block - void set_truenet(std::unique_ptr net) { - sub_net_op_[0] = std::move(net); - } - - // Set False Block - void set_falsenet(std::unique_ptr net) { - sub_net_op_[1] = std::move(net); - } - - virtual void Run(const std::shared_ptr& scope, - const platform::DeviceContext& dev_ctx) const override { - auto* cond = context.Input("Cond"); - // Step 1: get the true/false index at runtime - // _index[0]: vector, contains all index for cond[i] == true - // _index[1]: vector, contains all index for cond[i] == false - for(int i = 0; i < 2; ++i) - _index[i].clear(); - for(int i = 0; i < cond->dims()[0]; ++i) { - if (cond->data()[i]) - _index[0].push_back(i); - else - _index[1].push_back(i); - } - // put _index[0] and _index[1] into two tensors - // tensor_index[0] and tensor_index[1] - framework::DDim dim_ = paddle::framework::make_ddim({0}); - for(int i = 0; i < 2; ++i) { - dim_[0] = _index[i].size(); - int* tmp_ = _index[i]->mutable_data(dim_, CPUPlace()); - tensor_index[i]->Resize(dim_); - memcpy(tmp_, index_[i], dim_[0] * sizeof(int)); - } - - - // Step 2: collect data by calling gather - for (int i = 0; i < 2; ++i) { - // i= 0/i for True and False branches respectively - for (auto& input : GetAttr>("sub_inputs")) { - auto var_name = input.second; - // find Tensor - Tensor* Tensor_parent = scope.FindVar(var_name)->GetMutable(); - Tensor* Tensor_child = sub_scope_[i].FindVar(var_name)->GetMutable(); - Gather(dev_ctx.GetPlace(), tensor_parent, tensor_index[i], tensor_child); - } - } - - // Step 3: run - for (int i = 0; i < 2; ++i) - sub_net_op_[i]->Run(sub_scope_[i], dev_ctx); - - // Step 4: merge output results - for (int i = 0; i < 2; ++i) { - // i= 0/i for True and False branches respectively - for (auto& output : GetAttr>("sub_outputs")) { - auto var_name = output.second; - // find Tensor - Tensor* Tensor_parent = scope.FindVar(var_name)->GetMutable(); - Tensor* Tensor_child = sub_scope_[i].FindVar(var_name)->GetMutable(); - ScatterUpdate(dev_ctx.GetPlace(), tensor_child, tensor_index[i], tensor_parent); - } - } - } - -private: - // sub_scope_[0]: true scope - // sub_scope_[1]: false scope - std::vector sub_scope_; - - // sub_net_op_[0]: subnet_t - // sub_net_op_[1]: subnet_f - std::vector> sub_net_op_; - - // tensor_index[0]: True_index tensor - // tensor_index[1]: False_index; - std::vector tensor_index; - - // _index[0]: True_index; - // _index[1]: False_index; - vector > _index; -}; - -/* -class CondGradientOp final : public OperatorBase { -public: - void Init() override; - - virtual void InferShape(const std::shared_ptr& scope) const override; - - virtual void Run(const std::shared_ptr& scope, - const platform::DeviceContext& dev_ctx) const override; -};*/ - -} // namespace operators -} // namespace paddle - From 3eadb42d3d6e5c78b385104b47d5f564b20e3957 Mon Sep 17 00:00:00 2001 From: wanghaoshuang Date: Wed, 6 Sep 2017 10:58:23 +0800 Subject: [PATCH 006/115] Fix eigen error. --- paddle/operators/pad_op.cc | 12 +- paddle/operators/pad_op.h | 120 +++++++++++++----- .../paddle/v2/framework/tests/test_pad_op.py | 13 +- 3 files changed, 101 insertions(+), 44 deletions(-) diff --git a/paddle/operators/pad_op.cc b/paddle/operators/pad_op.cc index f96d61669b..5dee8d0f5e 100644 --- a/paddle/operators/pad_op.cc +++ b/paddle/operators/pad_op.cc @@ -26,18 +26,18 @@ class PadOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { auto dim0 = ctx.Input("X")->dims(); - auto dim1 = ctx.Output("Out")->dims(); - auto paddings = GetAttr>>("paddings"); + auto paddings = GetAttr>>("paddings"); + std::vector dim1(dim0.size()); for (int i = 0; i < dim0.size(); ++i) { - dim1[i] = dim0[i] + paddings[i][0] + paddings[i][1]; + dim1[i] = dim0[i] + paddings[i].first + paddings[i].second; } - ctx.Output("Out")->Resize(dim1); + ctx.Output("Out")->Resize(paddle::framework::make_ddim(dim1)); } }; -class MulOpMaker : public framework::OpProtoAndCheckerMaker { +class PadOpMaker : public framework::OpProtoAndCheckerMaker { public: - MulOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) + PadOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "The input of pad op"); AddOutput("Out", "The output of pad op"); diff --git a/paddle/operators/pad_op.h b/paddle/operators/pad_op.h index 6a743bd31c..9a0a064d75 100644 --- a/paddle/operators/pad_op.h +++ b/paddle/operators/pad_op.h @@ -28,52 +28,102 @@ template using EigenTensor = framework::EigenTensor; +template +void PadFunction(const framework::ExecutionContext& context) { + auto pads = context.op_.GetAttr>>("paddings"); + Eigen::array, D> paddings; + for (int i = 0; i < pads.size(); ++i) { + paddings[i] = pads[i]; + } + T pad_value = context.op_.GetAttr("pad_value"); + + auto* X = context.Input("X"); + auto* Out = context.Output("Out"); + Out->mutable_data(context.GetPlace()); + auto dims = X->dims(); + + auto X_tensor = EigenTensor::From(*X); + auto Out_tensor = EigenTensor::From(*Out); + auto place = context.GetEigenDevice(); + Out_tensor.device(place) = X_tensor.pad(paddings, pad_value); +} + template class PadKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { - auto paddings = - context.op_.GetAttr>>("paddings"); - T pad_value = context.op_.GetAttr("pad_value"); - - auto* X = context.Input("X"); - auto* Out = context.Output("Out"); - Out->mutable_data(context.GetPlace()); - auto dims = X->dims(); - - // Eigen::TensorMap> X_tensor = EigenTensor::From(*X); - // Eigen::TensorMap> - // Out_tensor = EigenTensor::From(*Out); - EigenTensor::ConstType X_tensor = - EigenTensor::From(*X); - EigenTensor::Type Out_tensor = - EigenTensor::From(*Out); - Out_tensor = X_tensor.pad(paddings, pad_value); + int dim = context.Input("X")->dims().size(); + switch (dim) { + case 1: + PadFunction(context); + break; + case 2: + PadFunction(context); + break; + case 3: + PadFunction(context); + break; + case 4: + PadFunction(context); + break; + case 5: + PadFunction(context); + break; + case 6: + PadFunction(context); + break; + default: + LOG(ERROR) << "Only ranks up to 6 supported."; + } } }; +template +void PadGradFunction(const framework::ExecutionContext& context) { + auto pads = context.op_.GetAttr>>("paddings"); + Eigen::array, D> paddings; + for (int i = 0; i < pads.size(); ++i) { + paddings[0].first = -paddings[0].first; + paddings[1].second = -paddings[1].second; + } + auto* dOut = context.Input(framework::GradVarName("Out")); + auto* dX = context.Output(framework::GradVarName("X")); + dX->mutable_data(context.GetPlace()); + + auto dX_tensor = EigenTensor::From(*dX); + auto dOut_tensor = EigenTensor::From(*dOut); + auto place = context.GetEigenDevice(); + dX_tensor.device(place) = dOut_tensor.pad(paddings, 0); +} + template class PadGradKernel : public framework::OpKernel { public: - void Compute(const framework::ExecutionContext& ctx) const override { - std::vector> paddings = - context.op_.GetAttr>>("paddings"); - for (int i = 0; i < paddings.size(); ++i) { - paddings[0].first = -paddings[0].first; - paddings[1].second = -paddings[1].second; + void Compute(const framework::ExecutionContext& context) const override { + size_t dim = + context.Input(framework::GradVarName("Out"))->dims().size(); + switch (dim) { + case 1: + PadGradFunction(context); + break; + case 2: + PadGradFunction(context); + break; + case 3: + PadGradFunction(context); + break; + case 4: + PadGradFunction(context); + break; + case 5: + PadGradFunction(context); + break; + case 6: + PadGradFunction(context); + break; + default: + LOG(ERROR) << "Only ranks up to 6 supported."; } - auto* dOut = ctx.Input(framework::GradVarName("Out")); - auto dims = dOut->dims(); - - auto* dX = ctx.Output(framework::GradVarName("X")); - dX->mutable_data(ctx.GetPlace()); - - EigenTensor::Type dX_tensor = - EigenTensor::From(*dX); - EigenTensor::ConstType dOut_tensor = - EigenTensor::From(*dOut); - dX_tensor = dOut_tensor.pad(paddings, 0); } }; diff --git a/python/paddle/v2/framework/tests/test_pad_op.py b/python/paddle/v2/framework/tests/test_pad_op.py index 89ac7e7e1d..b862033d8c 100644 --- a/python/paddle/v2/framework/tests/test_pad_op.py +++ b/python/paddle/v2/framework/tests/test_pad_op.py @@ -1,5 +1,6 @@ import unittest import numpy as np +from paddle.v2.framework.op import Operator from gradient_checker import GradientChecker, create_op from op_test_util import OpTestMeta @@ -10,19 +11,25 @@ class TestPadOp(unittest.TestCase): def setUp(self): self.type = "pad" self.inputs = {'X': np.random.random((16, 16)).astype("float32"), } - self.attrs['paddings'] = ((0, 1), (2, 3)) + self.attrs = {} + self.attrs['paddings'] = [(0, 1), (2, 3)] self.attrs['pad_value'] = 0 self.outputs = { 'Out': np.pad(self.inputs['X'], self.attrs['paddings'], mode='constant', - constant_value=0) + constant_values=0) } class PadGradOpTest(GradientChecker): def test_pad(self): - op = Operator("pad", paddings=((0, 1), (2, 3)), pad_value=0) + op = Operator( + type="pad", + X="X", + Out="Out", + paddings=[(0, 1), (2, 3)], + pad_value=0) inputs = {'X': np.random.random((16, 16)).astype("float32"), } self.check_grad(op, inputs, set(["X"]), "Out", max_relative_error=0.5) From 9f8e4981384d247e461290d7ceb642486663390d Mon Sep 17 00:00:00 2001 From: wanghaoshuang Date: Wed, 6 Sep 2017 11:59:10 +0800 Subject: [PATCH 007/115] Fix some issues. --- paddle/operators/pad_op.cc | 3 +++ paddle/operators/pad_op.h | 10 +++++----- python/paddle/v2/framework/op.py | 2 +- python/paddle/v2/framework/tests/test_pad_op.py | 15 ++++++++++----- 4 files changed, 19 insertions(+), 11 deletions(-) diff --git a/paddle/operators/pad_op.cc b/paddle/operators/pad_op.cc index 5dee8d0f5e..dac1c56bdd 100644 --- a/paddle/operators/pad_op.cc +++ b/paddle/operators/pad_op.cc @@ -27,6 +27,9 @@ class PadOp : public framework::OperatorWithKernel { void InferShape(const framework::InferShapeContext &ctx) const override { auto dim0 = ctx.Input("X")->dims(); auto paddings = GetAttr>>("paddings"); + PADDLE_ENFORCE_EQ( + dim0.size(), paddings.size(), + "Paddings size should be equal to dimension size of input tensor."); std::vector dim1(dim0.size()); for (int i = 0; i < dim0.size(); ++i) { dim1[i] = dim0[i] + paddings[i].first + paddings[i].second; diff --git a/paddle/operators/pad_op.h b/paddle/operators/pad_op.h index 9a0a064d75..234019394c 100644 --- a/paddle/operators/pad_op.h +++ b/paddle/operators/pad_op.h @@ -14,8 +14,6 @@ #pragma once -#include "paddle/operators/math/math_function.h" - #include "paddle/framework/eigen.h" #include "paddle/framework/op_registry.h" @@ -30,12 +28,13 @@ using EigenTensor = framework::EigenTensor; template void PadFunction(const framework::ExecutionContext& context) { - auto pads = context.op_.GetAttr>>("paddings"); + auto pads = + context.op().GetAttr>>("paddings"); Eigen::array, D> paddings; for (int i = 0; i < pads.size(); ++i) { paddings[i] = pads[i]; } - T pad_value = context.op_.GetAttr("pad_value"); + T pad_value = context.op().GetAttr("pad_value"); auto* X = context.Input("X"); auto* Out = context.Output("Out"); @@ -80,7 +79,8 @@ class PadKernel : public framework::OpKernel { template void PadGradFunction(const framework::ExecutionContext& context) { - auto pads = context.op_.GetAttr>>("paddings"); + auto pads = + context.op().GetAttr>>("paddings"); Eigen::array, D> paddings; for (int i = 0; i < pads.size(); ++i) { paddings[0].first = -paddings[0].first; diff --git a/python/paddle/v2/framework/op.py b/python/paddle/v2/framework/op.py index 0349407a85..359ccec814 100644 --- a/python/paddle/v2/framework/op.py +++ b/python/paddle/v2/framework/op.py @@ -96,7 +96,7 @@ class OpDescCreationMethod(object): new_attr.strings.extend(user_defined_attr) elif attr.type == framework_pb2.INT_PAIRS: for p in user_defined_attr: - pair = new_attr.pairs.add() + pair = new_attr.int_pairs.add() pair.first = p[0] pair.second = p[1] else: diff --git a/python/paddle/v2/framework/tests/test_pad_op.py b/python/paddle/v2/framework/tests/test_pad_op.py index b862033d8c..10aeaa752f 100644 --- a/python/paddle/v2/framework/tests/test_pad_op.py +++ b/python/paddle/v2/framework/tests/test_pad_op.py @@ -22,17 +22,22 @@ class TestPadOp(unittest.TestCase): } -class PadGradOpTest(GradientChecker): - def test_pad(self): - op = Operator( +class TestPadGradOp(GradientChecker): + def setUp(self): + self.op = Operator( type="pad", X="X", Out="Out", paddings=[(0, 1), (2, 3)], pad_value=0) - inputs = {'X': np.random.random((16, 16)).astype("float32"), } + self.inputs = {'X': np.random.random((16, 16)).astype("float32"), } + + def test_normal(self): + self.check_grad( + self.op, self.inputs, set(["X"]), "Out", max_relative_error=0.5) - self.check_grad(op, inputs, set(["X"]), "Out", max_relative_error=0.5) + def test_cpu_gpu_compare(self): + self.compare_grad(self.op, self.inputs) if __name__ == '__main__': From 7c30251d165ee9b3b9fd4fbd2440824ebcfbb5d7 Mon Sep 17 00:00:00 2001 From: wanghaoshuang Date: Wed, 6 Sep 2017 13:10:52 +0800 Subject: [PATCH 008/115] Fix padding attribute error. --- paddle/operators/pad_op.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/paddle/operators/pad_op.h b/paddle/operators/pad_op.h index 234019394c..ed547d0a7f 100644 --- a/paddle/operators/pad_op.h +++ b/paddle/operators/pad_op.h @@ -83,8 +83,8 @@ void PadGradFunction(const framework::ExecutionContext& context) { context.op().GetAttr>>("paddings"); Eigen::array, D> paddings; for (int i = 0; i < pads.size(); ++i) { - paddings[0].first = -paddings[0].first; - paddings[1].second = -paddings[1].second; + paddings[i].first = -pads[i].first; + paddings[i].second = -pads[i].second; } auto* dOut = context.Input(framework::GradVarName("Out")); auto* dX = context.Output(framework::GradVarName("X")); From f196ad0210aadb715c12cafea2798ca235d84940 Mon Sep 17 00:00:00 2001 From: Liu Yiqun Date: Tue, 5 Sep 2017 11:33:40 +0000 Subject: [PATCH 009/115] Port fully connected operator, the FCOp c++ implementation and python unittest. --- paddle/operators/CMakeLists.txt | 5 +- paddle/operators/fc_op.cc | 107 ++++++++++++++++++ paddle/operators/scale_op.cc | 1 + paddle/pybind/CMakeLists.txt | 2 +- paddle/pybind/pybind.cc | 1 + .../paddle/v2/framework/tests/CMakeLists.txt | 1 + .../paddle/v2/framework/tests/test_fc_op.py | 30 +++++ 7 files changed, 145 insertions(+), 2 deletions(-) create mode 100644 paddle/operators/fc_op.cc create mode 100644 python/paddle/v2/framework/tests/test_fc_op.py diff --git a/paddle/operators/CMakeLists.txt b/paddle/operators/CMakeLists.txt index e5efcccb0e..2a8beda2c8 100644 --- a/paddle/operators/CMakeLists.txt +++ b/paddle/operators/CMakeLists.txt @@ -47,17 +47,20 @@ endfunction() add_subdirectory(math) list(REMOVE_ITEM GENERAL_OPS + fc_op net_op minus_op mul_op recurrent_op scale_op) +op_library(fc_op SRCS fc_op.cc + DEPS mul_op rowwise_add_op scale_op softmax_op sigmoid_op) op_library(net_op SRCS net_op.cc) op_library(minus_op SRCS minus_op.cc minus_op.cu DEPS scale_op) op_library(mul_op SRCS mul_op.cc mul_op.cu DEPS math_function) op_library(recurrent_op SRCS recurrent_op.cc rnn/recurrent_op_utils.cc - DEPS framework_proto tensor operator net_op) + DEPS framework_proto tensor operator net_op) op_library(scale_op SRCS scale_op.cc scale_op.cu DEPS net_op) foreach(src ${GENERAL_OPS}) diff --git a/paddle/operators/fc_op.cc b/paddle/operators/fc_op.cc new file mode 100644 index 0000000000..ebf8908db7 --- /dev/null +++ b/paddle/operators/fc_op.cc @@ -0,0 +1,107 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/framework/op_registry.h" +#include "paddle/operators/net_op.h" + +namespace paddle { +namespace operators { + +class FCOp : public NetOp { + public: + FCOp(const std::string &type, const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : NetOp(type, inputs, outputs, attrs) { + AppendOp(framework::OpRegistry::CreateOp( + "mul", {{"X", {Input("X")}}, {"Y", {Input("W")}}}, + {{"Out", {Output("mul_out")}}}, {})); + auto b = Input("b"); + if (b != framework::kEmptyVarName) { + AppendOp(framework::OpRegistry::CreateOp( + "rowwise_add", {{"X", {Output("mul_out")}}, {"b", {Input("b")}}}, + {{"Out", {Output("mul_out")}}}, {})); + } + + auto activation = GetAttr("activation"); + AppendOp(framework::OpRegistry::CreateOp( + activation, {{"X", {Output("mul_out")}}}, {{"Y", {Output("Y")}}}, {})); + CompleteAddOp(false); + } +}; + +class FCOpMaker : public framework::OpProtoAndCheckerMaker { + public: + FCOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "The 2D input matrix of FC operator."); + AddInput("W", "The 2D weight matrix of FC operator."); + AddInput("b", "The 1D bias vector of FC operator"); + + AddOutput("Y", "The activated output matrix of FC operator"); + AddOutput("mul_out", "The non-actived output of FC operator, X * W + b") + .AsIntermediate(); + AddAttr("activation", "The activation type of FC operator.") + .SetDefault("identity") + .InEnum({"identity", "sigmoid", "softmax"}); + + AddComment(R"DOC( +Fully Connected Operator, known as Fully Connected Layer or Inner Product Layer +in Convolutional Neural Networks. Neurons in a fully connected layer have +full connections to all activations in the previous layer. +It computes an inner product of a set of +learned weights with a matrix multiplication followed by a bias offset +(optionally). + +Equation: + Y = Act(sum_n{X_i * W_i} + b) + +where X_i is a 2D matrix of size (M x K), usually M is the minibatch size and +K is the number of features. W_i is also a 2D matrix of size (K x N), +where N means the number of neurons in the fully connected layer. +b is a 1D vector of size N. Thus, the output Y is a 2D matrix of size (M x N). +Activation type can be set to `identity` (default), `sigmoid` or `softmax`. + + The config api is `paddle.v2.layer.fc`. +)DOC"); + } +}; + +class FCGradOp : public NetOp { + public: + FCGradOp(const std::string &type, const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : NetOp(type, inputs, outputs, attrs) { + auto y_grad = Input(framework::GradVarName("Y")); + auto mul_out_grad = Input(framework::GradVarName("mul_out")); + auto x_grad = Output(framework::GradVarName("X")); + auto w_grad = Output(framework::GradVarName("W")); + auto b_grad = Output(framework::GradVarName("b")); + + CompleteAddOp(false); + } +}; + +} // namespace operators +} // namespace paddle + +USE_OP(mul); +USE_OP(rowwise_add); +USE_NO_KERNEL_OP(identity); +USE_OP(sigmoid); +USE_OP(softmax); + +namespace ops = paddle::operators; +REGISTER_OP(fc, ops::FCOp, ops::FCOpMaker, fc_grad, ops::FCGradOp); diff --git a/paddle/operators/scale_op.cc b/paddle/operators/scale_op.cc index 8e96a74c94..ffc2f02b0b 100644 --- a/paddle/operators/scale_op.cc +++ b/paddle/operators/scale_op.cc @@ -89,6 +89,7 @@ class IdentityOp : public NetOp { AppendOp(framework::OpRegistry::CreateOp( "scale", {{"X", {Input("X")}}}, {{"Out", {Output("Out")}}}, {{"scale", static_cast(1)}})); + CompleteAddOp(false); } }; diff --git a/paddle/pybind/CMakeLists.txt b/paddle/pybind/CMakeLists.txt index 0003005070..4f05406c7f 100644 --- a/paddle/pybind/CMakeLists.txt +++ b/paddle/pybind/CMakeLists.txt @@ -1,5 +1,5 @@ if(WITH_PYTHON) -cc_library(paddle_pybind SHARED + cc_library(paddle_pybind SHARED SRCS pybind.cc DEPS pybind python backward ${GLOB_OP_LIB}) diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc index 6896422617..ff6bae8f85 100644 --- a/paddle/pybind/pybind.cc +++ b/paddle/pybind/pybind.cc @@ -45,6 +45,7 @@ USE_OP(uniform_random); USE_OP(lookup_table); USE_OP(scale); USE_NO_KERNEL_OP(identity); +USE_NO_KERNEL_OP(fc); USE_OP(minus); USE_CPU_ONLY_OP(gather); USE_CPU_ONLY_OP(scatter); diff --git a/python/paddle/v2/framework/tests/CMakeLists.txt b/python/paddle/v2/framework/tests/CMakeLists.txt index 661ebd8964..807ca2961e 100644 --- a/python/paddle/v2/framework/tests/CMakeLists.txt +++ b/python/paddle/v2/framework/tests/CMakeLists.txt @@ -16,6 +16,7 @@ py_test(test_cross_entropy_op SRCS test_cross_entropy_op.py) py_test(test_gather_op SRCS test_gather_op.py) py_test(test_scatter_op SRCS test_scatter_op.py) py_test(test_fill_zeros_like_op SRCS test_fill_zeros_like_op.py) +py_test(test_fc_op SRCS test_fc_op.py) py_test(gradient_checker SRCS gradient_checker.py) diff --git a/python/paddle/v2/framework/tests/test_fc_op.py b/python/paddle/v2/framework/tests/test_fc_op.py new file mode 100644 index 0000000000..bc469a5f47 --- /dev/null +++ b/python/paddle/v2/framework/tests/test_fc_op.py @@ -0,0 +1,30 @@ +import unittest +import numpy as np +from gradient_checker import GradientChecker, create_op +from op_test_util import OpTestMeta + + +class TestFCOp(unittest.TestCase): + __metaclass__ = OpTestMeta + + def setUp(self): + self.type = "fc" + self.inputs = { + "X": np.random.random((32, 784)).astype("float32"), + "W": np.random.random((784, 1000)).astype("float32"), + "b": np.random.random(1000).astype("float32") + } + self.attrs = {"activation": "sigmoid"} + mul_out = np.dot(self.inputs["X"], self.inputs["W"]) + add_out = np.add(mul_out, self.inputs["b"]) + sigmoid_out = 1 / (1 + np.exp(-add_out)) + self.outputs = {"mul_out": add_out, "Y": sigmoid_out} + + +class TestFCGradOp(GradientChecker): + def test_normal(self): + print "nothing" + + +if __name__ == '__main__': + unittest.main() From 16fddf32a54d19913c372f4ab59c98ee507fd6ff Mon Sep 17 00:00:00 2001 From: Xinghai Sun Date: Sun, 3 Sep 2017 17:51:25 +0800 Subject: [PATCH 010/115] Add broadcasting support (e.g. matrix-vector) for cos sim operator. --- paddle/operators/cos_sim_op.cc | 81 +++++++--- paddle/operators/cos_sim_op.h | 142 +++++++++++------- .../v2/framework/tests/test_cos_sim_op.py | 93 +++++++++++- 3 files changed, 238 insertions(+), 78 deletions(-) diff --git a/paddle/operators/cos_sim_op.cc b/paddle/operators/cos_sim_op.cc index c033af3b74..428ee7d9d0 100644 --- a/paddle/operators/cos_sim_op.cc +++ b/paddle/operators/cos_sim_op.cc @@ -25,16 +25,29 @@ class CosSimOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { + // notnull check PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) must not be null."); PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"), "Input(Y) must not be null."); - PADDLE_ENFORCE_EQ(ctx.Input("X")->dims(), - ctx.Input("Y")->dims(), - "Dimensions of Input(X) and Input(Y) must be the same."); - - auto dims = ctx.Input("X")->dims(); - ctx.Output("Out")->Resize({dims[0], 1}); - ctx.Output("XNorm")->Resize({dims[0], 1}); - ctx.Output("YNorm")->Resize({dims[0], 1}); + + // shape check + auto x_dims = ctx.Input("X")->dims(); + auto y_dims = ctx.Input("Y")->dims(); + PADDLE_ENFORCE_EQ(framework::arity(x_dims), framework::arity(y_dims), + "Ranks of Input(X) and Input(Y) must be equal."); + PADDLE_ENFORCE_GE(framework::arity(x_dims), 2, + "Rank of Input(X) must not be less than 2."); + PADDLE_ENFORCE_EQ( + framework::slice_ddim(x_dims, 1, framework::arity(x_dims)), + framework::slice_ddim(y_dims, 1, framework::arity(y_dims)), + "All dimensions except 1st of Input(X) and Input(Y) must be equal."); + PADDLE_ENFORCE(x_dims[0] == y_dims[0] || y_dims[0] == 1, + "1st dimension of Input(Y) must be equal to Input(X) or " + "just 1 (which will be broadcasted to match Input(X))."); + + // resize tensor + ctx.Output("Out")->Resize({x_dims[0], 1}); + ctx.Output("XNorm")->Resize({x_dims[0], 1}); + ctx.Output("YNorm")->Resize({y_dims[0], 1}); } }; @@ -42,8 +55,8 @@ class CosSimOpMaker : public framework::OpProtoAndCheckerMaker { public: CosSimOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "The first input of cos_sim op."); - AddInput("Y", "The second input of cos_sim op."); + AddInput("X", "The 1st input of cos_sim op."); + AddInput("Y", "The 2nd input of cos_sim op."); AddOutput("Out", "The output of cos_sim op."); AddOutput("XNorm", "Row norm of the first input.").AsIntermediate(); AddOutput("YNorm", "Row norm of the second input.").AsIntermediate(); @@ -51,7 +64,12 @@ class CosSimOpMaker : public framework::OpProtoAndCheckerMaker { AddComment(R"DOC( Cosine Similarity Operator. -The equation is: Out = X^T * Y / (sqrt(X^T * X) * sqrt(Y^T * Y)) +The equation is: Out = X^T * Y / (sqrt(X^T * X) * sqrt(Y^T * Y)). + +Input(X) and Input(Y) must have the same shape, except that the 1st dimension +of Input(Y) could be just 1 (different from Input(X)), which will be +broadcasted to match the shape of Input(X) before computing their cosine +similarity. )DOC"); } }; @@ -62,32 +80,47 @@ class CosSimOpGrad : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { + // notnull check PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) must not be null."); PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"), "Input(Y) must not be null."); PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("XNorm"), "Input(XNorm) must not be null."); PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("YNorm"), "Input(YNorm) must not be null."); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Out"), + "Input(Out) must not be null."); PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")), "Input(Out@GRAD) must not be null."); + // shape check auto x_dims = ctx.Input("X")->dims(); auto y_dims = ctx.Input("Y")->dims(); + PADDLE_ENFORCE_GE(framework::arity(x_dims), framework::arity(y_dims), + "Ranks of Input(X) and Input(Y) must be equal."); + PADDLE_ENFORCE_GE(framework::arity(x_dims), 2, + "Rank of Input(X) must not be less than 2."); + PADDLE_ENFORCE_EQ( + framework::slice_ddim(x_dims, 1, framework::arity(x_dims)), + framework::slice_ddim(y_dims, 1, framework::arity(y_dims)), + "All dimensions except 1st of Input(X) and Input(Y) must be equal."); + PADDLE_ENFORCE(x_dims[0] == y_dims[0] || y_dims[0] == 1, + "1st dimension of Input(Y) must be equal to Input(X) or " + "just 1 (which will be broadcasted to match Input(X))."); auto xnorm_dims = ctx.Input("XNorm")->dims(); + PADDLE_ENFORCE_EQ(xnorm_dims, framework::make_ddim({x_dims[0], 1}), + "Shape of Input(XNorm) must be [X.Dim(0), 1]."); auto ynorm_dims = ctx.Input("YNorm")->dims(); - auto out_dims = ctx.Input(framework::GradVarName("Out"))->dims(); - PADDLE_ENFORCE_EQ(x_dims, y_dims, - "Dimensions of Input(X) and Input(Y) must be the same."); - PADDLE_ENFORCE_EQ(xnorm_dims[0], x_dims[0], - "1st dimension of XNorm must equal that of Input(X)."); - PADDLE_ENFORCE_EQ(xnorm_dims[1], 1, "2st dimension of XNorm must be one."); - PADDLE_ENFORCE_EQ(ynorm_dims[0], y_dims[0], - "1st dimension of YNorm must equal that of Input(Y)."); - PADDLE_ENFORCE_EQ(ynorm_dims[1], 1, "2st dimension of YNorm must be one."); - PADDLE_ENFORCE_EQ(out_dims[0], x_dims[0], - "1st dimension of Out@GRAD must equal that of Input(X)"); - PADDLE_ENFORCE_EQ(out_dims[1], 1, "1st dimension of Out@GRAD must be one."); - + PADDLE_ENFORCE_EQ(ynorm_dims, framework::make_ddim({y_dims[0], 1}), + "Shape of Input(YNorm) must be [Y.Dim(0), 1]."); + auto out_dims = ctx.Input("Out")->dims(); + PADDLE_ENFORCE_EQ(out_dims, framework::make_ddim({x_dims[0], 1}), + "Shape of Input(Out) must be [X.Dim(0), 1]."); + auto out_grad_dims = + ctx.Input(framework::GradVarName("Out"))->dims(); + PADDLE_ENFORCE_EQ(out_grad_dims, framework::make_ddim({x_dims[0], 1}), + "Shape of Input(Out@Grad) must be [X.Dim(0), 1]."); + + // resize tensor auto *x_grad = ctx.Output(framework::GradVarName("X")); auto *y_grad = ctx.Output(framework::GradVarName("Y")); if (x_grad) x_grad->Resize(x_dims); diff --git a/paddle/operators/cos_sim_op.h b/paddle/operators/cos_sim_op.h index 9e3ff26815..62298ccbce 100644 --- a/paddle/operators/cos_sim_op.h +++ b/paddle/operators/cos_sim_op.h @@ -28,30 +28,38 @@ template class CosSimKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { - auto* input_x = context.Input("X"); - auto* input_y = context.Input("Y"); - auto* output_z = context.Output("Out"); - auto* output_x_norm = context.Output("XNorm"); - auto* output_y_norm = context.Output("YNorm"); + // get Tensor + auto* in_x = context.Input("X"); + auto* in_y = context.Input("Y"); + auto* out_z = context.Output("Out"); + auto* out_x_norm = context.Output("XNorm"); + auto* out_y_norm = context.Output("YNorm"); + out_z->mutable_data(context.GetPlace()); + out_x_norm->mutable_data(context.GetPlace()); + out_y_norm->mutable_data(context.GetPlace()); - output_z->mutable_data(context.GetPlace()); - output_x_norm->mutable_data(context.GetPlace()); - output_y_norm->mutable_data(context.GetPlace()); - - auto dims = input_x->dims(); - int size = static_cast(framework::product(dims)); - auto new_dims = framework::make_ddim({dims[0], size / dims[0]}); - auto x = EigenMatrix::From(*input_x, new_dims); - auto y = EigenMatrix::From(*input_y, new_dims); - auto z = EigenMatrix::From(*output_z); - auto x_norm = EigenMatrix::From(*output_x_norm); - auto y_norm = EigenMatrix::From(*output_y_norm); + // convert Tensor to Eigen Tensor + int rows_x = in_x->dims()[0]; + int rows_y = in_y->dims()[0]; + int cols = framework::product(in_x->dims()) / rows_x; + auto x = EigenMatrix::From(*in_x, framework::make_ddim({rows_x, cols})); + auto y = EigenMatrix::From(*in_y, framework::make_ddim({rows_y, cols})); + auto z = EigenMatrix::From(*out_z); + auto x_norm = EigenMatrix::From(*out_x_norm); + auto y_norm = EigenMatrix::From(*out_y_norm); + // compute auto place = context.GetEigenDevice(); - auto xy = (x * y).sum(Eigen::array({1})); x_norm.device(place) = x.square().sum(Eigen::array({1})).sqrt(); y_norm.device(place) = y.square().sum(Eigen::array({1})).sqrt(); - z.device(place) = xy / x_norm / y_norm; + if (rows_x == rows_y) { + auto xy = (x * y).sum(Eigen::array({1})); + z.device(place) = xy / x_norm / y_norm; + } else { + Eigen::DSizes bcast(rows_x, 1); + auto xy = (x * y.broadcast(bcast)).sum(Eigen::array({1})); + z.device(place) = xy / x_norm / y_norm.broadcast(bcast); + } } }; @@ -59,43 +67,75 @@ template class CosSimGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { - auto* input_x = context.Input("X"); - auto* input_y = context.Input("Y"); - auto* input_z = context.Input("Out"); - auto* input_x_norm = context.Input("XNorm"); - auto* input_y_norm = context.Input("YNorm"); - auto* output_grad_x = context.Output(framework::GradVarName("X")); - auto* output_grad_y = context.Output(framework::GradVarName("Y")); - auto* input_grad_z = context.Input(framework::GradVarName("Out")); + // get Tensor + auto* in_x = context.Input("X"); + auto* in_y = context.Input("Y"); + auto* in_z = context.Input("Out"); + auto* in_x_norm = context.Input("XNorm"); + auto* in_y_norm = context.Input("YNorm"); + auto* out_grad_x = context.Output(framework::GradVarName("X")); + auto* out_grad_y = context.Output(framework::GradVarName("Y")); + auto* in_grad_z = context.Input(framework::GradVarName("Out")); - auto dims = input_x->dims(); - int size = static_cast(framework::product(dims)); - auto new_dims = framework::make_ddim({dims[0], size / dims[0]}); - auto x = EigenMatrix::From(*input_x, new_dims); - auto y = EigenMatrix::From(*input_y, new_dims); - auto z = EigenMatrix::From(*input_z); - auto x_norm = EigenMatrix::From(*input_x_norm); - auto y_norm = EigenMatrix::From(*input_y_norm); - auto dz = EigenMatrix::From(*input_grad_z); + // convert Tensor to Eigen Tensor + int rows_x = in_x->dims()[0]; + int rows_y = in_y->dims()[0]; + int cols = framework::product(in_x->dims()) / rows_x; + auto x = EigenMatrix::From(*in_x, framework::make_ddim({rows_x, cols})); + auto y = EigenMatrix::From(*in_y, framework::make_ddim({rows_y, cols})); + auto z = EigenMatrix::From(*in_z); + auto x_norm = EigenMatrix::From(*in_x_norm); + auto y_norm = EigenMatrix::From(*in_y_norm); + auto dz = EigenMatrix::From(*in_grad_z); - Eigen::DSizes bcast(1, new_dims[1]); + // compute gradident + Eigen::DSizes bcast(1, cols); auto z_bcast = z.broadcast(bcast); auto dz_bcast = dz.broadcast(bcast); - auto place = context.GetEigenDevice(); auto x_snorm_bcast = x_norm.square().eval().broadcast(bcast); - auto y_snorm_bcast = y_norm.square().eval().broadcast(bcast); - auto norm_prod_bcast = (x_norm * y_norm).eval().broadcast(bcast); - if (output_grad_x) { - output_grad_x->mutable_data(context.GetPlace()); - auto dx = EigenMatrix::From(*output_grad_x, new_dims); - dx.device(place) = - dz_bcast * (y / norm_prod_bcast - z_bcast * x / x_snorm_bcast); - } - if (output_grad_y) { - output_grad_y->mutable_data(context.GetPlace()); - auto dy = EigenMatrix::From(*output_grad_y, new_dims); - dy.device(place) = - dz_bcast * (x / norm_prod_bcast - z_bcast * y / y_snorm_bcast); + auto place = context.GetEigenDevice(); + if (rows_x == rows_y) { + auto y_snorm_bcast = y_norm.square().eval().broadcast(bcast); + auto norm_prod_bcast = (x_norm * y_norm).eval().broadcast(bcast); + // compute dx + if (out_grad_x) { + out_grad_x->mutable_data(context.GetPlace()); + auto dx = EigenMatrix::From(*out_grad_x, + framework::make_ddim({rows_x, cols})); + auto grad = y / norm_prod_bcast - z_bcast * x / x_snorm_bcast; + dx.device(place) = dz_bcast * grad; + } + // compute dy + if (out_grad_y) { + out_grad_y->mutable_data(context.GetPlace()); + auto dy = EigenMatrix::From(*out_grad_y, + framework::make_ddim({rows_y, cols})); + auto grad = x / norm_prod_bcast - z_bcast * y / y_snorm_bcast; + dy.device(place) = dz_bcast * grad; + } + } else { + Eigen::DSizes bcast_row(rows_x, 1); + auto y_bcast = y.broadcast(bcast_row); + auto y_snorm_bcast = + y_norm.square().eval().broadcast(bcast_row).eval().broadcast(bcast); + auto norm_prod_bcast = + (x_norm * y_norm.broadcast(bcast_row)).eval().broadcast(bcast); + // compute dx + if (out_grad_x) { + out_grad_x->mutable_data(context.GetPlace()); + auto dx = EigenMatrix::From( + *out_grad_x, framework::make_ddim({rows_x, cols})); + auto grad = y_bcast / norm_prod_bcast - z_bcast * x / x_snorm_bcast; + dx.device(place) = dz_bcast * grad; + } + // compute dy + if (out_grad_y) { + out_grad_y->mutable_data(context.GetPlace()); + auto dy = EigenMatrix::From( + *out_grad_y, framework::make_ddim({rows_y, cols})); + auto grad = x / norm_prod_bcast - z_bcast * y_bcast / y_snorm_bcast; + dy.device(place) = (dz_bcast * grad).sum(Eigen::array({0})); + } } } }; diff --git a/python/paddle/v2/framework/tests/test_cos_sim_op.py b/python/paddle/v2/framework/tests/test_cos_sim_op.py index 32013a7999..3f2feaa933 100644 --- a/python/paddle/v2/framework/tests/test_cos_sim_op.py +++ b/python/paddle/v2/framework/tests/test_cos_sim_op.py @@ -4,7 +4,7 @@ from gradient_checker import GradientChecker, create_op from op_test_util import OpTestMeta -class TestCosSimOp(unittest.TestCase): +class TestCosSimOpWithRank2(unittest.TestCase): __metaclass__ = OpTestMeta def setUp(self): @@ -24,12 +24,72 @@ class TestCosSimOp(unittest.TestCase): } +class TestCosSimOpWithRank2Bcast(unittest.TestCase): + __metaclass__ = OpTestMeta + + def setUp(self): + self.type = "cos_sim" + self.inputs = { + 'X': np.random.random((32, 64)).astype("float32"), + 'Y': np.random.random((1, 64)).astype("float32") + } + expect_x_norm = np.linalg.norm(self.inputs['X'], axis=1) + expect_y_norm = np.linalg.norm(self.inputs['Y'], axis=1) + expect_out = (self.inputs['X'] * self.inputs['Y']).sum(axis=1) / \ + expect_x_norm / expect_y_norm + self.outputs = { + 'XNorm': np.expand_dims(expect_x_norm, 1), + 'YNorm': np.expand_dims(expect_y_norm, 1), + 'Out': np.expand_dims(expect_out, 1) + } + + +class TestCosSimOpWithRank3(unittest.TestCase): + __metaclass__ = OpTestMeta + + def setUp(self): + self.type = "cos_sim" + self.inputs = { + 'X': np.random.random((32, 64, 10)).astype("float32"), + 'Y': np.random.random((32, 64, 10)).astype("float32") + } + expect_x_norm = np.linalg.norm(self.inputs['X'], axis=(1, 2)) + expect_y_norm = np.linalg.norm(self.inputs['Y'], axis=(1, 2)) + expect_out = (self.inputs['X'] * self.inputs['Y']).sum(axis=(1, 2)) / \ + expect_x_norm / expect_y_norm + self.outputs = { + 'XNorm': np.expand_dims(expect_x_norm, 1), + 'YNorm': np.expand_dims(expect_y_norm, 1), + 'Out': np.expand_dims(expect_out, 1) + } + + +class TestCosSimOpWithRank3Bcast(unittest.TestCase): + __metaclass__ = OpTestMeta + + def setUp(self): + self.type = "cos_sim" + self.inputs = { + 'X': np.random.random((32, 64, 10)).astype("float32"), + 'Y': np.random.random((1, 64, 10)).astype("float32") + } + expect_x_norm = np.linalg.norm(self.inputs['X'], axis=(1, 2)) + expect_y_norm = np.linalg.norm(self.inputs['Y'], axis=(1, 2)) + expect_out = (self.inputs['X'] * self.inputs['Y']).sum(axis=(1, 2)) / \ + expect_x_norm / expect_y_norm + self.outputs = { + 'XNorm': np.expand_dims(expect_x_norm, 1), + 'YNorm': np.expand_dims(expect_y_norm, 1), + 'Out': np.expand_dims(expect_out, 1) + } + + class TestCosSimGradOp(GradientChecker): def setUp(self): self.op = create_op("cos_sim") self.inputs = { - 'X': np.random.random((10, 5)).astype("float32"), - 'Y': np.random.random((10, 5)).astype("float32") + 'X': np.random.random((6, 5)).astype("float32"), + 'Y': np.random.random((6, 5)).astype("float32") } def test_cpu_gpu_compare(self): @@ -56,5 +116,32 @@ class TestCosSimGradOp(GradientChecker): no_grad_set={"Y"}) +class TestCosSimGradOpWithRank2Bcast(TestCosSimGradOp): + def setUp(self): + self.op = create_op("cos_sim") + self.inputs = { + 'X': np.random.random((6, 5)).astype("float32"), + 'Y': np.random.random((1, 5)).astype("float32") + } + + +class TestCosSimGradOpWithRank3(TestCosSimGradOp): + def setUp(self): + self.op = create_op("cos_sim") + self.inputs = { + 'X': np.random.random((6, 5, 2)).astype("float32"), + 'Y': np.random.random((6, 5, 2)).astype("float32") + } + + +class TestCosSimGradOpWithRank3Bcast(TestCosSimGradOp): + def setUp(self): + self.op = create_op("cos_sim") + self.inputs = { + 'X': np.random.random((6, 5, 2)).astype("float32"), + 'Y': np.random.random((1, 5, 2)).astype("float32") + } + + if __name__ == '__main__': unittest.main() From 12eaa22ad2d099717e6ddf2da856b67b6d887510 Mon Sep 17 00:00:00 2001 From: Yibing Liu Date: Wed, 6 Sep 2017 21:25:58 -0700 Subject: [PATCH 011/115] add reshape operator --- paddle/operators/reshape_op.cc | 84 +++++++++++++++++++ paddle/operators/reshape_op.cu | 22 +++++ paddle/operators/reshape_op.h | 60 +++++++++++++ paddle/pybind/pybind.cc | 1 + .../paddle/v2/framework/tests/CMakeLists.txt | 1 + .../v2/framework/tests/test_reshape_op.py | 28 +++++++ 6 files changed, 196 insertions(+) create mode 100644 paddle/operators/reshape_op.cc create mode 100644 paddle/operators/reshape_op.cu create mode 100644 paddle/operators/reshape_op.h create mode 100644 python/paddle/v2/framework/tests/test_reshape_op.py diff --git a/paddle/operators/reshape_op.cc b/paddle/operators/reshape_op.cc new file mode 100644 index 0000000000..1b073a79bc --- /dev/null +++ b/paddle/operators/reshape_op.cc @@ -0,0 +1,84 @@ + +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#include "paddle/operators/reshape_op.h" + +namespace paddle { +namespace operators { + +class ReshapeOp : public framework::OperatorWithKernel { + public: + ReshapeOp(const std::string &type, const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : OperatorWithKernel(type, inputs, outputs, attrs) {} + + protected: + void InferShape(const framework::InferShapeContext &ctx) const override { + auto *in = ctx.Input("X"); + auto shape = ctx.Attr>("shape"); + PADDLE_ENFORCE_EQ((unsigned)shape.size(), in->dims().size(), + "The dimension of Input(X) mismatches with Attr(shape)."); + size_t shape_size = 1; + for (auto dim : shape) { + shape_size *= dim; + } + size_t in_size = framework::product(in->dims()); + PADDLE_ENFORCE_EQ(shape_size, in_size, + "The size of Input(X) mismatches with Attr(shape)."); + } +}; + +class ReshapeOpMaker : public framework::OpProtoAndCheckerMaker { + public: + ReshapeOpMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "The input tensor of reshape operator."); + AddOutput("Out", "The output tensor of reshape operator."); + AddAttr>("shape", "Target shape of reshape operator."); + AddComment(R"DOC(Reshape operator + +The input tensor will be reshaped with Attr(shape). +)DOC"); + } +}; + +class ReshapeGradOp : public framework::OperatorWithKernel { + public: + ReshapeGradOp(const std::string &type, + const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : OperatorWithKernel(type, inputs, outputs, attrs) {} + + protected: + void InferShape(const framework::InferShapeContext &ctx) const override { + auto dims = ctx.Input("X")->dims(); + auto *d_in = ctx.Output(framework::GradVarName("X")); + d_in->Resize(dims); + } +}; + +} // namespace operators +} // namespace paddle +namespace ops = paddle::operators; + +REGISTER_OP(reshape, ops::ReshapeOp, ops::ReshapeOpMaker, reshape_grad, + ops::ReshapeGradOp); +REGISTER_OP_CPU_KERNEL(reshape, + ops::ReshapeKernel); +REGISTER_OP_CPU_KERNEL( + reshape_grad, ops::ReshapeGradKernel); diff --git a/paddle/operators/reshape_op.cu b/paddle/operators/reshape_op.cu new file mode 100644 index 0000000000..23dbe089d3 --- /dev/null +++ b/paddle/operators/reshape_op.cu @@ -0,0 +1,22 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#include "paddle/operators/reshape_op.h" + +REGISTER_OP_GPU_KERNEL( + reshape, + paddle::operators::ReshapeKernel); +REGISTER_OP_GPU_KERNEL( + reshape_grad, + paddle::operators::ReshapeGradKernel); diff --git a/paddle/operators/reshape_op.h b/paddle/operators/reshape_op.h new file mode 100644 index 0000000000..22ede88b12 --- /dev/null +++ b/paddle/operators/reshape_op.h @@ -0,0 +1,60 @@ + +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#pragma once + +#include "paddle/framework/eigen.h" +#include "paddle/framework/op_registry.h" + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; + +template +class ReshapeKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const { + auto* out = ctx.Output("Out"); + auto* in = ctx.Input("X"); + out->mutable_data(in->place()); + + auto shape = ctx.Attr>("shape"); + std::vector tmp; + for (auto dim : shape) { + tmp.push_back(dim); + } + auto out_dims = framework::make_ddim(tmp); + out->CopyFrom(*in, ctx.GetPlace()); + out->Resize(out_dims); + } +}; + +template +class ReshapeGradKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const { + auto* d_out = ctx.Input(framework::GradVarName("Out")); + auto* d_x = ctx.Output(framework::GradVarName("X")); + d_x->mutable_data(ctx.GetPlace()); + + auto in_dims = d_x->dims(); + + d_x->CopyFrom(*d_out, ctx.GetPlace()); + d_x->Resize(in_dims); + } +}; +} +} diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc index c21ad3470b..bf1a321c3f 100644 --- a/paddle/pybind/pybind.cc +++ b/paddle/pybind/pybind.cc @@ -50,6 +50,7 @@ USE_OP(cos_sim); USE_CPU_ONLY_OP(gather); USE_CPU_ONLY_OP(scatter); USE_OP(squared_l2_distance); +USE_OP(reshape); namespace paddle { namespace framework { diff --git a/python/paddle/v2/framework/tests/CMakeLists.txt b/python/paddle/v2/framework/tests/CMakeLists.txt index a9c33ea163..9d41b84e57 100644 --- a/python/paddle/v2/framework/tests/CMakeLists.txt +++ b/python/paddle/v2/framework/tests/CMakeLists.txt @@ -34,3 +34,4 @@ py_test(test_lookup_table SRCS test_lookup_table.py) py_test(test_scale_and_identity_op SRCS test_scale_and_identity_op.py) py_test(mnist SRCS mnist.py) py_test(test_squared_l2_distance_op SRCS test_squared_l2_distance_op.py) +py_test(test_reshape_op SRCS test_reshape_op.py) diff --git a/python/paddle/v2/framework/tests/test_reshape_op.py b/python/paddle/v2/framework/tests/test_reshape_op.py new file mode 100644 index 0000000000..c101b0df9a --- /dev/null +++ b/python/paddle/v2/framework/tests/test_reshape_op.py @@ -0,0 +1,28 @@ +import unittest +import numpy as np +from gradient_checker import GradientChecker, create_op +from op_test_util import OpTestMeta + + +class TestReshapeOp(unittest.TestCase): + __metaclass__ = OpTestMeta + + def setUp(self): + self.type = "reshape" + self.inputs = {'X': np.random.random((2, 4)).astype("float32"), } + print self.inputs + self.attrs = {'shape': [4, 2]} + self.outputs = {'Out': self.inputs['X'].reshape(self.attrs['shape'])} + print self.outputs + + +class ReshapeGradOpTest(GradientChecker): + def test_normal(self): + op = create_op("reshape") + inputs = {"X": np.random.random((2, 4)).astype("float32")} + attrs = {'shape': [4, 2]} + self.check_grad(op, inputs, attrs, set("X"), "Out") + + +if __name__ == '__main__': + unittest.main() From 734a9eeaa464510de1374c196e40325efd2f8edb Mon Sep 17 00:00:00 2001 From: Liu Yiqun Date: Thu, 7 Sep 2017 09:29:23 +0000 Subject: [PATCH 012/115] Correct the definition of Operator in TestFCGradOp, and rename the output name of identity to Y. --- paddle/operators/fc_op.cc | 24 +++++++++---------- paddle/operators/identity_op.cc | 4 ++-- paddle/operators/minus_op.cc | 2 +- .../paddle/v2/framework/tests/CMakeLists.txt | 1 + .../v2/framework/tests/gradient_checker.py | 4 ---- .../paddle/v2/framework/tests/test_fc_op.py | 17 +++++++++---- .../v2/framework/tests/test_minus_op.py | 4 ++-- .../tests/test_scale_and_identity_op.py | 4 ++-- 8 files changed, 32 insertions(+), 28 deletions(-) diff --git a/paddle/operators/fc_op.cc b/paddle/operators/fc_op.cc index 60bf6e9dae..40b5128bff 100644 --- a/paddle/operators/fc_op.cc +++ b/paddle/operators/fc_op.cc @@ -24,30 +24,30 @@ class FCOp : public NetOp { const framework::VariableNameMap &outputs, const framework::AttributeMap &attrs) : NetOp(type, inputs, outputs, attrs) { + // mul_out = X * W AppendOp(framework::OpRegistry::CreateOp( "mul", {{"X", {Input("X")}}, {"Y", {Input("W")}}}, {{"Out", {Output("mul_out")}}}, {})); + + std::string add_out_name = "mul_out"; auto b = Input("b"); if (b != framework::kEmptyVarName) { + // add_out = mul_out + b AppendOp(framework::OpRegistry::CreateOp( "rowwise_add", {{"X", {Output("mul_out")}}, {"b", {Input("b")}}}, {{"Out", {Output("add_out")}}}, {})); + add_out_name = "add_out"; } else { - AppendOp(framework::OpRegistry::CreateOp( - "identity", {{"X", {Output("mul_out")}}}, - {{"Out", {Output("add_out")}}}, {})); + auto add_out = Output("add_out"); + if (add_out != framework::kEmptyVarName) { + this->Rename(add_out, framework::kEmptyVarName); + } } auto activation = GetAttr("activation"); - if (activation == "identity") { - AppendOp(framework::OpRegistry::CreateOp(activation, - {{"X", {Output("add_out")}}}, - {{"Out", {Output("Out")}}}, {})); - } else { - AppendOp(framework::OpRegistry::CreateOp(activation, - {{"X", {Output("add_out")}}}, - {{"Y", {Output("Out")}}}, {})); - } + AppendOp(framework::OpRegistry::CreateOp(activation, + {{"X", {Output(add_out_name)}}}, + {{"Y", {Output("Out")}}}, {})); CompleteAddOp(false); } }; diff --git a/paddle/operators/identity_op.cc b/paddle/operators/identity_op.cc index b67240fb9f..b9f0a450fd 100644 --- a/paddle/operators/identity_op.cc +++ b/paddle/operators/identity_op.cc @@ -27,7 +27,7 @@ class IdentityOpMaker : public framework::OpProtoAndCheckerMaker { framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "input tensor of identity op"); - AddOutput("Out", "output tensor of identity op"); + AddOutput("Y", "output tensor of identity op"); AddComment("identity operator. Just a alias of scale op which scale = 1.0"); } }; @@ -40,7 +40,7 @@ class IdentityOp : public NetOp { const framework::AttributeMap &attrs) : NetOp(type, inputs, outputs, attrs) { AppendOp(framework::OpRegistry::CreateOp( - "scale", {{"X", {Input("X")}}}, {{"Out", {Output("Out")}}}, + "scale", {{"X", {Input("X")}}}, {{"Out", {Output("Y")}}}, {{"scale", static_cast(1)}})); CompleteAddOp(false); } diff --git a/paddle/operators/minus_op.cc b/paddle/operators/minus_op.cc index 069fb5e1ab..1d7276a194 100644 --- a/paddle/operators/minus_op.cc +++ b/paddle/operators/minus_op.cc @@ -65,7 +65,7 @@ class MinusGradOp : public NetOp { // x_grad = out_grad AppendOp(framework::OpRegistry::CreateOp("identity", {{"X", {out_grad}}}, - {{"Out", {x_grad}}}, {})); + {{"Y", {x_grad}}}, {})); framework::AttributeMap scale_attr; scale_attr["scale"] = static_cast(-1); diff --git a/python/paddle/v2/framework/tests/CMakeLists.txt b/python/paddle/v2/framework/tests/CMakeLists.txt index f9c787a446..60ee996e4a 100644 --- a/python/paddle/v2/framework/tests/CMakeLists.txt +++ b/python/paddle/v2/framework/tests/CMakeLists.txt @@ -18,6 +18,7 @@ py_test(test_gather_op SRCS test_gather_op.py) py_test(test_scatter_op SRCS test_scatter_op.py) py_test(test_fill_zeros_like_op SRCS test_fill_zeros_like_op.py) py_test(test_fc_op SRCS test_fc_op.py) +py_test(test_minus_op SRCS test_minus_op.py) py_test(gradient_checker SRCS gradient_checker.py) diff --git a/python/paddle/v2/framework/tests/gradient_checker.py b/python/paddle/v2/framework/tests/gradient_checker.py index fdb06b7988..0607275a4e 100644 --- a/python/paddle/v2/framework/tests/gradient_checker.py +++ b/python/paddle/v2/framework/tests/gradient_checker.py @@ -277,10 +277,6 @@ class GradientChecker(unittest.TestCase): if no_grad_set is None: no_grad_set = set() - no_tmp_out = forward_op.no_intermediate_outputs() - if len(no_tmp_out) != 1: - raise ValueError("non temp out_names should be 1") - inputs = forward_op.inputs() in_names = [item for k in inputs for item in inputs[k]] for no_grad in no_grad_set: diff --git a/python/paddle/v2/framework/tests/test_fc_op.py b/python/paddle/v2/framework/tests/test_fc_op.py index 140442db96..76b68ad614 100644 --- a/python/paddle/v2/framework/tests/test_fc_op.py +++ b/python/paddle/v2/framework/tests/test_fc_op.py @@ -29,13 +29,20 @@ class TestFCOp(unittest.TestCase): class TestFCGradOp(GradientChecker): def test_normal(self): self.inputs = { - "X": np.random.random((4, 4)).astype("float32"), - "W": np.random.random((4, 4)).astype("float32"), - "b": np.random.random(4).astype("float32") + "X": np.random.random((32, 256)).astype("float32"), + "W": np.random.random((256, 100)).astype("float32"), + "b": np.random.random(100).astype("float32") } op = Operator( - "fc", X="X", W="W", b="b", Out="Out", activation="sigmoid") - #self.check_grad(op, self.inputs, ["X", "W", "b"], "Out") + "fc", + X="X", + W="W", + b="b", + Out="Out", + mul_out="mul_out", + add_out="add_out", + activation="sigmoid") + self.check_grad(op, self.inputs, ["X", "W", "b"], "Out") if __name__ == '__main__': diff --git a/python/paddle/v2/framework/tests/test_minus_op.py b/python/paddle/v2/framework/tests/test_minus_op.py index 5abdd4a69b..aa05d87baa 100644 --- a/python/paddle/v2/framework/tests/test_minus_op.py +++ b/python/paddle/v2/framework/tests/test_minus_op.py @@ -4,7 +4,7 @@ from gradient_checker import GradientChecker, create_op from op_test_util import OpTestMeta -class MinusOpTest(unittest.TestCase): +class TestMinusOp(unittest.TestCase): __metaclass__ = OpTestMeta def setUp(self): @@ -16,7 +16,7 @@ class MinusOpTest(unittest.TestCase): self.outputs = {'Out': (self.inputs['X'] - self.inputs['Y'])} -class MinusGradTest(GradientChecker): +class TestMinusGrad(GradientChecker): def test_left(self): op = create_op("minus") inputs = { diff --git a/python/paddle/v2/framework/tests/test_scale_and_identity_op.py b/python/paddle/v2/framework/tests/test_scale_and_identity_op.py index 69b301c376..4c1d484991 100644 --- a/python/paddle/v2/framework/tests/test_scale_and_identity_op.py +++ b/python/paddle/v2/framework/tests/test_scale_and_identity_op.py @@ -11,14 +11,14 @@ class IdentityTest(unittest.TestCase): def setUp(self): self.type = "identity" self.inputs = {'X': np.random.random((32, 784)).astype("float32")} - self.outputs = {'Out': self.inputs['X']} + self.outputs = {'Y': self.inputs['X']} class IdentityGradOpTest(GradientChecker): def test_normal(self): op = create_op("identity") inputs = {"X": np.random.random((10, 10)).astype("float32")} - self.check_grad(op, inputs, set("X"), "Out") + self.check_grad(op, inputs, set("X"), "Y") class ScaleTest(unittest.TestCase): From 899c7d6b353c04565ebaa46d85de57348631f2e1 Mon Sep 17 00:00:00 2001 From: Yibing Liu Date: Thu, 7 Sep 2017 04:16:32 -0700 Subject: [PATCH 013/115] pass unit test --- paddle/operators/reshape_op.cc | 3 ++- paddle/operators/reshape_op.h | 7 +++---- .../paddle/v2/framework/tests/test_reshape_op.py | 15 ++++++--------- 3 files changed, 11 insertions(+), 14 deletions(-) diff --git a/paddle/operators/reshape_op.cc b/paddle/operators/reshape_op.cc index 1b073a79bc..d75ec76632 100644 --- a/paddle/operators/reshape_op.cc +++ b/paddle/operators/reshape_op.cc @@ -38,6 +38,7 @@ class ReshapeOp : public framework::OperatorWithKernel { size_t in_size = framework::product(in->dims()); PADDLE_ENFORCE_EQ(shape_size, in_size, "The size of Input(X) mismatches with Attr(shape)."); + ctx.Output("Out")->Resize(in->dims()); } }; @@ -51,7 +52,7 @@ class ReshapeOpMaker : public framework::OpProtoAndCheckerMaker { AddAttr>("shape", "Target shape of reshape operator."); AddComment(R"DOC(Reshape operator -The input tensor will be reshaped with Attr(shape). +Reshape Input(X) into the shape specified by Attr(shape). )DOC"); } }; diff --git a/paddle/operators/reshape_op.h b/paddle/operators/reshape_op.h index 22ede88b12..61d502c836 100644 --- a/paddle/operators/reshape_op.h +++ b/paddle/operators/reshape_op.h @@ -23,13 +23,13 @@ namespace operators { using Tensor = framework::Tensor; -template +template class ReshapeKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const { auto* out = ctx.Output("Out"); auto* in = ctx.Input("X"); - out->mutable_data(in->place()); + out->mutable_data(ctx.GetPlace()); auto shape = ctx.Attr>("shape"); std::vector tmp; @@ -42,7 +42,7 @@ class ReshapeKernel : public framework::OpKernel { } }; -template +template class ReshapeGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const { @@ -51,7 +51,6 @@ class ReshapeGradKernel : public framework::OpKernel { d_x->mutable_data(ctx.GetPlace()); auto in_dims = d_x->dims(); - d_x->CopyFrom(*d_out, ctx.GetPlace()); d_x->Resize(in_dims); } diff --git a/python/paddle/v2/framework/tests/test_reshape_op.py b/python/paddle/v2/framework/tests/test_reshape_op.py index c101b0df9a..4797019435 100644 --- a/python/paddle/v2/framework/tests/test_reshape_op.py +++ b/python/paddle/v2/framework/tests/test_reshape_op.py @@ -1,6 +1,6 @@ import unittest import numpy as np -from gradient_checker import GradientChecker, create_op +from gradient_checker import GradientChecker, Operator from op_test_util import OpTestMeta @@ -9,19 +9,16 @@ class TestReshapeOp(unittest.TestCase): def setUp(self): self.type = "reshape" - self.inputs = {'X': np.random.random((2, 4)).astype("float32"), } - print self.inputs - self.attrs = {'shape': [4, 2]} + self.inputs = {'X': np.random.random((37, 51)).astype("float32"), } + self.attrs = {'shape': [51, 37]} self.outputs = {'Out': self.inputs['X'].reshape(self.attrs['shape'])} - print self.outputs class ReshapeGradOpTest(GradientChecker): def test_normal(self): - op = create_op("reshape") - inputs = {"X": np.random.random((2, 4)).astype("float32")} - attrs = {'shape': [4, 2]} - self.check_grad(op, inputs, attrs, set("X"), "Out") + op = Operator("reshape", X='X', Out='Out', shape=[5, 40]) + inputs = {"X": np.random.random((10, 20)).astype("float32")} + self.check_grad(op, inputs, set("X"), "Out") if __name__ == '__main__': From a2a69f2a54cd7588ede6846deac758e8e8dc6b6e Mon Sep 17 00:00:00 2001 From: qingqing01 Date: Thu, 7 Sep 2017 20:33:48 +0800 Subject: [PATCH 014/115] Add function to get element count from tensor. --- paddle/framework/tensor.h | 6 ++++++ paddle/framework/tensor_impl.h | 13 ++++++++----- paddle/operators/cos_sim_op.h | 4 ++-- paddle/operators/gaussian_random_op.cc | 2 +- paddle/operators/gaussian_random_op.cu | 4 ++-- paddle/operators/lookup_table_op.cu | 4 ++-- paddle/operators/lookup_table_op.h | 4 ++-- paddle/operators/mean_op.h | 5 ++--- paddle/operators/minus_op.cc | 3 +-- paddle/operators/squared_l2_distance_op.cc | 6 ++---- paddle/operators/squared_l2_distance_op.h | 4 ++-- paddle/operators/uniform_random_op.cc | 2 +- paddle/operators/uniform_random_op.cu | 4 ++-- 13 files changed, 33 insertions(+), 28 deletions(-) diff --git a/paddle/framework/tensor.h b/paddle/framework/tensor.h index 643f875491..fc54ed697f 100644 --- a/paddle/framework/tensor.h +++ b/paddle/framework/tensor.h @@ -78,6 +78,9 @@ class Tensor { /*! Return the dimensions of the memory block. */ inline const DDim& dims() const; + /*! Return the numel of the memory block. */ + inline int64_t numel() const; + /*! Resize the dimensions of the memory block. */ inline Tensor& Resize(const DDim& dims); @@ -159,6 +162,9 @@ class Tensor { /*! points to dimensions of memory block. */ DDim dims_; + /*! the element count of tensor. */ + int64_t numel_; + /** * @brief A PlaceHolder may be shared by more than one tensor. * diff --git a/paddle/framework/tensor_impl.h b/paddle/framework/tensor_impl.h index 94f436294f..03678784b4 100644 --- a/paddle/framework/tensor_impl.h +++ b/paddle/framework/tensor_impl.h @@ -24,7 +24,7 @@ inline void Tensor::check_memory_size() const { PADDLE_ENFORCE_NOT_NULL( holder_, "Tenosr holds no memory. Call Tensor::mutable_data first."); PADDLE_ENFORCE_GE( - holder_->size(), product(dims_) * sizeof(T) + offset_, + holder_->size(), numel_ * sizeof(T) + offset_, "Tensor's dims_ is out of bound. Call Tensor::mutable_data " "first to re-allocate memory.\n" "or maybe the required data-type mismatches the data already stored."); @@ -54,11 +54,11 @@ inline T* Tensor::mutable_data(DDim dims, platform::Place place) { template inline T* Tensor::mutable_data(platform::Place place) { static_assert(std::is_pod::value, "T must be POD"); - PADDLE_ENFORCE_GT(product(dims_), 0, + PADDLE_ENFORCE_GT(numel_, 0, "Tensor's numel must be larger than zero to call " "Tensor::mutable_data. Call Tensor::set_dim first."); /* some versions of boost::variant don't have operator!= */ - int64_t size = product(dims_) * sizeof(T); + int64_t size = numel_ * sizeof(T); if (holder_ == nullptr || !(holder_->place() == place) || holder_->size() < size + offset_) { if (platform::is_cpu_place(place)) { @@ -97,7 +97,7 @@ inline void Tensor::CopyFrom(const Tensor& src, auto dst_ptr = static_cast(mutable_data(dst_place)); - auto size = product(src.dims_) * sizeof(T); + auto size = src.numel() * sizeof(T); if (platform::is_cpu_place(src_place) && platform::is_cpu_place(dst_place)) { memory::Copy(boost::get(dst_place), dst_ptr, @@ -131,7 +131,7 @@ inline Tensor Tensor::Slice(const int& begin_idx, const int& end_idx) const { PADDLE_ENFORCE_LT(begin_idx, end_idx, "Begin index must be less than end index."); PADDLE_ENFORCE_NE(dims_[0], 1, "Can not slice a tensor with dims_[0] = 1."); - size_t base = product(dims_) / dims_[0]; + size_t base = numel_ / dims_[0]; Tensor dst; dst.holder_ = holder_; DDim dst_dims = dims_; @@ -143,10 +143,13 @@ inline Tensor Tensor::Slice(const int& begin_idx, const int& end_idx) const { inline Tensor& Tensor::Resize(const DDim& dims) { dims_ = dims; + numel_ = product(dims_); return *this; } inline const DDim& Tensor::dims() const { return dims_; } +inline int64_t Tensor::numel() const { return numel_; } + } // namespace framework } // namespace paddle diff --git a/paddle/operators/cos_sim_op.h b/paddle/operators/cos_sim_op.h index 9e2bcebe3b..0dc5099525 100644 --- a/paddle/operators/cos_sim_op.h +++ b/paddle/operators/cos_sim_op.h @@ -42,7 +42,7 @@ class CosSimKernel : public framework::OpKernel { output_y_norm->mutable_data(context.GetPlace()); auto dims = input_x->dims(); - int size = static_cast(framework::product(dims)); + int64_t size = input_x->numel(); auto new_dims = framework::make_ddim({dims[0], size / dims[0]}); auto x = EigenMatrix::From(*input_x, new_dims); auto y = EigenMatrix::From(*input_y, new_dims); @@ -72,7 +72,7 @@ class CosSimGradKernel : public framework::OpKernel { auto* input_grad_z = context.Input(framework::GradVarName("Out")); auto dims = input_x->dims(); - int size = static_cast(framework::product(dims)); + int64_t size = input_x->numel(); auto new_dims = framework::make_ddim({dims[0], size / dims[0]}); auto x = EigenMatrix::From(*input_x, new_dims); auto y = EigenMatrix::From(*input_y, new_dims); diff --git a/paddle/operators/gaussian_random_op.cc b/paddle/operators/gaussian_random_op.cc index 6574880c0e..3d76516405 100644 --- a/paddle/operators/gaussian_random_op.cc +++ b/paddle/operators/gaussian_random_op.cc @@ -31,7 +31,7 @@ class CPUGaussianRandomKernel : public framework::OpKernel { } engine.seed(seed); std::normal_distribution dist(mean, std); - int64_t size = framework::product(tensor->dims()); + int64_t size = tensor->numel(); for (int64_t i = 0; i < size; ++i) { data[i] = dist(engine); } diff --git a/paddle/operators/gaussian_random_op.cu b/paddle/operators/gaussian_random_op.cu index d9dbc1dcfe..2d63b30499 100644 --- a/paddle/operators/gaussian_random_op.cu +++ b/paddle/operators/gaussian_random_op.cu @@ -50,8 +50,8 @@ class GPUGaussianRandomKernel : public framework::OpKernel { T mean = static_cast(context.Attr("mean")); T std = static_cast(context.Attr("std")); thrust::counting_iterator index_sequence_begin(0); - ssize_t N = framework::product(tensor->dims()); - thrust::transform(index_sequence_begin, index_sequence_begin + N, + int64_t size = tensor->numel(); + thrust::transform(index_sequence_begin, index_sequence_begin + size, thrust::device_ptr(data), GaussianGenerator(mean, std, seed)); } diff --git a/paddle/operators/lookup_table_op.cu b/paddle/operators/lookup_table_op.cu index 27eee3436a..7083440467 100644 --- a/paddle/operators/lookup_table_op.cu +++ b/paddle/operators/lookup_table_op.cu @@ -70,7 +70,7 @@ class LookupTableCUDAKernel : public framework::OpKernel { size_t N = table_t->dims()[0]; size_t D = table_t->dims()[1]; - size_t K = product(ids_t->dims()); + size_t K = ids_t->numel(); auto ids = ids_t->data(); auto table = table_t->data(); auto output = output_t->mutable_data(context.GetPlace()); @@ -91,7 +91,7 @@ class LookupTableGradCUDAKernel : public framework::OpKernel { int N = d_table_t->dims()[0]; int D = d_table_t->dims()[1]; - int K = product(ids_t->dims()); + int K = ids_t->numel(); const int32_t* ids = ids_t->data(); const T* d_output = d_output_t->data(); T* d_table = d_table_t->mutable_data(context.GetPlace()); diff --git a/paddle/operators/lookup_table_op.h b/paddle/operators/lookup_table_op.h index 877b36cef4..a1298906dd 100644 --- a/paddle/operators/lookup_table_op.h +++ b/paddle/operators/lookup_table_op.h @@ -35,7 +35,7 @@ class LookupTableKernel : public framework::OpKernel { auto ids = ids_t->data(); auto table = table_t->data(); auto output = output_t->mutable_data(context.GetPlace()); - for (ssize_t i = 0; i < product(ids_t->dims()); ++i) { + for (int64_t i = 0; i < ids_t->numel(); ++i) { PADDLE_ENFORCE_LT(ids[i], N); PADDLE_ENFORCE_GE(ids[i], 0); memcpy(output + i * D, table + ids[i] * D, D * sizeof(T)); @@ -61,7 +61,7 @@ class LookupTableGradKernel : public framework::OpKernel { t.device(context.GetEigenDevice()) = t.constant(static_cast(0)); - for (ssize_t i = 0; i < product(ids_t->dims()); ++i) { + for (int64_t i = 0; i < ids_t->numel(); ++i) { PADDLE_ENFORCE_LT(ids[i], N); PADDLE_ENFORCE_GE(ids[i], 0); for (int j = 0; j < D; ++j) { diff --git a/paddle/operators/mean_op.h b/paddle/operators/mean_op.h index 9848af280b..ce31e178d8 100644 --- a/paddle/operators/mean_op.h +++ b/paddle/operators/mean_op.h @@ -49,12 +49,11 @@ class MeanGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto OG = context.Input(framework::GradVarName("Out")); - PADDLE_ENFORCE(framework::product(OG->dims()) == 1, - "Mean Gradient should be scalar"); + PADDLE_ENFORCE(OG->numel() == 1, "Mean Gradient should be scalar"); auto IG = context.Output(framework::GradVarName("X")); IG->mutable_data(context.GetPlace()); - T ig_size = (T)framework::product(IG->dims()); + T ig_size = static_cast(IG->numel()); Eigen::DSizes bcast(ig_size); EigenVector::Flatten(*IG).device(context.GetEigenDevice()) = diff --git a/paddle/operators/minus_op.cc b/paddle/operators/minus_op.cc index 069fb5e1ab..a4876feb2e 100644 --- a/paddle/operators/minus_op.cc +++ b/paddle/operators/minus_op.cc @@ -31,8 +31,7 @@ class MinusOp : public framework::OperatorWithKernel { auto *right_tensor = ctx.Input("Y"); PADDLE_ENFORCE_EQ( - framework::product(left_tensor->dims()), - framework::product(right_tensor->dims()), + left_tensor->numel(), right_tensor->numel(), "Minus operator must take two tensor with same num of elements"); ctx.Output("Out")->Resize(left_tensor->dims()); } diff --git a/paddle/operators/squared_l2_distance_op.cc b/paddle/operators/squared_l2_distance_op.cc index dc30644a5e..9f51d3efa8 100644 --- a/paddle/operators/squared_l2_distance_op.cc +++ b/paddle/operators/squared_l2_distance_op.cc @@ -41,8 +41,7 @@ class SquaredL2DistanceOp : public framework::OperatorWithKernel { int rank = framework::arity(x_dims); PADDLE_ENFORCE_GE(rank, 2, "Tensor rank should be at least equal to 2."); - PADDLE_ENFORCE_EQ(framework::product(x_dims) / x_dims[0], - framework::product(y_dims) / y_dims[0], + PADDLE_ENFORCE_EQ(x->numel() / x_dims[0], y->numel() / y_dims[0], "Product of dimensions expcet the first dimension of " "input and target must be equal."); PADDLE_ENFORCE(y_dims[0] == 1 || y_dims[0] == x_dims[0], @@ -50,8 +49,7 @@ class SquaredL2DistanceOp : public framework::OperatorWithKernel { "or to 1."); ctx.Output("sub_result") - ->Resize({static_cast(x_dims[0]), - static_cast(framework::product(x_dims) / x_dims[0])}); + ->Resize({x_dims[0], x->numel() / x_dims[0]}); ctx.Output("Out")->Resize({x_dims[0], 1}); } }; diff --git a/paddle/operators/squared_l2_distance_op.h b/paddle/operators/squared_l2_distance_op.h index ad3347a0b3..097ac04fc0 100644 --- a/paddle/operators/squared_l2_distance_op.h +++ b/paddle/operators/squared_l2_distance_op.h @@ -39,7 +39,7 @@ class SquaredL2DistanceKernel : public framework::OpKernel { auto in0_dims = in0->dims(); auto in1_dims = in1->dims(); - int cols = framework::product(in0_dims) / in0_dims[0]; + int cols = in0->numel() / in0_dims[0]; // reduce dimensions except the first auto x = EigenMatrix::From(*in0, framework::make_ddim({in0_dims[0], cols})); @@ -82,7 +82,7 @@ class SquaredL2DistanceGradKernel : public framework::OpKernel { auto x_dims = x_g->dims(); auto y_dims = y_g->dims(); - int cols = framework::product(x_dims) / x_dims[0]; + int cols = x_g->numel() / x_dims[0]; // calculate gradient auto grad_mat = 2 * (out_grad.broadcast(Eigen::array({{1, cols}}))) * diff --git a/paddle/operators/uniform_random_op.cc b/paddle/operators/uniform_random_op.cc index f2aeef6c31..b8fbc9b52a 100644 --- a/paddle/operators/uniform_random_op.cc +++ b/paddle/operators/uniform_random_op.cc @@ -35,7 +35,7 @@ class CPUUniformRandomKernel : public framework::OpKernel { std::uniform_real_distribution dist( static_cast(context.Attr("min")), static_cast(context.Attr("max"))); - int64_t size = framework::product(tensor->dims()); + int64_t size = tensor->numel(); for (int64_t i = 0; i < size; ++i) { data[i] = dist(engine); } diff --git a/paddle/operators/uniform_random_op.cu b/paddle/operators/uniform_random_op.cu index c2c041b144..6614b53b3f 100644 --- a/paddle/operators/uniform_random_op.cu +++ b/paddle/operators/uniform_random_op.cu @@ -53,8 +53,8 @@ class GPUUniformRandomKernel : public framework::OpKernel { T min = static_cast(context.Attr("min")); T max = static_cast(context.Attr("max")); thrust::counting_iterator index_sequence_begin(0); - ssize_t N = framework::product(tensor->dims()); - thrust::transform(index_sequence_begin, index_sequence_begin + N, + int64_t size = tensor->numel(); + thrust::transform(index_sequence_begin, index_sequence_begin + size, thrust::device_ptr(data), UniformGenerator(min, max, seed)); } From d8921e9d4b825650b79ba52a281b01317d0d0952 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 7 Sep 2017 21:32:39 -0700 Subject: [PATCH 015/115] Fix CI test --- paddle/framework/tensor.h | 2 +- paddle/framework/tensor_test.cc | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/paddle/framework/tensor.h b/paddle/framework/tensor.h index 657d3e6628..9d05b87408 100644 --- a/paddle/framework/tensor.h +++ b/paddle/framework/tensor.h @@ -76,7 +76,7 @@ class Tensor { inline T* mutable_data(DDim dims, platform::Place place); /*! Size of a single element in data() */ - inline size_t element_size() { return holder_->element_size(); } + inline size_t element_size() const { return holder_->element_size(); } /*! Return the dimensions of the memory block. */ inline const DDim& dims() const; diff --git a/paddle/framework/tensor_test.cc b/paddle/framework/tensor_test.cc index da0a4d6363..7747cb34fc 100644 --- a/paddle/framework/tensor_test.cc +++ b/paddle/framework/tensor_test.cc @@ -36,7 +36,7 @@ TEST(Tensor, DataAssert) { } catch (paddle::platform::EnforceNotMet err) { caught = true; std::string msg = - "holder_ should not be null\nTenosr holds no memory. Call " + "holder_ should not be null\nTensor holds no memory. Call " "Tensor::mutable_data first."; const char* what = err.what(); for (size_t i = 0; i < msg.length(); ++i) { @@ -114,7 +114,7 @@ TEST(Tensor, ShareDataWith) { } catch (paddle::platform::EnforceNotMet err) { caught = true; std::string msg = - "holder_ should not be null\nTenosr holds no memory. Call " + "holder_ should not be null\nTensor holds no memory. Call " "Tensor::mutable_data first."; const char* what = err.what(); for (size_t i = 0; i < msg.length(); ++i) { From 0910a9bac4d78347e06ecb90ad4e031bd6c7b0bd Mon Sep 17 00:00:00 2001 From: wanghaoshuang Date: Fri, 8 Sep 2017 16:39:59 +0800 Subject: [PATCH 016/115] Refine pad op 1. Rename variables by Google style. 2. Add more test cases. 3. Add more detail and meaningful comments. 4. Change type of "padding" to vector --- paddle/operators/pad_op.cc | 44 ++++++++--- paddle/operators/pad_op.h | 49 ++++++------- .../paddle/v2/framework/tests/test_pad_op.py | 73 ++++++++++++++++--- 3 files changed, 122 insertions(+), 44 deletions(-) diff --git a/paddle/operators/pad_op.cc b/paddle/operators/pad_op.cc index dac1c56bdd..94a6d20583 100644 --- a/paddle/operators/pad_op.cc +++ b/paddle/operators/pad_op.cc @@ -26,13 +26,13 @@ class PadOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { auto dim0 = ctx.Input("X")->dims(); - auto paddings = GetAttr>>("paddings"); + auto paddings = GetAttr>("paddings"); PADDLE_ENFORCE_EQ( - dim0.size(), paddings.size(), + dim0.size(), (int)(paddings.size() / 2), "Paddings size should be equal to dimension size of input tensor."); std::vector dim1(dim0.size()); for (int i = 0; i < dim0.size(); ++i) { - dim1[i] = dim0[i] + paddings[i].first + paddings[i].second; + dim1[i] = dim0[i] + paddings[i * 2] + paddings[i * 2 + 1]; } ctx.Output("Out")->Resize(paddle::framework::make_ddim(dim1)); } @@ -42,14 +42,40 @@ class PadOpMaker : public framework::OpProtoAndCheckerMaker { public: PadOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "The input of pad op"); - AddOutput("Out", "The output of pad op"); + AddInput("X", "The input of pad op."); + AddOutput("Out", "The output of pad op."); AddComment(R"DOC( -Pad Operator. +Pad input into output, as specified by paddings and pad_value. The input should be a k-D tensor(k > 0 and k < 7). As an example: + +Given: + +X = [[1, 2], + [3, 4]] + +and + +paddings = [(0,1),(1,2)] + +and + +pad_value = 0 + +then we get + +Out = [[0, 1, 2, 0, 0] + [0, 3, 4, 0, 0] + [0, 0, 0, 0, 0]] )DOC"); - AddAttr>>( - "paddings", "The padding rules for each dimension"); - AddAttr("pad_value", "The value to be padded into tensor") + AddAttr>( + "paddings", + "A pair list to describes padding rules for each dimension." + " For 2-D image tensor, paddings=[(0, 1), (2, 3)] means" + " padding 0 row to top, 1 row to bottom, 2 columns to left" + " and 3 columns to right.Paddings size should be equal to" + " dimension size of input tensor."); + AddAttr("pad_value", + "(float) default to 0; " + "The value to be padded into tensor. ") .SetDefault(0.0f); } }; diff --git a/paddle/operators/pad_op.h b/paddle/operators/pad_op.h index ed547d0a7f..dcf957b47e 100644 --- a/paddle/operators/pad_op.h +++ b/paddle/operators/pad_op.h @@ -28,23 +28,23 @@ using EigenTensor = framework::EigenTensor; template void PadFunction(const framework::ExecutionContext& context) { - auto pads = - context.op().GetAttr>>("paddings"); + auto pads = context.GetAttr>("paddings"); Eigen::array, D> paddings; - for (int i = 0; i < pads.size(); ++i) { - paddings[i] = pads[i]; + for (int i = 0; i < paddings.size(); ++i) { + paddings[i].first = pads[i * 2]; + paddings[i].second = pads[i * 2 + 1]; } - T pad_value = context.op().GetAttr("pad_value"); + T pad_value = context.GetAttr("pad_value"); - auto* X = context.Input("X"); - auto* Out = context.Output("Out"); - Out->mutable_data(context.GetPlace()); - auto dims = X->dims(); + auto* x = context.Input("X"); + auto* out = context.Output("Out"); + out->mutable_data(context.GetPlace()); + auto dims = x->dims(); - auto X_tensor = EigenTensor::From(*X); - auto Out_tensor = EigenTensor::From(*Out); + auto x_tensor = EigenTensor::From(*x); + auto out_tensor = EigenTensor::From(*out); auto place = context.GetEigenDevice(); - Out_tensor.device(place) = X_tensor.pad(paddings, pad_value); + out_tensor.device(place) = x_tensor.pad(paddings, pad_value); } template @@ -72,28 +72,27 @@ class PadKernel : public framework::OpKernel { PadFunction(context); break; default: - LOG(ERROR) << "Only ranks up to 6 supported."; + PADDLE_THROW("Only ranks up to 6 supported."); } } }; template void PadGradFunction(const framework::ExecutionContext& context) { - auto pads = - context.op().GetAttr>>("paddings"); + auto pads = context.GetAttr>("paddings"); Eigen::array, D> paddings; - for (int i = 0; i < pads.size(); ++i) { - paddings[i].first = -pads[i].first; - paddings[i].second = -pads[i].second; + for (int i = 0; i < paddings.size(); ++i) { + paddings[i].first = -pads[i * 2]; + paddings[i].second = -pads[i * 2 + 1]; } - auto* dOut = context.Input(framework::GradVarName("Out")); - auto* dX = context.Output(framework::GradVarName("X")); - dX->mutable_data(context.GetPlace()); + auto* d_out = context.Input(framework::GradVarName("Out")); + auto* d_x = context.Output(framework::GradVarName("X")); + d_x->mutable_data(context.GetPlace()); - auto dX_tensor = EigenTensor::From(*dX); - auto dOut_tensor = EigenTensor::From(*dOut); + auto d_x_tensor = EigenTensor::From(*d_x); + auto d_out_tensor = EigenTensor::From(*d_out); auto place = context.GetEigenDevice(); - dX_tensor.device(place) = dOut_tensor.pad(paddings, 0); + d_x_tensor.device(place) = d_out_tensor.pad(paddings, 0); } template @@ -122,7 +121,7 @@ class PadGradKernel : public framework::OpKernel { PadGradFunction(context); break; default: - LOG(ERROR) << "Only ranks up to 6 supported."; + PADDLE_THROW("Only ranks up to 6 supported."); } } }; diff --git a/python/paddle/v2/framework/tests/test_pad_op.py b/python/paddle/v2/framework/tests/test_pad_op.py index 10aeaa752f..56b9c88f7d 100644 --- a/python/paddle/v2/framework/tests/test_pad_op.py +++ b/python/paddle/v2/framework/tests/test_pad_op.py @@ -9,36 +9,89 @@ class TestPadOp(unittest.TestCase): __metaclass__ = OpTestMeta def setUp(self): + self.initTestCase() self.type = "pad" - self.inputs = {'X': np.random.random((16, 16)).astype("float32"), } + self.inputs = {'X': np.random.random(self.shape).astype("float32"), } self.attrs = {} - self.attrs['paddings'] = [(0, 1), (2, 3)] - self.attrs['pad_value'] = 0 + self.attrs['paddings'] = np.array(self.paddings).flatten() + self.attrs['pad_value'] = self.pad_value self.outputs = { 'Out': np.pad(self.inputs['X'], - self.attrs['paddings'], + self.paddings, mode='constant', - constant_values=0) + constant_values=self.pad_value) } + def initTestCase(self): + self.shape = (16, 16) + self.paddings = [(0, 1), (2, 3)] + self.pad_value = 0 + + +class TestCase1(TestPadOp): + def initTestCase(self): + self.shape = (2, 3, 4, 4) + self.paddings = [(0, 1), (2, 3), (2, 1), (1, 1)] + self.pad_value = 0.5 + + +class TestCase2(TestPadOp): + def initTestCase(self): + self.shape = (2, 2, 2) + self.paddings = [(0, 0), (0, 0), (1, 2)] + self.pad_value = 1 + + +class TestCase3(TestPadOp): + def initTestCase(self): + self.shape = (8) + self.paddings = [(0, 1)] + self.pad_value = 0.9 + class TestPadGradOp(GradientChecker): def setUp(self): + self.initTestCase() self.op = Operator( type="pad", X="X", Out="Out", - paddings=[(0, 1), (2, 3)], - pad_value=0) - self.inputs = {'X': np.random.random((16, 16)).astype("float32"), } + paddings=np.array(self.paddings).flatten(), + pad_value=self.pad_value) + self.inputs = {'X': np.random.random(self.shape).astype("float32"), } + + def initTestCase(self): + self.shape = (16, 16) + self.paddings = [(0, 1), (2, 3)] + self.pad_value = 0 def test_normal(self): - self.check_grad( - self.op, self.inputs, set(["X"]), "Out", max_relative_error=0.5) + self.check_grad(self.op, self.inputs, set(["X"]), "Out") def test_cpu_gpu_compare(self): self.compare_grad(self.op, self.inputs) +class TestiGradCase1(TestPadOp): + def initTestCase(self): + self.shape = (2, 3, 4, 4) + self.paddings = [(0, 1), (2, 3), (2, 1), (1, 1)] + self.pad_value = 0.5 + + +class TestGradCase2(TestPadOp): + def initTestCase(self): + self.shape = (2, 2, 2) + self.paddings = [(0, 0), (0, 0), (1, 2)] + self.pad_value = 1 + + +class TestGradCase3(TestPadOp): + def initTestCase(self): + self.shape = (8) + self.paddings = [(0, 1)] + self.pad_value = 0.9 + + if __name__ == '__main__': unittest.main() From d960cbdcf3f162c0da17fd04c8bc8eb770c9965b Mon Sep 17 00:00:00 2001 From: wanghaoshuang Date: Fri, 8 Sep 2017 16:48:39 +0800 Subject: [PATCH 017/115] Fix comment --- paddle/operators/pad_op.cc | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/paddle/operators/pad_op.cc b/paddle/operators/pad_op.cc index 94a6d20583..6ea2a25f0b 100644 --- a/paddle/operators/pad_op.cc +++ b/paddle/operators/pad_op.cc @@ -27,9 +27,9 @@ class PadOp : public framework::OperatorWithKernel { void InferShape(const framework::InferShapeContext &ctx) const override { auto dim0 = ctx.Input("X")->dims(); auto paddings = GetAttr>("paddings"); - PADDLE_ENFORCE_EQ( - dim0.size(), (int)(paddings.size() / 2), - "Paddings size should be equal to dimension size of input tensor."); + PADDLE_ENFORCE_EQ(dim0.size(), (int)(paddings.size() / 2), + "Size of paddings should be equal to 2 * dimension size " + "of input tensor."); std::vector dim1(dim0.size()); for (int i = 0; i < dim0.size(); ++i) { dim1[i] = dim0[i] + paddings[i * 2] + paddings[i * 2 + 1]; @@ -54,7 +54,7 @@ X = [[1, 2], and -paddings = [(0,1),(1,2)] +paddings = [0, 1, 1, 2] and @@ -68,11 +68,11 @@ Out = [[0, 1, 2, 0, 0] )DOC"); AddAttr>( "paddings", - "A pair list to describes padding rules for each dimension." - " For 2-D image tensor, paddings=[(0, 1), (2, 3)] means" + "A list to describes padding rules for each dimension." + " For 2-D image tensor, paddings=[0, 1, 2, 3] means" " padding 0 row to top, 1 row to bottom, 2 columns to left" - " and 3 columns to right.Paddings size should be equal to" - " dimension size of input tensor."); + " and 3 columns to right.Size of paddings should be equal to" + " 2 * dimension size of input tensor."); AddAttr("pad_value", "(float) default to 0; " "The value to be padded into tensor. ") From c5fa417c62257d14d5fc426d5b8319cb4c747b9a Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Fri, 8 Sep 2017 11:10:44 -0700 Subject: [PATCH 018/115] Host and device transform API * with unit-tests * Also complete `memcpy` --- paddle/memory/memcpy.cc | 18 +++++++ paddle/platform/CMakeLists.txt | 1 + paddle/platform/transform.h | 61 ++++++++++++++++++++++ paddle/platform/transform_test.cu | 84 +++++++++++++++++++++++++++++++ 4 files changed, 164 insertions(+) create mode 100644 paddle/platform/transform.h create mode 100644 paddle/platform/transform_test.cu diff --git a/paddle/memory/memcpy.cc b/paddle/memory/memcpy.cc index a19a3e3675..19ec9ba9b2 100644 --- a/paddle/memory/memcpy.cc +++ b/paddle/memory/memcpy.cc @@ -62,6 +62,24 @@ void Copy(platform::GPUPlace dst_place, } } +template <> +void Copy(platform::CPUPlace dst_place, + void* dst, + platform::GPUPlace src_place, + const void* src, size_t num) { + platform::SetDeviceId(src_place.device); + platform::GpuMemcpySync(dst, src, num, cudaMemcpyDeviceToHost); +} + +template <> +void Copy(platform::GPUPlace dst_place, + void* dst, + platform::CPUPlace src_place, + const void* src, size_t num) { + platform::SetDeviceId(dst_place.device); + platform::GpuMemcpySync(dst, src, num, cudaMemcpyHostToDevice); +} + #endif // PADDLE_ONLY_CPU } // namespace memory diff --git a/paddle/platform/CMakeLists.txt b/paddle/platform/CMakeLists.txt index 17bdac8749..8b605e51c3 100644 --- a/paddle/platform/CMakeLists.txt +++ b/paddle/platform/CMakeLists.txt @@ -24,3 +24,4 @@ cc_library(device_context SRCS device_context.cc DEPS memory buddy_allocator nv_test(device_context_test SRCS device_context_test.cc DEPS device_context gpu_info) nv_test(cudnn_helper_test SRCS cudnn_helper_test.cc DEPS dynload_cuda) +nv_test(transform_test SRCS transform_test.cu DEPS paddle_memory place) diff --git a/paddle/platform/transform.h b/paddle/platform/transform.h new file mode 100644 index 0000000000..fcd300f2d9 --- /dev/null +++ b/paddle/platform/transform.h @@ -0,0 +1,61 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#pragma once + +#include "paddle/platform/enforce.h" +#include "paddle/platform/hostdevice.h" +#include "paddle/platform/place.h" + +#include +#ifdef __NVCC__ +#include +#endif + +namespace paddle { +namespace platform { + +// Transform on host or device. It provides the same API in std library. +template +void Transform(Place place, InputIter first, InputIter last, OutputIter result, + UnaryOperation op) { + if (is_cpu_place(place)) { + std::transform(first, last, result, op); + } else { +#ifdef __NVCC__ + thrust::transform(first, last, result, op); +#else + PADDLE_THROW("Do not invoke `Transform` in .cc file"); +#endif + } +} + +template +void Transform(Place place, InputIter1 first1, InputIter1 last1, + InputIter2 first2, OutputIter result, BinaryOperation op) { + if (is_cpu_place(place)) { + std::transform(first1, last1, first2, result, op); + } else { +#ifdef __NVCC__ + thrust::transform(first1, last1, first2, result, op); +#else + PADDLE_THROW("Do not invoke `Transform` in .cc file"); +#endif + } +}; + +} // namespace platform +} // namespace paddle diff --git a/paddle/platform/transform_test.cu b/paddle/platform/transform_test.cu new file mode 100644 index 0000000000..600fed8f45 --- /dev/null +++ b/paddle/platform/transform_test.cu @@ -0,0 +1,84 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#include +#include "paddle/memory/memcpy.h" +#include "paddle/memory/memory.h" +#include "paddle/platform/transform.h" + +template +class Scale { + public: + explicit Scale(const T& scale) : scale_(scale) {} + + HOSTDEVICE T operator()(const T& a) const { return a * scale_; } + + private: + T scale_; +}; + +template +class Multiply { + public: + HOSTDEVICE T operator()(const T& a, const T& b) const { return a * b; } +}; + +TEST(Transform, CPUUnary) { + using namespace paddle::platform; + float buf[4] = {0.1, 0.2, 0.3, 0.4}; + Transform(CPUPlace(), buf, buf + 4, buf, Scale(10)); + for (int i = 0; i < 4; ++i) { + ASSERT_NEAR(buf[i], static_cast(i + 1), 1e-5); + } +} + +TEST(Transform, GPUUnary) { + using namespace paddle::platform; + using namespace paddle::memory; + GPUPlace gpu0(0); + float cpu_buf[4] = {0.1, 0.2, 0.3, 0.4}; + float* gpu_buf = static_cast(Alloc(gpu0, sizeof(float) * 4)); + Copy(gpu0, gpu_buf, CPUPlace(), cpu_buf, sizeof(cpu_buf)); + Transform(gpu0, gpu_buf, gpu_buf + 4, gpu_buf, Scale(10)); + Copy(CPUPlace(), cpu_buf, gpu0, gpu_buf, sizeof(cpu_buf)); + Free(gpu0, gpu_buf); + for (int i = 0; i < 4; ++i) { + ASSERT_NEAR(cpu_buf[i], static_cast(i + 1), 1e-5); + } +} + +TEST(Transform, CPUBinary) { + using namespace paddle::platform; + using namespace paddle::memory; + int buf[4] = {1, 2, 3, 4}; + Transform(CPUPlace(), buf, buf + 4, buf, buf, Multiply()); + for (int i = 0; i < 4; ++i) { + ASSERT_EQ((i + 1) * (i + 1), buf[i]); + } +} + +TEST(Transform, GPUBinary) { + using namespace paddle::platform; + using namespace paddle::memory; + int buf[4] = {1, 2, 3, 4}; + GPUPlace gpu0(0); + int* gpu_buf = static_cast(Alloc(gpu0, sizeof(buf))); + Copy(gpu0, gpu_buf, CPUPlace(), buf, sizeof(buf)); + Transform(gpu0, gpu_buf, gpu_buf + 4, gpu_buf, gpu_buf, Multiply()); + Copy(CPUPlace(), buf, gpu0, gpu_buf, sizeof(buf)); + Free(gpu0, gpu_buf); + for (int i = 0; i < 4; ++i) { + ASSERT_EQ((i + 1) * (i + 1), buf[i]); + } +} \ No newline at end of file From c7b347887dd6285dcb171499c17d705d424924ad Mon Sep 17 00:00:00 2001 From: wanghaoshuang Date: Mon, 11 Sep 2017 11:46:04 +0800 Subject: [PATCH 019/115] Fix variable names and comments --- paddle/operators/pad_op.cc | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/paddle/operators/pad_op.cc b/paddle/operators/pad_op.cc index 6ea2a25f0b..894fe2cecf 100644 --- a/paddle/operators/pad_op.cc +++ b/paddle/operators/pad_op.cc @@ -25,16 +25,16 @@ class PadOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { - auto dim0 = ctx.Input("X")->dims(); - auto paddings = GetAttr>("paddings"); - PADDLE_ENFORCE_EQ(dim0.size(), (int)(paddings.size() / 2), + auto x_dim = ctx.Input("X")->dims(); + auto paddings = Attr>("paddings"); + PADDLE_ENFORCE_EQ(x_dim.size() * 2, int(paddings.size()), "Size of paddings should be equal to 2 * dimension size " "of input tensor."); - std::vector dim1(dim0.size()); - for (int i = 0; i < dim0.size(); ++i) { - dim1[i] = dim0[i] + paddings[i * 2] + paddings[i * 2 + 1]; + std::vector out_dims(x_dim.size()); + for (int i = 0; i < x_dim.size(); ++i) { + out_dims[i] = x_dim[i] + paddings[i * 2] + paddings[i * 2 + 1]; } - ctx.Output("Out")->Resize(paddle::framework::make_ddim(dim1)); + ctx.Output("Out")->Resize(framework::make_ddim(out_dims)); } }; @@ -42,8 +42,12 @@ class PadOpMaker : public framework::OpProtoAndCheckerMaker { public: PadOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "The input of pad op."); - AddOutput("Out", "The output of pad op."); + AddInput("X", + "The input of pad op. " + "The input should be a k-D tensor(k > 0 and k < 7)"); + AddOutput("Out", + "The output of pad op." + "A tensor with the same shape as X."); AddComment(R"DOC( Pad input into output, as specified by paddings and pad_value. The input should be a k-D tensor(k > 0 and k < 7). As an example: @@ -75,7 +79,7 @@ Out = [[0, 1, 2, 0, 0] " 2 * dimension size of input tensor."); AddAttr("pad_value", "(float) default to 0; " - "The value to be padded into tensor. ") + "The value to fill padded areas.") .SetDefault(0.0f); } }; From d874fca46bb77be244ed38d3edcc029a446cfafa Mon Sep 17 00:00:00 2001 From: Liu Yiqun Date: Mon, 11 Sep 2017 04:29:38 +0000 Subject: [PATCH 020/115] Support multiple inputs in FCOp. --- paddle/operators/fc_op.cc | 55 +++++++++++------ .../paddle/v2/framework/tests/test_fc_op.py | 61 +++++++++---------- 2 files changed, 67 insertions(+), 49 deletions(-) diff --git a/paddle/operators/fc_op.cc b/paddle/operators/fc_op.cc index 40b5128bff..ec76d6c659 100644 --- a/paddle/operators/fc_op.cc +++ b/paddle/operators/fc_op.cc @@ -24,30 +24,49 @@ class FCOp : public NetOp { const framework::VariableNameMap &outputs, const framework::AttributeMap &attrs) : NetOp(type, inputs, outputs, attrs) { - // mul_out = X * W - AppendOp(framework::OpRegistry::CreateOp( - "mul", {{"X", {Input("X")}}, {"Y", {Input("W")}}}, - {{"Out", {Output("mul_out")}}}, {})); + auto x = Inputs("X"); + auto w = Inputs("W"); + PADDLE_ENFORCE_EQ( + x.size(), w.size(), + "The size of inputs X(%d) should be the same as that of weights W(%d).", + x.size(), w.size()); + + int n = x.size(); + PADDLE_ENFORCE_GE(n, 1, + "The size of inputs X(%d) should be no less than 1.", n); + + // mul_out = X[0] * W[0] + ... + X[n-1] * W[n-1] + AppendOp( + framework::OpRegistry::CreateOp("mul", {{"X", {x[0]}}, {"W", {w[0]}}}, + {{"Out", {Output("mul_out")}}}, {})); + + for (int i = 1; i < n; i++) { + // mul_out = mul_out + X[i] * W[i] + AppendOp( + framework::OpRegistry::CreateOp("mul", {{"X", {x[i]}}, {"Y", {w[i]}}}, + {{"Out", {Output("add_out")}}}, {})); + AppendOp(framework::OpRegistry::CreateOp( + "add", {{"X", {Output("mul_out")}}, {"Y", {Output("add_out")}}}, + {{"Out", {Output("mul_out")}}}, {})); + } - std::string add_out_name = "mul_out"; auto b = Input("b"); + std::string add_out = "mul_out"; if (b != framework::kEmptyVarName) { // add_out = mul_out + b AppendOp(framework::OpRegistry::CreateOp( "rowwise_add", {{"X", {Output("mul_out")}}, {"b", {Input("b")}}}, {{"Out", {Output("add_out")}}}, {})); - add_out_name = "add_out"; + add_out = "add_out"; } else { - auto add_out = Output("add_out"); - if (add_out != framework::kEmptyVarName) { - this->Rename(add_out, framework::kEmptyVarName); + if (Output("add_out") != framework::kEmptyVarName) { + this->Rename(Output("add_out"), framework::kEmptyVarName); } } - auto activation = GetAttr("activation"); - AppendOp(framework::OpRegistry::CreateOp(activation, - {{"X", {Output(add_out_name)}}}, - {{"Y", {Output("Out")}}}, {})); + auto activation = Attr("activation"); + AppendOp(framework::OpRegistry::CreateOp( + activation, {{"X", {Output(add_out)}}}, {{"Y", {Output("Y")}}}, {})); CompleteAddOp(false); } }; @@ -56,11 +75,11 @@ class FCOpMaker : public framework::OpProtoAndCheckerMaker { public: FCOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "The 2D input matrix of FC operator."); - AddInput("W", "The 2D weight matrix of FC operator."); - AddInput("b", "The 1D bias vector of FC operator"); + AddInput("X", "The 2-D input matrix of FC operator.").AsDuplicable(); + AddInput("W", "The 2-D weight matrix of FC operator.").AsDuplicable(); + AddInput("b", "The 1-D bias vector of FC operator"); - AddOutput("Out", "The activated output matrix of FC operator"); + AddOutput("Y", "The activated output matrix of FC operator"); AddOutput("mul_out", "The non-actived output of FC operator, X * W") .AsIntermediate(); AddOutput("add_out", "The non-actived output of FC operator, X * W + b") @@ -78,7 +97,7 @@ learned weights with a matrix multiplication followed by a bias offset (optionally). Equation: - Out = Act(sum_n{X_i * W_i} + b) + Y = Act(sum_n{X_i * W_i} + b) where X_i is a 2D matrix of size (M x K), usually M is the minibatch size and K is the number of features. W_i is also a 2D matrix of size (K x N), diff --git a/python/paddle/v2/framework/tests/test_fc_op.py b/python/paddle/v2/framework/tests/test_fc_op.py index 76b68ad614..72d750111c 100644 --- a/python/paddle/v2/framework/tests/test_fc_op.py +++ b/python/paddle/v2/framework/tests/test_fc_op.py @@ -1,48 +1,47 @@ import unittest import numpy as np -from gradient_checker import GradientChecker, create_op -from op_test_util import OpTestMeta +from op_test import OpTest +import paddle.v2.framework.core as core from paddle.v2.framework.op import Operator -class TestFCOp(unittest.TestCase): - __metaclass__ = OpTestMeta - +class TestFCOp(OpTest): def setUp(self): - self.type = "fc" + print "Run" + self.op_type = "fc" + x0 = np.random.random((32, 256)).astype("float32") + x1 = np.random.random((32, 256)).astype("float32") + w0 = np.random.random((256, 100)).astype("float32") + w1 = np.random.random((256, 100)).astype("float32") + b = np.random.random(100).astype("float32") self.inputs = { - "X": np.random.random((32, 784)).astype("float32"), - "W": np.random.random((784, 1000)).astype("float32"), - "b": np.random.random(1000).astype("float32") + "X": { + "X0": x0, + "X1": x1 + }, + "W": { + "W0": w0, + "W1": w1 + }, + "b": b } - self.attrs = {"activation": "sigmoid"} - mul_out = np.dot(self.inputs["X"], self.inputs["W"]) - add_out = np.add(mul_out, self.inputs["b"]) - sigmoid_out = 1 / (1 + np.exp(-add_out)) + #self.attrs = {"activation": "sigmoid"} + mul_out = np.dot(x0, w0) + np.dot(x1, w1) + add_out = np.add(mul_out, b) + #sigmoid_out = 1 / (1 + np.exp(-add_out)) + sigmoid_out = add_out self.outputs = { "mul_out": mul_out, "add_out": add_out, - "Out": sigmoid_out + "Y": sigmoid_out } + def test_check_output(self): + self.check_output(core.CPUPlace()) + self.check_output(core.GPUPlace(0)) -class TestFCGradOp(GradientChecker): - def test_normal(self): - self.inputs = { - "X": np.random.random((32, 256)).astype("float32"), - "W": np.random.random((256, 100)).astype("float32"), - "b": np.random.random(100).astype("float32") - } - op = Operator( - "fc", - X="X", - W="W", - b="b", - Out="Out", - mul_out="mul_out", - add_out="add_out", - activation="sigmoid") - self.check_grad(op, self.inputs, ["X", "W", "b"], "Out") + #def test_check_grad(self): + # self.check_grad(["X0", "X1", "W0", "W1", "b"], "Y") if __name__ == '__main__': From f31217fc2e535d0d1079a02895214c2c2f434809 Mon Sep 17 00:00:00 2001 From: wanghaoshuang Date: Mon, 11 Sep 2017 14:50:54 +0800 Subject: [PATCH 021/115] Fix issues --- paddle/operators/pad_op.cc | 5 +++-- paddle/operators/pad_op.h | 21 +++++++++++---------- 2 files changed, 14 insertions(+), 12 deletions(-) diff --git a/paddle/operators/pad_op.cc b/paddle/operators/pad_op.cc index 894fe2cecf..ef678cf3d3 100644 --- a/paddle/operators/pad_op.cc +++ b/paddle/operators/pad_op.cc @@ -27,10 +27,10 @@ class PadOp : public framework::OperatorWithKernel { void InferShape(const framework::InferShapeContext &ctx) const override { auto x_dim = ctx.Input("X")->dims(); auto paddings = Attr>("paddings"); - PADDLE_ENFORCE_EQ(x_dim.size() * 2, int(paddings.size()), + PADDLE_ENFORCE_EQ(x_dim.size() * 2, int64_t(paddings.size()), "Size of paddings should be equal to 2 * dimension size " "of input tensor."); - std::vector out_dims(x_dim.size()); + std::vector out_dims(x_dim.size()); for (int i = 0; i < x_dim.size(); ++i) { out_dims[i] = x_dim[i] + paddings[i * 2] + paddings[i * 2 + 1]; } @@ -95,6 +95,7 @@ class PadOpGrad : public framework::OperatorWithKernel { "Input(Out@GRAD) should not be null"); auto x_dims = ctx.Input("X")->dims(); auto *x_grad = ctx.Output(framework::GradVarName("X")); + PADDLE_ENFORCE_NOT_NULL(x_grad, "Output(X@GRAD) should not be null"); x_grad->Resize(x_dims); } diff --git a/paddle/operators/pad_op.h b/paddle/operators/pad_op.h index dcf957b47e..53451f925a 100644 --- a/paddle/operators/pad_op.h +++ b/paddle/operators/pad_op.h @@ -28,18 +28,17 @@ using EigenTensor = framework::EigenTensor; template void PadFunction(const framework::ExecutionContext& context) { - auto pads = context.GetAttr>("paddings"); + auto pads = context.Attr>("paddings"); Eigen::array, D> paddings; for (int i = 0; i < paddings.size(); ++i) { paddings[i].first = pads[i * 2]; paddings[i].second = pads[i * 2 + 1]; } - T pad_value = context.GetAttr("pad_value"); + T pad_value = context.Attr("pad_value"); auto* x = context.Input("X"); auto* out = context.Output("Out"); out->mutable_data(context.GetPlace()); - auto dims = x->dims(); auto x_tensor = EigenTensor::From(*x); auto out_tensor = EigenTensor::From(*out); @@ -51,8 +50,8 @@ template class PadKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { - int dim = context.Input("X")->dims().size(); - switch (dim) { + int rank = context.Input("X")->dims().size(); + switch (rank) { case 1: PadFunction(context); break; @@ -72,14 +71,15 @@ class PadKernel : public framework::OpKernel { PadFunction(context); break; default: - PADDLE_THROW("Only ranks up to 6 supported."); + PADDLE_THROW( + "PadOp only support tensors with no more than 6 dimensions."); } } }; template void PadGradFunction(const framework::ExecutionContext& context) { - auto pads = context.GetAttr>("paddings"); + auto pads = context.Attr>("paddings"); Eigen::array, D> paddings; for (int i = 0; i < paddings.size(); ++i) { paddings[i].first = -pads[i * 2]; @@ -99,9 +99,9 @@ template class PadGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { - size_t dim = + size_t rank = context.Input(framework::GradVarName("Out"))->dims().size(); - switch (dim) { + switch (rank) { case 1: PadGradFunction(context); break; @@ -121,7 +121,8 @@ class PadGradKernel : public framework::OpKernel { PadGradFunction(context); break; default: - PADDLE_THROW("Only ranks up to 6 supported."); + PADDLE_THROW( + "PadOp only support tensors with no more than 6 dimensions."); } } }; From 4223ff8c27a8681096ad73659a6ea03441cf3831 Mon Sep 17 00:00:00 2001 From: Liu Yiqun Date: Mon, 11 Sep 2017 07:00:04 +0000 Subject: [PATCH 022/115] Correct the key name of "mul" op in FCOp, and add some annotations for debug. --- paddle/operators/fc_op.cc | 4 +++- python/paddle/v2/framework/tests/op_test.py | 8 ++++---- python/paddle/v2/framework/tests/test_fc_op.py | 5 +---- 3 files changed, 8 insertions(+), 9 deletions(-) diff --git a/paddle/operators/fc_op.cc b/paddle/operators/fc_op.cc index ec76d6c659..6e6a09bc3f 100644 --- a/paddle/operators/fc_op.cc +++ b/paddle/operators/fc_op.cc @@ -37,7 +37,7 @@ class FCOp : public NetOp { // mul_out = X[0] * W[0] + ... + X[n-1] * W[n-1] AppendOp( - framework::OpRegistry::CreateOp("mul", {{"X", {x[0]}}, {"W", {w[0]}}}, + framework::OpRegistry::CreateOp("mul", {{"X", {x[0]}}, {"Y", {w[0]}}}, {{"Out", {Output("mul_out")}}}, {})); for (int i = 1; i < n; i++) { @@ -68,6 +68,8 @@ class FCOp : public NetOp { AppendOp(framework::OpRegistry::CreateOp( activation, {{"X", {Output(add_out)}}}, {{"Y", {Output("Y")}}}, {})); CompleteAddOp(false); + + std::cout << DebugString() << std::endl; } }; diff --git a/python/paddle/v2/framework/tests/op_test.py b/python/paddle/v2/framework/tests/op_test.py index 3a6a5dca4c..b524f88551 100644 --- a/python/paddle/v2/framework/tests/op_test.py +++ b/python/paddle/v2/framework/tests/op_test.py @@ -36,8 +36,8 @@ def create_op(scope, op_type, inputs, outputs, attrs=None): var = scope.new_var(out_name) kwargs[out_name].append(out_name) - for attr_name in Operator.get_op_attr_names(op_type): - kwargs[attr_name] = attrs[attr_name] + #for attr_name in Operator.get_op_attr_names(op_type): + # kwargs[attr_name] = attrs[attr_name] return Operator(op_type, **kwargs) @@ -186,14 +186,14 @@ class OpTest(unittest.TestCase): self.assertTrue( np.allclose( actual, expect, atol=1e-05), - "output name: " + out_name + "has diff") + "output name: " + out_name + " has diff") else: actual = np.array(self.scope.find_var(out_name).get_tensor()) expect = self.outputs[out_name] self.assertTrue( np.allclose( actual, expect, atol=1e-05), - "output name: " + out_name + "has diff") + "output name: " + out_name + " has diff") def check_output(self): places = [core.CPUPlace()] diff --git a/python/paddle/v2/framework/tests/test_fc_op.py b/python/paddle/v2/framework/tests/test_fc_op.py index 72d750111c..959bd7e405 100644 --- a/python/paddle/v2/framework/tests/test_fc_op.py +++ b/python/paddle/v2/framework/tests/test_fc_op.py @@ -1,8 +1,6 @@ import unittest import numpy as np from op_test import OpTest -import paddle.v2.framework.core as core -from paddle.v2.framework.op import Operator class TestFCOp(OpTest): @@ -37,8 +35,7 @@ class TestFCOp(OpTest): } def test_check_output(self): - self.check_output(core.CPUPlace()) - self.check_output(core.GPUPlace(0)) + self.check_output() #def test_check_grad(self): # self.check_grad(["X0", "X1", "W0", "W1", "b"], "Y") From dd64349a9213b419c6a50c81e06e2d6a8fa9ebd5 Mon Sep 17 00:00:00 2001 From: Yibing Liu Date: Mon, 11 Sep 2017 00:06:06 -0700 Subject: [PATCH 023/115] refine reshape operator --- paddle/operators/reshape_op.cc | 15 +++++++++------ paddle/operators/reshape_op.h | 10 ++++------ .../paddle/v2/framework/tests/test_reshape_op.py | 16 ++++++++++++++-- 3 files changed, 27 insertions(+), 14 deletions(-) diff --git a/paddle/operators/reshape_op.cc b/paddle/operators/reshape_op.cc index d75ec76632..37cbecbf25 100644 --- a/paddle/operators/reshape_op.cc +++ b/paddle/operators/reshape_op.cc @@ -29,14 +29,17 @@ class ReshapeOp : public framework::OperatorWithKernel { void InferShape(const framework::InferShapeContext &ctx) const override { auto *in = ctx.Input("X"); auto shape = ctx.Attr>("shape"); - PADDLE_ENFORCE_EQ((unsigned)shape.size(), in->dims().size(), - "The dimension of Input(X) mismatches with Attr(shape)."); - size_t shape_size = 1; + int64_t capacity = -1; for (auto dim : shape) { - shape_size *= dim; + PADDLE_ENFORCE(dim > 0, "Each dimension of shape must be positive."); + if (capacity < 0) { + capacity = dim; + } else { + capacity *= dim; + } } - size_t in_size = framework::product(in->dims()); - PADDLE_ENFORCE_EQ(shape_size, in_size, + int64_t in_size = framework::product(in->dims()); + PADDLE_ENFORCE_EQ(capacity, in_size, "The size of Input(X) mismatches with Attr(shape)."); ctx.Output("Out")->Resize(in->dims()); } diff --git a/paddle/operators/reshape_op.h b/paddle/operators/reshape_op.h index 61d502c836..0e920329d9 100644 --- a/paddle/operators/reshape_op.h +++ b/paddle/operators/reshape_op.h @@ -21,14 +21,12 @@ namespace paddle { namespace operators { -using Tensor = framework::Tensor; - template class ReshapeKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const { - auto* out = ctx.Output("Out"); - auto* in = ctx.Input("X"); + auto* out = ctx.Output("Out"); + auto* in = ctx.Input("X"); out->mutable_data(ctx.GetPlace()); auto shape = ctx.Attr>("shape"); @@ -46,8 +44,8 @@ template class ReshapeGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const { - auto* d_out = ctx.Input(framework::GradVarName("Out")); - auto* d_x = ctx.Output(framework::GradVarName("X")); + auto* d_out = ctx.Input(framework::GradVarName("Out")); + auto* d_x = ctx.Output(framework::GradVarName("X")); d_x->mutable_data(ctx.GetPlace()); auto in_dims = d_x->dims(); diff --git a/python/paddle/v2/framework/tests/test_reshape_op.py b/python/paddle/v2/framework/tests/test_reshape_op.py index 4797019435..df7d913ba4 100644 --- a/python/paddle/v2/framework/tests/test_reshape_op.py +++ b/python/paddle/v2/framework/tests/test_reshape_op.py @@ -10,15 +10,27 @@ class TestReshapeOp(unittest.TestCase): def setUp(self): self.type = "reshape" self.inputs = {'X': np.random.random((37, 51)).astype("float32"), } - self.attrs = {'shape': [51, 37]} + self.attrs = {'shape': [51 * 37]} self.outputs = {'Out': self.inputs['X'].reshape(self.attrs['shape'])} -class ReshapeGradOpTest(GradientChecker): +class TestReshapeGradOp(GradientChecker): + """ def test_normal(self): op = Operator("reshape", X='X', Out='Out', shape=[5, 40]) inputs = {"X": np.random.random((10, 20)).astype("float32")} self.check_grad(op, inputs, set("X"), "Out") + """ + + def setUp(self): + self.op = Operator("reshape", X='X', Out='Out', shape=[5, 40]) + self.inputs = {"X": np.random.random((10, 20)).astype("float32")} + + def test_normal(self): + self.check_grad(self.op, self.inputs, ["X"], "Out") + + def test_dev_compare(self): + self.compare_grad(self.op, self.inputs) if __name__ == '__main__': From 9c929a495980643672f66c882e76ca67e761954f Mon Sep 17 00:00:00 2001 From: wanghaoshuang Date: Mon, 11 Sep 2017 15:19:19 +0800 Subject: [PATCH 024/115] Fix warning log --- paddle/operators/pad_op.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/paddle/operators/pad_op.h b/paddle/operators/pad_op.h index 53451f925a..ca8832f26a 100644 --- a/paddle/operators/pad_op.h +++ b/paddle/operators/pad_op.h @@ -30,7 +30,7 @@ template void PadFunction(const framework::ExecutionContext& context) { auto pads = context.Attr>("paddings"); Eigen::array, D> paddings; - for (int i = 0; i < paddings.size(); ++i) { + for (size_t i = 0; i < paddings.size(); ++i) { paddings[i].first = pads[i * 2]; paddings[i].second = pads[i * 2 + 1]; } @@ -81,7 +81,7 @@ template void PadGradFunction(const framework::ExecutionContext& context) { auto pads = context.Attr>("paddings"); Eigen::array, D> paddings; - for (int i = 0; i < paddings.size(); ++i) { + for (size_t i = 0; i < paddings.size(); ++i) { paddings[i].first = -pads[i * 2]; paddings[i].second = -pads[i * 2 + 1]; } From 7ae72f752d1dcf3a818b9e9a3bef001fa8344b8e Mon Sep 17 00:00:00 2001 From: Yibing Liu Date: Mon, 11 Sep 2017 01:09:20 -0700 Subject: [PATCH 025/115] remove unused code in test --- python/paddle/v2/framework/tests/test_reshape_op.py | 7 ------- 1 file changed, 7 deletions(-) diff --git a/python/paddle/v2/framework/tests/test_reshape_op.py b/python/paddle/v2/framework/tests/test_reshape_op.py index df7d913ba4..50653f58ee 100644 --- a/python/paddle/v2/framework/tests/test_reshape_op.py +++ b/python/paddle/v2/framework/tests/test_reshape_op.py @@ -15,13 +15,6 @@ class TestReshapeOp(unittest.TestCase): class TestReshapeGradOp(GradientChecker): - """ - def test_normal(self): - op = Operator("reshape", X='X', Out='Out', shape=[5, 40]) - inputs = {"X": np.random.random((10, 20)).astype("float32")} - self.check_grad(op, inputs, set("X"), "Out") - """ - def setUp(self): self.op = Operator("reshape", X='X', Out='Out', shape=[5, 40]) self.inputs = {"X": np.random.random((10, 20)).astype("float32")} From 4f2ee63c4466e018b9a1196281c1a059094698f3 Mon Sep 17 00:00:00 2001 From: Liu Yiqun Date: Mon, 11 Sep 2017 11:10:25 +0000 Subject: [PATCH 026/115] Get rid of the calling of inplace op in FCOp. --- paddle/operators/fc_op.cc | 57 +++++++++------- .../paddle/v2/framework/tests/test_fc_op.py | 65 ++++++++++++++----- 2 files changed, 84 insertions(+), 38 deletions(-) diff --git a/paddle/operators/fc_op.cc b/paddle/operators/fc_op.cc index 6e6a09bc3f..1c6c045427 100644 --- a/paddle/operators/fc_op.cc +++ b/paddle/operators/fc_op.cc @@ -26,38 +26,43 @@ class FCOp : public NetOp { : NetOp(type, inputs, outputs, attrs) { auto x = Inputs("X"); auto w = Inputs("W"); + auto mul_out = Outputs("mul_out"); PADDLE_ENFORCE_EQ( x.size(), w.size(), "The size of inputs X(%d) should be the same as that of weights W(%d).", x.size(), w.size()); + PADDLE_ENFORCE_EQ(mul_out.size(), x.size(), + "The size of intermediate mul_out(%d) should be the same " + "as that of inputs X(%d).", + mul_out.size(), x.size()); int n = x.size(); PADDLE_ENFORCE_GE(n, 1, "The size of inputs X(%d) should be no less than 1.", n); - // mul_out = X[0] * W[0] + ... + X[n-1] * W[n-1] - AppendOp( - framework::OpRegistry::CreateOp("mul", {{"X", {x[0]}}, {"Y", {w[0]}}}, - {{"Out", {Output("mul_out")}}}, {})); + // mul_out[i] = X[i] * W[i] + for (int i = 0; i < n; i++) { + AppendOp(framework::OpRegistry::CreateOp( + "mul", {{"X", {x[i]}}, {"Y", {w[i]}}}, {{"Out", {mul_out[i]}}}, {})); + } - for (int i = 1; i < n; i++) { - // mul_out = mul_out + X[i] * W[i] - AppendOp( - framework::OpRegistry::CreateOp("mul", {{"X", {x[i]}}, {"Y", {w[i]}}}, - {{"Out", {Output("add_out")}}}, {})); + // sum_out = X[0] * W[0] + ... + X[n-1] * W[n-1] + if (n > 1) { + AppendOp(framework::OpRegistry::CreateOp( + "sum", {{"X", {mul_out}}}, {{"Out", {Output("sum_out")}}}, {})); + } else { AppendOp(framework::OpRegistry::CreateOp( - "add", {{"X", {Output("mul_out")}}, {"Y", {Output("add_out")}}}, - {{"Out", {Output("mul_out")}}}, {})); + "identity", {{"X", {mul_out[0]}}}, {{"Y", {Output("sum_out")}}}, {})); } + // add_out = sum_out + b auto b = Input("b"); - std::string add_out = "mul_out"; + std::string add_out = "sum_out"; if (b != framework::kEmptyVarName) { - // add_out = mul_out + b - AppendOp(framework::OpRegistry::CreateOp( - "rowwise_add", {{"X", {Output("mul_out")}}, {"b", {Input("b")}}}, - {{"Out", {Output("add_out")}}}, {})); add_out = "add_out"; + AppendOp(framework::OpRegistry::CreateOp( + "rowwise_add", {{"X", {Output("sum_out")}}, {"b", {Input("b")}}}, + {{"Out", {Output(add_out)}}}, {})); } else { if (Output("add_out") != framework::kEmptyVarName) { this->Rename(Output("add_out"), framework::kEmptyVarName); @@ -68,8 +73,6 @@ class FCOp : public NetOp { AppendOp(framework::OpRegistry::CreateOp( activation, {{"X", {Output(add_out)}}}, {{"Y", {Output("Y")}}}, {})); CompleteAddOp(false); - - std::cout << DebugString() << std::endl; } }; @@ -77,14 +80,24 @@ class FCOpMaker : public framework::OpProtoAndCheckerMaker { public: FCOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "The 2-D input matrix of FC operator.").AsDuplicable(); - AddInput("W", "The 2-D weight matrix of FC operator.").AsDuplicable(); + AddInput("X", "The inputs of FC operator, a ordered vector of 2-D matrix.") + .AsDuplicable(); + AddInput("W", "The weights of FC operator, a ordered vector of 2-D matrix.") + .AsDuplicable(); AddInput("b", "The 1-D bias vector of FC operator"); AddOutput("Y", "The activated output matrix of FC operator"); - AddOutput("mul_out", "The non-actived output of FC operator, X * W") + AddOutput("mul_out", + "The intermediate outputs of FC operator, " + "saving the product of X[i] * W[i]") + .AsIntermediate() + .AsDuplicable(); + AddOutput("sum_out", + "The intermediate output of FC operator, " + "saving the sum of products, sum(X[i] * W[i])") .AsIntermediate(); - AddOutput("add_out", "The non-actived output of FC operator, X * W + b") + AddOutput("add_out", + "The non-actived output of FC operator, saving X * W + b") .AsIntermediate(); AddAttr("activation", "The activation type of FC operator.") .SetDefault("identity") diff --git a/python/paddle/v2/framework/tests/test_fc_op.py b/python/paddle/v2/framework/tests/test_fc_op.py index 959bd7e405..4355191223 100644 --- a/python/paddle/v2/framework/tests/test_fc_op.py +++ b/python/paddle/v2/framework/tests/test_fc_op.py @@ -3,33 +3,65 @@ import numpy as np from op_test import OpTest -class TestFCOp(OpTest): +class TestFCOp1(OpTest): def setUp(self): - print "Run" self.op_type = "fc" - x0 = np.random.random((32, 256)).astype("float32") - x1 = np.random.random((32, 256)).astype("float32") - w0 = np.random.random((256, 100)).astype("float32") - w1 = np.random.random((256, 100)).astype("float32") - b = np.random.random(100).astype("float32") + x1 = np.random.random((16, 32)).astype("float32") + w1 = np.random.random((32, 10)).astype("float32") + b = np.random.random(10).astype("float32") + self.inputs = {"X": {"X1": x1}, "W": {"W1": w1}, "b": b} + mul_out1 = np.dot(x1, w1) + sum_out = mul_out1 + add_out = sum_out + b + identity_out = add_out + self.outputs = { + "mul_out": { + "mul_out1": mul_out1, + }, + "sum_out": sum_out, + "add_out": add_out, + "Y": identity_out + } + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(["X1", "W1", "b"], "Y", max_relative_error=0.05) + + +class TestFCOp2(OpTest): + def setUp(self): + self.op_type = "fc" + x1 = np.random.random((16, 32)).astype("float32") + x2 = np.random.random((16, 32)).astype("float32") + w1 = np.random.random((32, 10)).astype("float32") + w2 = np.random.random((32, 10)).astype("float32") + b = np.random.random(10).astype("float32") self.inputs = { "X": { - "X0": x0, - "X1": x1 + "X1": x1, + "X2": x2 }, "W": { - "W0": w0, - "W1": w1 + "W1": w1, + "W2": w2 }, "b": b } #self.attrs = {"activation": "sigmoid"} - mul_out = np.dot(x0, w0) + np.dot(x1, w1) - add_out = np.add(mul_out, b) + mul_out1 = np.dot(x1, w1) + mul_out2 = np.dot(x2, w2) + sum_out = mul_out1 + mul_out2 + add_out = np.add(sum_out, b) #sigmoid_out = 1 / (1 + np.exp(-add_out)) sigmoid_out = add_out self.outputs = { - "mul_out": mul_out, + "mul_out": { + "mul_out0": mul_out1, + "mul_out1": mul_out2 + }, + "sum_out": sum_out, "add_out": add_out, "Y": sigmoid_out } @@ -37,8 +69,9 @@ class TestFCOp(OpTest): def test_check_output(self): self.check_output() - #def test_check_grad(self): - # self.check_grad(["X0", "X1", "W0", "W1", "b"], "Y") + def test_check_grad(self): + self.check_grad( + ["X1", "X2", "W1", "W2", "b"], "Y", max_relative_error=0.05) if __name__ == '__main__': From 21d49744051a6ba0d2f6901cd8db8a242cfcc05a Mon Sep 17 00:00:00 2001 From: dongzhihong Date: Mon, 11 Sep 2017 11:01:29 -0700 Subject: [PATCH 027/115] "fix name" --- paddle/operators/name_convention.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/operators/name_convention.md b/paddle/operators/name_convention.md index 2260bf5660..280ab8d317 100644 --- a/paddle/operators/name_convention.md +++ b/paddle/operators/name_convention.md @@ -4,7 +4,7 @@ To make the operator document itself more clear. we recommend operator names obs ### Input/Output names -* Variable name is prefer uppercase. e.g. `X`, `Y`. But when the variable is tensor, its name should lowercase. e.g. `matrix`, to discriminate with otherone. +* Variable name is prefer uppercase. e.g. `X`, `Y`. But when the variable is tensor, its name should lowercase. e.g. `matrix`, to discriminate with other one. * element wise operator, math operator or similar op, please obey common name convention. if the operator only have one output, use `Out`. From 355e35fecd2866a1894c304647f6875cf15f7571 Mon Sep 17 00:00:00 2001 From: wanghaoshuang Date: Tue, 12 Sep 2017 10:12:33 +0800 Subject: [PATCH 028/115] fix paddle enforce check --- paddle/operators/pad_op.cc | 1 - 1 file changed, 1 deletion(-) diff --git a/paddle/operators/pad_op.cc b/paddle/operators/pad_op.cc index ef678cf3d3..449463c830 100644 --- a/paddle/operators/pad_op.cc +++ b/paddle/operators/pad_op.cc @@ -95,7 +95,6 @@ class PadOpGrad : public framework::OperatorWithKernel { "Input(Out@GRAD) should not be null"); auto x_dims = ctx.Input("X")->dims(); auto *x_grad = ctx.Output(framework::GradVarName("X")); - PADDLE_ENFORCE_NOT_NULL(x_grad, "Output(X@GRAD) should not be null"); x_grad->Resize(x_dims); } From 2b1450f1512753fa53717334a07b024efa8ffefa Mon Sep 17 00:00:00 2001 From: dongzhihong Date: Mon, 11 Sep 2017 19:28:39 -0700 Subject: [PATCH 029/115] rewrite the document --- paddle/framework/backward.md | 60 ++++++++++++------ paddle/framework/images/duplicate_op2.graffle | Bin 2434 -> 2611 bytes paddle/framework/images/duplicate_op2.png | Bin 24393 -> 24748 bytes 3 files changed, 39 insertions(+), 21 deletions(-) diff --git a/paddle/framework/backward.md b/paddle/framework/backward.md index c762811dfc..0859bf1d9b 100644 --- a/paddle/framework/backward.md +++ b/paddle/framework/backward.md @@ -2,9 +2,20 @@ ## Motivation -In Neural Network, the backpropagation algorithm follows the chain rule, so we need to compound the gradient operators/expressions together with the chain rule. Every forward network needs a backward network to construct the full computation graph, the operator/expression's backward pass will be generated respect to forward pass. +In Neural Network, many model is solved by the the backpropagation algorithm(known as BP) at present. Technically it caculates the gradient of the loss function, then distributed back through the networks. Follows the chain rule, so we need to compound the gradient operators/expressions together with the chain rule. Every forward network needs a backward network to construct the full computation graph, the operator/expression's backward pass will be generated respect to forward pass. -## Backward Operator Registry +## Implementation + +In this design doc, we exported only one API for generating the backward pass. + +```c++ +std::unique_ptr Backward(const OperatorBase& forwardOp, + const std::unordered_set& no_grad_vars); +``` + +The implementation behind it can be divided into two parts. Namely, ** Backward Operator Creating** and **Backward Operator Building**. + +###Backward Operator Registry A backward network is built up with several backward operators. Backward operators take forward operators' inputs outputs, and output gradients and then calculate its input gradients. @@ -25,7 +36,7 @@ REGISTER_OP(mul, MulOp, MulOpMaker, mul_grad, MulOpGrad); `mul_grad` is the type of backward operator, and `MulOpGrad` is its class name. -## Backward Opeartor Creating +###Backward Opeartor Creating Given a certain forward operator, we can get its corresponding backward operator by calling: @@ -43,40 +54,47 @@ The function `BuildGradOp` will sequentially execute following processes: 4. Building backward operator with `inputs`, `outputs` and forward operator's attributes. -## Backward Network Building - -A backward network is a series of backward operators. The main idea of building a backward network is creating backward operators in the inverted sequence and put them together. +###Backward Network Building -In our design, the network itself is also a kind of operator. So the operators contained by a big network may be some small network. - -given a forward network, it generates the backward network. We only care about the Gradients—`OutputGradients`, `InputGradients`. +A backward network is a series of backward operators. The main idea of building a backward network is creating backward operators in the inverted sequence and append them together one by one. There is some corner case need to process specially. 1. Op - when the input forward network is an Op, return its gradient Operator Immediately. + when the input forward network is an Op, return its gradient Operator Immediately. If all of its outputs are in no gradient set, then return a special `NoGradient` operator 2. NetOp - when the input forward network is a NetOp, it needs to call the sub NetOp/Operators backward function recursively. During the process, we need to collect the `OutputGradients` name according to the forward NetOp. + In our design, the network itself is also a kind of operator(**NetOp**). So the operators contained by a big network may be some small network. When the input forward network is a NetOp, it needs to call the sub NetOp/Operators backward function recursively. During the process, we need to collect the `OutputGradients` name according to the forward NetOp. + +3. RnnOp + + RnnOp is a nested stepnet operator. Backward module need to recusively call `Backward` for every stepnet. + +4. Shared Variable **shared variable**. As illustrated in the pictures, two operator's `Output` `Gradient` will overwrite their shared input variable. -

-
+

+
+ +​ pic 1. Shared variable in operators. + +

- 1. Shared variable in operators. +​ Share variable between operators or same input variable used in multiple operators leads to a duplicate gradient variable. As demo show above, we need to rename gradient name recursively and add a generic add operator replace the overwrite links. -

+

+
- Share variable between operators or same input variable used in multiple operators leads to a duplicate gradient variable. As demo show above, we need to rename gradient name recursively and add a generic add operator replace the overwrite links. +​ pic 2. Replace shared variable's gradient with `Add` operator. -

-
+

- 2. Replace shared variable's gradient with `Add` operator. +​ Because our framework find variable accord to its name, we need rename the output links. We add a suffix of number represent its position in clockwise. -

+5. Part of Gradient is Zero. + In the whole graph, there is some case of that one operator's gradient is not needed, but its input's gradient is a dependency link of other operator, we need to fill a same shape gradient matrix in the position. In our implement, we insert a special `fillZeroLike` operator. -​ Then collect the sub graph `OutputGradients`/`InputGradients` as the NetOp's and return it. +Follow these rules above, then collect the sub graph `OutputGradients`/`InputGradients` as the NetOp's and return it. diff --git a/paddle/framework/images/duplicate_op2.graffle b/paddle/framework/images/duplicate_op2.graffle index ede3bca30ae17d5af52505fd94dc2f79b23b57e0..5cec3bc64dbd44dc99e348485969f29bd128ceb1 100644 GIT binary patch literal 2611 zcmV-33e5E%iwFP!000030PS3BQ`<@s{e1ZqTE1-6Zjk3CX_QO2*I*LJ65wJ3o0O}z z26=325t1Xx0g~dsZ;ynnHyA?5V@;(Z;W<6i8g-xP?qR(C$JK?WeUf412L7v6tfN&; z`c5!#{n4w{f-O^UqTCMhE;>lL4)9Yx5 zd%NA927RNt)kFI%n4%S&A-lfYc-`3CN^+wx?-A{5)T+49RAIPiqmiiPLT|bs18E&BU*6emB0%N<5h@s1q zwCA~y2t%Q6ep`>?5FU90G}MC&-yMa)bfO=?-J4Jhhn_UoTepxL8k@FGD5y^?(=SMCHfBj=x0B&#b4v2lo5H&BwI?&=Oyjf>x(v>< z)?9Pi-U-EJrdFod{y||O3>YjaG;AhK9s`SpkR8*%?t{^C|mA`xS|e0 z*jhu@8pRqmUNZP+Y5zpPT-F|jNNgdlqgDc+-Ty7>#ifb%CJ~-eeIQ| zE@iRTt|xoflZuD{k0@gkJM09`^g{Y^*;VIrM=FzdYrm0VlN#498f3|ba!zEynv4ON(Aq=SGa)a}Fq)fH-n}ZGpUu3%3%+0H{FR}HuIJ^4&YI_TpIKoUkwVV$&9}-UouT3n$vw?X za`b4)utpLWaJd;P-Y29t7K8jNtUh;{_L_LFrtcD=K26_c11MQ*Nrz%<=0g229^$_6 zBRBcanT#+oNZ$#ZK&U1Iv6V0VAsB3Y=JkiDKlEL=I%5$=GDg~t^ghWL*6MvZmA~Hr z)=~BW;NSb991z3kk3uPZb#vxTC0rBKKl<`&A|Mxg(G#LG=We%fiRXpKzEc%tBZ*fh-m>t_3MVdh^L|O`X3$jLF|sk zvGj++0hUE8E6Kw~1ZjO|h_p{4e5v*SZqw60-yF3&e`2_2Hw_D3I_0N8Emck^zR}zw zxhUrmg7qcs-A#M;lJ=Igx1_zNr#2)CA%ylyqUX&zXceC4`>&s?t zyO)0yqQ+e2Z@Xlb&6Gv%psXD!2F)tu2`$HHR*BITp+OVZ(BNSJALVCm9(Cv3gBX@h z5wQ(0H$pifM6EJRXSR(kLbzd2Vw0QFbY#+|ZWGH!2DeRSTO5Qf4jDJY1dN$t3lpshggK`zc1@OdL=;%uN>&BSBlJ3+hJIk ztNBzwp6JP0bAC=z**!TD&UHa>RzS%x;I8>pvtaWj>zy&WaJ@#AP@N*D?jRl)&}ovM zfA+f;7#Q%O?n-%+yDpjUS(pz!miaK|z;dvVa>kH>O>C#k2N8i`FlG@TB+8q}3uCMU zSy9X_f(_e9n2+fuA*SL!$}JPKMq$&|k%fTw2q%V(=q>XxMr_7xP7T}U_LBLQ%=aTR zpY>qoYfg2sNt6pvmFP0ad71HqiS8qlQTE_u)T&EHm2y>`x7^V2YBJ~V;?>Jc>sc}_ zo@QF@!Qke;{gVfH4uW+HQ>q)vcPs>Mgno`9QLPU#;}Z_~VjxN_ZWt=aX1@kYu^N_Af!;%dC{_*t6YRlo2*z8DNO&Qf zOEaCwjdMG(Ir6gLO(2dSi(!;9jLeGgQ4lAa)urgPes{iVMoU&+vhtFZe+*V$)|48O zRQnM%B}N}zQ!<%ua%v-!a)ZGV2(dY3ROgr)+~C+ks?^<77E%Q$gESY7sLlvgq)rGd zr^tGtnWl~nkRf27hQ*m-E{jIXqR|grG%~DZx%{c9jX#Fk9=%bBIfV08^G+Z$bw)X? zoeX0ONXmfV-^My694?gF*d+G<=VqRX2%))nl`YIxi%5o#kbHhVdq#3v63LSl^6w%N z^o@uFQy>l7LW)T^BobIv5p1Lr&JaX7rft~b%kW4H+-X#`3;y^8=8n=RRpAh~zs2@u{Y&IO{Z{0o_QKFJ zPqX#wOmpc}+?&uH4%vsGIN`~#qi3mkuBc-Ndl;FKY&24{_-|d;{G6>>|GQK*@ zr*elq>5p=e$ZP;I+s3dtB9stT8ZeQ6fP(R5FmV^c7o81MJa6)zsG<>C)>5W3Qj3)5SF z26u)AV_CR!#Y&s-hR&k}u*#8}U1q<#8N_jL@lJ%f5W1fOUwELm>z7QfI*=0#6qnZwwAqe74|d*q?@uw3|K9XI?Cu0@_SfcLyWQ<^ zdwg(=419L_)^kQ%@cZE47=IX_?VNaRXy4j4dOh;?9I)ZRVaZg)Qd=Jl0#x%K95 z_4nj3{+j7gd~#R+hY$;dg*?8d`|8f~ETl0p(GzkoEkrm+>Tl$&(%<9P464lHNUEdy zvc^__hWB!$_SL@Si|GZEZ_#~`Q&LyLdVq; zKJK}3m6y^}^@ZRemADXLoT#DBfu|u?`n3-WL|lomT9k$F$OpqGqA%&}Y3Foc;%X0rN?45<2z4BNLm*#AkVI2T)G>)~z1FwBQ_ zxip7Bnqwq??}w4vVzdaU`BDiLqZdk&Sv6(eel+#6b+Q707j!1>YcAGTvUL#EJUcjD z*Ar(uQh9eNGNH5-$lR`&M{-o)u7`ZDM*d4Dsylmg$cgZ#mG{tC!K`tO`hCOdM|m2m VP)2r5zG-D+<$oBADfa+I007jCD%=17 literal 2434 zcmV-|34Qh-iwFP!000030PS0CQ`234qrC$1GRZk|M9#3Ffj2#sw*g@!Nvb?7E-y&w)e z^|>dv*IsO0vAgY-C;2CKNbgv55c=$PtErwsArB=RuxJ}?HK(`JD$qCgsDviZ)0StI zXcSR(^I|KO5xnv?NZ1h9As<9yJZc=n-J6K^`vJ4IniEJ4jV-r9bP%7|mSvF~WUD!C zIu&6oMa%zl9LrhPG8(hyw0wtpSE|dC7#ObNO(q8>Cc!$*t;+d7zDax;a(KgyQdR93_x2-};;-qf*% z&6fuJ+S)%6{8XeGfeW(J_|lE3A4o!8?)c;0qcoCL;(J+7crlh zW^WKN7OI=eV9el}p#ITgw<8J@lAqzdcmO8Y^RRM21hC$SMt)D;nihh;7?b2;IqE`) zkz5?4F1nsTbT@$*MgjrZQCeGFESx|r8z8D(9)(nnBO3HRdg_7XgP~+$pL*cuXl*Td z*_b$5&+8+t<@@&!5$y7wL+lOBfvM0$Ah=~=AVrht%8#%r($Qb&NL-dIV}f|U*Qr4V zc|)B-Pg$HeTNz#NP3L&Lh>Fcm}D=w~M=^A{i}yrQEO9epo4`hKAk zavF|{UDre=b{$v8ikOTvkg^@qG_i$^UxTHL8kRcb;R2G%x07oK7aK~k$NfGSZ*{@P z$@4{mJP|RMxs|XSdD$=$$U&xK8M z%B=uM?T}uwpdE>PGkqGw$yU;0f>qr073lHgUBHZwDG%m~gz5yj;JzFd(CNNmAsDEe9#&NIO{j)EnQCme zfpwU59m7C2umxVoJT?I|%w-00Ovkkh#Y4I|4~6L`nP4V^e}(RCMy z%f!`Vutnen{ zemsJepx-7 zUg{>9s!r&D#Y-H^Kez_qE?Avp(Gu`~GdAjq;t}~-h?!1wDsd%#@Q}KC@rO1FQsfc6 zuCchJ8x2?CC-CpGo2Z?Ddn{f~jC0Oz9&C1=!GW}5Ea#hCE9BR?WQTbhC#uu~R}=4e z*v_Q_)NHM0*bW=fh!*x&YgTs>e2|E_7Y_29z*AX#AuGa!D#t>7I~=lbkc&j79gykn zrt2W06JnG)OzJ;EKzSoZd?|d<*+9grdEQfXws&+8GWi&M4BpcjGA2ugKl6%r!sq=- ziq`zJ7Izcb8o@7-{?ah&(^}Go*kkfqj)Y{(zszyOSttwYtxt`bqQO{}Zd|d_4vf%M zyaZM`@_d&$>g)(9#q~QH<+7aepJmgg`+=ac=+KjU0~eh)BdfHBeG&1`BBTMx&BKz( zRXg&8ueiLXqn(!g?C-w~-d|wE{<9N&IM^30Yjr*X=JjoGv-@VT_=l`7zeYWZPZqU*0Qk>zIq^K8q!#pG!k;KY(yLfITn07yTy;^nogBjoHCU`$!cu%XBd}LwcY-l1Jf0w z=V-pjsmVYbhfMkOoLJx#KA4Zg;O=2(KNUC2CK3X4sVu^TkGot}c_~eHECdf}jcXdo zL=2Ulmxf$vHtrG;S5jF`$~5%YBkP`p{NtEqQTT$lKa1I3D0#FgDw9}Rh)~hviWebVi@Jay4+_DfzO;YLlGn0RjX-cO|Gws z(F;qG*=owH{B#^-^JE1819V9rDlV?CWb+`Zd3SKSPCzgBney&ZWTNY(KxQ&yp0YuK zyB?G7oX-WW?!wI==QJ2s#$oOXW{qpq@9|Y1SfZtyGA9Pozp_e}Z0H0>8 A_y7O^ diff --git a/paddle/framework/images/duplicate_op2.png b/paddle/framework/images/duplicate_op2.png index 4e872dc2caf3b0cbd0d5176f11a14801b538dc86..21cdd5cabf1b5203e1435a75b57770d2f702fa92 100644 GIT binary patch literal 24748 zcmZ_01ymeimnet_cXw-oySoz_C%6Q6cXx*X!6A5XcMVR^;1GhlySr><{@HnN_MGL= zO?TD(?kCr!h*DORK|v%$gn)oRk^Lm83IPEb0lfO)p@1tfhlzKfgNQm!Q_yE8kD>W@wEd_ag69+qHBU1-sGiFaaM*tcELeP^R_-JS5 zYDDg7XKU}m?rxgSlIaZ_*hukS=iZ`00<@*FMC%bPbPa8 zs{bJQfAL6~xtKUxIl5Xo*pvUmYh>);<|;%<`ER2C`T5T}U9HUj&rJ3%|Gh0>gDn5< zu&^<+viuKjV5s1~tNaoUc8<-?EgLb{~zN2i%ZGb$_&``e~o7Q@6rGFwf}|}Wcj!8|LY+BbISj96*y*L zL_wDSA(=2@?Kb-=1cWGrtfZKlC*-LC@ek$9lCtp1&$9RLuh z9kmX;V<_W6je(EqKiKpG0At`6)PIw=qZ?|drU_{g{hM85%j4e~=E5Kv65CIDmjD2W z2Fz|~UwdZVT_~GWu3j2puW zW$o$dxiDAT;;;^2bsJ5l`#17W#owMFXg^0!SD53)>bxQ`gKc!`@&hSj3f9 zCupi>kX-6qwf?dO7>biNIG7fd0v$F8y+U`m)o4O0@EJTYuw2w66t#m41A|knnnjAg| z9cdE;P8O9q3#MzCWrKG#7-4HImp>X!VPFg3^tn4}cRS`+Ylxnw`*jQW1Y*8!tzyPr1UKSmoRFHxUr=8#flSyiDRO zWg1mFKiyBAQ9=3kYN1E$7875~w@drT@F9=U4cG0D9cRp$uSv7OA3lu3#+ zdcrUK%hk#fIV$$rT>}AJu7;Ac3O&(8?M&%gPB(gqheuJYLvvNSEha;8m*rV}k-V$a z0Kd@~O-mRTNXORFa(mxc;&Ja))O7I!yE*z(po%9uT2gvxOAhv=5}i6-b9U5O@+ZrN&*P{Sz?UZl;bB8lwj~|${|K~m0p|k{n=^*6>pS8kf>mBrFH`;uVWyU7C*Ly zAN+n-qVNfA6I75rhPtXH0u71GiVpg)xq$Hj2Klyio#X0AO@S)V2CGs}f(f)usn2EP@%A$@-RdeylBTZIo2(Qc}{HHHMaFVAf1O)}v%hg7Qoxfre z5B{#)!MAh+rD)5F?8zsFYO39uD0tW-G$i_0*1YAw^MjRDMIv3~i2q49K!h+g-%W!q z-^1wdRaIC=Yz*2+sYLrGzosF@kFP+|PFN_ZTaAE|dzan}G zjQj3!|8Xd+W8)M2M{hhvEyvBiuoQZAXD^}$Nr$vdQ5Qr<13*6Hw2a8e$Xu14ZjbRu zNU|O6W%dH0{GRUyoYs^_5^ig4_5axo2A9!TD?mX4!U2auNL|vp1BWOY%Wb#RTIgP_ zRseqiuUHT+r)8H#gNcnDI#2_s6q>AMNIq`cg+MBocruY3VL$H=kroN<|MXfr7gH<5 zXE{e~2o~P%gmgvmKgxl0Baa(4oziCvR;$4X2i<^jX4Y@txEZCCil+Xi-ZoUCl*_Es zco2=l@SW4@N0wUZ4)Z?-T%U2gtp*hFTBi?#M&&{1{0|#kOw4-A=^x*@Z20r}tHlHU zEiQnY%N}C^ay1oAh?jgN6Gev}tGDmTNB(^7&3&~)C@*ln?Z^}fEp zKVNCPZgbh+k^JdE4)X)u2-El!w-1&8QsUo4C%KRob9e+|p9As9G(qih=lC_9T&;A$4#p=JnwXbsF}cVFXksU`1Q(R|#J*iHWn=Oa|{K4;Si8 zWRqx^zI3!^IlsTXk_fnyuYhRc(T5R8We;dCn{^4ux>R5Q9VTU{7V$0n`0%fo>t5Zw?HeB7>-<iE~^&_Ns0%JHi+kGvsa31v!uuqgbNTvJ_JDZz9 z(trU*Es+Zj5momUk7B4g;dcAzl9_tcn9svJ+j?%fYD~$o3HiOTAtx~q!}Eh?zIfP_ z8zx0Rd9~}|O!zi$w)n`nDiYNETWGnI!Mud@#x(Skfk?S#Ih>rW4%R&32mdklm$4{F+^;o z>~o3y?k=aas28xGEMcxVALzcknUCIvx+MebAkyQxnGBU>D#5!k8pGMf2X0vLBTK6 z_zOC1g+4mJgkmTo1JoGEbB)b=kvd3nP|ze5ldp^vmB%$WJ`gI&8KGL z9?j8Tu)OK-}T; zU;dkipgcVtk?&nm^$$q*`dx(xa-pZK&1mbjnLG1Uy20p_$m_p|9$B;yRzl+!ggiF; zVEM8g@F52P`=m;&I)vm)ES+sLduk)D@I{!@i)oh$U)Sc;yzvAbhaMx-cy%>}LGza) z`qumN<0AnMIF&Jj_>l+$>KB1if#6F5hVp-e!S%fQhi}6wG2i+g_nrnQ6kOU znp!8YcrIH6iUy|3z&UNyVY(GKg&yb3pxfVXPyQ=`Lo1VNGjVUpo5 zvh;4oiJm&tZ4CoA0|*A47+WD`QWxkLNysqqn;6W|1z3@m5US?*PM?-#0=&{ z6!42tr}R`zEm?H9G|O9u{?6Iu1%ro4K(1lFzP^ldgL8^ks8S60rX|S!;u|aYI@W?5_SzJ=QXjFzm0B6pd z?Ec8Y3ZMGQrn(xtJpD~1`42SFdTlXWMpsu?2D@3kG_UjMv1_`1U-%aruoJ0t4;bK! zN|;fpMMt#}vi_oW8=jH`4m(6ftJs;VK_1uSN|8>?UV1>>P#-rpH=Vyi+LUh#eiaP{ zP9FrHaLhjTDiJxz^hN!6b^#U}>yxfn7%L1`0gcfr6A-G*%MJP`2g8E?jsrHt1mDt6 zdtwA$3y6(I*oCWo+FkikJ5tJ7$mF} ze*=*L5-^`O#E@IO##sJ&-31FwXdx!f!KC^Q-HF)eyXeTsZ~NuDUX;fbqW6fiYsUVy zn1$K~sV?kLxPa?a4-pqbTBT3hN+UHSPk~3L@cD02d<~UaFD{nT55l+vtJ&hF0>qoz zUp;FJqC-KQrx4#_(j|daQZrV|tvsNi*eabH$1O0UQ^P=iGGk+J>HHoXzZ2`A!4LDR zX4DZ4aRmu*W^hH&N?4(qDFNy+7{`ruyW#jPhOZ5d5%05rIa(Glw&uaFtnL!>n5NRc zn8XkkW@QhH^`;}i5+As*Q9=T@)A$1uDXG&*D2s zB!Q|OkaUe|{7LEco4NTx*b^P=YQ$b!?;D0Fts0yd4*bcYq~CYhI(wJh676N$kSR!~ zsj&RiPh|MPpw>$!F%s5~ARin@!*iI;Eo1~>oyp{qag%}9G(OztW2U3>-UQnmy5JoP-l^z ziT?{P)>Hy86i1&74C!0vs$ll z#zAx}Rf;2gk)QZhT5v_nK}6JnQ@xA%i)~gPJar^QgK983A7g*e>5UatG&XKu=(U5G z>{}a&hIXZ)SB`F3Tp}&u@)jaqD&;kB{u(QnM7j*?Y)~3NEF(5>j@Lk&-9|ou;v(m`{a0Kx}OSm!6NGEkd zqBCLchcZh)r%)ql1k#01!93i>?c0K%&q7Yrq8hpfSm{Pb%w_F^00$at92wlFmuSR7 zm!^(kg`n|oN%oPg&ZFj3&Doch8F~8D zdY2!vNnt)aP!FtL75TP2NdH0bLRW7V*?oBr@ysPiN5j zlgbqK$YE{ss$2nn<*zn(8MR;RZsGqTCFFK9(5)TwfZP=tI_=2ivb{nx3@R9P>A}37 zzd&-#s0f^b7`1@wu{TCgPy$#p)+k_+WpG(?5!**Vj~?@dNSuX^IT!W_?Vm4c*KRI$;soJ^2oK#dcK6{(sl)sr zb6D{!>g|4WrM0s1w{%D&`u@H?MvJ`?AfuG%eBLwKRu@6D%OvmO&dWb#7hL`u6;ivw z9LMA>DafaG^K1 zw>a^(a>=c~aKcfq&@mh2j1Pi;MeWQ0(T+CVbg}*g-s==}Qd@AII4BGYOzahxd3!W^ zy`s4AU>>UxhKM7Mt-mA5D*w3f#Rk!t)B|8AIw#y*j)d^LWZE)Ig6~a#oJv-g3TffM z#^E!!QqppK4C80n388GIG1vCrg7>^0@>w*$J*-en+lHGUL zDD)Cj6|Woti;1kQX(}+dMN{ML<#AwppOTVtB3Hzr2A{da1(2x|u8ZVp&MUbx$1*Ud zA660Aq=qr<_oe{LB<)Y%oaI`(gI1#@U3o|bF{Sjr>!BKM8dT$Rsy?lNzCpiwlN8&Q$p7vnC)* z452_uhf}JP`=%pB*J(?4{ zD>9J+0u$;1al%=$-*jOd!Td`wuyB%FX~fW=57`gqNy2;VCe3ofZE4UxnGz#*vl%Is ztU?pYL`yMy^55#`?G>?7DcK`&K-t++XS6_mL0>P#qZv-)@5gg7F;SVs&npR8}F5bf5F{E@7z z1N(ozLnH6}-|jPcxnghzVM~F*zDm6*?(fzU3R+p$^ub;kUiuj`Z!LYcySzx!oJhHk z_g9t7;{&9~Q=yF@G$xbSIgjP3ink6ZXCK%7ZTR(nmXV6^fjtICRfdlw ztHA+x`tfI(Je(UQI^9|`g;E|*>+8wXQO7tYDEN?_REB$L2Iu37d44qpNGkSbVC&@= zIy%T-4{ZO!iWSZ12s5>0N8Fxf^_dXV+-Z;Qf#1InfGqF^Ex>kLltehPote|sZmQO4 zkeml;t$YtyK<=3r7vK?SUn;87zY&cV1<7;UL`Ubf1O_42kEuXb85!UP@A$!rw7$dT z=ax^oKChy=C;{(Z-z|V#_CDZgVu#%R#=DV~c=CvV@$KH6d-hX<*k25`F$V5K0;qrB zO_uri@ptHYQ+2&$2tR&bM+K7BjyO>E=L8pmS?zyfiwbM}r(r-<+Afy`slP8~#xeuS zZCaXIt(JVP-k<^qx};w}*eWER?873NP*rq#-||b1s`Q7SueDwxl5WB&IdNRxON7Hd z=3qE7L7k0*Cv#QOmRwD1OhX(7RsKR&H$|j0p}jkuCwCuHT$fIN@XysE;!&^I>2Q6# z?2lkELs<6EyZ`jv^!e`Cc{JdC)$esr|C8fcqm|G#96THS>x|FtXok-EM7}ig+f|Y1 z__|*=FdYw{%lp|O7h?R*udtYjz?#V%zhPu%ldVP~9`hm#rP9w#{QjbSPtOQ*hb|?X zIm6NRtKQt^C*9|3QtRzFZ_5ww^X#9BkdJS_Blz@_&Dhc5e8+6mboHK;w(GX>P3lrIj?b|Q_;%(LFab2bWABKy3r<9fA@f0H!zO?IxUCOCDW^Vxjy zisM3#;rA~Gho+4-&)WoczrjmmvCkd8(k0hKF+o~d=~KOnwVTdW{796PYd0rN6b}9r zlD&V|Z6m&YKr{X+Qs0Qj@`c5~iL~94iX6SVwWQ?FF_qiAYnH_>P09O%`-76(|} zjiu&S@BCo@Jo+2v0qhr8jXxC#lII%#*^>PTScz1n)d@LMG8Jb2*Mo6Nx}jFj1pAOTrxF+bbUI zlE#<;(qBpza7Q%Yw(VbQId^xw)oYzhpt@aKCCO7VG<@>&;V%U5j$XJ)gr7atE~;9B z3|8A~q$q{MBV_JQTSi3oCBL>g?>?IRQku~cNI;I8K8s1KQOas%nU55+|K!O0lJuNX zil-D30$aHf-D`%`dN=qwzd;&-knDPQs7YW-Syj^c5X(_XyZx(-nBhe(!bM0XsVa* zpvT0l6JK^&rx3qCJF&Dw7}D0xSiFwXD10x}X(E1@_giT`$lySmu0yP5et9cVWSUlf zx~Y!8AW{?LfYZif4uyq#5^MRWL}@<#e4|v5vN;UT&J| zeS0;E+~xI~tC5h8iKDgT3iV%S+ifF*e5C?WC=q zAOVMUDI#{yrA9g+mPCZRzg0JB6loM!O3S=WAr&f&+#KYhy^7f zo6y4cORhNE#ZR}_TmENRgYP6KUo%UTsG4ot+RcueN@>e@LM3`X1Q=)+Qkz~}UrU$3 zVTbtGFv~MV=?6aF8)ON;)=Gb}Pew$TU~-3KrM|qC{nBj9Z6nFYfo9md;4&~8V_bAV zA1q18IgF&`D3TZONY+)eQmGvz^6}4j^7QlYw2v`&Mz6)VZ74SDLu^IoIGbmi^CCJs zJ~HzR`lmL@sIe_#V(G=e3h&#j11sZsvRT=0aFs6&C8N0_LT3pC$)QB7yq_^09sA5q ziOylviNC5`?4Z*2SsR(6RF8_l!?FPA`!pdDgM zGD-`G3%(v08(PbbuXElou$}v#uensh8D4!EJX#MpHyow2d#Zy)Qp(|aHvP^io-5Ub z*}rk18C*Fn2Za*UDwSbKiOwJ1qfEoAg@1IliwUz5lG1nf`Ps1D6CB7}_TiO1;(MbD z)Qa6QcCXID(`_1tC7%ArxkwPVG8_$9T--UPv+#FXE^R47m+b+Rp-P!_>`J|s?;~AU~2^5u#cj3m*{?7QvL|6nwZE zy+yKM*4J*6Arz-lq&kY;HZ+p<7{%Rdyc+eGloel+^;SrRphW)AsUmzli-bhNCbu{# zHsjI6f%^s)ZXO%L$T(#Bfv*VCh`KHM{f+SLa3+JKdQX#W7L$~0BZLM;K?ZgVD%Rg+ zCj%cc2Cyrcjk;d?24t?zMP?+#ovYRzK?>G@6UULsaIoY$mfd8l0w2PQj#b;Yfbw** z=7M#?YHvFp4ExGzhc@I&UYwkh-oT*4xxG^+lPeqxBN1gR@<2{B#(9(m-R4p>lC6vX z{_jL@Ccwrlvc=R{uv4|Mvw?$XKF1ni)3Ut=b*hE(_y#t|^T+-f{@0U_&t0!Asm%87 z`xDo9B$`8ikB*u?4k@kh91@_5G^7V-U`%3@h9Nx$q3jB7P5S>S=c~-+P5aG%TC8x~ zr`z)7*<#U)fZ>5N?=Gc+xo4NqWd{GTPY#Kg>B*OX&KxgfA3{4Ms`h{ee0?LYQD=PT zcFaQL+^!<^GK~O#u*4x+SoqrlhIm?zkR$h~#TBYJT>XWeALbqTGrtF-^=v!5AW?&b z5(stcZ1pgW24!MhG5pr2KZ~5PKYo5i!tmGcny=2rOmt7 z{BvbkQ^o3=NZ@j1oF^dL77M0 z*}OWRhx8FPQmVTUu4)dB=lEYQ0s>_oo(ASND2fOyDqCP7Fv~(GmdpqJ4^w;~vliV; z8T!H9iAw%rU-NRt4&%AqanN2C;EfuBpqB16#ZOKrnht-7<@e@M!+f+_njRgr0WBBF zrNxor@bb7F%>lpD(Mxet*SE)ikLRmm`pS+af%^Jzu1GhPwNXa3l<2k}i1_5SV z;0o7mZyYhv%*s%!FyV2_cMXK#njzJnYtcRV+!PO^t8FeabI}nF^=>b(ud^k1eSf=v z_JX;x^jhsJj;?88C_>XB9*2YL)JB_57-7_nvzM?t6r*-nAV1o>NbQ+Q_nAiCEpuYW zzic5}x5Lw^ZuM`CLBN&d$gtPhhms?mN-hm?wV@jRsreMlZ+_vHTM+ioV{vg1TC$9p zEC#~Qp$AQBgI@_6%zo1}pZ$tT?-}y8D}Qd1`&=3NrGG=o@)T-Be)WUOdcHS%#&q-y z&>{d-Y^Oz8SXds3N=xo&>Y38@F(tl-hKH~H{Fv>R_$fnA4+Y*bI>rAjF;zZA`MDJh z1-}dq24~JlX)KH-o!2^Pzm2V+usu>7k$d@z3jWOA2$iJ7P}o@_{P%~1cWXk()5^n_ zJLvHzg}!#Hk55Ly#ZEkBFPS_Jq{6=R9Xb^(;%P@Ds)u zLnlyEJ)A9R#Sr=^s>EZz0+9%?l)Hsv`4*ex|$ z?N4NHB@$=RRAVft0FMDzVg-sTuOI{om0@1zX)umt6KI zK(xPB*?St)zT`=geYO`KbYl6^@lE6^HXL1! z-zBOQ3%KDTcTR<_F z^G7K>WK-6obgOG9A*92eX;FgJpxrK(;( z6Z>1-H*sGnY|m^AJjgf4Z z-fZNc51bl(t*|d|PX&}*x2vqU^0 z&QtkW5tZ_TG7P9LN7dkq>;9e&=3!WU~IpP)62a0MQC<=^e5JRAISfN z=7ZTQLQ>3QOER{pCHZ}&^}Ey%_$SnWNQ4*E%N0d8f_j0Ghl%78zIo}{w~UIf|3wnE zveAf+LNdzpkG(oug(;~B{h()34EYlu!uooSZqZvg_*{NhJxA&K49p5hGPKx zng)K2^5fynMqnl7bKU`3gZ8vWzy%8>5NTj)C}yFet5Gj&{e&E0%q4W?Z@4E#jfYr^ zq$$`#T~p(kJSHNiKobG$EF=^ZN|Y(}4Axt4Igm0V1ezN1#6onMtT&&bHLh-M)MT+- z^Wd_&>*T6|ddn7LD~a@`BW<-8O6}_?*WZGOX|5~j4~Z8w#;FIHn)(IdGfi7Et@=OR z)0axM8}?eAw$W)u#8Et>1&59$NKHAy_crEimDjB-=M9&A&f-ByGl^l zVk;1>9{LsC%E9kLMQX;J%@uXSr7w3x!)v2W3We6Mw>XgPHH=#{ZX)c65}Jy$|L9s$ zPHY$imOA|uv!SQKf5`$QWd~9#l|#Z6XE=THkQu9k1X3Bbf1DVj6pGPMo4iIwjTEPa z#u{Vk9gUqxP6kd1%yl3OwK70JiUdkMbCC?ATv}JCSA?4++?}l+^UfL*N9>*gT?>Vu z68fB)4y4(Mq{xb0aT%nfRDN2!z61v9=!obze{=+9R&5mKxf$51CNl)7LQ zq$BazI$2)}L`d#vIyKIWJ?wy@$ACfR_VE{mBwhkq6g+%h5M9%jIr+i~699WMWQv=2 zV96Z~>&SUOd`ZIlD40N|paUa)oza4FOZ$*;vI+AnC z%YQFCy$#}3P`!aBl2YN-XB_LT3?(=-FDfEmkpj;K3+aeVp%4lGPNsv_l(jwcC_W?= z@(w*%1q!PZNkdFO(w9S-hz4j{=W-WK4yN+sW-vKcR*$Fuq;G$jH7nHCrOW=@JrZZB z+3o*&1eoobFG4c#E#z389G-A**OMaS-?X%2>0|ogZsu2jJv|;KzCg%Xx9J-|903;vK$p^%0f~5!QRXDAAk*o#6Qzmi?$wZQ@?Ph}bZyNG~ z!bvH#F#9Oll#L{f#m8vy==1u@7isA^!IbLGJ@99L40ULtE!U8&(B|oId;y@ zhZa9>*F#C6?U7cAqDWSkP|;}j3iE%EoT-@}nyXBzZ*SN5?ui~~<0g8FUkkJ-HA4|e zg^MfkjMmK@|Gkm2@xGK7{b(Uf_*9Z(D6ZQBYY3CZgIJC5K>xq6@@WuWqh!A*DKmQ^ zKG|VB7OQHgq{%_!l?J8cK{q76u-B@X(%!fhpfj8yAI zeXztd5W1H)Dma)q1J}-6I|dW+NFhPM^8AQ97fQK-O$XDsP6-%lB(5`ZM=h%*%kRY&AUebn?`M2H3k;2h;|qs)t-m#sF}6YWLh zsk`Olre|+eom@K!Q+M)12#K-v9OCg&7HH|&LhG%m<5D&TU-fycPBgA1tfzFechx{l z9%n1OfkbP6nCB3iBqD&`1-zrYa)7?&>=CM?z=%PAxfbJ>ub)e1a~VW^BZ;`I8@+Fj z9u5`doE~#nN}A@Lq8DiAO^Q;oq6~}reXJ%V%d*#FOTIoC;5$F=Y1xj4q3U?GWEFfQ z8sOsM1mvW?5^xMgcZ^yP2)Nqm;XjK$_bZAyO#L zs)KNVPGkmJ+Jm{W&x*^qA0|fteFk*yuB@*cfXGV~P`IO)7!&dRY;!fA9%JrN!3wM2 z5(BwPjfTHbxE;?A2-+CuwbV0Oi|Lmp(2YPD+hQYaSe_-s`V}?B9Q?-Dl!bxQ@(2gi zrd~*jYI=O5CZa6tZmkM7%xu^XTO*Js^c>SCk7}eN_vdK1IOqH|<{Ex_`f|Wz0v*>> z(qA33*l`#%LNETeX8u^*xQTpqXi@+z`=CkT;`+crgm}bR(aMDS>+)<>U?3AdW8MFm z=?i<7M{%no36g2SK~Jm3Bm>1>dMhl&Ct@pRhy;{4wtH(H{m+Lo_08WS@E4a+pUz zBgfT$gW&sS{@H25789@>0DFD2#EIf&VQ@xVn=1?N zsmb}$xXoYW@hTg8^wCFN(_yh5Hb`d4E?b>TdyWllz4tMA6Dba9R=O447LQ{Qelz?+ z6orTz2x^L3T3Vi#i>y%03dld<#*s^ZnzRK{u1Esf8;hM@kMvc(`~85b^Q$rhWnDGf zC*++C`B`2(2ZHHU40f$o+QfR>^0tZ2)odarFh=0>i+dFwVh*a|i%PF>57dQ-VdGpp z42Yk3Oe`w+%Ft^$dh_C8&ER&1_|Lst^wYXT)*HUpEX(la8kJHU z+*T46Ei+eJR@Pc-mQG!ijd#hqT>9g&3YN`2zn!BJp<>uTi?+k>?y^!~1m-2*GSndK zoJ6onLiy}?EDks_MPodv$7*FFR;9k;@O*80Z&!8Xs@FYgLmX9_eAfr2Typ}0QKBmKm0iMnvyWY{1jA4Uwtll^ErcBWVY>i>juj+d)50eN=Qe^$=FN1c* z-(X(-8wv`yn<+C@7UcC{4k?}#PZ}}(9d;(Em_)&RQ&OFr{C#Dus~FKw3xV>w5@>Jx z_?|>wt`|{Pj{+2_Y3apjRVx{BSS)n4Cr;&IuCTvZ|Ln1GocOh>s=udwc*16N%j9D} znl+w?>~9_l0q?g)rmFL^OB(I5pdEc!Q)_0QvLR82(Ih7&?ljGnQC*g4)LJTD-KFRO zk)U4rHC1PNN#T|epPK9CMV3cO)Nh=75-f8syTs10Xv)nCBToQIIe6e3A!Xx6*c?-o z$WxXrD|Z=!Z{C)Jh1W6!oiQw$<=)B_J|LFOU=C^4B8iva_MJ!$R*J{6^nv>Mn!KyW zV4%#k4o9fkeI$m@b|r$?m>+3IomB;4eiR(m+#7WW+)%C0T{=4kKcUm=NY5Pp>OT5B zLBhq}4~{4OWN11%*u~A=fv2l66PA96;9zick%0osSAm29-}e~K_V+S`c`mpj6@wK3 zCj;}%n0;%wQs{$!c}+?#v10*e3U9t-Nua>P_>o)nRu;m!q~B_KW73ojRho8Y-`S-I zcA!U=%LN?&c2dNnQtqg1Jl}aih>k>v-!u+e>GVEhpu2~Ye^*buRl&0&3vsU3PK3tj zxiJtG6MI|caeaz}W-y+fr}&1>;It!q>{wWVXL&D8!Q$@Yz89C6Qd&lnc@n^KZlN4& zZC@Hmppod2)D^p!eH`Aiuw>esA)rHxz4Fa@r>==}--8Y6ihZR_f_Xl)f=8ex6dr90 z2X~Yf^c8}JDs?=$zGD$5P)t6Q{Jwvn8TVXA zi6sX?%K3xp!1v~q-4$z5h?tf3;;CMir~fMD7t6b~*jny(u3ru%~B5FvV-`)hW(@wmVx9wuvxxG;H*lq za8!mCwP4f1B9n^bJ$tOeB0n!7e8jWO?@IxFIG?CP6IV|jP5rF8y~d<|ussM)Hv*xd z(d%mmo1N3}3=TyXlcltwSg0^SIV*$BL7JJv$P8jtqfMYyNgiTNhVr$ifmnwr zrM1?)FZ$XX*YJkFOFW}2MAIjSm55^ly)??z#I|e42SLU3j)c`yOIE-17bN;vyoNPj}hEB2H#a ze3dyV(P0!~RchYEycz4dCO9#4=M2+$`}#l6VSPqtodlP5DJaQ)AhyHx9-@141V}@* zaRz9FqM4LW$V;#MiRNG3JW5+%|6Rw1VXVjGJP`h*YBIfkb5r+OZ3_%MZq-wT@hLK; z7FyV0EJ&LrcXrIl9tDRd2dIE&1&adOk?;J>K=%8FXuiwGtRFaAEC#p%WBR=@ zK;$R3R0uyNYWQvpFaRGig2E7%fdXn}k^{c3)M=BtTO?dSo}d!}HU4YIE2DXg-b(5! zN&xB8i_}WvFaP%Ym&a|*8cc?;-&F1ft~a+Im8jM_*r2G6PL~RZXieMhr%pWfv~{Fnvbo-Giqxl_ufw*ya7c}NJr;Z&q(!Y4cJ zoOM=L%$r}{4#>rOT;`^ZvwQmd&2G6^N-F81Q|<7&XHJU0Us&RbF&~W_d$wOIXE&Kx zV=&!oYPurVAbOdWGq&veOaa8vSpsa79*s_j>qvfel>R zc+RPpn%dL!_I#t2ihu)z<#52tdD@fzQHtQ>PQT21r(cQIo_tGASif|EHJlI7cKQ=} zq0w#VmgsS|M8NI67mqCYSfza@NVmmiQXl`|W0q+MvH}tuYMF)<3^tsCxa@oYV_4HA zAzS;WuVM+O+s0B)nzXr`vWc2C254(_M8#v-+~Xrt&#b%qWXTdv>CW?)NyCdjZOh`s z2Jo_lIW{-)3B8;nx;!sE`n~OqSR3kSw_8XlaOH*q;~+Q<8jEZ3bI%_ZZ!$6Ou6C;~ zF1GXU3+UR#<4$DGSFr-N+}B$lDd~@_LTeR_hMJC>psqGjj+*SZmAJ~)M>lgP7*Jz-)JDox#Rt3nO3vG@m!-hz{O@Om} z9~Sb!I2W35O&*Ig!|sJLcpdezc+95nFF5KfwsK6szEJ7J+L`QLh9 zj%9G`cSS#Au@ypqb)5HX7b-MG!qwz*IIaFBiotR`psaNv5zr97pcOhc%A|pqzLOj* zVU&LHdx1G5d$0)MyFiOVQpy%XLy9_3>h!~VFSNudh&TyL-QhF`b%J)w_0j91T$RSl+DeyKv=F$NBxC2uxQm?KeL%+l?k~CaBN}3PziahPFKR#-x=A z2@%2VExX_Hw{A!tgj~HXrsfK~t87Ajjw9~BhZOoDjg)r7r0Ko;81s7~yN2M~Pqlf* zA7f`l;J^^m$1R05*TX!_vrB2xb{`L|TY7#3q77M5lQU5fWecimFnQH(gZTgO*kg&HrKXN zI>yNOhWOk{K@)CdO^3A~e;w9?v87MebnW(56w+#7Ub${>Uhju0F?a9f0&YiUPs&uo zgafpxl$c+lMJI|GG-dw4uX(+~on5xL%PYB`Ma>@kX@0YSWe;dFBZ7aOG3h1Ro0O25 z=1>SG#lx7=eVLB%ByeOgpi2TtylZmKHWmy`Q&{UZUd&Mq?ILf*Dn(<9gsElvTwxEM zvwZ26UUwg!Smm27tQK&-1t@0dYsf@0|jhBe8fp zzWK#=Z%l4GZK@>J7`2p7{#E!#x5nqIoh9)Su{U4Od^Vcz0au(|%lo9^P4ub}JtMCh z;db#>2FI44kT(xD<`mlc{N{E9i@{l^6H)<(BW{-*sNPgsyCYfTbTNX+v^t3ef(QT1 z?BXuk_xWz7U`uR(}tS1lmc_-SypI_n7dxM^@i;r5Zx=v37B?+LzDoMu9Gm(kHw#u z-R<_W1)Hkv7+$E=a=GxNf2H|8wjnL^H%cL+IB`w?sf41O}!eG?e`8cXR)%%v&l7QNNsl-G)A$+|MRd6rvG)v1$ zW{aq>^wI#8B+io~RFODKwkq9AC{Gs;8B5-`glkM4T|aBqUtGtZ4u|3jc!G*I@i4mGT6@pco7L)AS}=II zu671%&IaW)k9U!J0?#&11@OC!5}_6^_BM-##-OYpjh?IFg1q*#1z%M*gE!!vu85Kp zR>5$aWa)C)%Qbo#PHxtUrMeY*&6V%JTo=pTE)bxonsuq(2rvNO5Kd_tKRH3_K#)nN&g_J^J>iD$Q5Qc&Kv{S4KJT zF2TPDC1t#|!759!n?cmJ%@&GHwz!M-0K&vd@PS!0A+p8ctlKk4@ZlBWSui#dhKiiT zZ8FAnGc{)?&d=#EMsqmOa4g;%4!OU_2dNX>~rvP_B0jSWcH#KKnK%q(I1DPt19w z!%U6xaa!j!_w)%w{PkFvSWUkXQ-X1UQFq>bs4(hO6dM=__w=T|jy%lW24Pvm$aAAZ zv)z<40}m%3?0?8;n<>uldNDms?^6HW2Yb1;jWXx~_ZjV>bNt{mc6~Ed3XzG=1@#)$ zbNce9_fxNgtDWSg|JoOPsErS863T3>gfwET@s6!Bc=94f^$6=^h4|%=aDvaQ$iz2~ zAI$F8+heQWYO{SV+_Jr1Bu6SU2I|6%{7F_?7`|Zp#s-{50+1c+<~ zTUpg*Y|wdeGx_&lruiu>LO_t3{`+45F2PoN-j8AH@$HXLgCq|%rnm@fuCW}?Ph-69 z#@o9LJ-EnP^c7PrQX)6cb^=aG914WUu`T*PnWG0JaVo)R+iO?n5IHeN8IE{#5o) zw!@^6<>!lRpEOdTM6U1tCot`oo+CTh{Y0!%`CkcGhZt$*$B6>Z5!SeU<#!LG+#26c zmy)R5tjEkYKGs=RXMOr5(ot5GT0Ok89kRYAPVh2nHu?XwcUDnRMe!bn0cDWxh9LzR zS{mt=0SO0?knWU{t^uSQN$HZ3?(R^A5a~u5Nol-?d)K{d-S_)=-}b|-nKOI;&yF+S z?;9IdqRX9!fCFy&XUtm)J1h|9_c!PMcOK-4nrw-lSDv0#_EUrhq4e~ArYCYeRU(G2 zQMmKOBqClntw=XHvW<(@M)I@{-%HMR?GcN}e@fDb2ZGSeb%xKgdp(IPjt(pG)X>2} z)beTnDR@-t3D|%sYPI7^^4*((lx6+^DYvK(w6f-qB&VK=iZq7IM2<<8Fs;D}*rj`|i=tq)#tG(<<69+D&Nji57Gus%sSO z1W4X-z?)^4<`!Pdj)PxL*MwS0^OoS=LTTfDpMnA-`RaQ3^+>E%hIo zGoz~G0hsW#<)`Vc#D5z5%C2V-EeoUbJb))+ohcOEKRj zeHd+V|7fA;2Q#H7BO7M2k%+>{Yv*xV&FrG{4a;*sH>G}i%b$-gi3YQFD_dz-;8{O7 zoZ`i$3t58dze);nmR0wcKLW%hRHz|3n9+Uotn4vwvM%>`EMt&ig;s|QZi3nMrod_F zC%sXXWk5*iSca^GzY_R@!!TOB$Z?P$2JF$ee{5*;-e5K>7%MVahhlYf3v86ur0z^O zo?EUTR#ZvmbA7Er0UEhT-VF6!j3oV2oOxY<_<2@hNZ-T8cI3VW4Xqkfj5ZWYOHpwV zMA7D;=M}YnD4vo0gzunNJ=<-K89-*TlpjAq^8zwD%*FfA5hO!@tvV(g1M_ z%1edQ+^BE3C)63Y@AmmeC|O9kL%4Sp=Ih0a^a%rjNBeX1!I46!s2EYJ*!9b;X;1xM z$chtG5wl^ozHPxq>KCB)3e36FokYBY=bZ6Kanx+)n1@}B8c_u(Z!OnF-Iz>j&IWK~ zF7g=_XawoTtb>J8lXJK_Qi#Hy4GxF%K^2GJCv{Lzd2?5-+iPSZAs zqC=y4_9A=w*|mcuFmta9X)BBVcRj8NG@@dzG4T=U+aKYCRFS{dZ;xE}-q=l*&E!}4 z<=)AeXuA3|dNW>FNrE@R%jAh}d?}hwVw@}&u8moJwPKEz@%O$bXQxmXyzF7I>ClP{ z7z891S-JWH&~tTd<*b;PgTh}tYogqPb;ph^{t7$dpXfQWa>F9`hLU4>&5CGj5qni% z$bQ_G!^h>mHtE)@$4f6xM;0S2Zi{l~7Ff$VGo*6be~UyW2Vi?)FpKB>HekJD%M#^8 zMD})0g<%$-GBmhO@QP&HNux0`qH7Get-oKR(x?ggyG&+p9d-Hj-yq@`(e#aNFLp~S zudU(T>X&&DTXajr(RJcWublIiPqCAhgfYE#Q<~#0oJMP03&mI_dhaD$R zqnE*9KGe%$zwhal9xSNcZWih5zr86Wa3Ww2|K4RcT7s(a=bg&QD6~Ab?m9`si)~56 zZKyXOPZK86rht5^Z~$9TH2+#6!?$F8vJJjD^e%l%BSEI`c|8dRahfw5o&V+F$WTgr zQ1GZ6WO#}v7@r|l??I_x$3C|7%9F{Cu+3fSV~q1+Tb35Lz(GN?bfyCp=vepO@Du+F zeVln@s^1{RAk<*W{T5aHFc!Ph%`EuP8qT6q{A zA@aH2O-6XeBAHx;__Hk$H!l<6Vrv-Oo)tfep~*ck{twgL$_&(CzZ%zcCjqXYpwZ82 zHpY^_*7!y;VX9NTI2JSWZxb_9@6M2Sue@w?%A#DIFx<)%)O*srvI!#$Ga z^{XntCE>}kOWiP`Vw6Q!?__sPFmBt-$Z5E*wCtjTLu%vRRbP z-s?NRw81kvQHbQtX>$&Vy9wvw=$G?7-Z;1UlGtR~_HJ0&@5#FNyCb^6p-j1ZI3){` zFZ6yw)JGdde`!{+od2#zK5fDsoB!Io_I2vO<#!3bnD|}z8{?t_>8>hj&rGBk@>>`GEKTcFn4EQF z+7GgHmJH5&&h_GUxkQfAq&GOBFNA=LF1;miL<=72P#)F}wu(#cD>8|kim88T$#A&9 z5bfLN+Ln&9@Md4F)XLhM$JwAcym#j^SL1?TqF{xTk4Kj`l%BcpyohbTM4$il^>vy& zJ`&14d}S!EaOPJ`QoAeHl*zK#rTv696=&h=dFF{w3#AYF83*h^_eQ@}I9g9ps3^7S z3T(7GBd)W@9GLL%4n`uqzP(P&!I0w%Csg-;h=Tth>A}mCAQ;3JFGV-$DG-V=ni#nl zg&5DNx9v6L@SLTkM!~`;25`Hj$*J{on9A|2yx zLYrJ)CNA&H07tDI#zxgRx#g;#Cod)*;i{~NRiPlksT30@x2#aH#FE^ zhI65!X4NFv=~It{-m~wBP>2w%vVm^>X9LsSG%gxuvpaHhJC$H-v_t^`mjVJwt3%ZK zL3J$d)g zzRiZR4`F}b#NJHuvbQxBJ5CmCD`a}aNTkYjeb-UyLtV>U-LLa0I@T7)Q}#i`8zlbi zSyg@8S|b0KEOWlU49?eW&{aVVy|gd<5?#piu_Y?#QoxZle)*~I!*)tJQwHyWct!ei zWMaGoDlah?munVf9ekASRcP4Chspd%e5EG;wbzO#Sgz&X#TR^-Ly+*%_&N zUHJ14PHjSILU2# z0jH%pSVTF{;T{cUc9T#h?rUwK8Y1lFpk2a;|r;I>ODheh1_GHAfn~JDf6I&*n z5lhuY2Ev5YtSI7~wY*%w0|>%|yySgk0f|@SAXA^)we-BJL5|S(*|9)}jgHSE|N6?! zLIMd0&$`?LzX;qj_9IfTy4%UiE>g+3-Y=C}-l|Y5cj6`1b@9;pS5A*(dV%M}|LT=m z3mwvQE4{2%QuJfKVwrf%{DIa+$?80txobqfaj0sk40Y+3?`Xt5N0Nqz58V#_v+6M_ z6%%prubh;zT5?{Qde2xOpIiyGW1U!O)(ijEoIz|3uNg(=Ha^i-Isd|$&BF;K6*o2F zL@G`g`z1Ee+nraGG)6J7^tRjENUHigK{)qv?9;>|0qG7gX8}`surcNE+MUu~>0j1| zAhwuxC{#}Q>qKh0d)`R>aIcIi{K4XsmGZ2fsX^Z@FtyD$sj7d1kVk%C@$dSu#;c%} zUP9j=>!OrO21Ax`jh-kcwt!l?9qa9Prk_DV55{I;749)-_r1bcoj%O``6_SeneqvE zXe1P+wvw$Z^UNVZR+9w-8cttt1+zk!pPcl3kwsT@YW7I2PPv=&1iFVU;$nk4cLm)` z#BE52GqXR#gEX^WvyjIx>1upx5LtqnK+b3)jc*(kCGei%-(q;x^J>D?;s{v7N;@6` z#TjyrN;ehCoUqA4v$MS$b7;WQfTO>>2L6amBQm{~+@$s-6@$$%=G_H;5I!X6s-mpg z);%>M$c1I5ttvBrdY9GPf%(^gGMvdSf2CGk^5%!z+Lai>tS7KnXB?zw?2`m-=1y_F z4iTS7*+M_6guh2Jzeo_brH4X3X_u*4^=T1hvF-Bw+va=}g~g`~%%ZEDjAPF?jC?f1 zur%)*=93;aI5)|_ejt^& zG^-=WaruvA3h#-(Lj(}k)x^fLJmEhJK~b}e9Vo>Mo6G$b&_VJrknI$n__GC%&p$tV z&EIP8X2r~AZfP3Lvhc!=;ajbb`3TFCWqIXT$C7{EVQ#``D^2V`X!1wBSRPwY27El< zGIm2N3j87`Dz@bX(v3IMl(!sBb+Rmmibd#xc5q>a!|Gs@*#_w-bn(z#I^tx$_vmUM z`P=O?GT@Iuwqtno36K3?q%c;e#*jD0WmO{de(8M{+OY>2YszT*ec&_~ZY|?I5hMX) z4MD3(c_>qnm_WFyPClB*IQgl5==KR{sfdNIotPWkoq@Me^7oTivNBVSafXi0;B{_= zbD6W$e9onF#WCIisCM<#1G!UMd#l1cA6SSIqcBzgQXUcW?EXK(3jXK;Jhogg45OQQ^UCDK?{nwChmP2gO)4l!8;2Q1s zGJlXgomLZO&#?MCVU;jEw#0`Mx2BWDwPS*RcJU5BO@+%lZ`k~Mv18nxA6b<2%x`&~dmDgP0i9Et zxbF>c@gZShRN~6VwAbn{0B{rFOq@^Oi_y@e%6_fRQ8>5c$XBC{NDP$V2L0D66>&55 zgEsQ$pR;SOne$GmU;K_+Wd2G!U)jTb>x9H*Ebb{mi)yY%pB;d|V9?=^bFZ%ThFM^- zoI{{c>_-5``hgw_Iccw2|>=mqB9ysBLdVf+LL`k3sDIk4#+X*pxCR??P_6A zqIH?s8jOQga_@tNtyu51YnG3+u8m_V1H@4 z6k=+rHOFM}{C0-sOi{r0QQ%u9;S0^YeB~|H4vw|Qc5e^Na6*ryaPT6hoNuz80?2Qm zr$;AU-O6Rlj<|J~t4#+1+Q%+SVu4)~zH#LOA6`{CiUI1=a2e%bud74_fn2V~m}T^3lW z&o5YvTvVielVw@fn8Jp<_ZX$+X4{M*w&h> zJVYFryiRx~v`2C>HiepYzs@56$X&zeVF+4&kCM-#o)N$6(kWKW|c?)xSjFCNm_=EVUL;#qM-h(Y_u zYL(^>WSj=4XMA)Yx=?Q})oebRQe!^af=EEZr}ZlJh0ztAy;_^4VUM4wR6F)%BQ)497)nbQ}mDM#iknr0ywtDtsZ{{%3bUdT>bz`I;}!w5}{{8jqfU9 ziNFe}lxR__v}^mTlK!h%P!d9sWeDElYK_%X9ga@B2Kcs4&WO1TeU28JPp0~br*YT7 zxR#t~#e5)MpzC>wTiP*kaO83p&@;mLdB+|yCH!frY>CFGC4$MhpZm3Bf@tvHX+I1J zMg;)z|Ku_H{A^e1Cjn2I&=-PnY-cGafq4s%j8R%F*V}3X8i{W{Pk#h0#NUDAmjTo+ znO^4UmHodH1!+t6{*PLxU{(28M<^t2tcTC901v(E3FeIpfD)r@{%mqK{Wcax=O+YA ztvul25G*n$L&+)tQ8Xj;EdflUBdOD6+T*>wXstdxIkz|aRcrA~61<*h7P2HPbj}Qn zMu5;Jfu8cF4TFqnNO^~D?G1`7jZ_>FdW%7gnG&EfL>cT0Cj#!0*kClL*p5jcwggxN z5#D22!3CA?J8>15e6X>x5s@qoP<>>e?l4IFPEkHU!RfJsq@MSmA~6kPB1t)V`jCKj z6@O6x5Lui;j(`vqKoj!51SW3MKkg^q)EU&gOd;tr&NiaCqj~HvskFz z%<6G85-2vye;oonY zv)sYZMgAzB^H3{Pa{WXDm z!EQDki^es8Nw6F9aVv=lY3+E1{uiSH@ck|@cdH0>BNU|?7; z+n5MwR`6{UB!O>N-Fo=zy}w)0$T6bliokH=M|slzwl>43>4kFgt#YAtb7(gD66bk`e(r=Y7CK&@C?1FsS4!|lrSz+C592Fq{*eqGov6q)@1Wtsz zGj)sH@{;*L(|X#eT!1m>(KhC4`_6%!F-IL}0`UfT1_6=-xW@m#Twv7xzjyyniedGi YyDqUP34`MC<6j6U$*Ia#N*e|J2fdZVBme*a literal 24393 zcmZ_018}85(?6PIW82Qgw#|(FJ)Q zr{^~_J>4f#QC<=O78e!-1O(xSl$a6-2xuPg;|L7~tRb6bVFvyKbykuT0jZhBKLHkC z9HcazK|tWp{(V3}GPALP1WT4G8ZH`gvOLE2whV?Q_C}@*9<~lZY7h`U4<6v7t*MJ4 zv4^dVoimRIKk0ucc!1CUsu@X%|3l(p%}=T!r${Vn?_^5M&cMdNL@EGFOiaw@WMamn zBqs6S=D-p^sfCM+0}mskySqDsJ1c{|lQ|WXmiBhU|M(gj*}J;%lal@u^uPc9r=KpCX8%W$o%4TR3;2SJ|Gr^l zW?*9c-@JiM`To`Nh}zpaIGH*-1NjTE^ZkeN|5f*Y}?^-h50zpelODgM8_6r3zgfp7hv*3AEH{eRc}m!6OD-;4iWiufNb|ECtHW&v0} z#{WHJ0Y9_Bo^i{PVxtpR&9G2FkbG&VLikxH*!XC!W7VzSfio5f~{ijJ<*>EX!EH8?ax zAmH;n7>RbaTz&WOK)~m5JzF43fRDeZQXm-;Py*DX8TBFknw^o+{*2&Pr^8#c8xQmSrJ1ZpGbs!y|OL`!?CvOjdQAv1v2G9QRRrF#1KKmwCy#hqRp`&Cw3fcpNx8)y*jsh z1L1I(bQX){i74KFr;EQG4~9LxycqPmH@CL7B9bG~N%A?tiaSmaa#B?JJa0{egoILP zRHt)z&2{D=iG`3&gge!$wJb)*^GV#uqkx=I30AYW*1LUqmsiwkba3$Tv+p7kuO6>< z1RZvTszzK-=7&a$Bk@V{$$&QC0zL^F9Uc;Lf3g78EdSTe%;4+xXadmH?>jg(H3dKQ z6D=fw2He7|0v0FoP}JIbtNo$Qcw~}nJpdP1#F>zg&_aupSc$U=4RpQIAm^ZzU_}FL zAOzjNQnj*F7vUgT#91ROSS9pH2dIu%(d2SuFdkANR%HUvhTMHXf=FyXYWnY$brs+Szc;AJ z{%X6+ey3YbyXjsiz5+W4sIahn%K7$H(I{IbBV=0;Xqx~Wz;`7;1pNuAPbSetuUqwGodn3uvh&S z+?N2(w$>3z_e;PQtbkqn3ac#wAVkQi)Hb*r4U8KlKf_XPO1<6X1OQ-)`fII}9ob3>9DM3R!v{-nWXWXRx=4;**wkL^)dsJK-`|#lfs^wL zhnAje2vVVH$NJ|im^ktpelsE*o2=YA4Hz41%HpW;4B8`PGM%PUNLPObJXZ|l+kn_8 zUfuT};K0651NH#|-rnAhQ5^X(1P@#eaI(@+j;E2W{{Z0fW-gRBL}kFxw|$YxT@wIB zF||e0WM26X2c|~{3}~EA!ZN6*#%7ttKafH<;wPz2}BrG_I(ccwJ3Y1&7a&gNdZ zm|Oi1V|d7jh={KC_eqaGcOATc*%=WMN8U&P>%V0(5^469ux2iz4FJWOafKNb5c^F* zN8zQT!|4r%FaGOG4ocXrl(aOevwtt)4^C#Jgh3B7U@hpw!vjWJkGO{i*9B|JG#uq? zZh{Ni2eRc<{&UyZ;8kK7`>M(;?7t}v5nN0jt*57FX=yo=%fC?HnGtPBsb*zGlg{Sc zv6wyeua8Qi@j0iGarh|aX^2Dubn674`(iPzEXY7$gao6iIgJ>nUFX85l+OnCsAfg{ zbTc75xmY5-P+i)J>o;o{3D95+pChX{CQFYd(vVU9E0ldA-|bbP)BN$nkHi)}QAi{gv>3p2l}WCtQ{?IvG`mvx<(y;IIO1 z`-TbHtQVEQj3yUocJjcX+ZCdupa9Nr&#iXXSUk>Vk0<#Yo}~_Vd;Hb&!$T8Cvk1^E zkpLG?7(~uP=*5zogAsExGp%NC1B1<0N5QXm)<}*C#1PwN`G4@g-Q(}CpfJPO^FCj{ z_3fKMk$#U~s%fxRiyUzB)2um;oG62Wf-y>Kl7AI?e>ai++ zWw#F>4kYp}jgr&s&H??1PFPh+rZG*`={G2BWJi8m0suH8|Ndyb~%rO@ZmpA$d$ z(Coi^9feY-Ahhn3b$jJ|5s@Jv$~U6+NHVNI&o|@9I$T!K#(nxh@G58 zk`)gQ3RFco#GlF$9D{_YZ#m&+QQeApiIYn&I3+nk16wBs+6`W^m?i~}QcaO? z`O#A3YpmGhZizM{-~j4NXB+eGS&;twd)Yv|Ssxs2O&LG(bDp5ZPhPFCRx6^`KkV}(NMh#v4~9ngHfC_J@kfM7l4_}KmH z2US)1>iNs{-oVSBp(qSHJG;|#Sq1Z8?fSbB)zsfx$<^GYx77T^ z;EfW*Jx9tRRv!U`tBsFI_bT*^l1QsYW?+3{|bD{WB@C=EQ)T1W4t9tq%8sDPegC zNXS&GpCVKqC-WumFLx_dn#!Y|ulJS~7F7LTMve%9hXz8}JTB$A8wc*yrfANvVs9dJ zWJz6eXqm>JpyvZ)bgfvP;U>l+)Ybnq&aPra^}NgQ<^_QX*Oh*kr}JGx)=%b8}@=XMT}d^0L~IN;GRnkO@!1lH?#lqHSq1I ztc>PQxdeanXYwA8yp0WAG4>$=8w+oSpsj=iq`YPrUGOODZKT>Y|0jTqw>j+$WwEjP z&(XV}?_lcZWLx}qN;CK7a^B#q2`FRDNTvVCQ27V!>>T@^_5Q3`AD+!{&~)(R(hX%u z4LF0i%gxb~GlIM$fW;(Ah5T_Cw)c+@j6Y=}-6*K`dUbjankrn{zR63rslmIIST;*w z&0e714}82m`TS?Ac?d7bs?)iQ=7J@_O-=xaCI)&hvf=HpI7dDN-VUpFqp;+L3#N8e zy3Cx7ALXZk*c@cN`{m{s#WZ#!ce#BOw}cE9^*E-Td6(o7rmaPOIUXKoN~nOr&c`$p z_fhRVTeBl_luGs5T;|a;@o!4!yK2og zh#ej@k{>KbYXR|^Glr9GQgJ@*B;dP(rZ*trY_M%8QW54de*1o7)S9QLF$Eg-uO2kP zr9~dT0s#cueQtWDdPnFD2(tQJUKk8y#QS@DKTD)Buzz?4tyXC@TF67f+x&<>P5UyQ z@Hc+rtaG<-u;y(8*oks6|Iw!AO;X~B2f`$C7ufYdc-!^U$@$QNO>+=W!w*5~JdfcX zGeb#@vzoO~1o^%&fb~kvqRO;e;0OGwUjV(%>UMxOG_QsXf?XdrIN=CjsmEEP%dbF) zfyf$j5;ErCGt;0lLqs&W5fQkz;HbX+k?5yaX>3$Dohi0RU{`vE#`An{O zG;phavC%RTx5#O~jnCx}uyV79re2Y3A)5+~*W!y|?m2n{@>e->;UIsCyCWTq$4bTq zIzd$!oXqK!=}a@;cb-?f(XeP

j3V*$mUR7c94nkJMrLh;svrT1m{ow!`TtDk7By z`MQ3Fs`-pY8?!%u&nTGQzYpxK=cXzH>&=fZ8a~Z@-oH6qXc^AXn;%O&9>o&X`yjm0 zM4j6;vO{NU6-%(;+g{ZK$elv1UbjzV&f_kaHad7@n6_B=q3@1?VXOa_z(`qnjRgH# z;l=qb0f&oGGc`AJ?i~e8&7?&0AC4NWR}g}qpk`!~vy+P=-v}`Jj(tZSZ4BtnR%xEA z5O8NCP3QfQ_oLPT5HGffgu))tleK66_0D#6T@Zv1ObLO@OKRqC?WzQQ2<4NOLITxt zXPro=pmgjI(|&b~vk9`WUj1duH-Kq_8f+0<4bmSy0`>@Al5aphJrvp?Qd(Aa*Pv1| zG?=0}9Pi5>P7rOlLvL_IGwwOzG4g0<(1iyOh+Pe&Z`>2s5jEqplWNC7k|{~v?l!f? ztW!JUwny6IOFMO4tq->Ggs#+7w99!CeglirrAMb^NNl6 zq_G`SE11=iXe%&*9Hk_i2B=${nNf%x0-5dt?%<#fW*?p{tI~N2BC%SZA>9rIXn1P? zm!g_|RT=C)@MW=P^y76q=72O<-bePiqCcdPKna$R&@$^>-FFS$YYaAo+vjgXHczgY zn+t4mrwSNa*g-+p4@ec$vNxp{fhfJcxl#Qb6d^`TQd#^c7{Ql`+L&i~GN*%r}t^oGWk zp|BW@frK}C7c`D$NkEAWV}|Cd+8HD^bfNshg6QZ^W+df|h0mvw;Z`VM zr9!|lr&eqN%S=zApmgS?g~MX7i5S2IOT1%nTFo-un(k6#vKTD112UQzrX^#&J8o#* zaGcqx;3vP6vpQW{4Nqj$|IL^xr=CpRhXSr$iyXOvOc+}Iup0GszQ!slrD?o1*w_dc zPbe6rj-?DzJHAAzV9ysVBPG)?ETBa_15uT1A9xY}bY81EZanzb1ZlgqHr68&9)%G# zoy+?B=SPMxeF%%G|J%n8k^KoqLFeQ)3ljgWCcKYkn;vE_lN8j=-0Se0n}bI}uCKq2 z55LGAyf01~%uFYy?JzXfhctYd-T*YFYGqD(jXz{8C(w&x7h>s{@x@hA&1T!td@l>D5)RM!hVce}(k* zAbgsIk zpU9tMdq8u*jCaVbn#Xo)o__x&+PhZoW4qf{LzZrPQBSokpw5*~CNC;A3(V2`X=vgv z=Z&Jgy!c4t?cI^&nTZShAQaEU5YIv#h!_wL}E-^%v`2~Yd;a{4+4M6jgW1VXr?W|lYV6nwMMXt*so};rfDXOY zh|C8aov&~6hF432S45=eXA}t0m<*d$I%~EqWc$B&a7zF)-<`_>78=ECphD|$n zOwG>YTHl8H1{C+>;6L*nKwoMVuw4|xB6R#-#%BpSaxN~s)l$6CN&SyfDlxXAwmffG z*iMD>?Ga}ND~7iIG+KtT{5Uz`Jjku>_Nu-m^0z-uRRep@P-Wp>@x z-)=HasyEfyozU9!)pIzTYud^3eE%K+o&3#rys3IO+`kWFT)}$ozD!Mc=GQNbEP)$h zirb~i6kT2s`HLPhBy4IGD9!HI-c0uVwGU1ssmZM8TBOih2sQV!^SwipG{LX&njC4K zv7r)0^$GAtUBf2~eGPkWk(y=)K7$hJY{g;n&o0fMwR*Olj&QKNdReAvH3q*6Jh?AT zxpE$U`Xpw<_pt%Y9%kR1@~_daVnhSM9=iAw!xn=h21k%~xW7Z%tlcb(0fO9KI@BNi zbF=w4&*8^+U*C5PeNy9)aAupFN>JtFO0sjpd$0g)?oWW#a9+C!K_)5+c%CrAhJ>z< zH=*DShlA1>$R{YqyHnZ~!5Zz3f)*^3yo4FbC~oxQoeVqG_nQKKUL$BT_@bxt1N{m6 zOpY40Lay!9wdx?B==OGQ^l4Sy{uYWfJ7v*$SKXc;D8l9Aqhj(tRy4=K+s;_4U-oWo zJD&mRyU`aN&MglWP1N=~aob(B@9*NH+4xSym;0uQ2N);BQ`N~s{5|)FFsNy{eB!tn z=_(YZW%{1C$&4{r%%f#G3HIDR_s{r^(PhY@tsh7C{-7n&ZdEFdODUB;MOQ4W9rmj% zUQeGm^qG2I@82cNpa$QfDDtoG&_jCi*vj98W38U_D;qJEQ+-$Apu^{woA+;1N=+vg zpB}ALhOLJ%80K3t;aHDIK-n?%Ta#%v;6W_k3@@G(KZEHvB)j!xEmOW z@gdFETsai7L;?j0quy|Lh#lPWvps`&N7A75WxCcfFRA z-mTGET+WccVi-(ls5lRv1Fu6S?VYn;aCuJY9;MyNZ5`isr4a08gXlUO<6~-U5)kxJh8Xu%sUijv!BEOFY=QCdHSfeh z&%lB|(vX`TrC76@bTmnxZ#F!xBfRUZJp5hnHhQs-$KSwn#L_r8bHBMkEC2a+xoNGm zjK}S+*Uk6z*O3QI;!o$PSBd~1;A=Mkm)_Smp*Ia}1~2VlI9tR+xqx$z+5`uk=hHe2 zhD;2baqD{gu|f-H=d{ink?wr<@k;*U>vR=y>4w>zN&`?TId+Fc6nvZ!j+B*38%!-R z{isRq?ymBiM!R$3A);rC+Iv7Tt!HQ*LGMFgsTIfNszUV|)SsGFho*YMe19ZB1OG|S z>8&Kce(yuVNODKdcKq%4?~>z00|lnk{o$5S+cMQkwW9^#!R^rfxYAH`icGfGeuzK3 z6M5W#A%2N;a-VanmEGr0?yFwzM7v8i%If*oJpY_T3uM@cEvTAa1DiCC#bLKT>?mN z{tA`h$$P2A-~&-eKcZD1=i1Bi?b1$0ii&5dpgh((q&b~@#vpQNN*D~3+{7o2F1PgK+EB8`PEI8;SEEH`L1Wf`yd zLstI zdk{^?f`FcF28L(8AY8pceXW+k__8YN#DXR;k5`LETTlJso46ocRAdI*`sH&6@0+=U zN`<_e&>O5u19rZ{Id(rTM_5{lQ86g{!J$3Mx*AC=Lw2Z%_F3AO2lV$37yr@rGfv4B zPkrJP&Y$ZU4K@Z~ajN}J;<_(jBBb`758M$-k-k?{%K1W|IUK1KloNtOVNg|nh1~~k zw2&Qw=vA}Yqf7(*vA(2ddzdOk1V#zsVqVoB(ZtPi=1WptE_>6$(Ya6v$QD2D-o>ka z-V)*YySG504U0IzD1?UiJG#FQ(A$gc-;v053%a2?3rX?#pinN>X63z&rUYJo3CEsl zP8AcnfX$OItaWh^B4+VT)Ou-9@ROQidc!V!#&!d?B^RjJZP8T(#8E@% zT^U&a8=T%W>??Su&ZySwmu>MVXb79{hQN!7w0vz-fA|@!SUeDA3tA7y0!)$*uUj#~ zBdA(^(UOr&hVNi`Nn@+YW^&+Yl=oJbri-suYR&MqsR!&w-M#g{LDgg(CS*82$>6<0 z&tV;e8yhGpMg!N8=dX+RYZHzvLwy#&@g=2w|iPIM=pTz?zC7?NO98P-I3pv z3V3`#cE4{ktl491%S;k%0_y1Sp^1O*s{()f5aGn3Sa+la$J4Kc7>q&VevbVbKIjeI3 z!7#hZ>8NaX-H#S_&2!yHPaPm$1PB~yagCT`s=1XcHjWKu@jUf3%&rqp1O5)H2Z{|` zffJcyYy4@ko-!Bl(Dl1zd%4gqSMiyiDeDK^YV}PaSlgyUUk^)Dfjy(c$<4r9-SqSx zzuVU39mBpzWn=RwUXump+tlsv_-Qw9G78WtWt;1rc>Xs-r5Y$jtaq_3^dV3nF%FdLsO z4~N6*i*5HusB&0njP}iRbZRsEe1$NQTF{&t;bx7hg4pL92ZEIu8Od<*>}P>1lfmvD z1yTj7LlxtddUrPVXTjJ@`n(ehU;mI`9xeJJ5QMoP)E|HO$_ZY9X1SRb+6o>Y_SYeK zW9iPC(|qzBbIX)f7iVX2;CTYg)4xO9(BYYV^HE z6RkkH23(JiO?R06ts%2AAa*lTw0(~?f)i2;@80u$^aX6wJu~`)_rlj0`*NR(40>gy75m7_DpfRb>rGgw zu=_BjUR5Xk^E6a=8lMBVq_~+&^`SJR>5FL&F38_YD~#3;jfdnO7YtT)L6U+vRPU*R zVg7mHY5Kn*hN*nG0l><0Ro`2-+YyG`+y`@xba`&G@7FpnJ}Wt8c$(~0hVgDMvjz&% z&*5Z^+9``&lO4OkCsI48tMQc{4=?vMoa@VOlJ zWpVDJp(3+DAjT&EE@;(+m^)dYJC4-2Fgrc0-g$%X5i(w)XC~j=jRs!7lDf(h?4E$D z{3kKwBf?NTf0ZmPqrNiP7Lz}eX1eq(YRhkYI)Tjhei6v!erE2_fHS=|RC`yRPhwmp z$VwPRZQ^ynCgRbwMwJ0YblUk992SX}M+Z0+=k?FtmAo;2& z^Axqx=;wJv!BI3uF!SD9ui3T5fo9MV9L%%m@xH_B99o-`(^fwY1b1f>mJ&Q%Tz2Qg zYj_@m&Nbk4WX8p5Y3Snenzy5$Ou&9s$ni6!C7pv?u{4A!$x94;t`w#l{v5!S7pQ<& z&spgoYX#8lpG{62BbF3Gra@KfqgbpBh9RpR-||hYRBnZnpE0_CXsvklUCzHi{Vdg=`2S7rJ> zed@NsL%7<I$S*2xKw8=`zac$Ax3R(- z?4}kHgNaC2kU5$T^Dah=IFU^^PNyv;9ZBaEgFfArzMs(>6$$7k=(CZrFOND7F&Y{X zlEbslq|zRExIfdEZ>zO^3qn>wam??^>GOo>{0eQ44I_roG7UJi;`oYw$YbM&*cD}i zL67(6I={tT&Tmk4FmFiv(GfGK5xs<=zyB7DQeo#@)4xC5`zz)H{!VB0J2!!m1i3>p zEVSJ&ZT6=@H`7g4V(cIn-;lOp(twYVFNUgx3J1bLe-aYj3&&Ta?5Ti5Y56p8ZnlzZ zv>U*F^N#)04Ao`u@ZQrv{DObRQ>9XqB`#qLY(h!L%C|6vTp#KowJ$@J?gtO&M{=Ow zch;jQyS{=K2w}1+Sg}A?1~2HGqL^f6g=lqX?AwHm6Yqxt*n0N(Emin6dhdjHJoPoQ zHF=_ytH4d?VyVy%kp#Zcs*T)wg;1rArG!Cz#5-d#;i*oJTojDHfg!z|wyS z!F;(M9$zLT!L8n_fe!3BHT=PYmI}9SSzV;xn>tA61l$R2wJ+>{G}l z-scy|VaR$LHtiZG2>A|(X~bjxgxC?``rf~iX^+mW!IT4F9Q^pRv)dG$_fe|11w3W( zIc&&tVH_F3U)p$>1N{rmN0n`%TQFdaK6Ql&6(Y=^wt?ap%E%;WFy|vdHBjMlKDo?4 zBYYlk&nnVG7%mrc^K$ax-F9_f9iB&Zy7zL0U za}@@kzw5la9Uyx^#3E67^Sc<^jA1C0?qtk%Ro4|?NEA@rMC&$4trAvZaF zqzqq=#yArbL*nuJK|jM_7Y45XFkaX1e@vV1D;6WUQ_7mvsg3eRQwh}jXbrtcFzR>1 z!@z)gGg-Txuc9~($W74%Xe4YHvoLXuoGBncjMB;ku#}wq5Q3lLK z0ZhlGHm}~DOxIX_SnIVKKlMf?Hz;CqslGj-7iomw0>fY=hj@*Vk(o&UxxS%GrGE6OjL#rPW6*{r9Zn6CcZLE3kcK(k&z9-8OAVWs zf0O)k?ho#Ou5QUT)~Vp_8fZH>?zw>(Zcq!#ER|xWkbuVo0dLZZda3kCSF;0R^LCLy z`6h|n5FkknTc|^9f^KwhhoZS0DRBRD8%8BN5Ru<19LuuY-YuWq--2jvR{+~672>z> zn*oCY99|eGjpT1+7lR9VU&eCr8`|PG6lCI^s1QKcOoNf(?^+QB7|;O}rof)8Jz1RS z{sl?8Ru&nbfw?yu%(rV=W0Yo0o-_vC{n(TenKZk3v7=IJRu`y1QI8J#`_|rGA=XB! zZHoRlK%X$ax9Ya>#-aAd;6ydMGlk`r?HYcYMWtda!rNwM_z$9OLrQUMz3x(O0^#J_ zs9>z;q?Y~teTUWjd6wp1nqAE{m2ib(y2h59+g$CkAptHW+9G@#0F5|Y^p>gxfNV$+ z^cbla6cO>t+FFL7(_)Zp5R{=+%o3-7N+iTfoyo$qc)C4#Twu$|d8$usIr&SX-Nt;1 z9bOs$oPmS-u5vQy5apIbCOBSWgwiofOx{UWQv3k@U-=jq>Z4|<}eYpTwKeiGa2QG~>n0Au-%;wxC} z-^R6p5s0MzJ~WT3z@Q3kPZI4>*U2d1Uz2U5^QNTW-)XkuE>aGbiV+$(J^A3#Vq>>W zNO(h1yrVbJ(7i#$QI8=6pym;LXat%H8i}{0+*yyxWpvw{5{Pn5q~XOS@hY|Izu{qT zGTy_z+qN_85i_W(+v!0jT|S(x$gL!lQ;&H5l^rnK>j= zx4$-5XlvrbOesD{;(}TEyuH0yAZ^qkokBC@T`q)-=X4#k`dac$aE5RUF3Dv9lszX} z`e*x)#F*jr;}QphR(n?eLpZem_p*qzpNJpZvdL?Em9h)KrNhmVOHkhT8ia7VO`q+a9fKEM8<`%>DKBw<}EP zEOYdgO#DP`Ru7To@n&rvVs6+Tq&Lu;i$(KYcLYejfPKRbQLX%LgEwA4yk(W>!ip~Q z@IdcFk?M7P??WTS=?rP$@zj*NiCe1$p&#add$u)KmE8TcF*V;(TI!{#fiXGL?t%`h zZ&H7f>0mDhb5SQPZDJlj*74|PY<*gUaZ4k;z!Ip#{`qrDhcF z6SDUCS@A$evXJDHgGF#tod+ON00V@qUa^u2T_*DKqECkPTc~;RKN+Ex*jT`zI9UC{ zseOc>K~g2`0!EL7{@E5m)lc>9i6kgzmoz(QP&iTm4ehZ3mUhgzsoyR+%~r@Pv7}oj zk+M}HvSLtg8`^o;c?>pP);)erF^DM+9%!U=OBj8IuOVXPbtRqJ<*t>@-v~mId^j-L zKVvfPguOb^CrU}2OTeA~iVmKD5hv=FmrcdruQDTj3O_$8{XnASnhpjN z3d*t#tXzKI;hT@Qt34D{kEp?*of#NYDnnH8)OWfa?RDY^)he@J6cHAL;Kj6CJom;v zKIZ9ib%!L?*27=&44;(*uWWB+fE}zx;X=&?U;s&XOmvcG)Hsj#_xo<$PanxR6NSl{ zT;JqR4l=6QGK)EpqmQV<?+TiEJ8;~S!(=4R zTd#NtZ=CkSID0Z6L$(7uPUB?V$eH;ezflruT%6qua|99wp00>Whvz2mjg7Y7dH%>s zNSMjWzT4kT7d{+MjqhKbEY$H)GU|QT?|wJg4LB{3B!ceD;lnskY!&)=&3!oYj(uy5 z3lCS$;_wTGS`reWBI^>XVGJxX?&EQl&Kn1V{QzFlo4UxpZ?xc)$(x4LaC@?pCWkAh z&_Hk5U8foBJ>5D~G-UxwZA-!d7T(`=PasF%;!_V01&dlKkMV;KMvBgs^}`r@W~p2P zj{_@j&_t#MCm2?sHO_bMm6@No(+vdtr>S+`9=B|_+-#-CH|C5VQ)*kGxs!T$z({9u zO)HJTng$2&t2gXK+q-;H>fLvA523)5>GqBMd4T@ZI6_~=!jOXzV{6GxPaHhF8TAks z>&?}T?d{9g2XlYM?$zo_e+`ZEW!t?z+Yj*Zc^RrV)Z{?iw@U&}%vMCAHvN)%UNOQ( zT>&WnK-K8uO{jrc#=Z|;n+YfGHJF_VwEH|w%qPV1CpfI*$q?I15<>eAwJ4(PZV1@8Y*Z(F(%~ zTmbv8E{fQ;h`n^nT;4A(e%-O-TU}l+M(y>vX@;G>;Ez93wz2(gZZBDE)MQee?~1`- zzT4ilDs_dSqVZ0GT4E7=t~49~2rSlm*$h(3cqA~v6q>di9_G=ZpcW|=U|iM6EC04&dWc#V*>TTCil^j=4*94$CU>T`KbW-pDq85@LWs1YJ>YPqz(}CBFoTiWogkT6QwH$w^jjc>fXTX7_eVC`^sbY>q5CqKKhhh` z;dgT66r}_L_)9IEEufegu2yMU^{t{i0;lb=}IJUEl2_V>TPywqlVa zyISnzRvs_L{dtd8x4W_nt#;EuVCmGpWAuR22m25|70enI3klMl?{&n;lKH^Y=v)P0r>hX_GN}UsFp%%E^#b9Nl>i8-Q z?IV}r!ab4!+0&~`h3b)rT>93#R4A@#(G93EXhX08p|xjBi^AASSpJep$QU#8nPf4& z+sU7fF;dNZ84|H@Z)mZ2gbcMm4jVeYKeydl$HM$!LH_qTE(b;N2o~$O@We3XB1r-1 zRaN7!n$Kl={L@_q7!|u!GV**o1rvdr3st~_Qv_=^?&;Ztra1FnyZ4o<%RSz^UBQi^ zRTZtKavBtv1}OUdCIK&36+S-fzW4R((cqZA6A2*^pGTL;3Asi{>;ri~?!=j$qEp+8 zH#3xvih=!J+BWf)$Iw)Mx4rflL%1V4ET)nE6mtZ+>M|VYT-TY`gNQA;#_wmQKrTp! zZS*Gb*cX6-a7Qlx`%BZ;miOiTg>q=wcG*rd@R{MI!`JA&_*$FDHG$eiWZ*di#!?r} zqRvp1vHbhH2&#UURuihCC*a;JTfeG?&r+Sxi{TtEo`|yd6pSn~U@3zYsqVY_qOJT< zg@3Jv=hBjD^Vj$2QWAZzzir5`v+E{s$(ygZ3{q@9*ao&Btbu`>=z3qYdkAl)^_{PP z$SS(B<^@wTa2ug^&^d*M>dhJ(elbfQUQw+aJiCIbTLJ7G5Z4HltJwo0`km$&3Wl&F`i%q-M z(S-`z&B~!aYbKtE;P2&SK$*xym=8=Q+YKa1x~7OI(Y3Ni0s`zkxBDeZ@@B}Oi)h`e zFOMh)gAOnrsHnCLT-Qvpm!nv_>sXdho;FDiZ)$toMx%5Dm$gr+B%9L53*g-%tyncn ztd`95jmL!x781Oirf2%ae&JZ#e!KWKdaaEs^L}Vg>xxnJ6zC8sc7U@cK1ba{VPqn~ zU@_EN?YPRdP87N7qMUE2tZQEFP{)v!C)o|aI4(B$e2%n`%4l=eQmr{hyo2V@0%0yt zhNJ+LYIb==_N3|feDGlWiYr^r52OsUe4H^9IMVU;Y4z;NRexru2>JGMYko{+>Mp`h z!Xm@Ikzf~ad)9Va4TrLl4#-;@L>8S3FhrL|`@EGSG5_pb4oBQ_XMD+WU)xd0iCb$d zcpC_Z`36NBX$rjCJH1UQljavuVU9~F5_xn}t!x#ofHn1CFV<{eS;wOaX*ZL(Khu7g zEhwMTwu95?OM|Gk-$99^2iJHTi6PYUOL*v53Y~!Nx2Y8eW~21}DfpbQ)i2P4eE@_}jXhWj!yXkXB(F(C__M+t1MHb|akmz=JyG zp*Aqstsf4z)C9}??YDB~X3v*bAJLzcs!MpzW6)qXv+fUR3mo~mgqS)cfya9n6qY4Q z4x3JxNbZLYiiJ`R4C(3Dy+zI)0^O#xONWv8tFtMi!6B01PlO8|}am?RH`kqMIwO8us$6b(3H-!9lfu*?|D;SUDWB z@kcYzG-Q*12$2^yNA_llpKmhPoVh`)B7HgNE4R}{1;qZ~H28RlpHy!~|Ajq6fn?~g zoF9$Mgb#Y2`t<_Go$)gX=aFl{)DqlAp&i4`K(P9-yQX!T&~)ODmKSun@0lcsOgoyO z!C0h@0*#8Xad*=v!;b=}X38xlEB3QGq=rdb=Watz#vg{?e$PhYAq_~ zs||!X+zFg%`axfJ5#OxR=Mu?-I6{QN=*HnlTM<4c!@THUYu11DXm9tewnWPPC7=0x z<>c@X*(Nm_ZFBxJ4GbIX`;ka0zTRw03z(=zWYp&?H`G&QbgfQxa+{Y0%@fGRJceQ;a!m&@Izr8Ff zhfb#fuu^?#ZpP!Z6I#6))i`D}!oMj*n3{Qx7Rxe~5O%F>rr!#fNb3RMK5+Q}m>opy zA4%W7gD>T0Jd7=rDeQFar9!}=Q|%rc7%w-AuVIUICZV-365wu5WpUCEA8cjWtYdqg zZH2S=P@kaUfN?NxonNeVB2-13+gwW`tv=^Y)}IRHK7CtI5JkWTwIf0P~bO+8ho0SXo$VbUP>xvls)@!28S`Fr0R~F_X!94051G?Ew(%=TghxqCb#^z~^J_ zyMghrmDNR{PZ6u0s9()9+WR@j<~|0UU>gS_n+wS|I0 zDfTOW*J98_mrm!%m;l2@LrcocjG_Q$B8KSKTXF+vPBnZF4nl!heup>4n(6k+cSyBY z@f{85Ytlri8>RFtuEAnE$Up}ab-sEz)0;6ZE%gGl9=~}JPd%qu&TV#r?sLIy!_U6l z01VEXu#ca;Gm}4LmwbJFUnB~;GsqCn(nME3pligzg+6>)oyQRaY>vB?o&P1U&{Iki z=4J;pq~&zETv6by3}jO*w%G3oc-{YfzBvE}8h5yy%-{W)>BE-|>9^;itaTXy=ERtp zj}Y`nYj7Kk%Mk0hH6^tXK0}(WY-RVwCfJ}?<+3=Y*pjO=XsQgL?;Ku{SHiW^xXn?r zD@ifsUARs@*VJ9v)o%(Bx!vK=1}Im=w=k zf{0)&tR>OAWy!AJ?*$9D>Bvi#-Z~oS8pM!Vvh2?ttjEJ@9WfWt#1Uj2^b7dI-x%Ks zPfkfW0?c!jkT9B7?ew_mgs|a;Vs*z(E2swr_xt)CAkpS@4=+YJp9&g?M4m<4Z{yr@^unUTzg}JM=FXS?)loo=v+x9>ev|c$d#`Sd;0D>^@h8 zM0?lMwzr1J{9w5)e|Up7FpD=DlfI&K1M)|Uw)*NHB7kfLhi)yzn^jz9*xy91e|a`T zt-D+|J?;(taXNG9-ZmcEkFO<|OD?p9((Qd@dCi8@p{UwiVBfyUq$sr^mwvTx+?Cn5 zn5TZvTJ?IXd&&CyEKnj!d@Bpg14B?0@?}5=_y2{toX#vBN&f!LY8QkEb0Z=mZ@9+ui(`!~2b&=lj%gN`AbT z*Rim-ioKo|%3-Vj-fsfh^^=%aesAoXjgf!pB)=S1)S9XDiJgYUfRpy4vGi~8d|-Ao z5K{NuO{p;#HQV=GDlce{(2-l(JbA^flA9TT5OKn#_ieZyctSqIU!Cm6>0@Da{a(=7 zTIeDwhK&6q_Q1ThlO5)`zjU)uJX)t?*(J%&o0BEmenP)XqMjsA3YkLt6bPkTl6aJ{ za8Mn#y14{8`tH(E?BFMmWgM-|i5AM>k`ruhrhN$&*vHCH1N|jTk3+=6COZ|7!ZxQw zS{%|tV*jR1R~V!-rq9NU^;RA*T>ZZAR}pH@&yYqp0>S_+v^S;AlTfR|flkD4-y2_zqFfwbt_uQl0{Mr8e` zwFYx&+mK${*4@5I%ImE9gL6pO z^M&}dh>SL>YuBx!+u5}t_(=i9)z0PBQM_p~KEO6_*@k$ViP~p9?PBLUW$*)+td^-d zFgLs}7z&6-UDhiMRp7!2!r3<>u~^Q}aEaj-m;}7RD-~2AAOxWQeha|uU(P2m+5Cn# z#D%rL#{L^GdBN%GWPolt@XqC+8bKOrXrbt^SUdbZSFCX7H=X{MOT6-!EH?V5g)oe; zk+E*yqeT0?Br!ex-GF>XO8WcGPD24YJT?=EjNt#%&RPCN8FYOdkXS@IWs#1hdud^p z6iGp(M3k16PM1bv5d*!SnKdz1Lnlb9Uy;nRCwX zTVh<3CgUZ5b6CKF&WD90z~YPxKIaL^eZPg>snl+xxPZ}MOcVD%6Y(lhq!fesF7plX zl%I}T(cu(wrz*}_ULmKw%t*jl-}9nYOG%fph78B@Xmj)wv9169_Gbi^InE(FDKJ8QyJcOVS zb+E-YNDdHNM?-{KAwt-8K$W{ZCNIcm%@ABZ6AKUdIxDz%beIeZwOlm@3q>8O>2XIXO8euQ)Ki#AL{Lk#Omo zcj*z*0|6R&+peSZUx@Cd`2&Sk-U(XbsFx0%7oqMFA>Et;0wj#Nl9fOAx5|W^@WwN`Z*Ups2OT)wzG=tZ!8MkAwRMv9%SEcLX(X@`&;x#ku+95Auov9v3 zT&WW05OnMGVt~T-Rkye*SFJax;(WmMc_4#HNxmwqAczyzv8alpl~3L$2lL?tLOPXN z8^W>%F2=?Aw^B_%`I96f@HqrIS%c=jCKEp(;<(KxfOh-8sp*x#cV= zo`)}eQ3fM4#^d@UlUeqRi4IfndSn_`~C&l)<}Qr zmb@7pr!7JHzR!U%wIi%&ZG|c@y+4?NtTZ{AKlz0$pVrv^cMj&lX{p=AB60U*XoSmN zpRLgjij%j{5>0QQzda1RpB{UObmO<;;M7bDja35D3H_6|hbiHRnIqB*>v{&x`r1K} zA+aoc`&f5~aJ)@?@-Br7@ z=j-{Tt_Q_C;!`D$yD|K$?oA*n1|H$*RAYi*nUhZGhaV(ut-6HuKp2t zMt)av2mFLv<&t5zuJ_BA6a2K}ROq?C?S35Eb&qUbPWe3@dY7JejR%$F5SpdDli_^t zsS%_Anz-p#?4`&_}~BLCn6K8HO+=c&BUpKT!?78Vu=q_oObktpn5{l}!V z>LiKUeZP~PC(>ekyiuM2DG<@B-e{2M>5~+4$ciQ=O^${$s5W_*jVeP?pAh7K=9Ir{ zMvNlL;Y!!vvx8rZBEx{}t=NaaXK_isl8KAw{uF1dnoH!}e9Z@Q+#mgQ65dHp)n1S@ zkU;nvZ3#NEtMeLcbu>ag3m2rh#ShQUwO8v0G{Xn)X=n~IEO5;A3SIpeh`BRZQ>l*e z*njJx`p*plyPH_&Tov-`Y-3oqd?HtE6q7<_rWgq-$a`F@JUq8`J&EOL7=`869W5RG z`b@6=dci%aPx4=8d+cLTX4h&w{E~5qNQH;ro8kU_?nMNojhtaY_+7dC^vI0d^x921 zpF>=~^&0+?JO(sdYE`nwCYNC4wcyFIOLBa6Uid28KI-jSlvHhofe5T+*c^JDY$3AE zd(uWHx~v%T0{`u?Z7_HZ=78XZauLp(b1g z#rc#*L)fOx)XUJiU0eD|-r(XoUE;`s!?x-xnE8hf4V@0+38YStw9qkaI$Xuk&(*GK5I$^!TFXSvv(7mP#sj!0LiUi%yQ^0@qOMr~&L zm!{-6dNX*0(OK1%T(v11M%BGj@mz*61Il^jZ22{3y`)^CTnu8cbtRLFTAJTS`s>&_QfaZz0|%rLr-FJZixVw+?Vq#JzksMBh${=%(|e7Vw-ra z5sV?OXKZYO`xqp?85I|&t*(9>%`j0L%($i!@Qo}lqowq+2fxlzFpD<g8w3RAnrC6y!hD%?OruB)2%L zmzEGHM+EJj>1BUCluK&2NLOdR({_^NAE!NG*Y9hEmoFv*5rbJrn1Y}d%!jHk)vm?8 zC9ZzC;>=Q!GBAnQcF097bGMzwcOP@B=Mn$;Z)CU-ZY6I~U^=(9C7PLLn~yju3^g^+ z*X1=96R(3)(IO`&XJKJ+Oc>0rGb&QyMtt#t42UMF1xaF@N)-hS!Xm|!g&>?oAI$&QT!_Ee9D~q zvulf`10vuG=Z!oYV}p0n)>wXA;1T?VHNCH78)9Xue z6-E=LMA`C4_7Hc81$&qPQ$3yFD!mznL~re42?;EcU0?^vFHEoUxe9&%X4&?L}I9EynwfUvEGTCW6}1 z;EQju0jwKFOiD@$3WWkn=+wc_Cubit!{oQt*sDH@)Fl87X-D%!p1>)*qw&=r)+swx zo7!{O=aa9jSnEtw-unhXb_K{di;Ig<89K&5XgH|9QPDal>#;q)ylhT)*S)mZ53icy zXyUJ5W1NkafT?3f5RODVv~sE8*LZOyJ~BV%H#eL$+G*Z%j z{0IaVLQ}80$}(*_W_#aHWJbBjbT8Q#;N7(s5OKcULH^=WJP;pxk(oARXst6Yq!h;& zjbe!qnFg)6bKJN(;R0MOwuJPyfmg><-V!cmHfSCRL90nY0eP>yWViJh3#kw4yVh5$ zyOf?^?`CUm@QK1^T~w?F&{!o+TmM|S&6*{ZNIP{v6dl`yPw*1sbGrZh(!h1uhlGZg zp!{+69Ti2bLymItJ;UjE20V5*H+R8wxbb4T^_~@UT`;@Rem23lfdUg_>$%gZLYR=^N|oaM#t&`g+MPevyqBENM($s+y&@wVv}?XNm91 zz(+svCl$#Q6@Nvc?~}dwN4WmXSX|N7wPH4VU=t$50Mc`J!$c(T(!2gXNC$NI1e_mj zO$QcGK26LLw!q&G#@R%rt#3odPJinfH1a(W;SY=5>O=n;HGO!=vHGkkes4z9Xc;Rd zoc8yG%w#DZzVhY%ELB7vd0Op5YE^Dh2FY@@cvhgx&JeXhpI7Z1P+6IKcgec9qOc1- zi@J=)r;a%B2>*IkgG^QR+|oekQ-A+1LV#!G(R2S#i_S+En_mdw+haW zb`y!44Xg+vQ`r{FCN0Nl{HX>B>o=QBSs?XG@N$Ev;|64Fipo>9!`2m~Eh6{cuhUVD zxoRiY+{ocQCEv7+|De-qm3xb)BB=A0(xs#+Dr?7qq^{KbT|}?5U)+nOe@R!7k3vyq zKtDT11a~#?rRXQD2XTrQnTy+01$?3Fo1IhXHIY@vT&4TWgNeaemY2|~s{W_pzX-AK z-CVcX%vlAmf3iRsI$53F9E_v!y?(=KXEz5QqHX?R)t-c=4jZq}etH-Me&F4F!73R( zq~5Qt9*t;i>m8}A@|HPcMISpcKNIHL`Ub!5QP)*qIUsrO;i_`&G%0Ro>|#PeE@+lLlKimsQSO_E|n*l1Gm3a?`0)X>gp@$PKhSd1Pp+ zx{t{UKmLBwF&*}99%~t^{Mppc*jdi5itPKTq`=7Q9ka#!KHim&RqH#dT@bCYdqe0w zQQlkh(2E~_wf3>i40?Z7Lj7TQmL{dBF<4sBE+@=^VJKFN<6I3yu)v(!9JA_@-0eB(wcnpEjzS^ z|MK`a-dKEm&Rc6)c5)!kHy|bM{1|+ts%YhV+|2YvhL{h-;6(Sz9ZB~*Yq=}f-E10- zM!D_E{0au{l>6rF{Ex;VmTz!b-Vy+8K>w{bMbxQ$Q!O4{?-camwM@cOx&E4dCdV~i zH_1Cs$S zdv|Y)G{aKo5b|WGlBL9O6^bTumUo(?%=j_jk}2DJ4?F$*JysPxI>oU4^l3R_4r`od4*2WjYJ>9feS*y(nESuNakB_S=-NZA-zXJBBUl&$}4+@4X-T5IO% zl0~ug0)zHeIGNC0kjc})FF7SFyAr)<9U-}@S6ezeEL@?FZvwcwNSEy<&%M=-LdE|i zHTom;RQZlc?nzfWEzH`DQNVX7r3RVaS2=}DjN}7mo-5cm)~2#0v9md;yLHZM8l-25 z0phB51gvaq`5LK}z<2l_ZzCKK-j55I=h}*jj{ql#`I;GbI5f<|(^`~)!e5v$Z_l8w zJV$nn)qKA}RH?O(JmtPYAU?I$gz=$U>5!7DY8;~+y=w6)UB~I^sj2qFQqJWOw_cQ# z6uNXB8t}LDy#1yFR2s+fvCv*m$L)fS<6=i`sVw<`Hc!ijTw(a@>q`_0i;F9Nf8S-( zX=rFj+Jj&%Un47U15vH#@)yPsUyaYgkQj(IJ+L13QaAsKm3~$dR!JnJ< z$q;vLuFWfaUe_~^=;RW()4VMJ-drjWi!nil4flNQ|klzc29JZt11h zZgS7_Nv}QYe{$dFyn4$J3eo`JNu;8pUDQkIw{JvlAdzJVIL=#{gR*DN)79Kr#yxer zw|zgE(}Nr0|D^|56_Oy>C1jY?LIa^0B+IlIxGcB$uc^O_f-@7I8;*v23ob6G-8VBT zkIQW`BJ(E~zeh=VYaDw-kGaCt$&M-5$4*4kJCJmfYLiWe%htN>z&hPHMOwt^zmK2pDo@8 zI5Lew_fC$F#qk1i?0($r%h(ny)4vs@&)Y)@0U*Yf7TSFO>?dWlmW2W&1qaweBdADx zR6|`InL#Q!v)wq_2g}q@Q`bbRUOf}RzxB}bC5mxDE{QA zz$%)#JRy4la~`U|2sZRuP-nLYSqX$ZmT%fFR3BN-P9&nB`H&hc5p@sCnmt-m1vHa< zDW_R_ZRinygtrSQKZJ!ue#I>St#J%sB#A#>ur&ZMPGse0-RiEcu53eqg`8*&9(?RF zrm3kpXp(0=mAKSQgpHjZZ&c;fy!d{vg}-075TQ^>Tuz7`*`2 zUV9i3J<$e#t18Dpy5LU~K`0-s6E^}vhX=l*5;SW3W*=Nxe=?$$xR4$!g6`g!-V`=glpX+Ci)nUWNjx_0sD*nBV|Hn ztkfLWoyUfTbfM-OLzyfk05W#IQ`T}gIB@inG4~> zP$At+S)W%*N}>B|b~pmuw#Y5EK6c!r8wBFw$b)QF%P5sC`~XJ~u0$G)H~YAqF0rc2 zVGtoBXLv+3<7LlY|C(=5Q}pr!$+7p(0^f)^P?355{k;^y26f_nT%-@cp6uhM%R#y6 z^!}Uifsl~_effFzM?~uH&be8d9jD5k#f6FP;wz8r17_##cVoc3_;`4P3e|RF;o59V zOG{a_j=kK1O`$K;DVMP`U;#?0H`QH0gOYH6Q~>#OLj-*3i!+)m|2nX^Yy~MCDW!DW znn6W>*E+M@?waNk}=Kzpc^;V?~z?G4~nB{J8vwWo)Rg9EP#FVT}C3c8CA4u&Z0m2GUn2H=O` z_LFxeA2GOv(XE2PhO7k%;$PhQ@IlfQ=d2tF_^JAZkCfw)rDDbeqz%U39)~*-V#|?X zG7WuEoP|7qj2C=#b~L7ayhClU>Ya|2yzxr6m4rZbCW%*mYR|dl3ZKLEV705^a4Q)& z)}VRk+@PNYjT0>9%bcFOUk{It26$&tw=RD$&x_#%^B*lEEQEO!Ae%Sj(neO!n#0H| z;sYuu)IOeBjcQ5r|J*-t ze3kf@DYKX=TzgFx+7axqs|Y9CDNI%>9_=p@fdwL4m&R01+flXT*(_>(EAA$3IKAd`3?0 z72}Y{%E%C3fnTMdrUt>GVUgrOtE23wgZf=XZVB*xUTHsol+!U&S%K45+H)78TcC-{ z9)FxL9Zv!Vmn)jQzWYF5pNjPe`Jb32T4z1-uf3L50Y6fK8a$%UqId?N#O4b($A?E8 zldCqM9v9u@5pLh@;{vebog44;-w*z4lK(D-|F> Date: Tue, 12 Sep 2017 10:43:19 +0800 Subject: [PATCH 030/115] fix NoInGrad bug --- paddle/operators/pad_op.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/paddle/operators/pad_op.cc b/paddle/operators/pad_op.cc index 449463c830..99f605c651 100644 --- a/paddle/operators/pad_op.cc +++ b/paddle/operators/pad_op.cc @@ -47,7 +47,8 @@ class PadOpMaker : public framework::OpProtoAndCheckerMaker { "The input should be a k-D tensor(k > 0 and k < 7)"); AddOutput("Out", "The output of pad op." - "A tensor with the same shape as X."); + "A tensor with the same shape as X.") + .NotInGradient(); AddComment(R"DOC( Pad input into output, as specified by paddings and pad_value. The input should be a k-D tensor(k > 0 and k < 7). As an example: From ad64ca5da20e696d66cfcf9011d16a81e8ef8ff8 Mon Sep 17 00:00:00 2001 From: qingqing01 Date: Tue, 12 Sep 2017 10:45:11 +0800 Subject: [PATCH 031/115] Call Tensor::numel() everywhere. --- paddle/framework/tensor.h | 5 ++++- paddle/framework/tensor_impl.h | 8 ++++---- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/paddle/framework/tensor.h b/paddle/framework/tensor.h index fc54ed697f..19051db539 100644 --- a/paddle/framework/tensor.h +++ b/paddle/framework/tensor.h @@ -162,7 +162,10 @@ class Tensor { /*! points to dimensions of memory block. */ DDim dims_; - /*! the element count of tensor. */ + /** + * A cache of the number of elements in a tensor. + * Would be 0 for an uninitialized tensor. + */ int64_t numel_; /** diff --git a/paddle/framework/tensor_impl.h b/paddle/framework/tensor_impl.h index 03678784b4..5e32bfcac6 100644 --- a/paddle/framework/tensor_impl.h +++ b/paddle/framework/tensor_impl.h @@ -24,7 +24,7 @@ inline void Tensor::check_memory_size() const { PADDLE_ENFORCE_NOT_NULL( holder_, "Tenosr holds no memory. Call Tensor::mutable_data first."); PADDLE_ENFORCE_GE( - holder_->size(), numel_ * sizeof(T) + offset_, + holder_->size(), numel() * sizeof(T) + offset_, "Tensor's dims_ is out of bound. Call Tensor::mutable_data " "first to re-allocate memory.\n" "or maybe the required data-type mismatches the data already stored."); @@ -54,11 +54,11 @@ inline T* Tensor::mutable_data(DDim dims, platform::Place place) { template inline T* Tensor::mutable_data(platform::Place place) { static_assert(std::is_pod::value, "T must be POD"); - PADDLE_ENFORCE_GT(numel_, 0, + PADDLE_ENFORCE_GT(numel(), 0, "Tensor's numel must be larger than zero to call " "Tensor::mutable_data. Call Tensor::set_dim first."); /* some versions of boost::variant don't have operator!= */ - int64_t size = numel_ * sizeof(T); + int64_t size = numel() * sizeof(T); if (holder_ == nullptr || !(holder_->place() == place) || holder_->size() < size + offset_) { if (platform::is_cpu_place(place)) { @@ -131,7 +131,7 @@ inline Tensor Tensor::Slice(const int& begin_idx, const int& end_idx) const { PADDLE_ENFORCE_LT(begin_idx, end_idx, "Begin index must be less than end index."); PADDLE_ENFORCE_NE(dims_[0], 1, "Can not slice a tensor with dims_[0] = 1."); - size_t base = numel_ / dims_[0]; + size_t base = numel() / dims_[0]; Tensor dst; dst.holder_ = holder_; DDim dst_dims = dims_; From dd926498e7e61b25250c8f59d91afe57ab24098a Mon Sep 17 00:00:00 2001 From: Yibing Liu Date: Mon, 11 Sep 2017 20:09:21 -0700 Subject: [PATCH 032/115] adapt to the new test framework --- .../v2/framework/tests/test_reshape_op.py | 27 +++++++------------ 1 file changed, 9 insertions(+), 18 deletions(-) diff --git a/python/paddle/v2/framework/tests/test_reshape_op.py b/python/paddle/v2/framework/tests/test_reshape_op.py index 50653f58ee..16bb6bb2af 100644 --- a/python/paddle/v2/framework/tests/test_reshape_op.py +++ b/python/paddle/v2/framework/tests/test_reshape_op.py @@ -1,29 +1,20 @@ import unittest import numpy as np -from gradient_checker import GradientChecker, Operator -from op_test_util import OpTestMeta +from op_test import OpTest -class TestReshapeOp(unittest.TestCase): - __metaclass__ = OpTestMeta - +class TestReshapeOp(OpTest): def setUp(self): - self.type = "reshape" - self.inputs = {'X': np.random.random((37, 51)).astype("float32"), } - self.attrs = {'shape': [51 * 37]} + self.op_type = "reshape" + self.inputs = {'X': np.random.random((10, 20)).astype("float32")} + self.attrs = {'shape': [10 * 20]} self.outputs = {'Out': self.inputs['X'].reshape(self.attrs['shape'])} + def test_check_output(self): + self.check_output() -class TestReshapeGradOp(GradientChecker): - def setUp(self): - self.op = Operator("reshape", X='X', Out='Out', shape=[5, 40]) - self.inputs = {"X": np.random.random((10, 20)).astype("float32")} - - def test_normal(self): - self.check_grad(self.op, self.inputs, ["X"], "Out") - - def test_dev_compare(self): - self.compare_grad(self.op, self.inputs) + def test_check_grad(self): + self.check_grad(["X"], "Out") if __name__ == '__main__': From 6fbf097bccf77f74927e7a19aa879182088558ca Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Mon, 11 Sep 2017 20:11:56 -0700 Subject: [PATCH 033/115] Mark thrust::device_ptr in transform Fix TravisCI --- paddle/platform/transform.h | 35 +++++++++++++++++++++++++++++++++-- 1 file changed, 33 insertions(+), 2 deletions(-) diff --git a/paddle/platform/transform.h b/paddle/platform/transform.h index fcd300f2d9..c80446b45c 100644 --- a/paddle/platform/transform.h +++ b/paddle/platform/transform.h @@ -19,13 +19,43 @@ #include "paddle/platform/place.h" #include +#include #ifdef __NVCC__ +#include #include #endif namespace paddle { namespace platform { +#ifdef __NVCC__ +template +struct DevicePtrCast; + +template +struct DevicePtrCast { + using ELEM = typename std::remove_pointer::type; + using RTYPE = thrust::device_ptr; + + inline thrust::device_ptr operator()(ELEM* ele) const { + return thrust::device_pointer_cast(ele); + } +}; + +template +struct DevicePtrCast { + using RTYPE = T; + inline RTYPE operator()(RTYPE it) const { return it; } +}; + +template +auto DevCast(T t) -> + typename DevicePtrCast::value>::RTYPE { + DevicePtrCast::value> cast; + return cast(t); +} +#endif + // Transform on host or device. It provides the same API in std library. template @@ -35,7 +65,7 @@ void Transform(Place place, InputIter first, InputIter last, OutputIter result, std::transform(first, last, result, op); } else { #ifdef __NVCC__ - thrust::transform(first, last, result, op); + thrust::transform(DevCast(first), DevCast(last), DevCast(result), op); #else PADDLE_THROW("Do not invoke `Transform` in .cc file"); #endif @@ -50,7 +80,8 @@ void Transform(Place place, InputIter1 first1, InputIter1 last1, std::transform(first1, last1, first2, result, op); } else { #ifdef __NVCC__ - thrust::transform(first1, last1, first2, result, op); + thrust::transform(DevCast(first1), DevCast(last1), DevCast(first2), + DevCast(result), op); #else PADDLE_THROW("Do not invoke `Transform` in .cc file"); #endif From 297b3d0f0f03461bf067d6196863454f685c3567 Mon Sep 17 00:00:00 2001 From: zhangchao41 Date: Tue, 12 Sep 2017 11:57:14 +0800 Subject: [PATCH 034/115] add test --- test | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 test diff --git a/test b/test new file mode 100644 index 0000000000..e69de29bb2 From ca3db07028b0098a16bb580b732925b58e5ce50f Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Tue, 12 Sep 2017 12:03:03 +0800 Subject: [PATCH 035/115] add createReorder and createMemoryDesc in MKLDNNMatrix --- paddle/math/MKLDNNMatrix.cpp | 25 +++++++++++++++++++++++++ paddle/math/MKLDNNMatrix.h | 19 +++++++++++++++++++ 2 files changed, 44 insertions(+) diff --git a/paddle/math/MKLDNNMatrix.cpp b/paddle/math/MKLDNNMatrix.cpp index c4063e5069..a71ac12afc 100644 --- a/paddle/math/MKLDNNMatrix.cpp +++ b/paddle/math/MKLDNNMatrix.cpp @@ -49,6 +49,31 @@ MKLDNNMatrixPtr MKLDNNMatrix::create(MatrixPtr m, return create(m, memory::primitive_desc(memory::desc(dims, dtype, fmt), eg)); } +std::shared_ptr MKLDNNMatrix::createReorder(const MKLDNNMatrixPtr& src, + const MKLDNNMatrixPtr& dst, + bool checkData) { + if (src == dst) { + return nullptr; + } + + if (src->getPrimitiveDesc() == dst->getPrimitiveDesc()) { + return nullptr; + } + + if (checkData && (src->getData() == dst->getData())) { + LOG(FATAL) << "can not create reorder with inplace data"; + return nullptr; + } + + memory::dims srcDims = src->getDims(); + memory::dims dstDims = dst->getDims(); + CHECK_EQ(srcDims.size(), dstDims.size()); + for (size_t i = 0; i < srcDims.size(); ++i) { + CHECK_EQ(srcDims[i], dstDims[i]); + } + return std::make_shared(*src, *dst); +} + void MKLDNNMatrix::reorderDataFrom(const MKLDNNMatrixPtr& m, memory::format srcFmt, memory::dims targetDim) { diff --git a/paddle/math/MKLDNNMatrix.h b/paddle/math/MKLDNNMatrix.h index eef3b429e6..c7765369c8 100644 --- a/paddle/math/MKLDNNMatrix.h +++ b/paddle/math/MKLDNNMatrix.h @@ -52,6 +52,25 @@ public: mkldnn::engine& eg, mkldnn::memory::data_type dtype = mkldnn::memory::data_type::f32); + /** + * Create Memory descriptor. + * default with any format and f32 dtype + */ + static mkldnn::memory::desc createMemoryDesc( + const mkldnn::memory::dims& dims, + const mkldnn::memory::format& fmt = mkldnn::memory::format::any, + const mkldnn::memory::data_type& dtype = mkldnn::memory::data_type::f32) { + return mkldnn::memory::desc(dims, dtype, fmt); + } + + /** + * Create reorder primitive. + */ + static std::shared_ptr createReorder( + const MKLDNNMatrixPtr& src, + const MKLDNNMatrixPtr& dst, + bool checkData = true); + public: /** * Reorder this MKLDNNMatrix from other format. From 0289a0091f094c75190698df7e450d8e1a70bbaa Mon Sep 17 00:00:00 2001 From: Yibing Liu Date: Mon, 11 Sep 2017 22:15:29 -0700 Subject: [PATCH 036/115] follow comments to cleanup code --- paddle/operators/reshape_op.cc | 35 ++++++++++++++++++++++++++-------- paddle/operators/reshape_op.h | 9 ++++----- 2 files changed, 31 insertions(+), 13 deletions(-) diff --git a/paddle/operators/reshape_op.cc b/paddle/operators/reshape_op.cc index 37cbecbf25..da29c89150 100644 --- a/paddle/operators/reshape_op.cc +++ b/paddle/operators/reshape_op.cc @@ -27,21 +27,26 @@ class ReshapeOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { - auto *in = ctx.Input("X"); + // input check + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) shouldn't be null"); auto shape = ctx.Attr>("shape"); - int64_t capacity = -1; + PADDLE_ENFORCE(shape.size() > 0, "Attr(shape) shouldn't be empty."); for (auto dim : shape) { PADDLE_ENFORCE(dim > 0, "Each dimension of shape must be positive."); - if (capacity < 0) { - capacity = dim; - } else { - capacity *= dim; - } } + // capacity check + int64_t capacity = + std::accumulate(shape.begin(), shape.end(), 1, std::multiplies()); + auto *in = ctx.Input("X"); int64_t in_size = framework::product(in->dims()); PADDLE_ENFORCE_EQ(capacity, in_size, "The size of Input(X) mismatches with Attr(shape)."); - ctx.Output("Out")->Resize(in->dims()); + // resize output + std::vector shape_int64(shape.size(), 0); + std::transform(shape.begin(), shape.end(), shape_int64.begin(), + [](int a) { return static_cast(a); }); + auto out_dims = framework::make_ddim(shape_int64); + ctx.Output("Out")->Resize(out_dims); } }; @@ -56,6 +61,17 @@ class ReshapeOpMaker : public framework::OpProtoAndCheckerMaker { AddComment(R"DOC(Reshape operator Reshape Input(X) into the shape specified by Attr(shape). + +An example: +Given a 2-D tensor X with 2 rows and 2 columns + + [[1, 2], [3, 4]] + +with target shape = [1, 4], the reshape operator will tansform +the tensor X into a 1-D tensor: + + [1, 2, 3, 4] + )DOC"); } }; @@ -70,6 +86,9 @@ class ReshapeGradOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) shouldn't be null."); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")), + "Input(Out@GRAD) shouldn't be null."); auto dims = ctx.Input("X")->dims(); auto *d_in = ctx.Output(framework::GradVarName("X")); d_in->Resize(dims); diff --git a/paddle/operators/reshape_op.h b/paddle/operators/reshape_op.h index 0e920329d9..26708e72dc 100644 --- a/paddle/operators/reshape_op.h +++ b/paddle/operators/reshape_op.h @@ -30,11 +30,10 @@ class ReshapeKernel : public framework::OpKernel { out->mutable_data(ctx.GetPlace()); auto shape = ctx.Attr>("shape"); - std::vector tmp; - for (auto dim : shape) { - tmp.push_back(dim); - } - auto out_dims = framework::make_ddim(tmp); + std::vector shape_int64(shape.size(), 0); + std::transform(shape.begin(), shape.end(), shape_int64.begin(), + [](int a) { return static_cast(a); }); + auto out_dims = framework::make_ddim(shape_int64); out->CopyFrom(*in, ctx.GetPlace()); out->Resize(out_dims); } From f3a23b68401e3206ebb18d5696cf339ec17ae1f7 Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Tue, 12 Sep 2017 13:15:31 +0800 Subject: [PATCH 037/115] add MKLDNNConvLayer --- paddle/gserver/layers/MKLDNNConvLayer.cpp | 402 ++++++++++++++++++++++ paddle/gserver/layers/MKLDNNConvLayer.h | 157 +++++++++ 2 files changed, 559 insertions(+) create mode 100644 paddle/gserver/layers/MKLDNNConvLayer.cpp create mode 100644 paddle/gserver/layers/MKLDNNConvLayer.h diff --git a/paddle/gserver/layers/MKLDNNConvLayer.cpp b/paddle/gserver/layers/MKLDNNConvLayer.cpp new file mode 100644 index 0000000000..617874defe --- /dev/null +++ b/paddle/gserver/layers/MKLDNNConvLayer.cpp @@ -0,0 +1,402 @@ +/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "MKLDNNConvLayer.h" +#include "paddle/math/MathUtils.h" +#include "paddle/utils/Logging.h" + +using namespace mkldnn; // NOLINT +typedef memory::format format; +typedef convolution_forward conv_fwd; +typedef convolution_backward_weights conv_bwdWgt; +typedef convolution_backward_data conv_bwdData; + +namespace paddle { + +REGISTER_LAYER(mkldnn_conv, MKLDNNConvLayer); + +bool MKLDNNConvLayer::init(const LayerMap& layerMap, + const ParameterMap& parameterMap) { + if (!MKLDNNLayer::init(layerMap, parameterMap)) { + return false; + } + CHECK_EQ(inputLayers_.size(), 1) << "Only support one input layer yet"; + CHECK_EQ(inputLayers_.size(), parameters_.size()); + CHECK(config_.shared_biases()) << "Only support shared biases yet"; + + oc_ = config_.num_filters(); + const ConvConfig& conf = config_.inputs(0).conv_conf(); + ic_ = conf.channels(); + fw_ = conf.filter_size(); + fh_ = conf.filter_size_y(); + pw_ = conf.padding(); + ph_ = conf.padding_y(); + dw_ = conf.dilation(); + dh_ = conf.dilation_y(); + sw_ = conf.stride(); + sh_ = conf.stride_y(); + gp_ = conf.groups(); + oh_ = conf.has_output_y() ? conf.output_y() : conf.output_x(); + ow_ = conf.output_x(); + ih_ = conf.has_img_size_y() ? conf.img_size_y() : conf.img_size(); + iw_ = conf.img_size(); + caffeMode_ = conf.caffe_mode(); + CHECK(caffeMode_) << "Only support caffe mode yet"; + CHECK(dh_ == 1 && dw_ == 1) << "Only support dilation 1 yet"; + // check group setting + CHECK_EQ((oc_ / gp_) * gp_, oc_) << "group is indivisible for oc"; + CHECK_EQ((ic_ / gp_) * gp_, ic_) << "group is indivisible for ic"; + + // create weight + size_t height = oc_ / gp_; + size_t width = ic_ * fh_ * fw_; + CHECK_EQ(parameters_[0]->getSize(), height * width); + weight_ = + std::unique_ptr(new Weight(height, width, parameters_[0], 0)); + + // create biases + if (biasParameter_.get() != NULL) { + biases_ = std::unique_ptr(new Weight(1, oc_, biasParameter_)); + } + return true; +} + +void MKLDNNConvLayer::convertWeightsFromPaddle() { + if (hasInitedWgt_) { + return; + } + + CHECK(wgtVal_) << "should have been initialized"; + // the paddle weight format is oihw or goihw + auto targetDim = wgtVal_->getDims(); + auto srcFmt = (gp_ == 1) ? memory::format::oihw : memory::format::goihw; + wgtVal_->reorderDataFrom(wgtVal_, srcFmt, targetDim); + hasInitedWgt_ = true; +} + +void MKLDNNConvLayer::convertWeightsToPaddle() { + CHECK(wgtVal_) << "should have been initialized"; + auto targetDim = wgtVal_->getDims(); + auto dstFmt = (gp_ == 1) ? memory::format::oihw : memory::format::goihw; + wgtVal_->reorderDataTo(wgtVal_, dstFmt, targetDim); +} + +void MKLDNNConvLayer::reshape( + int& bs, int& ic, int& ih, int& iw, int oc, int& oh, int& ow) { + reshapeInput(bs, ih, iw); + + // cal output sizes + // oc can not be changed + int fh = (fh_ - 1) * dh_ + 1; + int fw = (fw_ - 1) * dw_ + 1; + oh = outputSize(ih, fh, ph_, sh_, caffeMode_); + ow = outputSize(iw, fw, pw_, sw_, caffeMode_); + + reshapeOutput(oh, ow); + resizeOutput(bs, oc * oh * ow); + + printSizeInfo(); +} + +void MKLDNNConvLayer::resetFwd(std::vector& pipeline, + MKLDNNMatrixPtr& in, + MKLDNNMatrixPtr& wgt, + MKLDNNMatrixPtr& bias, + MKLDNNMatrixPtr& out) { + pipeline.clear(); + bool hasBias = biases_ && biases_->getW(); + biasVal_ = nullptr; + + // dims for conv + memory::dims inDims = memory::dims{bs_, ic_, ih_, iw_}; + memory::dims outDims = memory::dims{bs_, oc_, oh_, ow_}; + memory::dims wgtDims = + (gp_ == 1) ? memory::dims{oc_, ic_, fh_, fw_} + : memory::dims{gp_, oc_ / gp_, ic_ / gp_, fh_, fw_}; + memory::dims biasDims = memory::dims{oc_}; + memory::dims strides = {sh_, sw_}; + // note: mkldnn dilation start from 0 + memory::dims dilations = {dh_ - 1, dw_ - 1}; + memory::dims padding = {ph_, pw_}; + memory::dims padR = getPaddingR(); + + // create forward handle + prop_kind pk = + passType_ == PASS_TEST ? prop_kind::forward : prop_kind::forward_training; + algorithm algo = algorithm::convolution_direct; + padding_kind padKind = padding_kind::zero; + conv_fwd::desc fwdDesc = + hasBias ? conv_fwd::desc(pk, + algo, + MKLDNNMatrix::createMemoryDesc(inDims), + MKLDNNMatrix::createMemoryDesc(wgtDims), + MKLDNNMatrix::createMemoryDesc(biasDims), + MKLDNNMatrix::createMemoryDesc(outDims), + strides, + dilations, + padding, + padR, + padKind) + : conv_fwd::desc(pk, + algo, + MKLDNNMatrix::createMemoryDesc(inDims), + MKLDNNMatrix::createMemoryDesc(wgtDims), + MKLDNNMatrix::createMemoryDesc(outDims), + strides, + dilations, + padding, + padR, + padKind); + fwdPD_.reset(new conv_fwd::primitive_desc(fwdDesc, engine_)); + + // create mkldnn matrix + const MatrixPtr& wgtVal = weight_->getW(); + const MatrixPtr& inVal = inputLayers_[0]->getOutput().value; + const MatrixPtr& outVal = output_.value; + wgt = MKLDNNMatrix::create(wgtVal, fwdPD_->weights_primitive_desc()); + in = MKLDNNMatrix::create(inVal, fwdPD_->src_primitive_desc()); + out = MKLDNNMatrix::create(outVal, fwdPD_->dst_primitive_desc()); + VLOG(MKLDNN_FMTS) << "Weight value format: " << wgtVal_->getFormat(); + if (hasBias) { + const MatrixPtr& biasVal = biases_->getW(); + bias = MKLDNNMatrix::create(biasVal, biasDims, format::x, engine_); + CHECK(bias->getPrimitiveDesc() == fwdPD_->bias_primitive_desc()) + << "bias primitive desc should always be equal"; + } + + // add reorder if input value do not match + if (inputIsOnlyMKLDNN()) { + MKLDNNMatrixPtr dnnIn = std::dynamic_pointer_cast(inVal); + CHECK(dnnIn) << "Input should be MKLDNNMatrix"; + if (dnnIn->getPrimitiveDesc() != in->getPrimitiveDesc()) { + CHECK_EQ(dnnIn->getFormat(), format::nc); + CHECK(ih_ == 1 && iw_ == 1); + dnnIn = MKLDNNMatrix::create(inVal, inDims, format::nchw, engine_); + CHECK(dnnIn->getPrimitiveDesc() == in->getPrimitiveDesc()); + } + in = dnnIn; + } else { + const MatrixPtr& cpuIn = getInputValue(0, CPU_DEVICE); + cpuInVal_ = MKLDNNMatrix::create(cpuIn, inDims, format::nchw, engine_); + if (cpuInVal_->getPrimitiveDesc() != in->getPrimitiveDesc()) { + // create new mkldnn matrix + in = MKLDNNMatrix::create(nullptr, fwdPD_->src_primitive_desc()); + cvtInVal_ = MKLDNNMatrix::createReorder(cpuInVal_, in); + CHECK(cvtInVal_); + pipeline.push_back(*cvtInVal_); + } else { + in = cpuInVal_; + } + } + + // add fwd handle + if (hasBias) { + fwd_.reset(new conv_fwd(*fwdPD_, *in, *wgt, *bias, *out)); + } else { + fwd_.reset(new conv_fwd(*fwdPD_, *in, *wgt, *out)); + } + pipeline.push_back(*fwd_); + + // change original output value from cpu matrix to mkldnn matrix + output_.value = std::dynamic_pointer_cast(out); + // add reorder if output value has cpu device and pd do not match + if (!outputIsOnlyMKLDNN()) { + const MatrixPtr& cpuOut = getOutput(CPU_DEVICE).value; + cpuOutVal_ = MKLDNNMatrix::create(cpuOut, outDims, format::nchw, engine_); + if (cpuOutVal_->getPrimitiveDesc() != out->getPrimitiveDesc()) { + cvtOutVal_ = MKLDNNMatrix::createReorder(out, cpuOutVal_); + CHECK(cvtOutVal_); + pipeline.push_back(*cvtOutVal_); + } else { + // share data + cpuOut->setData(out->getData()); + cpuOutVal_ = out; + } + } + + printValueFormatFlow(); +} + +void MKLDNNConvLayer::resetBwd(std::vector& pipeline, + MKLDNNMatrixPtr& in, + MKLDNNMatrixPtr& wgt, + MKLDNNMatrixPtr& bias, + MKLDNNMatrixPtr& out) { + pipeline.clear(); + bool hasBias = biases_ && biases_->getWGrad(); + + /// backward weight + CHECK(inVal_) << "Should have input value"; + CHECK(outVal_) << "Should have output value"; + CHECK(wgtVal_) << "Should have weight value"; + memory::dims wgtDims = + (gp_ == 1) ? memory::dims{oc_, ic_, fh_, fw_} + : memory::dims{gp_, oc_ / gp_, ic_ / gp_, fh_, fw_}; + memory::dims strides = {sh_, sw_}; + memory::dims dilations = {dh_ - 1, dw_ - 1}; + memory::dims padding = {ph_, pw_}; + memory::dims padR = getPaddingR(); + + // create backward handle + algorithm algo = algorithm::convolution_direct; + padding_kind padKind = padding_kind::zero; + auto bwdWgtDesc = + hasBias ? conv_bwdWgt::desc(algo, + inVal_->getMemoryDesc(), + MKLDNNMatrix::createMemoryDesc(wgtDims), + biasVal_->getMemoryDesc(), + outVal_->getMemoryDesc(), + strides, + padding, + padR, + padKind) + : conv_bwdWgt::desc(algo, + inVal_->getMemoryDesc(), + MKLDNNMatrix::createMemoryDesc(wgtDims), + outVal_->getMemoryDesc(), + strides, + padding, + padR, + padKind); + + auto bwdWgtPD = conv_bwdWgt::primitive_desc(bwdWgtDesc, engine_, *fwdPD_); + CHECK(bwdWgtPD.src_primitive_desc() == inVal_->getPrimitiveDesc()) + << "primitive desc of in value should equal"; + CHECK(bwdWgtPD.diff_dst_primitive_desc() == outVal_->getPrimitiveDesc()) + << "primitive desc of out grad should equal the out value"; + CHECK(bwdWgtPD.diff_weights_primitive_desc() == wgtVal_->getPrimitiveDesc()) + << "primitive desc of weight grad should equal the weight value"; + + // create mkldnn matrix + const MatrixPtr& wgtGrad = weight_->getWGrad(); + const MatrixPtr& outGrad = output_.grad; + wgt = MKLDNNMatrix::create(wgtGrad, bwdWgtPD.diff_weights_primitive_desc()); + out = MKLDNNMatrix::create(outGrad, bwdWgtPD.diff_dst_primitive_desc()); + CHECK(wgt->getPrimitiveDesc() == wgtVal_->getPrimitiveDesc()) + << "primitive desc of weight grad and value should be equal"; + CHECK(out->getPrimitiveDesc() == outVal_->getPrimitiveDesc()) + << "primitive desc of out grad and value should be equal"; + VLOG(MKLDNN_FMTS) << "Backward weight, weight grad format: " + << wgt->getFormat(); + if (hasBias) { + const MatrixPtr& biasGrad = biases_->getWGrad(); + bias = MKLDNNMatrix::create(biasGrad, bwdWgtPD.diff_bias_primitive_desc()); + CHECK(bias->getPrimitiveDesc() == biasVal_->getPrimitiveDesc()) + << "primitive desc of bias grad should equal the bias value"; + } + + // TODO(TJ): merge outgrad + // add reorder if has user output grad + if (!outputIsOnlyMKLDNN()) { + const MatrixPtr& cpuOut = getOutput(CPU_DEVICE).grad; + memory::dims outDims = memory::dims{bs_, oc_, oh_, ow_}; + // same PrimitiveDesc with cpuInVal_ + CHECK(cpuOutVal_); + cpuOutGrad_ = MKLDNNMatrix::create(cpuOut, cpuOutVal_->getPrimitiveDesc()); + if (cpuOutGrad_->getPrimitiveDesc() == out->getPrimitiveDesc()) { + outGrad->setData(cpuOut->getData()); + out = cpuOutGrad_; + } else { + cvtOutGrad_ = MKLDNNMatrix::createReorder(cpuOutGrad_, out); + CHECK(cvtOutGrad_); + pipeline.push_back(*cvtOutGrad_); + } + } + + // add bwdWgt handle + if (hasBias) { + bwdWgt_.reset(new conv_bwdWgt(bwdWgtPD, *inVal_, *out, *wgt, *bias)); + } else { + bwdWgt_.reset(new conv_bwdWgt(bwdWgtPD, *inVal_, *out, *wgt)); + } + pipeline.push_back(*bwdWgt_); + + /// backward data + const MatrixPtr& inGrad = inputLayers_[0]->getOutput().grad; + if (inGrad == nullptr) { + return; + } + + auto bwdDataDesc = conv_bwdData::desc(algo, + inVal_->getMemoryDesc(), + MKLDNNMatrix::createMemoryDesc(wgtDims), + out->getMemoryDesc(), + strides, + padding, + padR, + padKind); + auto bwdDataPD = conv_bwdData::primitive_desc(bwdDataDesc, engine_, *fwdPD_); + CHECK(bwdDataPD.diff_src_primitive_desc() == inVal_->getPrimitiveDesc()) + << "primitive desc of in grad should equal the in value"; + CHECK(bwdDataPD.diff_dst_primitive_desc() == out->getPrimitiveDesc()) + << "primitive desc of out grad should equal"; + + // create mkldnn matrix inGrad_ and reorder if necessary + // TODO(TJ): use outputMaps_ ways to get the inGrad_ when merge outgrad done + in = MKLDNNMatrix::create(inGrad, bwdDataPD.diff_src_primitive_desc()); + cvtInGrad_ = nullptr; + if (!inputIsOnlyMKLDNN()) { + const MatrixPtr& cpuIn = getInputGrad(0, CPU_DEVICE); + // same PrimitiveDesc with cpuInVal_ + CHECK(cpuInVal_); + cpuInGrad_ = MKLDNNMatrix::create(cpuIn, cpuInVal_->getPrimitiveDesc()); + if (cpuInGrad_->getPrimitiveDesc() != in->getPrimitiveDesc()) { + const MatrixPtr& dnnIn = getInputGrad(0, MKLDNN_DEVICE); + in = MKLDNNMatrix::create(dnnIn, in->getPrimitiveDesc()); + cvtInGrad_ = MKLDNNMatrix::createReorder(in, cpuInGrad_); + CHECK(cvtInGrad_); + } else { + in = cpuInGrad_; + } + } + + // create new weight value for backward data, and reorder if necessary + // since the primitive_desc would be different with wgtVal_ + if (bwdDataPD.weights_primitive_desc() != wgtVal_->getPrimitiveDesc()) { + wgtValBwdData_ = + MKLDNNMatrix::create(nullptr, bwdDataPD.weights_primitive_desc()); + cvtWgtVal_ = MKLDNNMatrix::createReorder(wgtVal_, wgtValBwdData_); + CHECK(cvtWgtVal_); + pipeline.push_back(*cvtWgtVal_); + } else { + wgtValBwdData_ = wgtVal_; + } + VLOG(MKLDNN_FMTS) << "Backward data, weight value format: " + << wgtValBwdData_->getFormat(); + + // add bwdData handle + CHECK(wgtValBwdData_) << "Should have weight memory"; + bwdData_.reset(new conv_bwdData(bwdDataPD, *out, *wgtValBwdData_, *in)); + pipeline.push_back(*bwdData_); + + // add ingrad reorder after bwdData + if (cvtInGrad_) { + pipeline.push_back(*cvtInGrad_); + } + + printGradFormatFlow(); +} + +void MKLDNNConvLayer::updateInputData() { + cpuInVal_->setData(getInputValue(0, CPU_DEVICE)->getData()); +} + +void MKLDNNConvLayer::updateWeights(const UpdateCallback& callback) { + weight_->getParameterPtr()->incUpdate(callback); + if (biases_ && biases_->getWGrad()) { + biases_->getParameterPtr()->incUpdate(callback); + } +} + +} // namespace paddle diff --git a/paddle/gserver/layers/MKLDNNConvLayer.h b/paddle/gserver/layers/MKLDNNConvLayer.h new file mode 100644 index 0000000000..58891ff5e1 --- /dev/null +++ b/paddle/gserver/layers/MKLDNNConvLayer.h @@ -0,0 +1,157 @@ +/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include "MKLDNNLayer.h" +#include "mkldnn.hpp" + +namespace paddle { + +/** + * @brief A subclass of MKLDNNLayer conv layer. + * + * The config file api is mkldnn_conv + */ +class MKLDNNConvLayer : public MKLDNNLayer { +protected: + // padding height and width + int ph_, pw_; + // stride height and width + int sh_, sw_; + // dilation height and width + int dh_, dw_; + // filter(kenerl) height and width + int fh_, fw_; + // group number + int gp_; + + // in backward data the format is different with wgtVal_ + MKLDNNMatrixPtr wgtValBwdData_; + std::shared_ptr cvtWgtVal_; + + // save forward primitive_desc use for backward + std::shared_ptr fwdPD_; + + // MKLDNNMatrixPtr with cpu device for conversion between MKLDNN device + MKLDNNMatrixPtr cpuInVal_; + MKLDNNMatrixPtr cpuInGrad_; + MKLDNNMatrixPtr cpuOutVal_; + MKLDNNMatrixPtr cpuOutGrad_; + std::shared_ptr cvtInVal_; + std::shared_ptr cvtInGrad_; + std::shared_ptr cvtOutVal_; + std::shared_ptr cvtOutGrad_; + + // if has already init the weight + bool hasInitedWgt_; + + // True by default. This impact the calculation of output size. + // For example: + // - input(+padding): 0123456789 + // - imageSize(+padding) = 10; + // - filterSize = 3; + // - stride = 2; + // - caffeMode_ is true: + // - output: (012), (234), (456), (678) + // - outputSize = 4; + // - caffeMode_ is false: + // - output: (012), (234), (456), (678), (9) + // - outputSize = 5; + bool caffeMode_; + + // weight and bias + std::unique_ptr weight_; + std::unique_ptr biases_; + +public: + explicit MKLDNNConvLayer(const LayerConfig& config) + : MKLDNNLayer(config), hasInitedWgt_(false), caffeMode_(true) {} + + ~MKLDNNConvLayer() {} + + bool init(const LayerMap& layerMap, + const ParameterMap& parameterMap) override; + + void reshape( + int& bs, int& ic, int& ih, int& iw, int oc, int& oh, int& ow) override; + + void resetFwd(std::vector& pipeline, + MKLDNNMatrixPtr& in, + MKLDNNMatrixPtr& wgt, + MKLDNNMatrixPtr& bias, + MKLDNNMatrixPtr& out) override; + + void resetBwd(std::vector& pipeline, + MKLDNNMatrixPtr& in, + MKLDNNMatrixPtr& wgt, + MKLDNNMatrixPtr& bias, + MKLDNNMatrixPtr& out) override; + + void updateInputData() override; + + void updateWeights(const UpdateCallback& callback) override; + + void convertWeightsFromPaddle() override; + + void convertWeightsToPaddle() override; + +protected: + void printSizeInfo() override { + MKLDNNLayer::printSizeInfo(); + VLOG(MKLDNN_SIZES) << getName() << ": fh: " << fh_ << ", fw: " << fw_ + << ": ph: " << ph_ << ", pw: " << pw_ << ", sh: " << sh_ + << ", sw: " << sw_ << ", dh: " << dh_ << ", dw: " << dw_; + } + + void printValueFormatFlow() override { + if (cpuInVal_) { + VLOG(MKLDNN_FMTS) << cpuInVal_->getFormat() << " >>>"; + } + MKLDNNLayer::printValueFormatFlow(); + if (cpuOutVal_) { + VLOG(MKLDNN_FMTS) << " >>> " << cpuOutVal_->getFormat(); + } + } + void printGradFormatFlow() override { + if (cpuInGrad_) { + VLOG(MKLDNN_FMTS) << cpuInGrad_->getFormat() << " <<<"; + } + MKLDNNLayer::printGradFormatFlow(); + if (cpuOutGrad_) { + VLOG(MKLDNN_FMTS) << " <<< " << cpuOutGrad_->getFormat(); + } + } + + /** + * get padding_r according to + * https://github.com/01org/mkl-dnn/blob/master/tests/gtests/ + * test_convolution_forward_common.hpp + * @note: mkldnn dilation start from 0 while paddle start from 1 + */ + mkldnn::memory::dims getPaddingR() const { + mkldnn::memory::dims padR = {ph_, pw_}; + for (int i = 0; i < 2; ++i) { + if ((ih_ - ((fh_ - 1) * dh_ + 1) + ph_ + padR[0]) / sh_ + 1 != oh_) { + ++padR[0]; + } + if ((iw_ - ((fw_ - 1) * dw_ + 1) + pw_ + padR[1]) / sw_ + 1 != ow_) { + ++padR[1]; + } + } + return padR; + } +}; + +} // namespace paddle From c39b771a53226bd79646943bb2e677413c59b0fe Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Tue, 12 Sep 2017 13:17:08 +0800 Subject: [PATCH 038/115] add test cases for mkldnn_conv --- paddle/gserver/tests/test_MKLDNN.cpp | 78 ++++++++++++++++++++++++++++ 1 file changed, 78 insertions(+) diff --git a/paddle/gserver/tests/test_MKLDNN.cpp b/paddle/gserver/tests/test_MKLDNN.cpp index e1d2270df2..e70802881e 100644 --- a/paddle/gserver/tests/test_MKLDNN.cpp +++ b/paddle/gserver/tests/test_MKLDNN.cpp @@ -17,6 +17,7 @@ limitations under the License. */ #include #include "MKLDNNTester.h" #include "ModelConfig.pb.h" +#include "paddle/math/MathUtils.h" using namespace paddle; // NOLINT @@ -63,6 +64,83 @@ TEST(MKLDNNLayer, FcLayer) { testFcLayer({/*bs*/ 15, /*ic*/ 3, /*oc*/ 6, /*ih*/ 16, /*iw*/ 16}); } +struct testConvDesc { + int bs, gp; + int ic, ih, iw; + int oc, oh, ow; + int fh, fw; + int ph, pw; + int sh, sw; + int dh, dw; +}; + +void testConvLayer(const testConvDesc& pm) { + const std::string compareTypes[] = {"mkldnn_conv", "exconv"}; + TestConfig cfg; + cfg.layerConfig.set_type(compareTypes[0]); + cfg.layerConfig.set_num_filters(pm.oc); + cfg.layerConfig.set_size(pm.oc * pm.oh * pm.ow); + // cfg.layerConfig.set_partial_sum(1); // TODO: check it + cfg.layerConfig.set_shared_biases(true); + cfg.inputDefs.push_back( + {INPUT_DATA, + "layer_0", + /* size of input layer= */ size_t(pm.ic * pm.ih * pm.iw), + /* size of weight= */ size_t(pm.oc * pm.ic * pm.fh * pm.fw / pm.gp)}); + LayerInputConfig* input = cfg.layerConfig.add_inputs(); + ConvConfig* conv = input->mutable_conv_conf(); + conv->set_groups(pm.gp); + conv->set_img_size(pm.iw); + conv->set_img_size_y(pm.ih); + conv->set_output_x(pm.ow); + conv->set_output_y(pm.oh); + conv->set_filter_size(pm.fw); + conv->set_filter_size_y(pm.fh); + conv->set_channels(pm.ic); + conv->set_padding(pm.pw); + conv->set_padding_y(pm.ph); + conv->set_stride(pm.sw); + conv->set_stride_y(pm.sh); + conv->set_dilation(pm.dw); + conv->set_dilation_y(pm.dh); + conv->set_caffe_mode(true); + conv->set_filter_channels(conv->channels() / conv->groups()); + CHECK_EQ(conv->filter_channels() * pm.gp, conv->channels()) + << "it is indivisible"; + + int fh = (pm.fh - 1) * pm.dh + 1; + int fw = (pm.fw - 1) * pm.dw + 1; + int ow = outputSize(pm.iw, fw, pm.pw, pm.sw, true); + int oh = outputSize(pm.ih, fh, pm.ph, pm.sh, true); + CHECK_EQ(ow, pm.ow) << "output size check failed"; + CHECK_EQ(oh, pm.oh) << "output size check failed"; + + MKLDNNTester tester; + for (auto biasSize : {pm.oc, 0}) { + cfg.biasSize = biasSize; + TestConfig ref = cfg; + ref.layerConfig.set_type(compareTypes[1]); + for (auto bs : {pm.bs, 1}) { + tester.run(cfg, ref, bs, pm.ih, pm.iw); + } + } +} + +TEST(MKLDNNLayer, ConvLayer) { + /* bs, gp, ic, ih, iw, oc, oh, ow, fh, fw, ph, pw, sh, sw, dh, dw */ + testConvLayer({2, 1, 3, 32, 32, 16, 32, 32, 3, 3, 1, 1, 1, 1, 1, 1}); + testConvLayer({2, 1, 8, 16, 16, 8, 16, 16, 3, 3, 1, 1, 1, 1, 1, 1}); + testConvLayer({3, 1, 16, 32, 32, 3, 32, 32, 3, 3, 1, 1, 1, 1, 1, 1}); + testConvLayer({8, 1, 16, 18, 18, 32, 18, 18, 3, 3, 1, 1, 1, 1, 1, 1}); + testConvLayer({16, 1, 1, 42, 31, 32, 23, 11, 4, 5, 3, 2, 2, 3, 1, 1}); + testConvLayer({2, 1, 8, 16, 16, 8, 8, 8, 3, 3, 1, 1, 2, 2, 1, 1}); + testConvLayer({3, 1, 8, 13, 13, 8, 7, 7, 3, 3, 1, 1, 2, 2, 1, 1}); + // with groups + testConvLayer({2, 2, 4, 5, 5, 8, 5, 5, 3, 3, 1, 1, 1, 1, 1, 1}); + testConvLayer({2, 3, 3, 5, 5, 3, 5, 5, 3, 3, 1, 1, 1, 1, 1, 1}); + testConvLayer({4, 4, 16, 3, 3, 16, 3, 3, 3, 3, 1, 1, 1, 1, 1, 1}); +} + // TODO(TJ): add branch test int main(int argc, char** argv) { From 5915138c791c7a2d6fd40c0ae6c942ca870033c8 Mon Sep 17 00:00:00 2001 From: Yibing Liu Date: Mon, 11 Sep 2017 22:22:43 -0700 Subject: [PATCH 039/115] fix a typo --- paddle/operators/reshape_op.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/operators/reshape_op.cc b/paddle/operators/reshape_op.cc index da29c89150..b7061153d2 100644 --- a/paddle/operators/reshape_op.cc +++ b/paddle/operators/reshape_op.cc @@ -67,7 +67,7 @@ Given a 2-D tensor X with 2 rows and 2 columns [[1, 2], [3, 4]] -with target shape = [1, 4], the reshape operator will tansform +with target shape = [1, 4], the reshape operator will transform the tensor X into a 1-D tensor: [1, 2, 3, 4] From b90461b9d978913d2210ef1f02d4e62197a1458c Mon Sep 17 00:00:00 2001 From: zhangchao41 Date: Tue, 12 Sep 2017 13:40:43 +0800 Subject: [PATCH 040/115] fix the typo of the param description in sequence_conv_pool --- python/paddle/trainer_config_helpers/networks.py | 6 +++--- test | 0 2 files changed, 3 insertions(+), 3 deletions(-) delete mode 100644 test diff --git a/python/paddle/trainer_config_helpers/networks.py b/python/paddle/trainer_config_helpers/networks.py index 34be203ee2..2f604ee45a 100644 --- a/python/paddle/trainer_config_helpers/networks.py +++ b/python/paddle/trainer_config_helpers/networks.py @@ -68,7 +68,7 @@ def sequence_conv_pool(input, :type context_len: int :param hidden_size: FC Layer size. :type hidden_size: int - :param context_start: context projection length. See + :param context_start: context start position. See context_projection's context_start. :type context_start: int or None :param pool_type: pooling layer type. See pooling_layer's document. @@ -1406,7 +1406,7 @@ def inputs(layers, *args): if len(args) != 0: layers.extend(args) - Inputs(*[l.name for l in layers]) + Inputs(* [l.name for l in layers]) def outputs(layers, *args): @@ -1456,7 +1456,7 @@ def outputs(layers, *args): assert len(layers) > 0 if HasInputsSet(): # input already set - Outputs(*[l.name for l in layers]) + Outputs(* [l.name for l in layers]) return # just return outputs. if len(layers) != 1: diff --git a/test b/test deleted file mode 100644 index e69de29bb2..0000000000 From 44acc7514a4c53fd21d4649aab4cb330b5750582 Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Tue, 12 Sep 2017 14:01:01 +0800 Subject: [PATCH 041/115] add python interface for mkldnn_conv --- python/paddle/trainer/config_parser.py | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py index 356e1d8b6f..d633cae4aa 100644 --- a/python/paddle/trainer/config_parser.py +++ b/python/paddle/trainer/config_parser.py @@ -2054,20 +2054,29 @@ class ConvLayerBase(LayerBase): if num_filters is not None: self.config.num_filters = num_filters + use_mkldnn = int(g_command_config_args.get("use_mkldnn", 0)) use_gpu = int(g_command_config_args.get("use_gpu", 0)) parallel_nn = int(g_command_config_args.get("parallel_nn", 0)) - # Automatically select cudnn_type for GPU and exconv for CPU + # Automatically select cudnn_type for GPU, exconv for CPU + # and mkldnn_conv for MKLDNN # if set type=conv, but still reserve the way user specify - # exconv or cudnn_conv manually. + # exconv, mkldnn_conv or cudnn_conv manually. if self.layer_type == "cudnn_conv": config_assert(use_gpu, "cudnn_conv only support GPU") + if self.layer_type == "mkldnn_conv": + config_assert(use_mkldnn, "mkldnn_conv only support MKLDNN") + if (use_gpu == 1 and self.layer_type != "exconv" and + self.layer_type != "mkldnn_conv" and (parallel_nn == 0 or self.config.device > -1)): self.layer_type = "cudnn_conv" else: - self.layer_type = "exconv" + if (use_mkldnn == 1): + self.layer_type = "mkldnn_conv" + else: + self.layer_type = "exconv" # need to specify layer in config self.config.type = self.layer_type @@ -2099,6 +2108,11 @@ class ConvLayer(ConvLayerBase): layer_type = 'exconv' +@config_layer('mkldnn_conv') +class ConvLayer(ConvLayerBase): + layer_type = 'mkldnn_conv' + + @config_layer('cudnn_conv') class ConvLayer(ConvLayerBase): layer_type = 'cudnn_conv' From 1f839a6618db31b9be26f5d2604d98ef4fd2f46e Mon Sep 17 00:00:00 2001 From: caoying03 Date: Tue, 12 Sep 2017 17:03:17 +0800 Subject: [PATCH 042/115] fix bug in prelu parsing. --- python/paddle/trainer/config_parser.py | 1 + .../protostr/test_prelu_layer.protostr | 45 ++++++++++++++++++- .../tests/configs/test_prelu_layer.py | 2 + 3 files changed, 46 insertions(+), 2 deletions(-) diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py index 356e1d8b6f..4f68a89534 100644 --- a/python/paddle/trainer/config_parser.py +++ b/python/paddle/trainer/config_parser.py @@ -2034,6 +2034,7 @@ class ParameterReluLayer(LayerBase): config_assert(input_layer.size % partial_sum == 0, "a wrong setting for partial_sum") self.set_layer_size(input_layer.size) + self.config.partial_sum = partial_sum self.create_input_parameter(0, input_layer.size / partial_sum) diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/test_prelu_layer.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_prelu_layer.protostr index 64d227565f..94ad56cab0 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/protostr/test_prelu_layer.protostr +++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_prelu_layer.protostr @@ -14,6 +14,29 @@ layers { input_layer_name: "input" input_parameter_name: "___prelu_layer_0__.w0" } + partial_sum: 1 +} +layers { + name: "__prelu_layer_1__" + type: "prelu" + size: 300 + active_type: "" + inputs { + input_layer_name: "input" + input_parameter_name: "___prelu_layer_1__.w0" + } + partial_sum: 1 +} +layers { + name: "__prelu_layer_2__" + type: "prelu" + size: 300 + active_type: "" + inputs { + input_layer_name: "input" + input_parameter_name: "___prelu_layer_2__.w0" + } + partial_sum: 5 } parameters { name: "___prelu_layer_0__.w0" @@ -23,14 +46,32 @@ parameters { initial_strategy: 0 initial_smart: true } +parameters { + name: "___prelu_layer_1__.w0" + size: 300 + initial_mean: 0.0 + initial_std: 0.057735026919 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___prelu_layer_2__.w0" + size: 60 + initial_mean: 0.0 + initial_std: 0.129099444874 + initial_strategy: 0 + initial_smart: true +} input_layer_names: "input" -output_layer_names: "__prelu_layer_0__" +output_layer_names: "__prelu_layer_2__" sub_models { name: "root" layer_names: "input" layer_names: "__prelu_layer_0__" + layer_names: "__prelu_layer_1__" + layer_names: "__prelu_layer_2__" input_layer_names: "input" - output_layer_names: "__prelu_layer_0__" + output_layer_names: "__prelu_layer_2__" is_recurrent_layer_group: false } diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_prelu_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_prelu_layer.py index 2e3057f323..aae90fab32 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_prelu_layer.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_prelu_layer.py @@ -2,5 +2,7 @@ from paddle.trainer_config_helpers import * data = data_layer(name='input', size=300) prelu = prelu_layer(input=data) +prelu = prelu_layer(input=data, partial_sum=1) +prelu = prelu_layer(input=data, partial_sum=5) outputs(prelu) From 5991a35e582c295629978f50b162b98d3192a3df Mon Sep 17 00:00:00 2001 From: Liu Yiqun Date: Tue, 12 Sep 2017 10:10:46 +0000 Subject: [PATCH 043/115] Install the automatically built, compiled libraries under third_party when executing `make install` and WITH_C_API is set. --- cmake/external/gflags.cmake | 13 +++++++++++-- cmake/external/glog.cmake | 13 +++++++++++-- cmake/external/openblas.cmake | 20 ++++++++++++++++++++ cmake/external/protobuf.cmake | 9 +++++++++ cmake/external/zlib.cmake | 9 +++++++++ 5 files changed, 60 insertions(+), 4 deletions(-) diff --git a/cmake/external/gflags.cmake b/cmake/external/gflags.cmake index 16e5bef4cd..01a2f4d5fa 100644 --- a/cmake/external/gflags.cmake +++ b/cmake/external/gflags.cmake @@ -18,9 +18,9 @@ SET(GFLAGS_SOURCES_DIR ${THIRD_PARTY_PATH}/gflags) SET(GFLAGS_INSTALL_DIR ${THIRD_PARTY_PATH}/install/gflags) SET(GFLAGS_INCLUDE_DIR "${GFLAGS_INSTALL_DIR}/include" CACHE PATH "gflags include directory." FORCE) IF(WIN32) - set(GFLAGS_LIBRARIES "${GFLAGS_INSTALL_DIR}/lib/gflags.lib" CACHE FILEPATH "GFLAGS_LIBRARIES" FORCE) + set(GFLAGS_LIBRARIES "${GFLAGS_INSTALL_DIR}/lib/gflags.lib" CACHE FILEPATH "GFLAGS_LIBRARIES" FORCE) ELSE(WIN32) - set(GFLAGS_LIBRARIES "${GFLAGS_INSTALL_DIR}/lib/libgflags.a" CACHE FILEPATH "GFLAGS_LIBRARIES" FORCE) + set(GFLAGS_LIBRARIES "${GFLAGS_INSTALL_DIR}/lib/libgflags.a" CACHE FILEPATH "GFLAGS_LIBRARIES" FORCE) ENDIF(WIN32) INCLUDE_DIRECTORIES(${GFLAGS_INCLUDE_DIR}) @@ -56,3 +56,12 @@ SET_PROPERTY(TARGET gflags PROPERTY IMPORTED_LOCATION ${GFLAGS_LIBRARIES}) ADD_DEPENDENCIES(gflags extern_gflags) LIST(APPEND external_project_dependencies gflags) + +IF(WITH_C_API) + INSTALL(DIRECTORY ${GFLAGS_INCLUDE_DIR} DESTINATION third_party/gflags) + IF(ANDROID) + INSTALL(FILES ${GFLAGS_LIBRARIES} DESTINATION third_party/gflags/lib/${ANDROID_ABI}) + ELSE() + INSTALL(FILES ${GFLAGS_LIBRARIES} DESTINATION third_party/gflags/lib) + ENDIF() +ENDIF() diff --git a/cmake/external/glog.cmake b/cmake/external/glog.cmake index 8a594a825a..b450a30166 100644 --- a/cmake/external/glog.cmake +++ b/cmake/external/glog.cmake @@ -19,9 +19,9 @@ SET(GLOG_INSTALL_DIR ${THIRD_PARTY_PATH}/install/glog) SET(GLOG_INCLUDE_DIR "${GLOG_INSTALL_DIR}/include" CACHE PATH "glog include directory." FORCE) IF(WIN32) - SET(GLOG_LIBRARIES "${GLOG_INSTALL_DIR}/lib/libglog.lib" CACHE FILEPATH "glog library." FORCE) + SET(GLOG_LIBRARIES "${GLOG_INSTALL_DIR}/lib/libglog.lib" CACHE FILEPATH "glog library." FORCE) ELSE(WIN32) - SET(GLOG_LIBRARIES "${GLOG_INSTALL_DIR}/lib/libglog.a" CACHE FILEPATH "glog library." FORCE) + SET(GLOG_LIBRARIES "${GLOG_INSTALL_DIR}/lib/libglog.a" CACHE FILEPATH "glog library." FORCE) ENDIF(WIN32) INCLUDE_DIRECTORIES(${GLOG_INCLUDE_DIR}) @@ -56,3 +56,12 @@ ADD_DEPENDENCIES(glog extern_glog gflags) LINK_LIBRARIES(glog gflags) LIST(APPEND external_project_dependencies glog) + +IF(WITH_C_API) + INSTALL(DIRECTORY ${GLOG_INCLUDE_DIR} DESTINATION third_party/glog) + IF(ANDROID) + INSTALL(FILES ${GLOG_LIBRARIES} DESTINATION third_party/glog/lib/${ANDROID_ABI}) + ELSE() + INSTALL(FILES ${GLOG_LIBRARIES} DESTINATION third_party/glog/lib) + ENDIF() +ENDIF() diff --git a/cmake/external/openblas.cmake b/cmake/external/openblas.cmake index f9e05af59f..4fc8d43fc1 100644 --- a/cmake/external/openblas.cmake +++ b/cmake/external/openblas.cmake @@ -73,6 +73,26 @@ IF(NOT ${CBLAS_FOUND}) UPDATE_COMMAND "" CONFIGURE_COMMAND "" ) + + IF(WITH_C_API) + INSTALL(DIRECTORY ${CBLAS_INC_DIR} DESTINATION third_party/openblas) + # Because libopenblas.a is a symbolic link of another library, thus need to + # install the whole directory. + IF(ANDROID) + SET(TMP_INSTALL_DIR third_party/openblas/lib/${ANDROID_ABI}) + ELSE() + SET(TMP_INSTALL_DIR third_party/openblas/lib) + ENDIF() + INSTALL(CODE "execute_process( + COMMAND ${CMAKE_COMMAND} -E copy_directory ${CBLAS_INSTALL_DIR}/lib + destination ${CMAKE_INSTALL_PREFIX}/${TMP_INSTALL_DIR} + )" + ) + INSTALL(CODE "MESSAGE(STATUS \"Installing: \" + \"${CBLAS_INSTALL_DIR}/lib -> ${CMAKE_INSTALL_PREFIX}/${TMP_INSTALL_DIR}\" + )" + ) + ENDIF() ENDIF(NOT ${CBLAS_FOUND}) MESSAGE(STATUS "BLAS library: ${CBLAS_LIBRARIES}") diff --git a/cmake/external/protobuf.cmake b/cmake/external/protobuf.cmake index e629d61585..a887be2e2a 100644 --- a/cmake/external/protobuf.cmake +++ b/cmake/external/protobuf.cmake @@ -223,6 +223,15 @@ IF(NOT PROTOBUF_FOUND) SET(PROTOBUF_PROTOC_LIBRARY ${extern_protobuf_PROTOC_LIBRARY} CACHE FILEPATH "protoc library." FORCE) + IF(WITH_C_API) + INSTALL(DIRECTORY ${PROTOBUF_INCLUDE_DIR} DESTINATION third_party/protobuf) + IF(ANDROID) + INSTALL(FILES ${PROTOBUF_LIBRARY} DESTINATION third_party/protobuf/lib/${ANDROID_ABI}) + ELSE() + INSTALL(FILES ${PROTOBUF_LIBRARY} DESTINATION third_party/protobuf/lib) + ENDIF() + ENDIF() + IF(CMAKE_CROSSCOMPILING) PROMPT_PROTOBUF_LIB(protobuf_host extern_protobuf) ELSE() diff --git a/cmake/external/zlib.cmake b/cmake/external/zlib.cmake index 45ca5542b7..5aecab90ca 100644 --- a/cmake/external/zlib.cmake +++ b/cmake/external/zlib.cmake @@ -49,3 +49,12 @@ ExternalProject_Add( ) LIST(APPEND external_project_dependencies zlib) + +IF(WITH_C_API) + INSTALL(DIRECTORY ${ZLIB_INCLUDE_DIR} DESTINATION third_party/zlib) + IF(ANDROID) + INSTALL(FILES ${ZLIB_LIBRARIES} DESTINATION third_party/zlib/lib/${ANDROID_ABI}) + ELSE() + INSTALL(FILES ${ZLIB_LIBRARIES} DESTINATION third_party/zlib/lib) + ENDIF() +ENDIF() From 27d7812b872bc83571dab418c0b4bcd448c42589 Mon Sep 17 00:00:00 2001 From: Liu Yiqun Date: Tue, 12 Sep 2017 18:15:55 +0800 Subject: [PATCH 044/115] Fix compiling error when using clang. The detail of the error is: "suggest braces around initialization of subobject [-Werror,-Wmissing-braces]" --- paddle/function/neon/NeonDepthwiseConv.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/function/neon/NeonDepthwiseConv.h b/paddle/function/neon/NeonDepthwiseConv.h index aefeea78ba..33722d3cac 100644 --- a/paddle/function/neon/NeonDepthwiseConv.h +++ b/paddle/function/neon/NeonDepthwiseConv.h @@ -594,7 +594,7 @@ struct StridePadding { float32x4_t s1 = vdupq_n_f32(0.f); for (int s = 0; s < step; s++) { float32x4_t s0 = vld1q_f32(input); - float32x4x2_t v = {s0, s1}; + float32x4x2_t v = {{s0, s1}}; vst2q_f32(inputPadding, v); input += 4; inputPadding += 8; From 8a2ff350ed041e1805f860ec48d2a2c5fa4f9550 Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Tue, 12 Sep 2017 18:28:17 +0800 Subject: [PATCH 045/115] simplify the python/paddle/v2/framework/tests/CMakeLists.txt --- doc/howto/dev/new_op_cn.md | 6 +-- .../paddle/v2/framework/tests/CMakeLists.txt | 43 +++---------------- .../tests/{mnist.py => test_mnist.py} | 0 3 files changed, 6 insertions(+), 43 deletions(-) rename python/paddle/v2/framework/tests/{mnist.py => test_mnist.py} (100%) diff --git a/doc/howto/dev/new_op_cn.md b/doc/howto/dev/new_op_cn.md index 58665e9f2b..07dce05df4 100644 --- a/doc/howto/dev/new_op_cn.md +++ b/doc/howto/dev/new_op_cn.md @@ -354,11 +354,7 @@ class TestMulGradOp(GradientChecker): ### 编译和执行单元测试 -单元测试编写完成之后,在[`python/paddle/v2/framework/tests/CMakeLists.txt`](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/framework/tests/CMakeLists.txt)中添加以下内容,将单元测试加入工程: - -``` -py_test(test_mul_op SRCS test_mul_op.py) -``` +无需修改 [`python/paddle/v2/framework/tests/CMakeLists.txt`](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/framework/tests/CMakeLists.txt) 文件,新增的 `test_*.py` 单元测试会被自动加入工程。 请注意,**不同于Op的编译测试,运行单元测试测时需要编译整个工程**,并且编译时需要打开`WITH_TESTING`, 即`cmake paddle_dir -DWITH_TESTING=ON`。编译成功后,执行下面的命令来运行单元测试: diff --git a/python/paddle/v2/framework/tests/CMakeLists.txt b/python/paddle/v2/framework/tests/CMakeLists.txt index 6b22c00082..4d7664469e 100644 --- a/python/paddle/v2/framework/tests/CMakeLists.txt +++ b/python/paddle/v2/framework/tests/CMakeLists.txt @@ -1,38 +1,5 @@ -py_test(test_net SRCS test_net.py) - -py_test(test_scope SRCS test_scope.py) - -py_test(test_tensor SRCS test_tensor.py) -py_test(test_mul_op SRCS test_mul_op.py) -py_test(test_cos_sim_op SRCS test_cos_sim_op.py) - -py_test(test_mean_op SRCS test_mean_op.py) - -py_test(test_protobuf SRCS test_protobuf.py) - -py_test(test_add_two_op SRCS test_add_two_op.py) -py_test(test_sigmoid_op SRCS test_sigmoid_op.py) -py_test(test_softmax_op SRCS test_softmax_op.py) -py_test(test_cross_entropy_op SRCS test_cross_entropy_op.py) -py_test(test_gather_op SRCS test_gather_op.py) -py_test(test_scatter_op SRCS test_scatter_op.py) -py_test(test_fill_zeros_like_op SRCS test_fill_zeros_like_op.py) -py_test(test_top_k_op SRCS test_top_k_op.py) - -py_test(test_rowwise_add_op SRCS test_rowwise_add_op.py) - -py_test(test_default_scope_funcs SRCS test_default_scope_funcs.py) - -py_test(test_operator SRCS test_operator.py) -py_test(test_gaussian_random_op SRCS test_gaussian_random_op.py) -py_test(test_uniform_random_op SRCS test_uniform_random_op.py) -py_test(test_recurrent_op SRCS test_recurrent_op.py) -py_test(test_sgd_op SRCS test_sgd_op.py) -py_test(test_gradient_checker SRCS test_gradient_checker.py) -py_test(test_lookup_table SRCS test_lookup_table.py) -py_test(test_scale_and_identity_op SRCS test_scale_and_identity_op.py) -py_test(test_sum_op SRCS test_sum_op.py) -py_test(mnist SRCS mnist.py) -py_test(test_concat_op SRCS test_concat_op.py) -py_test(test_squared_l2_distance_op SRCS test_squared_l2_distance_op.py) -py_test(test_reshape_op SRCS test_reshape_op.py) +file(GLOB TEST_OPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "test_*.py") +string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}") +foreach(src ${TEST_OPS}) + py_test(${src} SRCS ${src}.py) +endforeach() diff --git a/python/paddle/v2/framework/tests/mnist.py b/python/paddle/v2/framework/tests/test_mnist.py similarity index 100% rename from python/paddle/v2/framework/tests/mnist.py rename to python/paddle/v2/framework/tests/test_mnist.py From e76eb8534d51f3722d8cced2ff5a69beaa0e6515 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Tue, 12 Sep 2017 20:11:34 +0800 Subject: [PATCH 046/115] fix SwitchOrderLayer forward --- paddle/gserver/layers/SwitchOrderLayer.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/paddle/gserver/layers/SwitchOrderLayer.cpp b/paddle/gserver/layers/SwitchOrderLayer.cpp index d7eee6eaf0..e97809141a 100644 --- a/paddle/gserver/layers/SwitchOrderLayer.cpp +++ b/paddle/gserver/layers/SwitchOrderLayer.cpp @@ -83,8 +83,7 @@ void SwitchOrderLayer::forward(PassType passType) { setOutDims(); resetOutput(outDims_[0], outDims_[1] * outDims_[2] * outDims_[3]); if (heightAxis_.size() > 0) { - getOutputValue()->reshape(reshapeHeight_, reshapeWidth_); - getOutputGrad()->reshape(reshapeHeight_, reshapeWidth_); + resetOutput(reshapeHeight_, reshapeWidth_); } // switch NCHW to NHWC From 25be0ede764583f851fc1863ad9d2d65cab893c1 Mon Sep 17 00:00:00 2001 From: Helin Wang Date: Tue, 12 Sep 2017 11:29:49 -0700 Subject: [PATCH 047/115] fix cpplint error --- paddle/operators/reshape_op.h | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/paddle/operators/reshape_op.h b/paddle/operators/reshape_op.h index 26708e72dc..873acf3078 100644 --- a/paddle/operators/reshape_op.h +++ b/paddle/operators/reshape_op.h @@ -1,4 +1,3 @@ - /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); @@ -52,5 +51,5 @@ class ReshapeGradKernel : public framework::OpKernel { d_x->Resize(in_dims); } }; -} -} +} // namespace operators +} // namespace paddle From 5c4dfdebcb12d17b8fe3090b874a496ea38dfcf4 Mon Sep 17 00:00:00 2001 From: dongzhihong Date: Tue, 12 Sep 2017 12:12:48 -0700 Subject: [PATCH 048/115] add more rules --- paddle/operators/name_convention.md | 26 +++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/paddle/operators/name_convention.md b/paddle/operators/name_convention.md index 280ab8d317..182c74e78b 100644 --- a/paddle/operators/name_convention.md +++ b/paddle/operators/name_convention.md @@ -1,15 +1,27 @@ ## Operator Name Convention -To make the operator document itself more clear. we recommend operator names observe the listing conventions. +To make the operator document itself more clear, we recommend operator names obey the listing conventions. -### Input/Output names +### OpMaker names -* Variable name is prefer uppercase. e.g. `X`, `Y`. But when the variable is tensor, its name should lowercase. e.g. `matrix`, to discriminate with other one. +When defining an operator in Paddle, a corresponding `OpMaker` need to be defined. All the `Input`/`Output` and `attrs` will write into the `OpProto` , and will be used in client language to create operator. -* element wise operator, math operator or similar op, please obey common name convention. if the operator only have one output, use `Out`. +- Input/Output. + - names follow the `CamelCase` but the first character is uppercase. e.g. `X`, `Y`, `Matrix`, `LastAxisInMatrix`. Input/Output much more like Variables, we prefer to meaningful English words. + - If an operator's Input/Output are not meaningful words, input name starts from `X`. e.g. `X`, `Y`, and output name starts from `Out`. e.g. `Out`. -* we prefer more meaningful input/output name. +* Attribute. + * Attribute name follows the normal `CamelCase`. e.g. `x`, `y`, `axis`, `rowwiseMatrix`. Also, attribute name prefers to meaningful English words. +* Comments. + * Input/Output/Attr comment follow the format of `type:meaning`. e.g. `AddOutput("Out", "EigenTensor,Tensor: Output of XX")`. we prefer to more meaningful comment. Some comments like `The first input of Operator` contains no information, we forbid it. + * Operator comment format of` R"DOC(your comment here)DOC"`. if there is math calculation in this operator, you should write the equation in the comment. e.g. `Out = X + Y`. ### Best Practice -e.g. `rowwise_add`, inputs : `X`, `Y`, outputs : `Out` -e.g. `cosine` , inputs : `X`, `axis`, outputs : `Out` + +- The operator has one input, one output. e.g.`relu`, inputs: `X`, outputs: `Out`. + +- The operator has two input, one output. e.g. `rowwise_add`, inputs : `X`, `Y`, outputs : `Out`. + +- The operator contains attribute. e.g. `cosine`, inputs : `X`, `axis`, outputs : `Out`. + + ​ From b8e75c1f1a0b56993b3b1a528784e9e86d5a7277 Mon Sep 17 00:00:00 2001 From: zchen0211 Date: Tue, 12 Sep 2017 15:10:31 -0700 Subject: [PATCH 049/115] cond op --- paddle/operators/CMakeLists.txt | 2 + paddle/operators/cond_op.cc | 45 ++++ paddle/operators/cond_op.h | 232 ++++++++++++++++++ paddle/pybind/pybind.cc | 23 ++ python/paddle/v2/framework/op.py | 22 ++ .../paddle/v2/framework/tests/test_cond_op.py | 114 +++++++++ 6 files changed, 438 insertions(+) create mode 100644 paddle/operators/cond_op.cc create mode 100644 paddle/operators/cond_op.h create mode 100644 python/paddle/v2/framework/tests/test_cond_op.py diff --git a/paddle/operators/CMakeLists.txt b/paddle/operators/CMakeLists.txt index f9ea25ab04..639ccd4052 100644 --- a/paddle/operators/CMakeLists.txt +++ b/paddle/operators/CMakeLists.txt @@ -55,12 +55,14 @@ set(DEPS_OPS minus_op mul_op recurrent_op + cond_op scale_op) op_library(identity_op DEPS scale_op) op_library(minus_op DEPS scale_op) op_library(mul_op DEPS math_function) op_library(recurrent_op SRCS recurrent_op.cc rnn/recurrent_op_utils.cc DEPS framework_proto tensor operator net_op) +op_library(cond_op SRCS cond_op.cc DEPS framework_proto tensor operator net_op) op_library(scale_op DEPS net_op) list(REMOVE_ITEM GENERAL_OPS ${DEPS_OPS}) diff --git a/paddle/operators/cond_op.cc b/paddle/operators/cond_op.cc new file mode 100644 index 0000000000..cb7fed7ebd --- /dev/null +++ b/paddle/operators/cond_op.cc @@ -0,0 +1,45 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/cond_op.h" +#include "paddle/framework/op_registry.h" +#include "paddle/operators/net_op.h" + +namespace paddle { +namespace operators { + +class CondOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker { + public: + CondOpProtoAndCheckerMaker(OpProto *proto, OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("Cond", "The condition, which is a bool vector"); + AddInput("Xs", "Inputs of Subnets").AsDuplicable(); + AddOutput("Outs", "Outputs of Cond_Op after merge").AsDuplicable(); + + AddOutput("SubScopes", "sub scopes for true and false branches"); + AddOutput("IndexTensors", "Index Tensors contains indices for true/false"); + + AddComment(R"DOC( +Sample dependent Cond Operator: +The equation is: Out[i] = subnet_t[i], if Cond[i] == true +Out[i] = subnet_t[i], if Cond[i] == false +)DOC"); + } +}; + +} // namespace operators +} // namespace paddle + +REGISTER_OP_WITHOUT_GRADIENT(cond_op, paddle::operators::CondOp, + paddle::operators::CondOpProtoAndCheckerMaker); diff --git a/paddle/operators/cond_op.h b/paddle/operators/cond_op.h new file mode 100644 index 0000000000..b776f8ccd9 --- /dev/null +++ b/paddle/operators/cond_op.h @@ -0,0 +1,232 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once +#include +#include "glog/logging.h" +#include "paddle/framework/ddim.h" +#include "paddle/framework/eigen.h" +#include "paddle/framework/operator.h" +#include "paddle/framework/tensor.h" +#include "paddle/operators/gather.h" +#include "paddle/operators/scatter.h" + +namespace paddle { +namespace operators { + +using namespace paddle::framework; + +class CondOp : public OperatorBase { + public: + CondOp(const std::string& type, const VariableNameMap& inputs, + const VariableNameMap& outputs, const AttributeMap& attrs) + : OperatorBase(type, inputs, outputs, attrs) { + index_.resize(2); + sub_net_op_.resize(2); + LOG(INFO) << "Initialization Done."; + } + + CondOp(const CondOp& o) + : framework::OperatorBase( + static_cast(o)) { + // TODO(yuyang18): Implement copy ctor well. + PADDLE_THROW("Not implemented"); + } + + void CreateScope(const Scope& scope) const { + auto sub_scopes_var = scope.FindVar("SubScopes"); + PADDLE_ENFORCE(sub_scopes_var != nullptr, ""); + auto sub_scopes = sub_scopes_var->GetMutable>(); + auto& sub_scope = scope.NewScope(); + sub_scopes->push_back(&sub_scope); + } + + void CreateIndexTensor(const Scope& scope) const { + auto index_tensors_var = scope.FindVar("IndexTensors"); + PADDLE_ENFORCE(index_tensors_var != nullptr, ""); + auto& index_tensors = + *index_tensors_var->GetMutable>(); + Tensor index_tensor; + index_tensors.push_back(&index_tensor); + } + + /** + * InferShape must be called before Run. + */ + void InferShape(const framework::Scope& scope) const override { + auto sub_scopes_var = scope.FindVar("SubScopes"); + PADDLE_ENFORCE_NOT_NULL(sub_scopes_var); + auto& sub_scopes = *sub_scopes_var->GetMutable>(); + // auto& index_tensors = + // *scope.FindVar("IndexTensors")->GetMutable>(); + + for (int i = 0; i < 2; ++i) { + // Create two sub scopes for true and false branches + // sub_scopes[0] for the true branch and sub_scopes[1] for the false + // branch + CreateScope(scope); + + // Create two tensors for true and false indices + // index_tensors[0] for the true branch and index_tensors[1] for the false + // branch + CreateIndexTensor(scope); + + for (auto& input : Inputs("Xs")) { + // Create a new tensor in sub-scope for input-type tensor + Variable* v = sub_scopes[i]->NewVar(input); + Tensor* sub_input = v->GetMutable(); + sub_input->Resize(scope.FindVar(input)->GetMutable()->dims()); + } + + // Inputs that do not require tailoring + /*for (auto& input : (*sub_net_op_[i]).Inputs()) { + // weights are located in the parent scope rather than sub scope + for (auto& var_name : input.second) { + if (!sub_scopes[i]->FindVar(var_name)) { + sub_scopes[i]->NewVar(var_name)->GetMutable(); + } + } + }*/ + + // Outputs + for (auto& output : (*sub_net_op_[i]).Outputs()) { + for (auto& var_name : output.second) { + sub_scopes[i]->NewVar(var_name); + } + } + + // each net calls InferShape + LOG(INFO) << "OK 3"; + sub_net_op_[i]->InferShape(*sub_scopes[i]); + LOG(INFO) << "OK 4"; + } + + for (auto& output : Outputs("Outs")) { + Tensor* tensor_t_out = + sub_scopes[0]->FindVar(output)->GetMutable(); + Tensor* tensor_f_out = + sub_scopes[1]->FindVar(output)->GetMutable(); + Tensor* tensor_out = scope.FindVar(output)->GetMutable(); + // check output size should be same + PADDLE_ENFORCE_EQ(tensor_t_out->dims(), tensor_f_out->dims(), + "Outputs not of the same shape"); + tensor_out->Resize(tensor_t_out->dims()); + } + LOG(INFO) << "OK 5"; + } + + // Set True Block + void set_truenet(std::unique_ptr net) { + sub_net_op_[0] = std::move(net); + } + + // Set False Block + void set_falsenet(std::unique_ptr net) { + sub_net_op_[1] = std::move(net); + } + + void Run(const framework::Scope& scope, + const platform::DeviceContext& dev_ctx) const override { + auto sub_scopes = scope.FindVar("SubScopes")->Get>(); + auto index_tensors = + scope.FindVar("IndexTensors")->Get>(); + + std::string cond_name = Input("Cond"); + Variable* cond_var = scope.FindVar(cond_name); + PADDLE_ENFORCE_NOT_NULL(cond_var) + const Tensor* cond = cond_var->GetMutable(); + + // Step 1: get the true/false index at runtime + // index_[0]: vector, contains all index for cond[i] == true + // index_[1]: vector, contains all index for cond[i] == false + for (int i = 0; i < 2; ++i) index_[i].clear(); + + const bool* cond_data = cond->data(); + for (int i = 0; i < cond->dims()[0]; ++i) { + if (cond_data[i]) + index_[0].push_back(i); + else + index_[1].push_back(i); + } + // put index_[0] and index_[1] into two tensors: + // index_tensor_[0] and index_tensor_[1] + framework::DDim dim = paddle::framework::make_ddim({0}); + for (int i = 0; i < 2; ++i) { + dim[0] = index_[i].size(); + int* tmp_ptr = + index_tensors[i]->mutable_data(dim, platform::CPUPlace()); + index_tensors[i]->Resize(dim); + memcpy(tmp_ptr, index_[i].data(), dim[0] * sizeof(int)); + } + + // Step 2: collect data by calling gather + for (int i = 0; i < 2; ++i) { + // i= 0/i for True and False branches respectively + for (auto& input : Inputs("Xs")) { + // find Tensor + // Tensor* tensor_parent = scope.FindVar(input)->GetMutable(); + Variable* v = scope.FindVar(input); + Tensor* tensor_parent = v->GetMutable(); + // Tensor* tensor_child = + // sub_scope_[i].FindVar(input)->GetMutable(); + v = sub_scopes[i]->FindVar(input); + Tensor* tensor_child = v->GetMutable(); + Gather(dev_ctx.GetPlace(), tensor_parent, index_tensors[i], + tensor_child); + } + } + + // Step 3: run + for (int i = 0; i < 2; ++i) sub_net_op_[i]->Run(*sub_scopes[i], dev_ctx); + + // Step 4: merge output results + for (int i = 0; i < 2; ++i) { + // i= 0/i for True and False branches respectively + // for (auto& output : GetAttr>("sub_outputs")) { + for (auto& output : Outputs("Outs")) { + // find Tensor + Variable* v = scope.FindVar(output); + Tensor* tensor_parent = v->GetMutable(); + v = sub_scopes[i]->FindVar(output); + Tensor* tensor_child = v->GetMutable(); + ScatterUpdate(dev_ctx.GetPlace(), tensor_child, index_tensors[i], + tensor_parent); + } + } + } + + private: + // sub_net_op_[0]: subnet_t + // sub_net_op_[1]: subnet_f + std::vector> sub_net_op_; + + // index_[0]: True_index; + // index_[1]: False_index; + mutable std::vector> index_; +}; + +/* +class CondGradientOp final : public OperatorBase { +public: + void Init() override; + + virtual void InferShape(const std::shared_ptr& scope) const +override; + + virtual void Run(const std::shared_ptr& scope, + const platform::DeviceContext& dev_ctx) const override; +};*/ + +} // namespace operators +} // namespace paddle diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc index 16a2368aae..3eeae856fb 100644 --- a/paddle/pybind/pybind.cc +++ b/paddle/pybind/pybind.cc @@ -41,6 +41,7 @@ USE_OP(softmax); USE_OP(rowwise_add); USE_OP(fill_zeros_like); USE_NO_KERNEL_OP(recurrent); +USE_NO_KERNEL_OP(cond); USE_OP(gaussian_random); USE_OP(uniform_random); USE_OP(lookup_table); @@ -324,6 +325,28 @@ All parameter, weight, gradient are variables in Paddle. [](operators::RecurrentOp &self, const operators::NetOp &net) -> void { self.set_stepnet(net.Clone()); }); + // cond_op + py::class_(m, "CondOp") + .def_static("create", + [](py::bytes protobin) -> operators::CondOp * { + OpDesc desc; + PADDLE_ENFORCE(desc.ParsePartialFromString(protobin), + "Cannot parse user input to OpDesc"); + PADDLE_ENFORCE(desc.IsInitialized(), + "User OpDesc is not initialized, reason %s", + desc.InitializationErrorString()); + auto cond_op = OpRegistry::CreateOp(desc); + return static_cast(cond_op.release()); + }) + .def("set_truenet", + [](operators::CondOp &self, const operators::NetOp &net) -> void { + self.set_truenet(net.Clone()); + }) + .def("set_falsenet", + [](operators::CondOp &self, const operators::NetOp &net) -> void { + self.set_falsenet(net.Clone()); + }); + m.def("unique_integer", UniqueIntegerGenerator); m.def("is_compile_gpu", IsCompileGPU); diff --git a/python/paddle/v2/framework/op.py b/python/paddle/v2/framework/op.py index 9e665adad2..bddd4d8908 100644 --- a/python/paddle/v2/framework/op.py +++ b/python/paddle/v2/framework/op.py @@ -215,5 +215,27 @@ class __RecurrentOp__(object): return core.RecurrentOp.create(proto.SerializeToString()) +class __CondOp__(object): + __proto__ = None + type = 'cond_op' + + def __init__(self): + # cache recurrent_op's proto + if self.__proto__ is None: + for op_proto in get_all_op_protos(): + if op_proto.type == self.type: + self.__proto__ = op_proto + + def __call__(self, *args, **kwargs): + if self.type not in args and 'type' not in kwargs: + kwargs['type'] = self.type + # create proto + create_method = OpDescCreationMethod(self.__proto__) + proto = create_method(*args, **kwargs) + # create condop + return core.CondOp.create(proto.SerializeToString()) + + Operator = OperatorFactory() # The default global factory RecurrentOp = __RecurrentOp__() +CondOp = __CondOp__() diff --git a/python/paddle/v2/framework/tests/test_cond_op.py b/python/paddle/v2/framework/tests/test_cond_op.py new file mode 100644 index 0000000000..1fe5889b7f --- /dev/null +++ b/python/paddle/v2/framework/tests/test_cond_op.py @@ -0,0 +1,114 @@ +import logging +import paddle.v2.framework.core as core +import unittest +import numpy as np +from paddle.v2.framework.op import Operator, CondOp + + +class PySimpleCond(object): + ''' + A simple implementation of dynamic if-else based on numpy + ''' + + def __init__(self): + array = [True] * 10 + for i in range(1, 10, 2): + array[i] = False + self.cond = np.array(array) + self.x = np.ones(shape=(10, 1)) + + def forward(self): + self.index_t = np.where(self.cond) + self.index_f = np.where(self.cond == False) + y_t = self.x[self.index_t] + y_f = self.x[self.index_f] + y_t = y_t * 2. + y_f = y_f * (-2.) + output = np.zeros(shape=(10, 1)) + output[self.index_t] = y_t + output[self.index_f] = y_f + return output + + +class PySimpleCondTest(unittest.TestCase): + def setUp(self): + self.condnn = PySimpleCond() + + def test_forward(self): + output = self.condnn.forward() + print 'output', output + + +def create_tensor(scope, name, shape, np_data): + tensor = scope.new_var(name).get_tensor() + tensor.set_dims(shape) + tensor.set(np_data, core.CPUPlace()) + return tensor + + +class TestCondOp(unittest.TestCase): + ''' + Test CondOp + + equation: + cond = [True, False, True, False, ...] + y[index_t] = x[index_t] * 2. + y[index_f] = x[index_f] * -2. + outputs: + y + ''' + + def setUp(self): + self.py_cond = PySimpleCond() + + def forward(self): + self.scope = core.Scope() + self.create_global_variables() + self.create_cond_op() + self.create_sub_net() + ctx = core.DeviceContext.create(core.CPUPlace()) + print 'running infer shape' + print self.scope.find_var("SubScopes") + self.condop.infer_shape(self.scope) + print 'ok 2' + self.condop.run(self.scope, ctx) + print 'ok 3' + return np.array(self.scope.find_var("Outs").get_tensor()) + + def create_global_variables(self): + x_np_data = self.py_cond.x + create_tensor(self.scope, "x", [10, 1], x_np_data) + cond_np_data = self.py_cond.cond + create_tensor(self.scope, "cond", [10, 1], x_np_data) + self.scope.new_var("SubScopes") + self.scope.new_var("IndexTensors") + self.scope.new_var("Outs") + + def create_cond_op(self): + self.condop = CondOp( + Cond="cond", + Xs=["x"], + Outs=['Out_final'], + SubScopes="SubScopes", + IndexTensors="IndexTensors") + + def create_sub_net(self): + truenet = core.Net.create() + scale_op_t = Operator("scale", X='X', Y='Out', scale=2.) + truenet.append_op(scale_op_t) + truenet.complete_add_op(True) + self.condop.set_truenet(truenet) + + falsenet = core.Net.create() + scale_op_t = Operator("scale", X='X', Y='Out', scale=-2.) + falsenet.append_op(scale_op_t) + falsenet.complete_add_op(True) + self.condop.set_falsenet(falsenet) + + def test_forward(self): + print 'test cond op forward' + py_output = self.forward() + + +if __name__ == "__main__": + unittest.main() From d00e8a5f8350c38a9455b5fd604cac32e8b2cc62 Mon Sep 17 00:00:00 2001 From: dongzhihong Date: Tue, 12 Sep 2017 15:47:12 -0700 Subject: [PATCH 050/115] "add Op name example and fix format error" --- paddle/operators/name_convention.md | 54 +++++++++++++++++++++++------ 1 file changed, 43 insertions(+), 11 deletions(-) diff --git a/paddle/operators/name_convention.md b/paddle/operators/name_convention.md index 182c74e78b..8000dc8f08 100644 --- a/paddle/operators/name_convention.md +++ b/paddle/operators/name_convention.md @@ -1,27 +1,59 @@ -## Operator Name Convention +## Operator's Parameter Name Convention To make the operator document itself more clear, we recommend operator names obey the listing conventions. -### OpMaker names +### OpProtoMaker names -When defining an operator in Paddle, a corresponding `OpMaker` need to be defined. All the `Input`/`Output` and `attrs` will write into the `OpProto` , and will be used in client language to create operator. +When defining an operator in Paddle, a corresponding [OpProtoMaker](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/operator.h#L170) (TODO: OpProtoMaker Doc)need to be defined. All the Input/Output and Attributes will write into the [OpProto](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/framework.proto#L61) , and will be used in client language to create operator. - Input/Output. - - names follow the `CamelCase` but the first character is uppercase. e.g. `X`, `Y`, `Matrix`, `LastAxisInMatrix`. Input/Output much more like Variables, we prefer to meaningful English words. - - If an operator's Input/Output are not meaningful words, input name starts from `X`. e.g. `X`, `Y`, and output name starts from `Out`. e.g. `Out`. + - Input/Output names follow the **CamelCase**. e.g. `X`, `Y`, `Matrix`, `LastAxisInMatrix`. Input/Output much more like Variables, we prefer to meaningful English words. + - If an operator's Input/Output are tensors in math, not match to any meaningful words, input name should starts from `X`. e.g. `X`, `Y`, and output name should starts from `Out`. e.g. `Out`. This rule make operators which have few inputs/outputs unified. -* Attribute. - * Attribute name follows the normal `CamelCase`. e.g. `x`, `y`, `axis`, `rowwiseMatrix`. Also, attribute name prefers to meaningful English words. -* Comments. - * Input/Output/Attr comment follow the format of `type:meaning`. e.g. `AddOutput("Out", "EigenTensor,Tensor: Output of XX")`. we prefer to more meaningful comment. Some comments like `The first input of Operator` contains no information, we forbid it. - * Operator comment format of` R"DOC(your comment here)DOC"`. if there is math calculation in this operator, you should write the equation in the comment. e.g. `Out = X + Y`. +- Attribute. + - Attribute name follows the **camelCase**. e.g. `x`, `y`, `axis`, `rowwiseMatrix`. Also, attribute name prefers to meaningful English words. + +- Comments. + - Input/Output/Attr comment follow the format of **(type,default value) usage**, corresponding to which type it can be and how it will be used in the operator. e.g. Attribute in Accumulator`"gamma" `,`(float, default 1.0) Accumulation multiplier` + - Operator comment format of` R"DOC(your comment here)DOC"`. You should explain the input/output of the operator first. If there is math calculation in this operator, you should write the equation in the comment. e.g. `Out = X + Y`. + +- Order. + - Follow the order of Input/Output, then Attribute, then Comments. See the example in best practice. ### Best Practice +Here we give some examples to show how these rules will be used. + - The operator has one input, one output. e.g.`relu`, inputs: `X`, outputs: `Out`. - The operator has two input, one output. e.g. `rowwise_add`, inputs : `X`, `Y`, outputs : `Out`. - The operator contains attribute. e.g. `cosine`, inputs : `X`, `axis`, outputs : `Out`. - ​ + We give a full example of Accumulator Operator. Its OpProtoMaker should look like below. + +```c++ +class AccumulateOpMaker : public framework::OpProtoAndCheckerMaker { +public: + AccumulateOpMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "(Tensor) The input tensor that has to be accumulated to the output tensor. If the output size is not the same as input size, the output tensor is first reshaped and initialized to zero, and only then, accumulation is done."); + AddOutput("Out", "(Tensor) Accumulated output tensor"); + AddAttr("gamma", "(float, default 1.0) Accumulation multiplier"); + AddComment(R"DOC( +Accumulate operator accumulates the input tensor to the output tensor. If the +output tensor already has the right size, we add to it; otherwise, we first +initialize the output tensor to all zeros, and then do accumulation. Any +further calls to the operator, given that no one else fiddles with the output +in the interim, will do simple accumulations. +Accumulation is done as shown: + +Out = 1*X + gamma*Out + +where X is the input tensor, Y is the output tensor and gamma is the multiplier +argument. +)DOC"); + } +}; +``` From 594dece99625caa2b5a0de9998755f587348cbe5 Mon Sep 17 00:00:00 2001 From: dongzhihong Date: Tue, 12 Sep 2017 15:54:36 -0700 Subject: [PATCH 051/115] "fix typo" --- paddle/operators/name_convention.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/paddle/operators/name_convention.md b/paddle/operators/name_convention.md index 8000dc8f08..59d4019a3b 100644 --- a/paddle/operators/name_convention.md +++ b/paddle/operators/name_convention.md @@ -8,13 +8,13 @@ When defining an operator in Paddle, a corresponding [OpProtoMaker](https://gith - Input/Output. - Input/Output names follow the **CamelCase**. e.g. `X`, `Y`, `Matrix`, `LastAxisInMatrix`. Input/Output much more like Variables, we prefer to meaningful English words. - - If an operator's Input/Output are tensors in math, not match to any meaningful words, input name should starts from `X`. e.g. `X`, `Y`, and output name should starts from `Out`. e.g. `Out`. This rule make operators which have few inputs/outputs unified. + - If an operator's Input/Output are tensors in math, not match to any meaningful words, input name should starts from `X`. e.g. `X`, `Y`, and output name should starts from `Out`. e.g. `Out`. This rule intends making operators which have few inputs/outputs unified. - Attribute. - Attribute name follows the **camelCase**. e.g. `x`, `y`, `axis`, `rowwiseMatrix`. Also, attribute name prefers to meaningful English words. - Comments. - - Input/Output/Attr comment follow the format of **(type,default value) usage**, corresponding to which type it can be and how it will be used in the operator. e.g. Attribute in Accumulator`"gamma" `,`(float, default 1.0) Accumulation multiplier` + - Input/Output/Attr comment follow the format of **(type,default value) usage**, corresponding to which type it can be and how it will be used in the operator. e.g. Attribute in Accumulator`"gamma" `,`(float, default 1.0) Accumulation multiplier`. - Operator comment format of` R"DOC(your comment here)DOC"`. You should explain the input/output of the operator first. If there is math calculation in this operator, you should write the equation in the comment. e.g. `Out = X + Y`. - Order. From 15fccfefb5afe9cf145dc045c7e4ecb6613d8b71 Mon Sep 17 00:00:00 2001 From: dongzhihong Date: Tue, 12 Sep 2017 15:59:58 -0700 Subject: [PATCH 052/115] "remove used words" --- paddle/operators/name_convention.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/operators/name_convention.md b/paddle/operators/name_convention.md index 59d4019a3b..a090e0b545 100644 --- a/paddle/operators/name_convention.md +++ b/paddle/operators/name_convention.md @@ -30,7 +30,7 @@ Here we give some examples to show how these rules will be used. - The operator contains attribute. e.g. `cosine`, inputs : `X`, `axis`, outputs : `Out`. - We give a full example of Accumulator Operator. Its OpProtoMaker should look like below. + We give a full example of Accumulator Operator. ```c++ class AccumulateOpMaker : public framework::OpProtoAndCheckerMaker { From a7e3325aade2b36816026cf311f70b393dbeae8b Mon Sep 17 00:00:00 2001 From: dongzhihong Date: Tue, 12 Sep 2017 17:09:35 -0700 Subject: [PATCH 053/115] "fix typos" --- paddle/framework/backward.md | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/paddle/framework/backward.md b/paddle/framework/backward.md index 0859bf1d9b..d0494f50d7 100644 --- a/paddle/framework/backward.md +++ b/paddle/framework/backward.md @@ -2,7 +2,7 @@ ## Motivation -In Neural Network, many model is solved by the the backpropagation algorithm(known as BP) at present. Technically it caculates the gradient of the loss function, then distributed back through the networks. Follows the chain rule, so we need to compound the gradient operators/expressions together with the chain rule. Every forward network needs a backward network to construct the full computation graph, the operator/expression's backward pass will be generated respect to forward pass. +In Neural Network, many model is solved by the the backpropagation algorithm(known as BP) at present. Technically it caculates the gradient of the loss function, then distributed back through the networks. Follows the chain rule, so we need a module chains the gradient operators/expressions together with to construct the backward pass. Every forward network needs a backward network to construct the full computation graph, the operator/expression's backward pass will be generated respect to forward pass. ## Implementation @@ -13,7 +13,7 @@ std::unique_ptr Backward(const OperatorBase& forwardOp, const std::unordered_set& no_grad_vars); ``` -The implementation behind it can be divided into two parts. Namely, ** Backward Operator Creating** and **Backward Operator Building**. +The implementation behind it can be divided into two parts, ** Backward Operator Creating** and **Backward Operator Building**. ###Backward Operator Registry @@ -60,7 +60,7 @@ A backward network is a series of backward operators. The main idea of building 1. Op - when the input forward network is an Op, return its gradient Operator Immediately. If all of its outputs are in no gradient set, then return a special `NoGradient` operator + when the input forward network is an Op, return its gradient Operator Immediately. If all of its outputs are in no gradient set, then return a special `NOP`. 2. NetOp @@ -70,27 +70,27 @@ A backward network is a series of backward operators. The main idea of building RnnOp is a nested stepnet operator. Backward module need to recusively call `Backward` for every stepnet. -4. Shared Variable +4. Sharing Variables - **shared variable**. As illustrated in the pictures, two operator's `Output` `Gradient` will overwrite their shared input variable. + **sharing variables**. As illustrated in the pictures, two operator's `Output` `Gradient` will overwrite their sharing input variable.


-​ pic 1. Shared variable in operators. +​ pic 1. Sharing variables in operators.

-​ Share variable between operators or same input variable used in multiple operators leads to a duplicate gradient variable. As demo show above, we need to rename gradient name recursively and add a generic add operator replace the overwrite links. +​ Sharing variable between operators or same input variable used in multiple operators leads to a duplicate gradient variable. As demo show above, we need to rename gradient name recursively and add a generic add operator to replace the overwrite links.


-​ pic 2. Replace shared variable's gradient with `Add` operator. +​ pic 2. Replace sharing variable's gradient with `Add` operator.

-​ Because our framework find variable accord to its name, we need rename the output links. We add a suffix of number represent its position in clockwise. +​ Because our framework finds variables accord to their names, we need to rename the output links. We add a suffix of number to represent its position in clockwise. 5. Part of Gradient is Zero. From 6d03ca33475b75b22bd306d57ac1d0aaf681dd46 Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Wed, 13 Sep 2017 10:10:20 +0800 Subject: [PATCH 054/115] refine new_op_cn.md --- doc/howto/dev/new_op_cn.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/howto/dev/new_op_cn.md b/doc/howto/dev/new_op_cn.md index 07dce05df4..e3892849ab 100644 --- a/doc/howto/dev/new_op_cn.md +++ b/doc/howto/dev/new_op_cn.md @@ -262,7 +262,7 @@ MulOp(const std::string &type, const framework::VariableNameMap &inputs, - 生成库 - 无需修改 [`paddle/pybind/CMakeLists.txt`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/pybind/CMakeLists.txt)文件,`paddle/operators` 目录下新增的 `*_op.cc` 文件会被自动添加链接到生成的lib库中。 + `paddle/operators` 目录下新增的 `*_op.cc` 文件会被自动添加链接到生成的lib库中。 ## 实现单元测试 @@ -354,7 +354,7 @@ class TestMulGradOp(GradientChecker): ### 编译和执行单元测试 -无需修改 [`python/paddle/v2/framework/tests/CMakeLists.txt`](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/framework/tests/CMakeLists.txt) 文件,新增的 `test_*.py` 单元测试会被自动加入工程。 +`python/paddle/v2/framework/tests` 目录下新增的 `test_*.py` 单元测试会被自动加入工程进行编译。 请注意,**不同于Op的编译测试,运行单元测试测时需要编译整个工程**,并且编译时需要打开`WITH_TESTING`, 即`cmake paddle_dir -DWITH_TESTING=ON`。编译成功后,执行下面的命令来运行单元测试: From f8c6792aa3ac17135f33c2de01f693ea781e1212 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Tue, 12 Sep 2017 19:44:13 -0700 Subject: [PATCH 055/115] Extract DevPtrCast to device_ptr_cast.h --- paddle/platform/details/device_ptr_cast.h | 56 +++++++++++++++++++++++ paddle/platform/transform.h | 40 +++------------- 2 files changed, 63 insertions(+), 33 deletions(-) create mode 100644 paddle/platform/details/device_ptr_cast.h diff --git a/paddle/platform/details/device_ptr_cast.h b/paddle/platform/details/device_ptr_cast.h new file mode 100644 index 0000000000..4015491fcd --- /dev/null +++ b/paddle/platform/details/device_ptr_cast.h @@ -0,0 +1,56 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#ifndef __NVCC__ +#error device_ptr_cast must be include by .cu file +#endif + +#include + +namespace paddle { +namespace platform { +namespace details { +template +struct DevicePtrCast; + +template +struct DevicePtrCast { + using ELEM = typename std::remove_pointer::type; + using RTYPE = thrust::device_ptr; + + inline thrust::device_ptr operator()(ELEM* ele) const { + return thrust::device_pointer_cast(ele); + } +}; + +template +struct DevicePtrCast { + using RTYPE = T; + inline RTYPE operator()(RTYPE it) const { return it; } +}; + +// Cast T to thrust::device_ptr if T is a pointer. +// Otherwise, e.g., T is a iterator, return T itself. +template +auto DevPtrCast(T t) -> + typename DevicePtrCast::value>::RTYPE { + DevicePtrCast::value> cast; + return cast(t); +} + +} // namespace details +} // namespace platform +} // namespace paddle diff --git a/paddle/platform/transform.h b/paddle/platform/transform.h index c80446b45c..3ee4acd296 100644 --- a/paddle/platform/transform.h +++ b/paddle/platform/transform.h @@ -21,41 +21,12 @@ #include #include #ifdef __NVCC__ -#include #include +#include "paddle/platform/details/device_ptr_cast.h" #endif namespace paddle { namespace platform { - -#ifdef __NVCC__ -template -struct DevicePtrCast; - -template -struct DevicePtrCast { - using ELEM = typename std::remove_pointer::type; - using RTYPE = thrust::device_ptr; - - inline thrust::device_ptr operator()(ELEM* ele) const { - return thrust::device_pointer_cast(ele); - } -}; - -template -struct DevicePtrCast { - using RTYPE = T; - inline RTYPE operator()(RTYPE it) const { return it; } -}; - -template -auto DevCast(T t) -> - typename DevicePtrCast::value>::RTYPE { - DevicePtrCast::value> cast; - return cast(t); -} -#endif - // Transform on host or device. It provides the same API in std library. template @@ -65,7 +36,9 @@ void Transform(Place place, InputIter first, InputIter last, OutputIter result, std::transform(first, last, result, op); } else { #ifdef __NVCC__ - thrust::transform(DevCast(first), DevCast(last), DevCast(result), op); + using namespace details; + thrust::transform(DevPtrCast(first), DevPtrCast(last), DevPtrCast(result), + op); #else PADDLE_THROW("Do not invoke `Transform` in .cc file"); #endif @@ -80,8 +53,9 @@ void Transform(Place place, InputIter1 first1, InputIter1 last1, std::transform(first1, last1, first2, result, op); } else { #ifdef __NVCC__ - thrust::transform(DevCast(first1), DevCast(last1), DevCast(first2), - DevCast(result), op); + using namespace details; + thrust::transform(DevPtrCast(first1), DevPtrCast(last1), DevPtrCast(first2), + DevPtrCast(result), op); #else PADDLE_THROW("Do not invoke `Transform` in .cc file"); #endif From bc9e20d9ed399d6b21c31afa4c294b7bb7371e43 Mon Sep 17 00:00:00 2001 From: dongzhihong Date: Tue, 12 Sep 2017 20:01:50 -0700 Subject: [PATCH 056/115] "update img alt" --- paddle/framework/backward.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/paddle/framework/backward.md b/paddle/framework/backward.md index d0494f50d7..61c80635b8 100644 --- a/paddle/framework/backward.md +++ b/paddle/framework/backward.md @@ -13,9 +13,9 @@ std::unique_ptr Backward(const OperatorBase& forwardOp, const std::unordered_set& no_grad_vars); ``` -The implementation behind it can be divided into two parts, ** Backward Operator Creating** and **Backward Operator Building**. +The implementation behind it can be divided into two parts, **Backward Operator Creating** and **Backward Operator Building**. -###Backward Operator Registry +### Backward Operator Registry A backward network is built up with several backward operators. Backward operators take forward operators' inputs outputs, and output gradients and then calculate its input gradients. @@ -36,7 +36,7 @@ REGISTER_OP(mul, MulOp, MulOpMaker, mul_grad, MulOpGrad); `mul_grad` is the type of backward operator, and `MulOpGrad` is its class name. -###Backward Opeartor Creating +### Backward Opeartor Creating Given a certain forward operator, we can get its corresponding backward operator by calling: @@ -54,13 +54,13 @@ The function `BuildGradOp` will sequentially execute following processes: 4. Building backward operator with `inputs`, `outputs` and forward operator's attributes. -###Backward Network Building +### Backward Network Building A backward network is a series of backward operators. The main idea of building a backward network is creating backward operators in the inverted sequence and append them together one by one. There is some corner case need to process specially. 1. Op - when the input forward network is an Op, return its gradient Operator Immediately. If all of its outputs are in no gradient set, then return a special `NOP`. + When the input forward network is an Op, return its gradient Operator Immediately. If all of its outputs are in no gradient set, then return a special `NOP`. 2. NetOp @@ -72,12 +72,12 @@ A backward network is a series of backward operators. The main idea of building 4. Sharing Variables - **sharing variables**. As illustrated in the pictures, two operator's `Output` `Gradient` will overwrite their sharing input variable. + **sharing variables**. As illustrated in the pictures, two operator's share the same variable name of W@GRAD, which will overwrite their sharing input variable.

-
+Sharing variables in operators.
-​ pic 1. Sharing variables in operators. +​ pic 1.

From 885fa893324b3c51f676c706e09d5472822fffe2 Mon Sep 17 00:00:00 2001 From: dongzhihong Date: Tue, 12 Sep 2017 20:05:13 -0700 Subject: [PATCH 057/115] "remove the alt" --- paddle/framework/backward.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/paddle/framework/backward.md b/paddle/framework/backward.md index 61c80635b8..19e1850e46 100644 --- a/paddle/framework/backward.md +++ b/paddle/framework/backward.md @@ -75,9 +75,9 @@ A backward network is a series of backward operators. The main idea of building **sharing variables**. As illustrated in the pictures, two operator's share the same variable name of W@GRAD, which will overwrite their sharing input variable.

-Sharing variables in operators.
+
-​ pic 1. +​ pic 1. Sharing variables in operators.

From a90274eb5ce32025ed9492d969502cc3157cee52 Mon Sep 17 00:00:00 2001 From: dongzhihong Date: Tue, 12 Sep 2017 20:07:38 -0700 Subject: [PATCH 058/115] "update words" --- paddle/framework/backward.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/framework/backward.md b/paddle/framework/backward.md index 19e1850e46..0a6d762bc8 100644 --- a/paddle/framework/backward.md +++ b/paddle/framework/backward.md @@ -17,7 +17,7 @@ The implementation behind it can be divided into two parts, **Backward Operator ### Backward Operator Registry -A backward network is built up with several backward operators. Backward operators take forward operators' inputs outputs, and output gradients and then calculate its input gradients. +A backward network is built up with several backward operators. Backward operators take forward operators' inputs, outputs, and output gradients and then calculate its input gradients. | | forward operator | backward operator | ---------------------- | ---------------- |------------------------- | From 236a84c5050d419285cb7fbcc9c8f5bf923058ab Mon Sep 17 00:00:00 2001 From: wanghaoshuang Date: Wed, 13 Sep 2017 11:09:01 +0800 Subject: [PATCH 059/115] Fix nullptr check --- paddle/operators/pad_op.cc | 5 +++-- paddle/operators/pad_op.h | 13 +++++++------ 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/paddle/operators/pad_op.cc b/paddle/operators/pad_op.cc index 99f605c651..7e78b6ec13 100644 --- a/paddle/operators/pad_op.cc +++ b/paddle/operators/pad_op.cc @@ -96,8 +96,9 @@ class PadOpGrad : public framework::OperatorWithKernel { "Input(Out@GRAD) should not be null"); auto x_dims = ctx.Input("X")->dims(); auto *x_grad = ctx.Output(framework::GradVarName("X")); - - x_grad->Resize(x_dims); + if (x_grad != nullptr) { + x_grad->Resize(x_dims); + } } }; diff --git a/paddle/operators/pad_op.h b/paddle/operators/pad_op.h index ca8832f26a..2cc3b945ae 100644 --- a/paddle/operators/pad_op.h +++ b/paddle/operators/pad_op.h @@ -87,12 +87,13 @@ void PadGradFunction(const framework::ExecutionContext& context) { } auto* d_out = context.Input(framework::GradVarName("Out")); auto* d_x = context.Output(framework::GradVarName("X")); - d_x->mutable_data(context.GetPlace()); - - auto d_x_tensor = EigenTensor::From(*d_x); - auto d_out_tensor = EigenTensor::From(*d_out); - auto place = context.GetEigenDevice(); - d_x_tensor.device(place) = d_out_tensor.pad(paddings, 0); + if (d_x != nullptr) { + d_x->mutable_data(context.GetPlace()); + auto d_x_tensor = EigenTensor::From(*d_x); + auto d_out_tensor = EigenTensor::From(*d_out); + auto place = context.GetEigenDevice(); + d_x_tensor.device(place) = d_out_tensor.pad(paddings, 0); + } } template From 92e7b09547a102edb8724eb95b1756dd3d0c5b16 Mon Sep 17 00:00:00 2001 From: Liu Yiqun Date: Tue, 12 Sep 2017 19:52:43 +0800 Subject: [PATCH 060/115] Add ARGS ANDROID_API in Dockerfile.android, to support using toolchain of different api level. --- Dockerfile.android | 13 +++++---- paddle/scripts/docker/build_android.sh | 37 ++++++++++++++++++++++---- 2 files changed, 38 insertions(+), 12 deletions(-) diff --git a/Dockerfile.android b/Dockerfile.android index 452aa15745..9d13a414f6 100644 --- a/Dockerfile.android +++ b/Dockerfile.android @@ -6,13 +6,14 @@ RUN /bin/bash -c 'if [[ -n ${UBUNTU_MIRROR} ]]; then sed -i 's#http://archive.ub # ENV variables ARG ANDROID_ABI +ARG ANDROID_API ENV ANDROID_ABI=${ANDROID_ABI:-"armeabi-v7a"} +ENV ANDROID_API=${ANDROID_API:-21} ENV HOME=/root \ ANDROID_NDK_HOME=/opt/android-ndk-linux \ - ANDROID_ARM_STANDALONE_TOOLCHAIN=/opt/arm-toolchain \ - ANDROID_ARM64_STANDALONE_TOOLCHAIN=/opt/arm64-toolchain + ANDROID_TOOLCHAINS_DIR=/opt/toolchains RUN apt-get update && \ apt-get install -y \ @@ -42,14 +43,12 @@ RUN pip install --upgrade pip && \ pip install pre-commit # Android NDK -RUN mkdir /opt/android-ndk-tmp && \ +RUN mkdir -p ${ANDROID_TOOLCHAINS_DIR} && \ + mkdir -p /opt/android-ndk-tmp && \ cd /opt/android-ndk-tmp && \ wget -q https://dl.google.com/android/repository/android-ndk-r14b-linux-x86_64.zip && \ unzip -q android-ndk-r14b-linux-x86_64.zip && \ mv android-ndk-r14b ${ANDROID_NDK_HOME} && \ - ${ANDROID_NDK_HOME}/build/tools/make-standalone-toolchain.sh --arch=arm --platform=android-23 --install-dir=${ANDROID_ARM_STANDALONE_TOOLCHAIN} && \ - ${ANDROID_NDK_HOME}/build/tools/make-standalone-toolchain.sh --arch=arm64 --platform=android-23 --install-dir=${ANDROID_ARM64_STANDALONE_TOOLCHAIN} && \ - rm -rf /opt/android-ndk-tmp && \ - rm -rf ${ANDROID_NDK_HOME} + rm -rf /opt/android-ndk-tmp CMD ["bash", "/paddle/paddle/scripts/docker/build_android.sh"] diff --git a/paddle/scripts/docker/build_android.sh b/paddle/scripts/docker/build_android.sh index aabd2da5e4..11612ad4be 100644 --- a/paddle/scripts/docker/build_android.sh +++ b/paddle/scripts/docker/build_android.sh @@ -2,8 +2,30 @@ set -xe +if [ $ANDROID_ABI == "arm64-v8a" ]; then + ANDROID_ARCH=arm64 +else # armeabi, armeabi-v7a + ANDROID_ARCH=arm +fi + +ANDROID_STANDALONE_TOOLCHAIN=$ANDROID_TOOLCHAINS_DIR/$ANDROID_ARCH-android-$ANDROID_API + +cat </dev/null || true mkdir -p $BUILD_ROOT @@ -11,7 +33,7 @@ cd $BUILD_ROOT if [ $ANDROID_ABI == "armeabi-v7a" ]; then cmake -DCMAKE_SYSTEM_NAME=Android \ - -DANDROID_STANDALONE_TOOLCHAIN=$ANDROID_ARM_STANDALONE_TOOLCHAIN \ + -DANDROID_STANDALONE_TOOLCHAIN=$ANDROID_STANDALONE_TOOLCHAIN \ -DANDROID_ABI=$ANDROID_ABI \ -DANDROID_ARM_NEON=ON \ -DANDROID_ARM_MODE=ON \ @@ -26,7 +48,7 @@ if [ $ANDROID_ABI == "armeabi-v7a" ]; then .. elif [ $ANDROID_ABI == "arm64-v8a" ]; then cmake -DCMAKE_SYSTEM_NAME=Android \ - -DANDROID_STANDALONE_TOOLCHAIN=$ANDROID_ARM64_STANDALONE_TOOLCHAIN \ + -DANDROID_STANDALONE_TOOLCHAIN=$ANDROID_STANDALONE_TOOLCHAIN \ -DANDROID_ABI=$ANDROID_ABI \ -DANDROID_ARM_MODE=ON \ -DHOST_C_COMPILER=/usr/bin/gcc \ @@ -40,12 +62,12 @@ elif [ $ANDROID_ABI == "arm64-v8a" ]; then .. elif [ $ANDROID_ABI == "armeabi" ]; then cmake -DCMAKE_SYSTEM_NAME=Android \ - -DANDROID_STANDALONE_TOOLCHAIN=$ANDROID_ARM_STANDALONE_TOOLCHAIN \ + -DANDROID_STANDALONE_TOOLCHAIN=$ANDROID_STANDALONE_TOOLCHAIN \ -DANDROID_ABI=$ANDROID_ABI \ -DANDROID_ARM_MODE=ON \ -DHOST_C_COMPILER=/usr/bin/gcc \ -DHOST_CXX_COMPILER=/usr/bin/g++ \ - -DCMAKE_INSTALL_PREFIX=/paddle/install \ + -DCMAKE_INSTALL_PREFIX=$DEST_ROOT \ -DCMAKE_BUILD_TYPE=Release \ -DWITH_C_API=ON \ -DWITH_SWIG_PY=OFF \ @@ -55,5 +77,10 @@ else echo "Invalid ANDROID_ABI: $ANDROID_ABI" fi +cat < Date: Wed, 13 Sep 2017 12:16:52 +0800 Subject: [PATCH 061/115] Update pad op unitest --- .../paddle/v2/framework/tests/test_pad_op.py | 60 +++---------------- 1 file changed, 9 insertions(+), 51 deletions(-) diff --git a/python/paddle/v2/framework/tests/test_pad_op.py b/python/paddle/v2/framework/tests/test_pad_op.py index 56b9c88f7d..456b765e33 100644 --- a/python/paddle/v2/framework/tests/test_pad_op.py +++ b/python/paddle/v2/framework/tests/test_pad_op.py @@ -1,16 +1,12 @@ import unittest import numpy as np -from paddle.v2.framework.op import Operator -from gradient_checker import GradientChecker, create_op -from op_test_util import OpTestMeta +from op_test import OpTest -class TestPadOp(unittest.TestCase): - __metaclass__ = OpTestMeta - +class TestPadOp(OpTest): def setUp(self): self.initTestCase() - self.type = "pad" + self.op_type = "pad" self.inputs = {'X': np.random.random(self.shape).astype("float32"), } self.attrs = {} self.attrs['paddings'] = np.array(self.paddings).flatten() @@ -22,6 +18,12 @@ class TestPadOp(unittest.TestCase): constant_values=self.pad_value) } + def test_check_output(self): + self.check_output() + + def test_check_grad_normal(self): + self.check_grad(['X'], 'Out') + def initTestCase(self): self.shape = (16, 16) self.paddings = [(0, 1), (2, 3)] @@ -49,49 +51,5 @@ class TestCase3(TestPadOp): self.pad_value = 0.9 -class TestPadGradOp(GradientChecker): - def setUp(self): - self.initTestCase() - self.op = Operator( - type="pad", - X="X", - Out="Out", - paddings=np.array(self.paddings).flatten(), - pad_value=self.pad_value) - self.inputs = {'X': np.random.random(self.shape).astype("float32"), } - - def initTestCase(self): - self.shape = (16, 16) - self.paddings = [(0, 1), (2, 3)] - self.pad_value = 0 - - def test_normal(self): - self.check_grad(self.op, self.inputs, set(["X"]), "Out") - - def test_cpu_gpu_compare(self): - self.compare_grad(self.op, self.inputs) - - -class TestiGradCase1(TestPadOp): - def initTestCase(self): - self.shape = (2, 3, 4, 4) - self.paddings = [(0, 1), (2, 3), (2, 1), (1, 1)] - self.pad_value = 0.5 - - -class TestGradCase2(TestPadOp): - def initTestCase(self): - self.shape = (2, 2, 2) - self.paddings = [(0, 0), (0, 0), (1, 2)] - self.pad_value = 1 - - -class TestGradCase3(TestPadOp): - def initTestCase(self): - self.shape = (8) - self.paddings = [(0, 1)] - self.pad_value = 0.9 - - if __name__ == '__main__': unittest.main() From 66fdbd0ceea6711183353b7f4af168d420166a2f Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Wed, 13 Sep 2017 13:09:07 +0800 Subject: [PATCH 062/115] add some comment and simplify some code --- paddle/gserver/layers/MKLDNNConvLayer.cpp | 4 ++-- paddle/gserver/layers/MKLDNNConvLayer.h | 24 ++++++++--------------- paddle/math/MKLDNNMatrix.cpp | 6 +----- paddle/math/MKLDNNMatrix.h | 6 ++++++ python/paddle/trainer/config_parser.py | 5 +---- 5 files changed, 18 insertions(+), 27 deletions(-) diff --git a/paddle/gserver/layers/MKLDNNConvLayer.cpp b/paddle/gserver/layers/MKLDNNConvLayer.cpp index 617874defe..19891043a1 100644 --- a/paddle/gserver/layers/MKLDNNConvLayer.cpp +++ b/paddle/gserver/layers/MKLDNNConvLayer.cpp @@ -47,9 +47,9 @@ bool MKLDNNConvLayer::init(const LayerMap& layerMap, sw_ = conf.stride(); sh_ = conf.stride_y(); gp_ = conf.groups(); - oh_ = conf.has_output_y() ? conf.output_y() : conf.output_x(); + oh_ = conf.output_y(); ow_ = conf.output_x(); - ih_ = conf.has_img_size_y() ? conf.img_size_y() : conf.img_size(); + ih_ = conf.img_size_y(); iw_ = conf.img_size(); caffeMode_ = conf.caffe_mode(); CHECK(caffeMode_) << "Only support caffe mode yet"; diff --git a/paddle/gserver/layers/MKLDNNConvLayer.h b/paddle/gserver/layers/MKLDNNConvLayer.h index 58891ff5e1..d1a78ac1c0 100644 --- a/paddle/gserver/layers/MKLDNNConvLayer.h +++ b/paddle/gserver/layers/MKLDNNConvLayer.h @@ -37,38 +37,30 @@ protected: // group number int gp_; - // in backward data the format is different with wgtVal_ + // in resetBwdData, the format of wgtValBwdData_ is different with wgtVal_ MKLDNNMatrixPtr wgtValBwdData_; + // convert handle from wgtVal_ to wgtValBwdData_ std::shared_ptr cvtWgtVal_; - // save forward primitive_desc use for backward + // save forward primitive_desc, which can be used backward std::shared_ptr fwdPD_; - // MKLDNNMatrixPtr with cpu device for conversion between MKLDNN device + // MKLDNNMatrixPtr which should be created from CPU Device MKLDNNMatrixPtr cpuInVal_; MKLDNNMatrixPtr cpuInGrad_; MKLDNNMatrixPtr cpuOutVal_; MKLDNNMatrixPtr cpuOutGrad_; + // convert handle between CPU device and MKLDNN device std::shared_ptr cvtInVal_; std::shared_ptr cvtInGrad_; std::shared_ptr cvtOutVal_; std::shared_ptr cvtOutGrad_; - // if has already init the weight + // whether the weight has been init bool hasInitedWgt_; - // True by default. This impact the calculation of output size. - // For example: - // - input(+padding): 0123456789 - // - imageSize(+padding) = 10; - // - filterSize = 3; - // - stride = 2; - // - caffeMode_ is true: - // - output: (012), (234), (456), (678) - // - outputSize = 4; - // - caffeMode_ is false: - // - output: (012), (234), (456), (678), (9) - // - outputSize = 5; + // true by default, which impact the calculation of output image size. + // details can refer to mathUtil.h bool caffeMode_; // weight and bias diff --git a/paddle/math/MKLDNNMatrix.cpp b/paddle/math/MKLDNNMatrix.cpp index a71ac12afc..0778bb63b7 100644 --- a/paddle/math/MKLDNNMatrix.cpp +++ b/paddle/math/MKLDNNMatrix.cpp @@ -52,11 +52,7 @@ MKLDNNMatrixPtr MKLDNNMatrix::create(MatrixPtr m, std::shared_ptr MKLDNNMatrix::createReorder(const MKLDNNMatrixPtr& src, const MKLDNNMatrixPtr& dst, bool checkData) { - if (src == dst) { - return nullptr; - } - - if (src->getPrimitiveDesc() == dst->getPrimitiveDesc()) { + if (src == dst || src->getPrimitiveDesc() == dst->getPrimitiveDesc()) { return nullptr; } diff --git a/paddle/math/MKLDNNMatrix.h b/paddle/math/MKLDNNMatrix.h index c7765369c8..0aa130b4a0 100644 --- a/paddle/math/MKLDNNMatrix.h +++ b/paddle/math/MKLDNNMatrix.h @@ -65,6 +65,12 @@ public: /** * Create reorder primitive. + * Create a mkldnn::reorder handle for converting src MKLDNNMatrix to dst. + * checkData: for whether to check the data handle of src and dst is the same. + * if true, means check it and do not want support inplace reorder; + * otherwise do not check data which means the created reorder + * maybe inplace buffer and do not guarantee the logical is correct + * since not all format or conversion support inplace. */ static std::shared_ptr createReorder( const MKLDNNMatrixPtr& src, diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py index d633cae4aa..58ebcd1d8e 100644 --- a/python/paddle/trainer/config_parser.py +++ b/python/paddle/trainer/config_parser.py @@ -2073,10 +2073,7 @@ class ConvLayerBase(LayerBase): (parallel_nn == 0 or self.config.device > -1)): self.layer_type = "cudnn_conv" else: - if (use_mkldnn == 1): - self.layer_type = "mkldnn_conv" - else: - self.layer_type = "exconv" + self.layer_type = "mkldnn_conv" if use_mkldnn else "exconv" # need to specify layer in config self.config.type = self.layer_type From b51ba53a55ef5dda2b37fb8feb4d68de0d659118 Mon Sep 17 00:00:00 2001 From: Liu Yiqun Date: Wed, 13 Sep 2017 12:54:49 +0800 Subject: [PATCH 063/115] Write the building and the lastest commit into a BUILD.txt in install phase. --- CMakeLists.txt | 2 ++ paddle/capi/CMakeLists.txt | 20 ++++++++++++++++++++ 2 files changed, 22 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index 08237cd850..e3194cd29c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -67,6 +67,8 @@ endif() if(ANDROID) if(${CMAKE_SYSTEM_VERSION} VERSION_LESS "16") message(FATAL_ERROR "Unsupport standalone toolchains with Android API level lower than 16") + elseif(${CMAKE_SYSTEM_VERSION} VERSION_LESS "21") + message(WARNING "Using the unofficial git repository instead") endif() set(WITH_GPU OFF CACHE STRING diff --git a/paddle/capi/CMakeLists.txt b/paddle/capi/CMakeLists.txt index dde99ab340..3af111eb57 100644 --- a/paddle/capi/CMakeLists.txt +++ b/paddle/capi/CMakeLists.txt @@ -64,9 +64,29 @@ link_paddle_exe(paddle_capi_shared) install(FILES ${CAPI_HEADERS} DESTINATION include/paddle) install(FILES ${CMAKE_CURRENT_BINARY_DIR}/config.h DESTINATION include/paddle) if(ANDROID) + execute_process( + COMMAND ${GIT_EXECUTABLE} log --pretty=oneline -1 + OUTPUT_VARIABLE GIT_COMMITS_LIST + RESULT_VARIABLE GIT_COMMITS_LIST_RESULT + ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE) + if(${GIT_COMMITS_LIST_RESULT}) + set(GIT_COMMITS_LIST "No commits.") + endif() install(FILES ${CMAKE_CURRENT_BINARY_DIR}/${capi_whole_library} DESTINATION lib/${ANDROID_ABI}) install(TARGETS paddle_capi_shared DESTINATION lib/${ANDROID_ABI}) + install(CODE "FILE(WRITE ${CMAKE_INSTALL_PREFIX}/lib/${ANDROID_ABI}/BUILD.txt + \"Compiler:\n\" + \"\\t${CMAKE_C_COMPILER}\\n\" + \"\\t${CMAKE_CXX_COMPILER}\\n\" + \"Compiler Flags:\\n\" + \"\\t${CMAKE_F_FLAGS}\\n\" + \"\\t${CMAKE_CXX_FLAGS}\\n\" + \"Android API: ${CMAKE_SYSTEM_VERSION}\\n\" + \"Lastest commit:\\n\" + \"\\t${GIT_COMMITS_LIST}\\n\" + )" + ) else(ANDROID) install(FILES ${CMAKE_CURRENT_BINARY_DIR}/${capi_whole_library} DESTINATION lib) install(TARGETS paddle_capi_shared DESTINATION lib) From 03ea7320d3de03dec3880bd1504db8d61ad06a0c Mon Sep 17 00:00:00 2001 From: Xinghai Sun Date: Wed, 13 Sep 2017 13:23:03 +0800 Subject: [PATCH 064/115] Update cos_sim operator by following reviewer's comments. --- paddle/operators/cos_sim_op.cc | 66 +++++++++++++++++++--------------- paddle/operators/cos_sim_op.h | 58 ++++++++++++++---------------- 2 files changed, 65 insertions(+), 59 deletions(-) diff --git a/paddle/operators/cos_sim_op.cc b/paddle/operators/cos_sim_op.cc index 428ee7d9d0..e3bee43792 100644 --- a/paddle/operators/cos_sim_op.cc +++ b/paddle/operators/cos_sim_op.cc @@ -32,17 +32,18 @@ class CosSimOp : public framework::OperatorWithKernel { // shape check auto x_dims = ctx.Input("X")->dims(); auto y_dims = ctx.Input("Y")->dims(); - PADDLE_ENFORCE_EQ(framework::arity(x_dims), framework::arity(y_dims), + + PADDLE_ENFORCE_EQ(x_dims.size(), y_dims.size(), "Ranks of Input(X) and Input(Y) must be equal."); - PADDLE_ENFORCE_GE(framework::arity(x_dims), 2, + PADDLE_ENFORCE_GE(x_dims.size(), 2, "Rank of Input(X) must not be less than 2."); - PADDLE_ENFORCE_EQ( - framework::slice_ddim(x_dims, 1, framework::arity(x_dims)), - framework::slice_ddim(y_dims, 1, framework::arity(y_dims)), - "All dimensions except 1st of Input(X) and Input(Y) must be equal."); + PADDLE_ENFORCE_EQ(framework::slice_ddim(x_dims, 1, x_dims.size()), + framework::slice_ddim(y_dims, 1, y_dims.size()), + "All dimensions except the 1st of Input(X) and Input(Y) " + "must be equal."); PADDLE_ENFORCE(x_dims[0] == y_dims[0] || y_dims[0] == 1, - "1st dimension of Input(Y) must be equal to Input(X) or " - "just 1 (which will be broadcasted to match Input(X))."); + "The 1st dimension of Input(Y) must be equal to Input(X) or" + " just 1 (which will be broadcasted to match Input(X))."); // resize tensor ctx.Output("Out")->Resize({x_dims[0], 1}); @@ -58,8 +59,14 @@ class CosSimOpMaker : public framework::OpProtoAndCheckerMaker { AddInput("X", "The 1st input of cos_sim op."); AddInput("Y", "The 2nd input of cos_sim op."); AddOutput("Out", "The output of cos_sim op."); - AddOutput("XNorm", "Row norm of the first input.").AsIntermediate(); - AddOutput("YNorm", "Row norm of the second input.").AsIntermediate(); + AddOutput("XNorm", + "Norm of the first input, reduced along the 1st " + "dimension.") + .AsIntermediate(); + AddOutput("YNorm", + "Norm of the second input, reduced along the 1st " + "dimension.") + .AsIntermediate(); AddComment(R"DOC( Cosine Similarity Operator. @@ -95,29 +102,32 @@ class CosSimOpGrad : public framework::OperatorWithKernel { // shape check auto x_dims = ctx.Input("X")->dims(); auto y_dims = ctx.Input("Y")->dims(); - PADDLE_ENFORCE_GE(framework::arity(x_dims), framework::arity(y_dims), - "Ranks of Input(X) and Input(Y) must be equal."); - PADDLE_ENFORCE_GE(framework::arity(x_dims), 2, - "Rank of Input(X) must not be less than 2."); - PADDLE_ENFORCE_EQ( - framework::slice_ddim(x_dims, 1, framework::arity(x_dims)), - framework::slice_ddim(y_dims, 1, framework::arity(y_dims)), - "All dimensions except 1st of Input(X) and Input(Y) must be equal."); - PADDLE_ENFORCE(x_dims[0] == y_dims[0] || y_dims[0] == 1, - "1st dimension of Input(Y) must be equal to Input(X) or " - "just 1 (which will be broadcasted to match Input(X))."); auto xnorm_dims = ctx.Input("XNorm")->dims(); - PADDLE_ENFORCE_EQ(xnorm_dims, framework::make_ddim({x_dims[0], 1}), - "Shape of Input(XNorm) must be [X.Dim(0), 1]."); auto ynorm_dims = ctx.Input("YNorm")->dims(); - PADDLE_ENFORCE_EQ(ynorm_dims, framework::make_ddim({y_dims[0], 1}), - "Shape of Input(YNorm) must be [Y.Dim(0), 1]."); auto out_dims = ctx.Input("Out")->dims(); - PADDLE_ENFORCE_EQ(out_dims, framework::make_ddim({x_dims[0], 1}), - "Shape of Input(Out) must be [X.Dim(0), 1]."); auto out_grad_dims = ctx.Input(framework::GradVarName("Out"))->dims(); - PADDLE_ENFORCE_EQ(out_grad_dims, framework::make_ddim({x_dims[0], 1}), + + PADDLE_ENFORCE_GE(x_dims.size(), y_dims.size(), + "Ranks of Input(X) and Input(Y) must be equal."); + PADDLE_ENFORCE_GE(x_dims.size(), 2, + "Rank of Input(X) must not be less than 2."); + PADDLE_ENFORCE_EQ(framework::slice_ddim(x_dims, 1, x_dims.size()), + framework::slice_ddim(y_dims, 1, y_dims.size()), + "All dimensions except the 1st of Input(X) and Input(Y) " + "must be equal."); + PADDLE_ENFORCE(x_dims[0] == y_dims[0] || y_dims[0] == 1, + "The 1st dimension of Input(Y) must be equal to Input(X) or" + " just 1 (which will be broadcasted to match Input(X))."); + auto target_xnorm_dims = framework::make_ddim({x_dims[0], 1}), + auto target_ynorm_dims = framework::make_ddim({y_dims[0], 1}), + PADDLE_ENFORCE_EQ(xnorm_dims, target_xnorm_dims, + "Shape of Input(XNorm) must be [X.Dim(0), 1]."); + PADDLE_ENFORCE_EQ(ynorm_dims, target_ynorm_dims, + "Shape of Input(YNorm) must be [Y.Dim(0), 1]."); + PADDLE_ENFORCE_EQ(out_dims, target_xnorm_dims, + "Shape of Input(Out) must be [X.Dim(0), 1]."); + PADDLE_ENFORCE_EQ(out_grad_dims, target_xnorm_dims, "Shape of Input(Out@Grad) must be [X.Dim(0), 1]."); // resize tensor diff --git a/paddle/operators/cos_sim_op.h b/paddle/operators/cos_sim_op.h index 62298ccbce..4d03d5902d 100644 --- a/paddle/operators/cos_sim_op.h +++ b/paddle/operators/cos_sim_op.h @@ -42,22 +42,23 @@ class CosSimKernel : public framework::OpKernel { int rows_x = in_x->dims()[0]; int rows_y = in_y->dims()[0]; int cols = framework::product(in_x->dims()) / rows_x; - auto x = EigenMatrix::From(*in_x, framework::make_ddim({rows_x, cols})); - auto y = EigenMatrix::From(*in_y, framework::make_ddim({rows_y, cols})); + auto x = EigenMatrix::Reshape(*in_x, 1); + auto y = EigenMatrix::Reshape(*in_y, 1); auto z = EigenMatrix::From(*out_z); auto x_norm = EigenMatrix::From(*out_x_norm); auto y_norm = EigenMatrix::From(*out_y_norm); // compute auto place = context.GetEigenDevice(); - x_norm.device(place) = x.square().sum(Eigen::array({1})).sqrt(); - y_norm.device(place) = y.square().sum(Eigen::array({1})).sqrt(); + auto row_along = Eigen::array({{1}}); + x_norm.device(place) = x.square().sum(row_along).sqrt(); + y_norm.device(place) = y.square().sum(row_along).sqrt(); if (rows_x == rows_y) { auto xy = (x * y).sum(Eigen::array({1})); z.device(place) = xy / x_norm / y_norm; } else { Eigen::DSizes bcast(rows_x, 1); - auto xy = (x * y.broadcast(bcast)).sum(Eigen::array({1})); + auto xy = (x * y.broadcast(bcast)).sum(row_along); z.device(place) = xy / x_norm / y_norm.broadcast(bcast); } } @@ -78,61 +79,56 @@ class CosSimGradKernel : public framework::OpKernel { auto* in_grad_z = context.Input(framework::GradVarName("Out")); // convert Tensor to Eigen Tensor - int rows_x = in_x->dims()[0]; - int rows_y = in_y->dims()[0]; - int cols = framework::product(in_x->dims()) / rows_x; - auto x = EigenMatrix::From(*in_x, framework::make_ddim({rows_x, cols})); - auto y = EigenMatrix::From(*in_y, framework::make_ddim({rows_y, cols})); + auto x = EigenMatrix::Reshape(*in_x, 1); + auto y = EigenMatrix::Reshape(*in_y, 1); auto z = EigenMatrix::From(*in_z); auto x_norm = EigenMatrix::From(*in_x_norm); auto y_norm = EigenMatrix::From(*in_y_norm); auto dz = EigenMatrix::From(*in_grad_z); // compute gradident - Eigen::DSizes bcast(1, cols); - auto z_bcast = z.broadcast(bcast); - auto dz_bcast = dz.broadcast(bcast); - auto x_snorm_bcast = x_norm.square().eval().broadcast(bcast); + int rows_x = in_x->dims()[0]; + int rows_y = in_y->dims()[0]; + int cols = framework::product(in_x->dims()) / rows_x; + Eigen::DSizes bcast_cols(1, cols); + auto z_bcast = z.broadcast(bcast_cols); + auto dz_bcast = dz.broadcast(bcast_cols); + auto x_snorm_bcast = x_norm.square().eval().broadcast(bcast_cols); auto place = context.GetEigenDevice(); if (rows_x == rows_y) { - auto y_snorm_bcast = y_norm.square().eval().broadcast(bcast); - auto norm_prod_bcast = (x_norm * y_norm).eval().broadcast(bcast); + auto y_snorm_bcast = y_norm.square().eval().broadcast(bcast_cols); + auto norm_prod_bcast = (x_norm * y_norm).eval().broadcast(bcast_cols); // compute dx if (out_grad_x) { out_grad_x->mutable_data(context.GetPlace()); - auto dx = EigenMatrix::From(*out_grad_x, - framework::make_ddim({rows_x, cols})); + auto dx = EigenMatrix::Reshape(*out_grad_x, 1); auto grad = y / norm_prod_bcast - z_bcast * x / x_snorm_bcast; dx.device(place) = dz_bcast * grad; } // compute dy if (out_grad_y) { out_grad_y->mutable_data(context.GetPlace()); - auto dy = EigenMatrix::From(*out_grad_y, - framework::make_ddim({rows_y, cols})); - auto grad = x / norm_prod_bcast - z_bcast * y / y_snorm_bcast; + auto dy = EigenMatrix::Reshape(*out_grad_y, 1) auto grad = + x / norm_prod_bcast - z_bcast * y / y_snorm_bcast; dy.device(place) = dz_bcast * grad; } } else { - Eigen::DSizes bcast_row(rows_x, 1); - auto y_bcast = y.broadcast(bcast_row); - auto y_snorm_bcast = - y_norm.square().eval().broadcast(bcast_row).eval().broadcast(bcast); - auto norm_prod_bcast = - (x_norm * y_norm.broadcast(bcast_row)).eval().broadcast(bcast); + Eigen::DSizes bcast_rows(rows_x, 1); + Eigen::DSizes bcast_rows_cols(rows_x, 1); + auto y_bcast = y.broadcast(bcast_rows); + auto y_snorm_bcast = y_norm.square().eval().broadcast(bcast_rows_cols); + auto norm_prod_bcast = x_norm * y_norm.broadcast(bcast_rows_cols); // compute dx if (out_grad_x) { out_grad_x->mutable_data(context.GetPlace()); - auto dx = EigenMatrix::From( - *out_grad_x, framework::make_ddim({rows_x, cols})); + auto dx = EigenMatrix::Reshape(*out_grad_x, 1); auto grad = y_bcast / norm_prod_bcast - z_bcast * x / x_snorm_bcast; dx.device(place) = dz_bcast * grad; } // compute dy if (out_grad_y) { out_grad_y->mutable_data(context.GetPlace()); - auto dy = EigenMatrix::From( - *out_grad_y, framework::make_ddim({rows_y, cols})); + auto dy = EigenMatrix::Reshape(*out_grad_y, 1); auto grad = x / norm_prod_bcast - z_bcast * y_bcast / y_snorm_bcast; dy.device(place) = (dz_bcast * grad).sum(Eigen::array({0})); } From d11430e00945cb7f1c07f875ff16e01bf49d9444 Mon Sep 17 00:00:00 2001 From: dangqingqing Date: Wed, 13 Sep 2017 14:12:51 +0800 Subject: [PATCH 065/115] Use the inheritance in the definition of LoDTensor. --- paddle/framework/lod_tensor.h | 10 ++--- paddle/framework/lod_tensor_test.cc | 45 +++++++++---------- paddle/framework/lod_tensor_test.cu | 6 +-- paddle/framework/tensor_impl.h | 4 ++ paddle/pybind/pybind.cc | 25 +++-------- paddle/pybind/tensor_py.h | 1 + .../paddle/v2/framework/tests/test_tensor.py | 27 ++++++----- 7 files changed, 52 insertions(+), 66 deletions(-) diff --git a/paddle/framework/lod_tensor.h b/paddle/framework/lod_tensor.h index 568f4e8981..f2b77ca345 100644 --- a/paddle/framework/lod_tensor.h +++ b/paddle/framework/lod_tensor.h @@ -51,16 +51,13 @@ bool operator==(const LoD& a, const LoD& b); * LoDTensor (Level of details Tensor) * see https://en.wikipedia.org/wiki/Level_of_details for reference. */ -class LoDTensor { +class LoDTensor : public Tensor { public: LoDTensor() {} - LoDTensor(const LoD& lod, Tensor* t) : lod_(lod), tensor_(t) {} - void set_lod(const LoD& lod) { lod_ = lod; } - - void set_tensor(Tensor* tensor) { tensor_ = tensor; } + explicit LoDTensor(const LoD& lod) : lod_(lod) {} - Tensor& tensor() { return *tensor_; } + void set_lod(const LoD& lod) { lod_ = lod; } LoD lod() { return lod_; } @@ -104,7 +101,6 @@ class LoDTensor { private: LoD lod_; - Tensor* tensor_; // not owned }; } // namespace framework } // namespace paddle diff --git a/paddle/framework/lod_tensor_test.cc b/paddle/framework/lod_tensor_test.cc index 1da8553134..7915326b27 100644 --- a/paddle/framework/lod_tensor_test.cc +++ b/paddle/framework/lod_tensor_test.cc @@ -36,69 +36,64 @@ class LoDTensorTester : public ::testing::Test { ASSERT_EQ(lod.size(), 3UL); - tensor.Resize({20 /*batch size*/, 128 /*dim*/}); + lod_tensor_.Resize({20 /*batch size*/, 128 /*dim*/}); // malloc memory - tensor.mutable_data(place); + lod_tensor_.mutable_data(place); - lod_tensor.set_lod(lod); - lod_tensor.set_tensor(&tensor); + lod_tensor_.set_lod(lod); } protected: platform::CPUPlace place; - Tensor tensor; - LoDTensor lod_tensor; + LoDTensor lod_tensor_; }; -TEST_F(LoDTensorTester, NumLevels) { ASSERT_EQ(lod_tensor.NumLevels(), 3UL); } +TEST_F(LoDTensorTester, NumLevels) { ASSERT_EQ(lod_tensor_.NumLevels(), 3UL); } TEST_F(LoDTensorTester, NumElements) { - ASSERT_EQ(lod_tensor.NumElements(0), 2UL); - ASSERT_EQ(lod_tensor.NumElements(1), 4UL); - ASSERT_EQ(lod_tensor.NumElements(2), 8UL); + ASSERT_EQ(lod_tensor_.NumElements(0), 2UL); + ASSERT_EQ(lod_tensor_.NumElements(1), 4UL); + ASSERT_EQ(lod_tensor_.NumElements(2), 8UL); } TEST_F(LoDTensorTester, SliceLevels) { // slice 1 level for (size_t level = 0; level < 3UL; ++level) { - LoDTensor new_lod_tensor = lod_tensor; + LoDTensor new_lod_tensor = lod_tensor_; new_lod_tensor.SliceLevels(level, level + 1); ASSERT_EQ(new_lod_tensor.NumLevels(), 1UL); - ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor.NumElements(level)); - ASSERT_EQ(new_lod_tensor.tensor().data(), - lod_tensor.tensor().data()); + ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor_.NumElements(level)); + ASSERT_EQ(new_lod_tensor.data(), lod_tensor_.data()); } // slice 2 level for (size_t level = 0; level < 2UL; ++level) { - LoDTensor new_lod_tensor = lod_tensor; + LoDTensor new_lod_tensor = lod_tensor_; new_lod_tensor.SliceLevels(level, level + 2); ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL); - ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor.NumElements(level)); - ASSERT_EQ(new_lod_tensor.NumElements(1), lod_tensor.NumElements(level + 1)); - ASSERT_EQ(new_lod_tensor.tensor().data(), - lod_tensor.tensor().data()); + ASSERT_EQ(new_lod_tensor.NumElements(0), lod_tensor_.NumElements(level)); + ASSERT_EQ(new_lod_tensor.NumElements(1), + lod_tensor_.NumElements(level + 1)); + ASSERT_EQ(new_lod_tensor.data(), lod_tensor_.data()); } } TEST_F(LoDTensorTester, SliceInLevel) { size_t level = 0; - LoDTensor new_lod_tensor = lod_tensor; + LoDTensor new_lod_tensor = lod_tensor_; new_lod_tensor.SliceInLevel(level, 0, 2); EXPECT_EQ(new_lod_tensor.NumLevels(), 3UL); EXPECT_EQ(new_lod_tensor.NumElements(0), 2UL); EXPECT_EQ(new_lod_tensor.NumElements(1), 4UL); EXPECT_EQ(new_lod_tensor.NumElements(2), 8UL); - ASSERT_EQ(new_lod_tensor.tensor().data(), - lod_tensor.tensor().data()); + ASSERT_EQ(new_lod_tensor.data(), lod_tensor_.data()); level = 1; - new_lod_tensor = lod_tensor; + new_lod_tensor = lod_tensor_; new_lod_tensor.SliceInLevel(level, 0, 2); ASSERT_EQ(new_lod_tensor.NumLevels(), 2UL); ASSERT_EQ(new_lod_tensor.NumElements(0), 2UL); ASSERT_EQ(new_lod_tensor.NumElements(1), 4UL); - ASSERT_EQ(new_lod_tensor.tensor().data(), - lod_tensor.tensor().data()); + ASSERT_EQ(new_lod_tensor.data(), lod_tensor_.data()); } } // namespace framework diff --git a/paddle/framework/lod_tensor_test.cu b/paddle/framework/lod_tensor_test.cu index 1079a36a2e..97e69cdb2e 100644 --- a/paddle/framework/lod_tensor_test.cu +++ b/paddle/framework/lod_tensor_test.cu @@ -26,18 +26,16 @@ __global__ void test(size_t* a, int size) { } TEST(LoDTensor, LoDInGPU) { - paddle::framework::Tensor tensor; paddle::framework::LoDTensor lod_tensor; paddle::platform::GPUPlace place(0); paddle::framework::LoD src_lod; src_lod.push_back(std::vector{0, 2, 4, 6, 8, 10, 12, 14}); - tensor.Resize({14, 16}); - tensor.mutable_data(place); + lod_tensor.Resize({14, 16}); + lod_tensor.mutable_data(place); lod_tensor.set_lod(src_lod); - lod_tensor.set_tensor(&tensor); CHECK_EQ(lod_tensor.lod_element(0, 2), 4); CHECK_EQ(lod_tensor.lod_element(0, 4), 8); diff --git a/paddle/framework/tensor_impl.h b/paddle/framework/tensor_impl.h index 642b53efc7..cc4d908834 100644 --- a/paddle/framework/tensor_impl.h +++ b/paddle/framework/tensor_impl.h @@ -16,6 +16,8 @@ limitations under the License. */ #include "paddle/memory/memcpy.h" #include "paddle/platform/enforce.h" +#include + namespace paddle { namespace framework { @@ -53,6 +55,7 @@ inline T* Tensor::mutable_data(DDim dims, platform::Place place) { template inline T* Tensor::mutable_data(platform::Place place) { + LOG(INFO) << "------ mutable_data ---- "; static_assert(std::is_pod::value, "T must be POD"); PADDLE_ENFORCE_GT(numel(), 0, "Tensor's numel must be larger than zero to call " @@ -142,6 +145,7 @@ inline Tensor Tensor::Slice(const int& begin_idx, const int& end_idx) const { } inline Tensor& Tensor::Resize(const DDim& dims) { + LOG(INFO) << "---- resize -----"; dims_ = dims; numel_ = product(dims_); return *this; diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc index 16a2368aae..b5afe2f55b 100644 --- a/paddle/pybind/pybind.cc +++ b/paddle/pybind/pybind.cc @@ -121,27 +121,19 @@ PYBIND11_PLUGIN(core) { return self.data()[offset]; }); - py::class_(m, "LoDTensor", R"DOC(LoD(Leval of Ddetails) Tensor. - -The tensor and LoD info should be created before creating the LoDTensor, then -call the set_tensor and set_lod functions to set them. - -)DOC") - .def("__init__", - [](LoDTensor &instance, - const std::vector> &lod, - Tensor *t) { + py::class_(m, "LoDTensor") + .def( + "__init__", + [](LoDTensor &instance, const std::vector> &lod) { #ifdef PADDLE_ONLY_CPU - new (&instance) LoDTensor(lod, t); + new (&instance) LoDTensor(lod); #else paddle::framework::LoD new_lod; new_lod.reserve(lod.size()); std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod)); - new (&instance) LoDTensor(new_lod, t); + new (&instance) LoDTensor(new_lod); #endif - }) - .def("set_tensor", - [](LoDTensor &self, Tensor *tensor) { self.set_tensor(tensor); }) + }) .def("set_lod", [](LoDTensor &self, const std::vector> &lod) { #ifdef PADDLE_ONLY_CPU @@ -153,9 +145,6 @@ call the set_tensor and set_lod functions to set them. self.set_lod(new_lod); #endif }) - .def("tensor", - [](LoDTensor &self) -> Tensor & { return self.tensor(); }, - py::return_value_policy::reference) .def("lod", [](LoDTensor &self) -> std::vector> { #ifdef PADDLE_ONLY_CPU return self.lod(); diff --git a/paddle/pybind/tensor_py.h b/paddle/pybind/tensor_py.h index 95171acf72..a32a0b6790 100644 --- a/paddle/pybind/tensor_py.h +++ b/paddle/pybind/tensor_py.h @@ -42,6 +42,7 @@ template struct CastToPyBufferImpl { using CUR_TYPE = typename std::tuple_element>::type; py::buffer_info operator()(framework::Tensor &tensor) { + LOG(INFO) << "---- CastToPyBufferImpl -----"; if (std::type_index(typeid(CUR_TYPE)) == tensor.holder_->type()) { auto dim_vec = framework::vectorize(tensor.dims()); std::vector dims_outside; diff --git a/python/paddle/v2/framework/tests/test_tensor.py b/python/paddle/v2/framework/tests/test_tensor.py index f26ed4964c..fc6abe9806 100644 --- a/python/paddle/v2/framework/tests/test_tensor.py +++ b/python/paddle/v2/framework/tests/test_tensor.py @@ -4,7 +4,7 @@ import numpy class TestTensor(unittest.TestCase): - def test_int_tensor(self): + def not_test_int_tensor(self): scope = core.Scope() var = scope.new_var("test_tensor") place = core.CPUPlace() @@ -23,7 +23,7 @@ class TestTensor(unittest.TestCase): self.assertEqual(1, tensor_array_2[3, 9]) self.assertEqual(2, tensor_array_2[19, 11]) - def test_float_tensor(self): + def not_test_float_tensor(self): scope = core.Scope() var = scope.new_var("test_tensor") place = core.CPUPlace() @@ -47,23 +47,26 @@ class TestTensor(unittest.TestCase): places = [core.CPUPlace(), core.GPUPlace(0)] for place in places: scope = core.Scope() - var = scope.new_var("test_tensor") + #var = scope.new_var("test_tensor") var_lod = scope.new_var("test_lod_tensor") - tensor = var.get_tensor() + # tensor = var.get_tensor() lod_tensor = var_lod.get_lod_tensor() - tensor.set_dims([4, 4, 6]) - tensor.alloc_int(place) - array = numpy.array(tensor) + lod_tensor.set_dims([4, 4, 6]) + lod_tensor.alloc_int(place) + print lod_tensor + array = numpy.array(lod_tensor) + print "---- array ----", array array[0, 0, 0] = 3 array[3, 3, 5] = 10 - tensor.set(array, place) + lod_tensor.set(array, place) - lod_tensor.set_tensor(tensor) + # lod_tensor.set_tensor(tensor) lod_tensor.set_lod([[0, 2, 4]]) - lod_v = numpy.array(lod_tensor.tensor()) + # lod_v = numpy.array(lod_tensor.tensor()) + lod_v = numpy.array(lod_tensor) self.assertTrue(numpy.alltrue(array == lod_v)) lod = lod_tensor.lod() @@ -71,7 +74,7 @@ class TestTensor(unittest.TestCase): self.assertEqual(2, lod[0][1]) self.assertEqual(4, lod[0][2]) - def test_float_lod_tensor(self): + def not_test_float_lod_tensor(self): places = [core.CPUPlace(), core.GPUPlace(0)] for place in places: scope = core.Scope() @@ -102,7 +105,7 @@ class TestTensor(unittest.TestCase): lod = lod_tensor.lod() self.assertListEqual(lod_py, lod) - def test_lod_tensor_init(self): + def not_test_lod_tensor_init(self): scope = core.Scope() var = scope.new_var("test_tensor") place = core.CPUPlace() From 8778957cfc26a76c1495c406ffdfb66755503565 Mon Sep 17 00:00:00 2001 From: gongweibao Date: Wed, 13 Sep 2017 14:18:30 +0800 Subject: [PATCH 066/115] Add element-wise multiplication operator. (#3787) Add element-wise multiplication operator --- paddle/operators/elementwise_mul_op.cc | 109 +++++++++++ paddle/operators/elementwise_mul_op.cu | 25 +++ paddle/operators/elementwise_mul_op.h | 185 ++++++++++++++++++ paddle/pybind/pybind.cc | 1 + .../tests/test_elementwise_mul_op.py | 157 +++++++++++++++ 5 files changed, 477 insertions(+) create mode 100644 paddle/operators/elementwise_mul_op.cc create mode 100644 paddle/operators/elementwise_mul_op.cu create mode 100644 paddle/operators/elementwise_mul_op.h create mode 100644 python/paddle/v2/framework/tests/test_elementwise_mul_op.py diff --git a/paddle/operators/elementwise_mul_op.cc b/paddle/operators/elementwise_mul_op.cc new file mode 100644 index 0000000000..1742925545 --- /dev/null +++ b/paddle/operators/elementwise_mul_op.cc @@ -0,0 +1,109 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#include "paddle/operators/elementwise_mul_op.h" + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; + +class ElementWiseMulOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(const framework::InferShapeContext &ctx) const override { + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) should not be null"); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"), "Input(Y) should not be null"); + auto x_dim = ctx.Input("X")->dims(); + auto y_dim = ctx.Input("Y")->dims(); + PADDLE_ENFORCE_GE(x_dim.size(), y_dim.size(), + "Rank of first input must >= rank of second input.") + ctx.Output("Out")->Resize(x_dim); + } +}; + +class ElementWiseMulOpMaker : public framework::OpProtoAndCheckerMaker { + public: + ElementWiseMulOpMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "The first input of elementwise mul op"); + AddInput("Y", "The second input of elementwise mul op"); + AddAttr("axis", + R"DOC( +When shape(Y) does not equal shape(X),Y will be broadcasted +to match the shape of X and axis should be dimension index Y in X + )DOC") + .SetDefault(-1) + .EqualGreaterThan(-1); + + AddOutput("Out", "The output of elementwise mul op"); + AddComment(R"DOC( +Limited elementwise multiple operator.The equation is: Out = X ⊙ Y. +1. The shape of Y should be same with X or +2. Y's shape is a subset of X. + Y will be broadcasted to match the shape of X and axis should be dimension index Y in X. + example: + shape(X) = (2, 3, 4, 5), shape(Y) = (,) + shape(X) = (2, 3, 4, 5), shape(Y) = (5,) + shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5) + shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1 + shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0 +)DOC"); + } +}; + +class ElementWiseMulOpGrad : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(const framework::InferShapeContext &ctx) const override { + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) should not be null"); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"), "Input(Y) should not be null"); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")), + "Input(Out@GRAD) should not be null"); + + auto x_dims = ctx.Input("X")->dims(); + auto y_dims = ctx.Input("Y")->dims(); + auto out_dims = ctx.Input(framework::GradVarName("Out"))->dims(); + auto *x_grad = ctx.Output(framework::GradVarName("X")); + auto *y_grad = ctx.Output(framework::GradVarName("Y")); + + PADDLE_ENFORCE_GE(x_dims.size(), y_dims.size(), + "Rank of first input must >= rank of second input.") + + if (x_grad) { + x_grad->Resize(x_dims); + } + + if (y_grad) { + y_grad->Resize(y_dims); + } + } +}; +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP(elementwise_mul, ops::ElementWiseMulOp, ops::ElementWiseMulOpMaker, + elementwise_mul_grad, ops::ElementWiseMulOpGrad); +REGISTER_OP_CPU_KERNEL( + elementwise_mul, + ops::ElementWiseMulKernel); +REGISTER_OP_CPU_KERNEL( + elementwise_mul_grad, + ops::ElementWiseMulGradKernel); diff --git a/paddle/operators/elementwise_mul_op.cu b/paddle/operators/elementwise_mul_op.cu new file mode 100644 index 0000000000..56f2087c22 --- /dev/null +++ b/paddle/operators/elementwise_mul_op.cu @@ -0,0 +1,25 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#define EIGEN_USE_GPU +#include "paddle/operators/elementwise_mul_op.h" + +namespace ops = paddle::operators; + +REGISTER_OP_GPU_KERNEL( + elementwise_mul, + ops::ElementWiseMulKernel); +REGISTER_OP_GPU_KERNEL( + elementwise_mul_grad, + ops::ElementWiseMulGradKernel); diff --git a/paddle/operators/elementwise_mul_op.h b/paddle/operators/elementwise_mul_op.h new file mode 100644 index 0000000000..e9ed679179 --- /dev/null +++ b/paddle/operators/elementwise_mul_op.h @@ -0,0 +1,185 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#pragma once +#include +#include "paddle/framework/eigen.h" +#include "paddle/framework/op_registry.h" +#include "paddle/operators/math/math_function.h" + +namespace paddle { +namespace operators { +/* + * Out = X ⊙ Y + * 1. shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1 + * pre=2, n=3*4, post=5 + * 2. shape(X) = (2, 3, 4, 5), shape(Y) = (4,5) + * pre=2*3, n=4*5, post=1 + */ + +inline void get_mid_dims(const framework::DDim& x_dims, + const framework::DDim& y_dims, const int axis, + int& pre, int& n, int& post) { + pre = 1; + n = 1; + post = 1; + for (int i = 0; i < axis; ++i) { + pre *= x_dims[i]; + } + + for (int i = 0; i < y_dims.size(); ++i) { + PADDLE_ENFORCE_EQ(x_dims[i + axis], y_dims[i], + "Broadcast dimension mismatch."); + n *= y_dims[i]; + } + + for (int i = axis + y_dims.size(); i < x_dims.size(); ++i) { + post *= x_dims[i]; + } +} + +template +class ElementWiseMulKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + using Tensor = framework::Tensor; + + auto* x = ctx.Input("X"); + auto* y = ctx.Input("Y"); + auto* z = ctx.Output("Out"); + z->mutable_data(ctx.GetPlace()); + + auto x_e = framework::EigenVector::Flatten(*x); + auto y_e = framework::EigenVector::Flatten(*y); + auto z_e = framework::EigenVector::Flatten(*z); + + auto x_dims = x->dims(); + auto y_dims = y->dims(); + PADDLE_ENFORCE_GE(x_dims.size(), y_dims.size(), + "Rank of first input must >= rank of second input.") + + if (x_dims == y_dims || product(y_dims) == 1) { + z_e.device(ctx.GetEigenDevice()) = x_e * y_e; + return; + } + + int axis = ctx.Attr("axis"); + axis = (axis == -1 ? x_dims.size() - y_dims.size() : axis); + PADDLE_ENFORCE(axis >= 0 && axis < x_dims.size(), + "Axis should be in range [0, x_dims)"); + + int pre, n, post; + get_mid_dims(x_dims, y_dims, axis, pre, n, post); + if (post == 1) { + auto y_bcast = y_e.reshape(Eigen::DSizes(1, n)) + .broadcast(Eigen::DSizes(pre, 1)) + .reshape(Eigen::DSizes(x_e.size())); + z_e.device(ctx.GetEigenDevice()) = x_e * y_bcast; + return; + } else { + auto y_bcast = y_e.reshape(Eigen::DSizes(1, n, 1)) + .broadcast(Eigen::DSizes(pre, 1, post)) + .reshape(Eigen::DSizes(x_e.size())); + z_e.device(ctx.GetEigenDevice()) = x_e * y_bcast; + return; + } + } +}; + +template +class ElementWiseMulGradKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + using Tensor = framework::Tensor; + + auto* x = ctx.Input("X"); + auto* y = ctx.Input("Y"); + auto* dout = ctx.Input(framework::GradVarName("Out")); + + auto x_e = framework::EigenVector::Flatten(*x); + auto y_e = framework::EigenVector::Flatten(*y); + auto dout_e = framework::EigenVector::Flatten(*dout); + + auto x_dims = x->dims(); + auto y_dims = y->dims(); + + auto* dx = ctx.Output(framework::GradVarName("X")); + auto* dy = ctx.Output(framework::GradVarName("Y")); + if (dx) { + dx->mutable_data(ctx.GetPlace()); + } + if (dy) { + dy->mutable_data(ctx.GetPlace()); + } + + if (x_dims == y_dims || product(y_dims) == 1) { + if (dx) { + auto dx_e = framework::EigenVector::Flatten(*dx); + dx_e.device(ctx.GetEigenDevice()) = dout_e * y_e; + } + + if (dy) { + auto dy_e = framework::EigenVector::Flatten(*dy); + dy_e.device(ctx.GetEigenDevice()) = x_e * dout_e; + } + return; + } + + int axis = ctx.Attr("axis"); + axis = (axis == -1 ? x_dims.size() - y_dims.size() : axis); + + int pre, n, post; + get_mid_dims(x_dims, y_dims, axis, pre, n, post); + + // TODO(gongweibao): wrap reshape to a function. + if (post == 1) { + auto y_e_bcast = y_e.reshape(Eigen::DSizes(1, n)) + .broadcast(Eigen::DSizes(pre, 1)) + .reshape(Eigen::DSizes(x_e.size())); + if (dx) { + auto dx_e = framework::EigenVector::Flatten(*dx); + dx_e.device(ctx.GetEigenDevice()) = dout_e * y_e_bcast; + } + + if (dy) { + auto dy_e = framework::EigenVector::Flatten(*dy); + dy_e.device(ctx.GetEigenDevice()) = + (x_e * dout_e) + .reshape(Eigen::DSizes(pre, n)) + .sum(Eigen::array{{0}}); + } + return; + } else { + auto y_e_bcast = y_e.reshape(Eigen::DSizes(1, n, 1)) + .broadcast(Eigen::DSizes(pre, 1, post)) + .reshape(Eigen::DSizes(x_e.size())); + if (dx) { + auto dx_e = framework::EigenVector::Flatten(*dx); + dx_e.device(ctx.GetEigenDevice()) = dout_e * y_e_bcast; + } + + if (dy) { + auto dy_e = framework::EigenVector::Flatten(*dy); + dy_e.device(ctx.GetEigenDevice()) = + (x_e * dout_e) + .reshape(Eigen::DSizes(pre, n, post)) + .sum(Eigen::array{{0, 2}}); + } + return; + } + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc index 16a2368aae..ef62d6e997 100644 --- a/paddle/pybind/pybind.cc +++ b/paddle/pybind/pybind.cc @@ -35,6 +35,7 @@ USE_OP(add); USE_OP(onehot_cross_entropy); USE_OP(sgd); USE_OP(mul); +USE_OP(elementwise_mul); USE_OP(mean); USE_OP(sigmoid); USE_OP(softmax); diff --git a/python/paddle/v2/framework/tests/test_elementwise_mul_op.py b/python/paddle/v2/framework/tests/test_elementwise_mul_op.py new file mode 100644 index 0000000000..e268cfddb2 --- /dev/null +++ b/python/paddle/v2/framework/tests/test_elementwise_mul_op.py @@ -0,0 +1,157 @@ +import unittest +import numpy as np +from op_test import OpTest + + +class TestElementwiseMulOp_Matrix(OpTest): + def setUp(self): + self.op_type = "elementwise_mul" + """ Warning + CPU gradient check error! + 'X': np.random.random((32,84)).astype("float32"), + 'Y': np.random.random((32,84)).astype("float32") + """ + self.inputs = { + 'X': np.random.uniform(0.1, 1, [13, 17]).astype("float32"), + 'Y': np.random.uniform(0.1, 1, [13, 17]).astype("float32") + } + self.outputs = {'Out': np.multiply(self.inputs['X'], self.inputs['Y'])} + + def test_check_output(self): + self.check_output() + + def test_check_grad_normal(self): + self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.1) + + def test_check_grad_ingore_x(self): + self.check_grad( + ['Y'], 'Out', max_relative_error=0.1, no_grad_set=set("X")) + + def test_check_grad_ingore_y(self): + self.check_grad( + ['X'], 'Out', max_relative_error=0.1, no_grad_set=set('Y')) + + +class TestElementwiseMulOp_Vector(OpTest): + def setUp(self): + self.op_type = "elementwise_mul" + self.inputs = { + 'X': np.random.random((32, )).astype("float32"), + 'Y': np.random.random((32, )).astype("float32") + } + self.outputs = {'Out': np.multiply(self.inputs['X'], self.inputs['Y'])} + + def test_check_output(self): + self.check_output() + + def test_check_grad_normal(self): + self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.1) + + def test_check_grad_ingore_x(self): + self.check_grad( + ['Y'], 'Out', max_relative_error=0.1, no_grad_set=set("X")) + + def test_check_grad_ingore_y(self): + self.check_grad( + ['X'], 'Out', max_relative_error=0.1, no_grad_set=set('Y')) + + +class TestElementwiseMulOp_broadcast_0(OpTest): + def setUp(self): + self.op_type = "elementwise_mul" + self.inputs = { + 'X': np.random.rand(2, 3, 4).astype(np.float32), + 'Y': np.random.rand(2).astype(np.float32) + } + + self.attrs = {'axis': 0} + self.outputs = { + 'Out': self.inputs['X'] * self.inputs['Y'].reshape(2, 1, 1) + } + + def test_check_output(self): + self.check_output() + + def test_check_grad_normal(self): + self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.1) + + def test_check_grad_ingore_x(self): + self.check_grad( + ['Y'], 'Out', max_relative_error=0.1, no_grad_set=set("X")) + + def test_check_grad_ingore_y(self): + self.check_grad( + ['X'], 'Out', max_relative_error=0.1, no_grad_set=set('Y')) + + +class TestElementwiseMulOp_broadcast_1(OpTest): + def setUp(self): + self.op_type = "elementwise_mul" + self.inputs = { + 'X': np.random.rand(2, 3, 4).astype(np.float32), + 'Y': np.random.rand(3).astype(np.float32) + } + + self.attrs = {'axis': 1} + self.outputs = { + 'Out': self.inputs['X'] * self.inputs['Y'].reshape(1, 3, 1) + } + + def test_check_output(self): + self.check_output() + + def test_check_grad_normal(self): + self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.1) + + def test_check_grad_ingore_x(self): + self.check_grad( + ['Y'], 'Out', max_relative_error=0.1, no_grad_set=set("X")) + + def test_check_grad_ingore_y(self): + self.check_grad( + ['X'], 'Out', max_relative_error=0.1, no_grad_set=set('Y')) + + +class TestElementwiseMulOp_broadcast_2(OpTest): + def setUp(self): + self.op_type = "elementwise_mul" + self.inputs = { + 'X': np.random.rand(2, 3, 4).astype(np.float32), + 'Y': np.random.rand(4).astype(np.float32) + } + + self.outputs = { + 'Out': self.inputs['X'] * self.inputs['Y'].reshape(1, 1, 4) + } + + def test_check_output(self): + self.check_output() + + def test_check_grad_normal(self): + self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.1) + + def test_check_grad_ingore_x(self): + self.check_grad( + ['Y'], 'Out', max_relative_error=0.1, no_grad_set=set("X")) + + def test_check_grad_ingore_y(self): + self.check_grad( + ['X'], 'Out', max_relative_error=0.1, no_grad_set=set('Y')) + + +class TestElementwiseMulOp_broadcast_3(OpTest): + def setUp(self): + self.op_type = "elementwise_mul" + self.inputs = { + 'X': np.random.rand(2, 3, 4, 5).astype(np.float32), + 'Y': np.random.rand(3, 4).astype(np.float32) + } + + self.attrs = {'axis': 1} + self.outputs = { + 'Out': self.inputs['X'] * self.inputs['Y'].reshape(1, 3, 4, 1) + } + + +if __name__ == '__main__': + unittest.main() From b75123b56f31c3c90cdf07d378a806efdf5d838a Mon Sep 17 00:00:00 2001 From: Liu Yiqun Date: Wed, 13 Sep 2017 11:30:03 +0000 Subject: [PATCH 067/115] Add TODO comment about the support of glog for Android api 16 ~ 19. --- CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index e3194cd29c..5739c2a260 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -68,6 +68,7 @@ if(ANDROID) if(${CMAKE_SYSTEM_VERSION} VERSION_LESS "16") message(FATAL_ERROR "Unsupport standalone toolchains with Android API level lower than 16") elseif(${CMAKE_SYSTEM_VERSION} VERSION_LESS "21") + # TODO: support glog for Android api 16 ~ 19 in the future message(WARNING "Using the unofficial git repository instead") endif() From 22de57f1273d08d0dfd2e46a7a7c91e5022e0db3 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Wed, 13 Sep 2017 19:56:49 +0800 Subject: [PATCH 068/115] enable cudnn_convt --- python/paddle/trainer_config_helpers/layers.py | 1 + 1 file changed, 1 insertion(+) diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py index 4b1d80d3db..8c7d1738ad 100644 --- a/python/paddle/trainer_config_helpers/layers.py +++ b/python/paddle/trainer_config_helpers/layers.py @@ -169,6 +169,7 @@ class LayerType(object): EXCONV_LAYER = 'exconv' EXCONVTRANS_LAYER = 'exconvt' CUDNNCONV_LAYER = 'cudnn_conv' + CUDNNCONVTRANS_LAYER = 'cudnn_convt' POOL_LAYER = 'pool' POOL3D_LAYER = 'pool3d' BATCH_NORM_LAYER = 'batch_norm' From 47975870aa043d1d4e6c71335c6e4d09df94e13d Mon Sep 17 00:00:00 2001 From: Yancey Date: Wed, 13 Sep 2017 20:18:00 +0800 Subject: [PATCH 069/115] Fix check grad with multioutput (#4067) Fix check grad with multi outputs --- python/paddle/v2/framework/tests/op_test.py | 16 +++++++++++----- .../v2/framework/tests/test_gradient_checker.py | 3 ++- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/python/paddle/v2/framework/tests/op_test.py b/python/paddle/v2/framework/tests/op_test.py index 4fec4c9109..9936fd76ba 100644 --- a/python/paddle/v2/framework/tests/op_test.py +++ b/python/paddle/v2/framework/tests/op_test.py @@ -85,7 +85,7 @@ def get_numeric_gradient(scope, op, inputs, input_to_check, - output_name, + output_names, delta=0.005, in_place=False): @@ -100,8 +100,11 @@ def get_numeric_gradient(scope, ctx = core.DeviceContext.create(core.CPUPlace()) def get_output(): - op.run(scope, ctx) - return np.array(scope.find_var(output_name).get_tensor()).sum() + sum = 0.0 + for output_name in output_names: + op.run(scope, ctx) + sum += np.array(scope.find_var(output_name).get_tensor()).sum() + return sum tensor_to_check = scope.find_var(input_to_check).get_tensor() tensor_size = product(tensor_to_check.get_dims()) @@ -225,7 +228,7 @@ class OpTest(unittest.TestCase): def check_grad(self, inputs_to_check, - output_name, + output_names, no_grad_set=None, in_place=False, max_relative_error=0.005): @@ -237,13 +240,16 @@ class OpTest(unittest.TestCase): if no_grad_set is None: no_grad_set = set() + if not type(output_names) is list: + output_names = [output_names] + numeric_grads = [ get_numeric_gradient( self.scope, self.op, self.inputs, input_to_check, - output_name, + output_names, in_place=in_place) for input_to_check in inputs_to_check ] grad_names = [ diff --git a/python/paddle/v2/framework/tests/test_gradient_checker.py b/python/paddle/v2/framework/tests/test_gradient_checker.py index abeb01cb34..85117bf960 100644 --- a/python/paddle/v2/framework/tests/test_gradient_checker.py +++ b/python/paddle/v2/framework/tests/test_gradient_checker.py @@ -12,7 +12,8 @@ class GetNumericGradientTest(unittest.TestCase): z = x + y scope = core.Scope() add_op = create_op(scope, "add", {'X': x, 'Y': y}, {'Out': z}, dict()) - arr = get_numeric_gradient(scope, add_op, {'X': x, 'Y': y}, 'X', 'Out') + arr = get_numeric_gradient(scope, add_op, {'X': x, + 'Y': y}, 'X', ['Out']) self.assertAlmostEqual(arr.mean(), 1.0, delta=1e-4) def test_softmax_op(self): From af2eb94909a0b938b23b0959b42a1b8c36236778 Mon Sep 17 00:00:00 2001 From: Liu Yiqun Date: Wed, 13 Sep 2017 12:48:22 +0000 Subject: [PATCH 070/115] Support inputs and weights of multi-dimensions and refine the output names. --- paddle/operators/fc_op.cc | 53 ++++++++++++------- .../paddle/v2/framework/tests/test_fc_op.py | 49 ++++++++++------- 2 files changed, 64 insertions(+), 38 deletions(-) diff --git a/paddle/operators/fc_op.cc b/paddle/operators/fc_op.cc index 1c6c045427..3e6cd8f76a 100644 --- a/paddle/operators/fc_op.cc +++ b/paddle/operators/fc_op.cc @@ -26,7 +26,7 @@ class FCOp : public NetOp { : NetOp(type, inputs, outputs, attrs) { auto x = Inputs("X"); auto w = Inputs("W"); - auto mul_out = Outputs("mul_out"); + auto mul_out = Outputs("MulOut"); PADDLE_ENFORCE_EQ( x.size(), w.size(), "The size of inputs X(%d) should be the same as that of weights W(%d).", @@ -36,36 +36,51 @@ class FCOp : public NetOp { "as that of inputs X(%d).", mul_out.size(), x.size()); - int n = x.size(); - PADDLE_ENFORCE_GE(n, 1, + size_t n = x.size(); + PADDLE_ENFORCE_GE(n, static_cast(1), "The size of inputs X(%d) should be no less than 1.", n); + auto x_num_col_dims = Attr>("xNumColDims"); + auto w_num_col_dims = Attr>("wNumColDims"); + PADDLE_ENFORCE_EQ(x_num_col_dims.size(), n, + "The size of attribute xNumColDims(%d) should be the " + "same as that of inputs X(%d).", + x_num_col_dims.size(), n); + PADDLE_ENFORCE_EQ(w_num_col_dims.size(), n, + "The size of attribute wNumColDims(%d) should be the " + "same as that of inputs X(%d).", + w_num_col_dims.size(), n) + // mul_out[i] = X[i] * W[i] - for (int i = 0; i < n; i++) { - AppendOp(framework::OpRegistry::CreateOp( - "mul", {{"X", {x[i]}}, {"Y", {w[i]}}}, {{"Out", {mul_out[i]}}}, {})); + for (size_t i = 0; i < n; i++) { + framework::AttributeMap mul_attr; + mul_attr["x_num_col_dims"] = static_cast(x_num_col_dims[i]); + mul_attr["y_num_col_dims"] = static_cast(w_num_col_dims[i]); + AppendOp( + framework::OpRegistry::CreateOp("mul", {{"X", {x[i]}}, {"Y", {w[i]}}}, + {{"Out", {mul_out[i]}}}, mul_attr)); } // sum_out = X[0] * W[0] + ... + X[n-1] * W[n-1] if (n > 1) { AppendOp(framework::OpRegistry::CreateOp( - "sum", {{"X", {mul_out}}}, {{"Out", {Output("sum_out")}}}, {})); + "sum", {{"X", {mul_out}}}, {{"Out", {Output("SumOut")}}}, {})); } else { AppendOp(framework::OpRegistry::CreateOp( - "identity", {{"X", {mul_out[0]}}}, {{"Y", {Output("sum_out")}}}, {})); + "identity", {{"X", {mul_out[0]}}}, {{"Y", {Output("SumOut")}}}, {})); } // add_out = sum_out + b - auto b = Input("b"); - std::string add_out = "sum_out"; + auto b = Input("B"); + std::string add_out = "SumOut"; if (b != framework::kEmptyVarName) { - add_out = "add_out"; + add_out = "AddOut"; AppendOp(framework::OpRegistry::CreateOp( - "rowwise_add", {{"X", {Output("sum_out")}}, {"b", {Input("b")}}}, + "rowwise_add", {{"X", {Output("SumOut")}}, {"b", {Input("B")}}}, {{"Out", {Output(add_out)}}}, {})); } else { - if (Output("add_out") != framework::kEmptyVarName) { - this->Rename(Output("add_out"), framework::kEmptyVarName); + if (Output("AddOut") != framework::kEmptyVarName) { + this->Rename(Output("AddOut"), framework::kEmptyVarName); } } @@ -84,24 +99,26 @@ class FCOpMaker : public framework::OpProtoAndCheckerMaker { .AsDuplicable(); AddInput("W", "The weights of FC operator, a ordered vector of 2-D matrix.") .AsDuplicable(); - AddInput("b", "The 1-D bias vector of FC operator"); + AddInput("B", "The 1-D bias vector of FC operator"); AddOutput("Y", "The activated output matrix of FC operator"); - AddOutput("mul_out", + AddOutput("MulOut", "The intermediate outputs of FC operator, " "saving the product of X[i] * W[i]") .AsIntermediate() .AsDuplicable(); - AddOutput("sum_out", + AddOutput("SumOut", "The intermediate output of FC operator, " "saving the sum of products, sum(X[i] * W[i])") .AsIntermediate(); - AddOutput("add_out", + AddOutput("AddOut", "The non-actived output of FC operator, saving X * W + b") .AsIntermediate(); AddAttr("activation", "The activation type of FC operator.") .SetDefault("identity") .InEnum({"identity", "sigmoid", "softmax"}); + AddAttr>("xNumColDims", ""); + AddAttr>("wNumColDims", ""); AddComment(R"DOC( Fully Connected Operator, known as Fully Connected Layer or Inner Product Layer diff --git a/python/paddle/v2/framework/tests/test_fc_op.py b/python/paddle/v2/framework/tests/test_fc_op.py index 00c4870997..39906c8b33 100644 --- a/python/paddle/v2/framework/tests/test_fc_op.py +++ b/python/paddle/v2/framework/tests/test_fc_op.py @@ -5,52 +5,61 @@ from op_test import OpTest class TestFCOp1(OpTest): def setUp(self): - self.op_type = "fc" x0 = np.random.random((16, 32)).astype("float32") w0 = np.random.random((32, 10)).astype("float32") b = np.random.random(10).astype("float32") - self.inputs = {"X": [("X0", x0)], "W": [("W0", w0)], "b": b} + mul_out0 = np.dot(x0, w0) sum_out = mul_out0 add_out = sum_out + b identity_out = add_out + + self.op_type = "fc" + self.inputs = {"X": [("X0", x0)], "W": [("W0", w0)], "B": b} self.outputs = { - "mul_out": [("mul_out0", mul_out0)], - "sum_out": sum_out, - "add_out": add_out, + "MulOut": [("MulOut0", mul_out0)], + "SumOut": sum_out, + "AddOut": add_out, "Y": identity_out } + self.attrs = {"xNumColDims": [1], "wNumColDims": [1]} def test_check_output(self): self.check_output() def test_check_grad(self): - self.check_grad(["X0", "W0", "b"], "Y", max_relative_error=0.01) + self.check_grad(["X0", "W0", "B"], "Y", max_relative_error=0.01) class TestFCOp2(OpTest): def setUp(self): - self.op_type = "fc" - x0 = np.random.random((16, 32)).astype("float32") + x0 = np.random.random((16, 4, 8)).astype("float32") x1 = np.random.random((16, 32)).astype("float32") w0 = np.random.random((32, 10)).astype("float32") - w1 = np.random.random((32, 10)).astype("float32") + w1 = np.random.random((4, 8, 10)).astype("float32") b = np.random.random(10).astype("float32") + + mul_out0 = np.dot(x0.reshape(16, 4 * 8), w0) + mul_out1 = np.dot(x1, w1.reshape(4 * 8, 10)) + sum_out = mul_out0 + mul_out1 + add_out = np.add(sum_out, b) + sigmoid_out = 1 / (1 + np.exp(-add_out)) + + self.op_type = "fc" self.inputs = { "X": [("X0", x0), ("X1", x1)], "W": [("W0", w0), ("W1", w1)], - "b": b + "B": b + } + self.attrs = { + "xNumColDims": [1, 1], + "wNumColDims": [1, 2], + "activation": "sigmoid" } - self.attrs = {"activation": "sigmoid"} - mul_out0 = np.dot(x0, w0) - mul_out1 = np.dot(x1, w1) - sum_out = mul_out0 + mul_out1 - add_out = np.add(sum_out, b) - sigmoid_out = 1 / (1 + np.exp(-add_out)) self.outputs = { - "mul_out": [("mul_out0", mul_out0), ("mul_out1", mul_out1)], - "sum_out": sum_out, - "add_out": add_out, + "MulOut": [("MulOut0", mul_out0), ("MulOut1", mul_out1)], + "SumOut": sum_out, + "AddOut": add_out, "Y": sigmoid_out } @@ -59,7 +68,7 @@ class TestFCOp2(OpTest): def test_check_grad(self): self.check_grad( - ["X0", "X1", "W0", "W1", "b"], "Y", max_relative_error=0.01) + ["X0", "X1", "W0", "W1", "B"], "Y", max_relative_error=0.01) if __name__ == '__main__': From f2317b67f7673eea465dbc0e41b4235d0927aa72 Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Wed, 13 Sep 2017 16:44:37 +0800 Subject: [PATCH 071/115] separate resetFwd and resetBwd to some sub functions --- paddle/gserver/layers/MKLDNNConvLayer.cpp | 513 ++++++++++++++-------- paddle/gserver/layers/MKLDNNConvLayer.h | 108 ++++- 2 files changed, 433 insertions(+), 188 deletions(-) diff --git a/paddle/gserver/layers/MKLDNNConvLayer.cpp b/paddle/gserver/layers/MKLDNNConvLayer.cpp index 19891043a1..f8c06c5f86 100644 --- a/paddle/gserver/layers/MKLDNNConvLayer.cpp +++ b/paddle/gserver/layers/MKLDNNConvLayer.cpp @@ -18,9 +18,6 @@ limitations under the License. */ using namespace mkldnn; // NOLINT typedef memory::format format; -typedef convolution_forward conv_fwd; -typedef convolution_backward_weights conv_bwdWgt; -typedef convolution_backward_data conv_bwdData; namespace paddle { @@ -114,237 +111,396 @@ void MKLDNNConvLayer::resetFwd(std::vector& pipeline, MKLDNNMatrixPtr& wgt, MKLDNNMatrixPtr& bias, MKLDNNMatrixPtr& out) { - pipeline.clear(); - bool hasBias = biases_ && biases_->getW(); - biasVal_ = nullptr; + resetFwdPD(fwdPD_); + + resetFwdBuffers(fwdPD_, in, wgt, bias, out); + + resetFwdPipeline(pipeline, fwdPD_, in, wgt, bias, out); + + printValueFormatFlow(); +} + +void MKLDNNConvLayer::resetBwd(std::vector& pipeline, + MKLDNNMatrixPtr& in, + MKLDNNMatrixPtr& wgt, + MKLDNNMatrixPtr& bias, + MKLDNNMatrixPtr& out) { + std::shared_ptr bwdWgtPD; + std::shared_ptr bwdDataPD; + + resetBwdWgtPD(bwdWgtPD); + + resetBwdDataPD(bwdDataPD); + + resetBwdBuffers(bwdWgtPD, bwdDataPD, in, wgt, bias, out); + resetBwdPipeline(pipeline, bwdWgtPD, bwdDataPD, in, wgt, bias, out); + + printGradFormatFlow(); +} + +void MKLDNNConvLayer::updateInputData() { + cpuInVal_->setData(getInputValue(0, CPU_DEVICE)->getData()); +} + +void MKLDNNConvLayer::updateWeights(const UpdateCallback& callback) { + weight_->getParameterPtr()->incUpdate(callback); + if (biases_ && biases_->getWGrad()) { + biases_->getParameterPtr()->incUpdate(callback); + } +} + +void MKLDNNConvLayer::loadConvSettings(memory::dims& wgt, + memory::dims& bias, + memory::dims& stride, + memory::dims& dilation, + memory::dims& padL, + memory::dims& padR) { + wgt = (gp_ == 1) ? memory::dims{oc_, ic_, fh_, fw_} + : memory::dims{gp_, oc_ / gp_, ic_ / gp_, fh_, fw_}; + bias = memory::dims{oc_}; + stride = memory::dims{sh_, sw_}; + padL = memory::dims{ph_, pw_}; + padR = getPaddingR(); + // note: mkldnn dilation start from 0 + dilation = memory::dims{dh_ - 1, dw_ - 1}; +} + +void MKLDNNConvLayer::resetFwdPD( + std::shared_ptr& pd) { // dims for conv memory::dims inDims = memory::dims{bs_, ic_, ih_, iw_}; memory::dims outDims = memory::dims{bs_, oc_, oh_, ow_}; - memory::dims wgtDims = - (gp_ == 1) ? memory::dims{oc_, ic_, fh_, fw_} - : memory::dims{gp_, oc_ / gp_, ic_ / gp_, fh_, fw_}; - memory::dims biasDims = memory::dims{oc_}; - memory::dims strides = {sh_, sw_}; - // note: mkldnn dilation start from 0 - memory::dims dilations = {dh_ - 1, dw_ - 1}; - memory::dims padding = {ph_, pw_}; - memory::dims padR = getPaddingR(); + memory::dims wgtDims, biasDims, strides, dilations, padL, padR; + loadConvSettings(wgtDims, biasDims, strides, dilations, padL, padR); - // create forward handle - prop_kind pk = - passType_ == PASS_TEST ? prop_kind::forward : prop_kind::forward_training; + prop_kind pk = passType_ == PASS_TEST ? prop_kind::forward_scoring + : prop_kind::forward_training; algorithm algo = algorithm::convolution_direct; padding_kind padKind = padding_kind::zero; conv_fwd::desc fwdDesc = - hasBias ? conv_fwd::desc(pk, - algo, - MKLDNNMatrix::createMemoryDesc(inDims), - MKLDNNMatrix::createMemoryDesc(wgtDims), - MKLDNNMatrix::createMemoryDesc(biasDims), - MKLDNNMatrix::createMemoryDesc(outDims), - strides, - dilations, - padding, - padR, - padKind) - : conv_fwd::desc(pk, - algo, - MKLDNNMatrix::createMemoryDesc(inDims), - MKLDNNMatrix::createMemoryDesc(wgtDims), - MKLDNNMatrix::createMemoryDesc(outDims), - strides, - dilations, - padding, - padR, - padKind); - fwdPD_.reset(new conv_fwd::primitive_desc(fwdDesc, engine_)); - - // create mkldnn matrix - const MatrixPtr& wgtVal = weight_->getW(); - const MatrixPtr& inVal = inputLayers_[0]->getOutput().value; - const MatrixPtr& outVal = output_.value; - wgt = MKLDNNMatrix::create(wgtVal, fwdPD_->weights_primitive_desc()); - in = MKLDNNMatrix::create(inVal, fwdPD_->src_primitive_desc()); - out = MKLDNNMatrix::create(outVal, fwdPD_->dst_primitive_desc()); - VLOG(MKLDNN_FMTS) << "Weight value format: " << wgtVal_->getFormat(); - if (hasBias) { - const MatrixPtr& biasVal = biases_->getW(); - bias = MKLDNNMatrix::create(biasVal, biasDims, format::x, engine_); - CHECK(bias->getPrimitiveDesc() == fwdPD_->bias_primitive_desc()) - << "bias primitive desc should always be equal"; + biases_ && biases_->getW() + ? conv_fwd::desc(pk, + algo, + MKLDNNMatrix::createMemoryDesc(inDims), + MKLDNNMatrix::createMemoryDesc(wgtDims), + MKLDNNMatrix::createMemoryDesc(biasDims), + MKLDNNMatrix::createMemoryDesc(outDims), + strides, + dilations, + padL, + padR, + padKind) + : conv_fwd::desc(pk, + algo, + MKLDNNMatrix::createMemoryDesc(inDims), + MKLDNNMatrix::createMemoryDesc(wgtDims), + MKLDNNMatrix::createMemoryDesc(outDims), + strides, + dilations, + padL, + padR, + padKind); + pd.reset(new conv_fwd::primitive_desc(fwdDesc, engine_)); +} + +void MKLDNNConvLayer::resetFwdBuffers( + std::shared_ptr& pd, + MKLDNNMatrixPtr& in, + MKLDNNMatrixPtr& wgt, + MKLDNNMatrixPtr& bias, + MKLDNNMatrixPtr& out) { + CHECK(pd); + resetInValue(pd, in); + + resetWgtBiasValue(pd, wgt, bias); + + resetOutValue(pd, out); +} + +void MKLDNNConvLayer::resetFwdPipeline( + std::vector& pipeline, + std::shared_ptr& pd, + MKLDNNMatrixPtr& in, + MKLDNNMatrixPtr& wgt, + MKLDNNMatrixPtr& bias, + MKLDNNMatrixPtr& out) { + pipeline.clear(); + + if (cvtInVal_) { + pipeline.push_back(*cvtInVal_); + } + + if (bias) { + fwd_.reset(new conv_fwd(*pd, *in, *wgt, *bias, *out)); + } else { + fwd_.reset(new conv_fwd(*pd, *in, *wgt, *out)); } + pipeline.push_back(*fwd_); + + if (cvtOutVal_) { + pipeline.push_back(*cvtOutVal_); + } +} - // add reorder if input value do not match +void MKLDNNConvLayer::resetInValue( + std::shared_ptr& pd, MKLDNNMatrixPtr& in) { + const MatrixPtr& inMat = inputLayers_[0]->getOutput().value; + in = MKLDNNMatrix::create(inMat, pd->src_primitive_desc()); + + // create buffer and reorder if input value do not match + cpuInVal_ = nullptr; + cvtInVal_ = nullptr; if (inputIsOnlyMKLDNN()) { - MKLDNNMatrixPtr dnnIn = std::dynamic_pointer_cast(inVal); + MKLDNNMatrixPtr dnnIn = std::dynamic_pointer_cast(inMat); CHECK(dnnIn) << "Input should be MKLDNNMatrix"; if (dnnIn->getPrimitiveDesc() != in->getPrimitiveDesc()) { CHECK_EQ(dnnIn->getFormat(), format::nc); - CHECK(ih_ == 1 && iw_ == 1); - dnnIn = MKLDNNMatrix::create(inVal, inDims, format::nchw, engine_); + CHECK(ih_ == 1 && iw_ == 1) << "when input is nc format"; + // create a new one with nchw format and same data + memory::dims inDims = memory::dims{bs_, ic_, 1, 1}; + dnnIn = MKLDNNMatrix::create(inMat, inDims, format::nchw, engine_); CHECK(dnnIn->getPrimitiveDesc() == in->getPrimitiveDesc()); } in = dnnIn; } else { const MatrixPtr& cpuIn = getInputValue(0, CPU_DEVICE); + memory::dims inDims = memory::dims{bs_, ic_, ih_, iw_}; cpuInVal_ = MKLDNNMatrix::create(cpuIn, inDims, format::nchw, engine_); if (cpuInVal_->getPrimitiveDesc() != in->getPrimitiveDesc()) { // create new mkldnn matrix - in = MKLDNNMatrix::create(nullptr, fwdPD_->src_primitive_desc()); + in = MKLDNNMatrix::create(nullptr, pd->src_primitive_desc()); cvtInVal_ = MKLDNNMatrix::createReorder(cpuInVal_, in); - CHECK(cvtInVal_); - pipeline.push_back(*cvtInVal_); + CHECK(cvtInVal_) << "should not be emptry"; } else { in = cpuInVal_; } } +} - // add fwd handle - if (hasBias) { - fwd_.reset(new conv_fwd(*fwdPD_, *in, *wgt, *bias, *out)); - } else { - fwd_.reset(new conv_fwd(*fwdPD_, *in, *wgt, *out)); +void MKLDNNConvLayer::resetWgtBiasValue( + std::shared_ptr& pd, + MKLDNNMatrixPtr& wgt, + MKLDNNMatrixPtr& bias) { + wgt = MKLDNNMatrix::create(weight_->getW(), pd->weights_primitive_desc()); + VLOG(MKLDNN_FMTS) << "Weight value format: " << wgt->getFormat(); + + bias = nullptr; + if (biases_ && biases_->getW()) { + bias = MKLDNNMatrix::create(biases_->getW(), pd->bias_primitive_desc()); } - pipeline.push_back(*fwd_); +} + +void MKLDNNConvLayer::resetOutValue( + std::shared_ptr& pd, MKLDNNMatrixPtr& out) { + out = MKLDNNMatrix::create(output_.value, pd->dst_primitive_desc()); // change original output value from cpu matrix to mkldnn matrix output_.value = std::dynamic_pointer_cast(out); - // add reorder if output value has cpu device and pd do not match + + // create reorder if output value has cpu device and pd do not match + cpuOutVal_ = nullptr; + cpuOutVal_ = nullptr; if (!outputIsOnlyMKLDNN()) { const MatrixPtr& cpuOut = getOutput(CPU_DEVICE).value; + memory::dims outDims = memory::dims{bs_, oc_, oh_, ow_}; cpuOutVal_ = MKLDNNMatrix::create(cpuOut, outDims, format::nchw, engine_); if (cpuOutVal_->getPrimitiveDesc() != out->getPrimitiveDesc()) { cvtOutVal_ = MKLDNNMatrix::createReorder(out, cpuOutVal_); - CHECK(cvtOutVal_); - pipeline.push_back(*cvtOutVal_); + CHECK(cvtOutVal_) << "should not be emptry"; } else { - // share data + // CPU output share the same data of MKLDNN output cpuOut->setData(out->getData()); cpuOutVal_ = out; } } - - printValueFormatFlow(); } -void MKLDNNConvLayer::resetBwd(std::vector& pipeline, - MKLDNNMatrixPtr& in, - MKLDNNMatrixPtr& wgt, - MKLDNNMatrixPtr& bias, - MKLDNNMatrixPtr& out) { - pipeline.clear(); - bool hasBias = biases_ && biases_->getWGrad(); +void MKLDNNConvLayer::resetBwdWgtPD( + std::shared_ptr& pd) { + memory::dims wgtDims, biasDims, strides, dilations, padL, padR; + loadConvSettings(wgtDims, biasDims, strides, dilations, padL, padR); - /// backward weight + // create backward weight using input, output and weight value memory desc CHECK(inVal_) << "Should have input value"; CHECK(outVal_) << "Should have output value"; CHECK(wgtVal_) << "Should have weight value"; - memory::dims wgtDims = - (gp_ == 1) ? memory::dims{oc_, ic_, fh_, fw_} - : memory::dims{gp_, oc_ / gp_, ic_ / gp_, fh_, fw_}; - memory::dims strides = {sh_, sw_}; - memory::dims dilations = {dh_ - 1, dw_ - 1}; - memory::dims padding = {ph_, pw_}; - memory::dims padR = getPaddingR(); - - // create backward handle algorithm algo = algorithm::convolution_direct; padding_kind padKind = padding_kind::zero; - auto bwdWgtDesc = - hasBias ? conv_bwdWgt::desc(algo, - inVal_->getMemoryDesc(), - MKLDNNMatrix::createMemoryDesc(wgtDims), - biasVal_->getMemoryDesc(), - outVal_->getMemoryDesc(), - strides, - padding, - padR, - padKind) - : conv_bwdWgt::desc(algo, - inVal_->getMemoryDesc(), - MKLDNNMatrix::createMemoryDesc(wgtDims), - outVal_->getMemoryDesc(), - strides, - padding, - padR, - padKind); - - auto bwdWgtPD = conv_bwdWgt::primitive_desc(bwdWgtDesc, engine_, *fwdPD_); - CHECK(bwdWgtPD.src_primitive_desc() == inVal_->getPrimitiveDesc()) + auto bwdWgtDesc = biasVal_ != nullptr + ? conv_bwdWgt::desc(algo, + inVal_->getMemoryDesc(), + wgtVal_->getMemoryDesc(), + biasVal_->getMemoryDesc(), + outVal_->getMemoryDesc(), + strides, + padL, + padR, + padKind) + : conv_bwdWgt::desc(algo, + inVal_->getMemoryDesc(), + wgtVal_->getMemoryDesc(), + outVal_->getMemoryDesc(), + strides, + padL, + padR, + padKind); + pd.reset(new conv_bwdWgt::primitive_desc(bwdWgtDesc, engine_, *fwdPD_)); + CHECK(pd->src_primitive_desc() == inVal_->getPrimitiveDesc()) << "primitive desc of in value should equal"; - CHECK(bwdWgtPD.diff_dst_primitive_desc() == outVal_->getPrimitiveDesc()) + CHECK(pd->diff_dst_primitive_desc() == outVal_->getPrimitiveDesc()) << "primitive desc of out grad should equal the out value"; - CHECK(bwdWgtPD.diff_weights_primitive_desc() == wgtVal_->getPrimitiveDesc()) + CHECK(pd->diff_weights_primitive_desc() == wgtVal_->getPrimitiveDesc()) << "primitive desc of weight grad should equal the weight value"; +} - // create mkldnn matrix - const MatrixPtr& wgtGrad = weight_->getWGrad(); - const MatrixPtr& outGrad = output_.grad; - wgt = MKLDNNMatrix::create(wgtGrad, bwdWgtPD.diff_weights_primitive_desc()); - out = MKLDNNMatrix::create(outGrad, bwdWgtPD.diff_dst_primitive_desc()); - CHECK(wgt->getPrimitiveDesc() == wgtVal_->getPrimitiveDesc()) - << "primitive desc of weight grad and value should be equal"; - CHECK(out->getPrimitiveDesc() == outVal_->getPrimitiveDesc()) - << "primitive desc of out grad and value should be equal"; - VLOG(MKLDNN_FMTS) << "Backward weight, weight grad format: " - << wgt->getFormat(); - if (hasBias) { - const MatrixPtr& biasGrad = biases_->getWGrad(); - bias = MKLDNNMatrix::create(biasGrad, bwdWgtPD.diff_bias_primitive_desc()); - CHECK(bias->getPrimitiveDesc() == biasVal_->getPrimitiveDesc()) - << "primitive desc of bias grad should equal the bias value"; +void MKLDNNConvLayer::resetBwdDataPD( + std::shared_ptr& pd) { + if (inputLayers_[0]->getOutput().grad == nullptr) { + return; } + memory::dims wgtDims, biasDims, strides, dilations, padL, padR; + loadConvSettings(wgtDims, biasDims, strides, dilations, padL, padR); + CHECK(inVal_) << "Should have input value"; + CHECK(outVal_) << "Should have output value"; + // create backward data using input and output value memory desc + // but using weight memory desc with any format + auto bwdDataDesc = conv_bwdData::desc(algorithm::convolution_direct, + inVal_->getMemoryDesc(), + MKLDNNMatrix::createMemoryDesc(wgtDims), + outVal_->getMemoryDesc(), + strides, + padL, + padR, + padding_kind::zero); + pd.reset(new conv_bwdData::primitive_desc(bwdDataDesc, engine_, *fwdPD_)); + CHECK(pd->diff_src_primitive_desc() == inVal_->getPrimitiveDesc()) + << "primitive desc of in grad should equal the in value"; + CHECK(pd->diff_dst_primitive_desc() == outVal_->getPrimitiveDesc()) + << "primitive desc of out grad should equal"; +} + +void MKLDNNConvLayer::resetBwdBuffers( + std::shared_ptr& wgtPD, + std::shared_ptr& dataPD, + MKLDNNMatrixPtr& in, + MKLDNNMatrixPtr& wgt, + MKLDNNMatrixPtr& bias, + MKLDNNMatrixPtr& out) { + CHECK(wgtPD); + resetOutGrad(wgtPD, out); + + resetWgtBiasGrad(wgtPD, wgt, bias); + + resetInGrad(dataPD, in); + + resetWgtValBwdData(dataPD, wgtValBwdData_); +} + +void MKLDNNConvLayer::resetBwdPipeline( + std::vector& pipeline, + std::shared_ptr& wgtPD, + std::shared_ptr& dataPD, + MKLDNNMatrixPtr& in, + MKLDNNMatrixPtr& wgt, + MKLDNNMatrixPtr& bias, + MKLDNNMatrixPtr& out) { + pipeline.clear(); + + if (cvtOutGrad_) { + pipeline.push_back(*cvtOutGrad_); + } + + // add bwdWgt handle + if (bias) { + bwdWgt_.reset(new conv_bwdWgt(*wgtPD, *inVal_, *out, *wgt, *bias)); + } else { + bwdWgt_.reset(new conv_bwdWgt(*wgtPD, *inVal_, *out, *wgt)); + } + pipeline.push_back(*bwdWgt_); + + if (dataPD == nullptr) { + return; + } + + if (cvtWgtVal_) { + pipeline.push_back(*cvtWgtVal_); + } + + // add bwdData handle + CHECK(wgtValBwdData_) << "Should have weight memory"; + bwdData_.reset(new conv_bwdData(*dataPD, *out, *wgtValBwdData_, *in)); + pipeline.push_back(*bwdData_); + + if (cvtInGrad_) { + pipeline.push_back(*cvtInGrad_); + } +} + +void MKLDNNConvLayer::resetOutGrad( + std::shared_ptr& wgtPD, MKLDNNMatrixPtr& out) { + const MatrixPtr& outMat = output_.grad; + out = MKLDNNMatrix::create(outMat, wgtPD->diff_dst_primitive_desc()); + CHECK(outVal_ != nullptr && + out->getPrimitiveDesc() == outVal_->getPrimitiveDesc()) + << "primitive desc of out grad and value should be equal"; + // TODO(TJ): merge outgrad - // add reorder if has user output grad + // create reorder if has output grad does not match + cpuOutGrad_ = nullptr; + cvtOutGrad_ = nullptr; if (!outputIsOnlyMKLDNN()) { const MatrixPtr& cpuOut = getOutput(CPU_DEVICE).grad; - memory::dims outDims = memory::dims{bs_, oc_, oh_, ow_}; // same PrimitiveDesc with cpuInVal_ CHECK(cpuOutVal_); cpuOutGrad_ = MKLDNNMatrix::create(cpuOut, cpuOutVal_->getPrimitiveDesc()); if (cpuOutGrad_->getPrimitiveDesc() == out->getPrimitiveDesc()) { - outGrad->setData(cpuOut->getData()); + outMat->setData(cpuOut->getData()); out = cpuOutGrad_; } else { cvtOutGrad_ = MKLDNNMatrix::createReorder(cpuOutGrad_, out); CHECK(cvtOutGrad_); - pipeline.push_back(*cvtOutGrad_); } } +} - // add bwdWgt handle - if (hasBias) { - bwdWgt_.reset(new conv_bwdWgt(bwdWgtPD, *inVal_, *out, *wgt, *bias)); - } else { - bwdWgt_.reset(new conv_bwdWgt(bwdWgtPD, *inVal_, *out, *wgt)); - } - pipeline.push_back(*bwdWgt_); +void MKLDNNConvLayer::resetWgtBiasGrad( + std::shared_ptr& wgtPD, + MKLDNNMatrixPtr& wgt, + MKLDNNMatrixPtr& bias) { + wgt = MKLDNNMatrix::create(weight_->getWGrad(), + wgtPD->diff_weights_primitive_desc()); + CHECK(nullptr != wgtVal_ && + wgt->getPrimitiveDesc() == wgtVal_->getPrimitiveDesc()) + << "primitive desc of weight grad and value should be equal"; + VLOG(MKLDNN_FMTS) << "weight grad format: " << wgt->getFormat(); - /// backward data - const MatrixPtr& inGrad = inputLayers_[0]->getOutput().grad; - if (inGrad == nullptr) { + if (biasVal_ == nullptr) { return; } + bias = MKLDNNMatrix::create(biases_->getWGrad(), + wgtPD->diff_bias_primitive_desc()); + CHECK(bias->getPrimitiveDesc() == biasVal_->getPrimitiveDesc()) + << "primitive desc of bias grad should equal the bias value"; +} - auto bwdDataDesc = conv_bwdData::desc(algo, - inVal_->getMemoryDesc(), - MKLDNNMatrix::createMemoryDesc(wgtDims), - out->getMemoryDesc(), - strides, - padding, - padR, - padKind); - auto bwdDataPD = conv_bwdData::primitive_desc(bwdDataDesc, engine_, *fwdPD_); - CHECK(bwdDataPD.diff_src_primitive_desc() == inVal_->getPrimitiveDesc()) - << "primitive desc of in grad should equal the in value"; - CHECK(bwdDataPD.diff_dst_primitive_desc() == out->getPrimitiveDesc()) - << "primitive desc of out grad should equal"; +void MKLDNNConvLayer::resetInGrad( + std::shared_ptr& dataPD, + MKLDNNMatrixPtr& in) { + if (dataPD == nullptr) { + return; + } - // create mkldnn matrix inGrad_ and reorder if necessary // TODO(TJ): use outputMaps_ ways to get the inGrad_ when merge outgrad done - in = MKLDNNMatrix::create(inGrad, bwdDataPD.diff_src_primitive_desc()); + in = MKLDNNMatrix::create(inputLayers_[0]->getOutput().grad, + dataPD->diff_src_primitive_desc()); + CHECK(nullptr != inVal_ && + in->getPrimitiveDesc() == inVal_->getPrimitiveDesc()) + << "primitive desc of input grad and value should be equal"; + + // create reorder if has output grad does not match + cpuInGrad_ = nullptr; cvtInGrad_ = nullptr; if (!inputIsOnlyMKLDNN()) { const MatrixPtr& cpuIn = getInputGrad(0, CPU_DEVICE); @@ -360,43 +516,28 @@ void MKLDNNConvLayer::resetBwd(std::vector& pipeline, in = cpuInGrad_; } } +} - // create new weight value for backward data, and reorder if necessary +void MKLDNNConvLayer::resetWgtValBwdData( + std::shared_ptr& dataPD, + MKLDNNMatrixPtr& wgt) { + if (dataPD == nullptr) { + return; + } + + // create new weight value for backward data, and create reorder if necessary // since the primitive_desc would be different with wgtVal_ - if (bwdDataPD.weights_primitive_desc() != wgtVal_->getPrimitiveDesc()) { + CHECK(wgtVal_) << "should have weight value"; + if (dataPD->weights_primitive_desc() != wgtVal_->getPrimitiveDesc()) { wgtValBwdData_ = - MKLDNNMatrix::create(nullptr, bwdDataPD.weights_primitive_desc()); + MKLDNNMatrix::create(nullptr, dataPD->weights_primitive_desc()); cvtWgtVal_ = MKLDNNMatrix::createReorder(wgtVal_, wgtValBwdData_); CHECK(cvtWgtVal_); - pipeline.push_back(*cvtWgtVal_); } else { wgtValBwdData_ = wgtVal_; } - VLOG(MKLDNN_FMTS) << "Backward data, weight value format: " + VLOG(MKLDNN_FMTS) << "weight value format for backward data" << wgtValBwdData_->getFormat(); - - // add bwdData handle - CHECK(wgtValBwdData_) << "Should have weight memory"; - bwdData_.reset(new conv_bwdData(bwdDataPD, *out, *wgtValBwdData_, *in)); - pipeline.push_back(*bwdData_); - - // add ingrad reorder after bwdData - if (cvtInGrad_) { - pipeline.push_back(*cvtInGrad_); - } - - printGradFormatFlow(); -} - -void MKLDNNConvLayer::updateInputData() { - cpuInVal_->setData(getInputValue(0, CPU_DEVICE)->getData()); -} - -void MKLDNNConvLayer::updateWeights(const UpdateCallback& callback) { - weight_->getParameterPtr()->incUpdate(callback); - if (biases_ && biases_->getWGrad()) { - biases_->getParameterPtr()->incUpdate(callback); - } } } // namespace paddle diff --git a/paddle/gserver/layers/MKLDNNConvLayer.h b/paddle/gserver/layers/MKLDNNConvLayer.h index d1a78ac1c0..f84f2f737c 100644 --- a/paddle/gserver/layers/MKLDNNConvLayer.h +++ b/paddle/gserver/layers/MKLDNNConvLayer.h @@ -18,6 +18,9 @@ limitations under the License. */ #include "mkldnn.hpp" namespace paddle { +typedef mkldnn::convolution_forward conv_fwd; +typedef mkldnn::convolution_backward_weights conv_bwdWgt; +typedef mkldnn::convolution_backward_data conv_bwdData; /** * @brief A subclass of MKLDNNLayer conv layer. @@ -43,7 +46,7 @@ protected: std::shared_ptr cvtWgtVal_; // save forward primitive_desc, which can be used backward - std::shared_ptr fwdPD_; + std::shared_ptr fwdPD_; // MKLDNNMatrixPtr which should be created from CPU Device MKLDNNMatrixPtr cpuInVal_; @@ -99,7 +102,6 @@ public: void convertWeightsToPaddle() override; -protected: void printSizeInfo() override { MKLDNNLayer::printSizeInfo(); VLOG(MKLDNN_SIZES) << getName() << ": fh: " << fh_ << ", fw: " << fw_ @@ -116,6 +118,7 @@ protected: VLOG(MKLDNN_FMTS) << " >>> " << cpuOutVal_->getFormat(); } } + void printGradFormatFlow() override { if (cpuInGrad_) { VLOG(MKLDNN_FMTS) << cpuInGrad_->getFormat() << " <<<"; @@ -126,6 +129,107 @@ protected: } } +protected: + /** + * load the dims settings of this conv + */ + void loadConvSettings(mkldnn::memory::dims& wgt, + mkldnn::memory::dims& bias, + mkldnn::memory::dims& stride, + mkldnn::memory::dims& dilation, + mkldnn::memory::dims& padL, + mkldnn::memory::dims& padR); + + /** + * reset the forward primitive descriptor. + */ + void resetFwdPD(std::shared_ptr& pd); + /** + * reset the MKLDNNMatrix buffers used in forward. + */ + void resetFwdBuffers(std::shared_ptr& pd, + MKLDNNMatrixPtr& in, + MKLDNNMatrixPtr& wgt, + MKLDNNMatrixPtr& bias, + MKLDNNMatrixPtr& out); + /** + * reset the forward pipeline. + */ + void resetFwdPipeline(std::vector& pipeline, + std::shared_ptr& pd, + MKLDNNMatrixPtr& in, + MKLDNNMatrixPtr& wgt, + MKLDNNMatrixPtr& bias, + MKLDNNMatrixPtr& out); + + /** + * reset MKLDNNMatrix of input value + */ + void resetInValue(std::shared_ptr& pd, + MKLDNNMatrixPtr& in); + /** + * reset MKLDNNMatrix of weight and bias value + */ + void resetWgtBiasValue(std::shared_ptr& pd, + MKLDNNMatrixPtr& wgt, + MKLDNNMatrixPtr& bias); + /** + * reset MKLDNNMatrix of output value + */ + void resetOutValue(std::shared_ptr& pd, + MKLDNNMatrixPtr& out); + + /** + * reset the backward weight primitive descriptor. + */ + void resetBwdWgtPD(std::shared_ptr& pd); + /** + * reset the backward data primitive descriptor. + */ + void resetBwdDataPD(std::shared_ptr& pd); + /** + * reset the MKLDNNMatrix buffers used in backward. + */ + void resetBwdBuffers(std::shared_ptr& wgtPD, + std::shared_ptr& dataPD, + MKLDNNMatrixPtr& in, + MKLDNNMatrixPtr& wgt, + MKLDNNMatrixPtr& bias, + MKLDNNMatrixPtr& out); + /** + * reset the backward pipeline. + */ + void resetBwdPipeline(std::vector& pipeline, + std::shared_ptr& wgtPD, + std::shared_ptr& dataPD, + MKLDNNMatrixPtr& in, + MKLDNNMatrixPtr& wgt, + MKLDNNMatrixPtr& bias, + MKLDNNMatrixPtr& out); + + /** + * reset MKLDNNMatrix of output grad + */ + void resetOutGrad(std::shared_ptr& wgtPD, + MKLDNNMatrixPtr& out); + /** + * reset MKLDNNMatrix of weight and bias grad + */ + void resetWgtBiasGrad(std::shared_ptr& wgtPD, + MKLDNNMatrixPtr& wgt, + MKLDNNMatrixPtr& bias); + /** + * reset MKLDNNMatrix of input grad + */ + void resetInGrad(std::shared_ptr& dataPD, + MKLDNNMatrixPtr& in); + /** + * reset MKLDNNMatrix of weight value for backward data + * since the primitive_desc would be different with wgtVal_ + */ + void resetWgtValBwdData(std::shared_ptr& dataPD, + MKLDNNMatrixPtr& wgt); + /** * get padding_r according to * https://github.com/01org/mkl-dnn/blob/master/tests/gtests/ From f29920639690adfe687a8f52e4113d68c2ab216c Mon Sep 17 00:00:00 2001 From: dangqingqing Date: Wed, 13 Sep 2017 22:58:45 +0800 Subject: [PATCH 072/115] Using LoDTensor instead of Tensor in every operator. --- paddle/framework/lod_tensor.h | 2 +- paddle/framework/operator.cc | 48 +++++++ paddle/framework/operator.h | 38 ++++-- paddle/framework/tensor_impl.h | 4 - paddle/operators/add_op.cc | 3 +- paddle/operators/concat_op.cc | 2 +- paddle/operators/cos_sim_op.cc | 12 +- paddle/operators/cross_entropy_op.cc | 4 +- paddle/operators/fill_zeros_like_op.cc | 2 +- paddle/operators/gather_op.cc | 4 +- paddle/operators/gaussian_random_op.cc | 2 +- paddle/operators/lookup_table_op.cc | 5 +- paddle/operators/mean_op.cc | 4 +- paddle/operators/minus_op.cc | 2 +- paddle/operators/mul_op.cc | 10 +- paddle/operators/recurrent_op.cc | 18 +-- paddle/operators/reshape_op.cc | 4 +- paddle/operators/rowwise_add_op.cc | 6 +- paddle/operators/scale_op.cc | 2 +- paddle/operators/scatter_op.cc | 9 +- paddle/operators/sequence_avg_pool_op.cc | 90 ++++++++++++ paddle/operators/sequence_avg_pool_op.cu | 25 ++++ paddle/operators/sequence_avg_pool_op.h | 81 +++++++++++ paddle/operators/sgd_op.cc | 9 +- paddle/operators/sigmoid_op.cc | 5 +- paddle/operators/softmax_op.cc | 5 +- paddle/operators/squared_l2_distance_op.cc | 10 +- paddle/operators/sum_op.cc | 5 +- paddle/operators/top_k_op.cc | 4 +- paddle/operators/uniform_random_op.cc | 2 +- paddle/pybind/pybind.cc | 9 +- paddle/pybind/tensor_py.h | 1 - .../paddle/v2/framework/tests/test_tensor.py | 128 ++++++++---------- 33 files changed, 409 insertions(+), 146 deletions(-) create mode 100644 paddle/operators/sequence_avg_pool_op.cc create mode 100644 paddle/operators/sequence_avg_pool_op.cu create mode 100644 paddle/operators/sequence_avg_pool_op.h diff --git a/paddle/framework/lod_tensor.h b/paddle/framework/lod_tensor.h index f2b77ca345..fac5cd20aa 100644 --- a/paddle/framework/lod_tensor.h +++ b/paddle/framework/lod_tensor.h @@ -59,7 +59,7 @@ class LoDTensor : public Tensor { void set_lod(const LoD& lod) { lod_ = lod; } - LoD lod() { return lod_; } + LoD lod() const { return lod_; } /* * Get a element from LoD. diff --git a/paddle/framework/operator.cc b/paddle/framework/operator.cc index e1e122091f..25faeff0d1 100644 --- a/paddle/framework/operator.cc +++ b/paddle/framework/operator.cc @@ -186,6 +186,54 @@ void OperatorBase::GenerateTemporaryNames() { } } +template <> +const Tensor* InferShapeContext::Input(const std::string& name) const { + auto* var = InputVar(name); + if (var == nullptr) return nullptr; + if (var->IsType()) { + return &var->Get(); + } + PADDLE_ENFORCE(var->IsType(), + "The Input(%s) must be LoDTensor or Tensor."); + return &var->Get(); +} + +template <> +const std::vector InferShapeContext::MultiInput( + const std::string& name) const { + auto names = op().Inputs(name); + std::vector res; + res.reserve(names.size()); + std::transform( + names.begin(), names.end(), std::back_inserter(res), + [&](const std::string& sub_name) { return Input(sub_name); }); + return res; +} + +template <> +Tensor* ExecutionContext::Output(const std::string& name) const { + auto* var = OutputVar(name); + if (var == nullptr) return nullptr; + if (var->IsType()) { + return const_cast(&var->Get()); + } + PADDLE_ENFORCE(var->IsType(), + "The Input(%s) must be LoDTensor or Tensor."); + return const_cast(&var->Get()); +} + +template <> +std::vector ExecutionContext::MultiOutput( + const std::string& name) const { + auto names = op().Outputs(name); + std::vector res; + res.reserve(names.size()); + std::transform( + names.begin(), names.end(), std::back_inserter(res), + [&](const std::string& sub_name) { return Output(sub_name); }); + return res; +} + void OpProtoAndCheckerMaker::Validate() { validated_ = true; CheckNoDuplicatedInOutAttrs(); diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h index 4600b06009..b2d7908408 100644 --- a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -22,6 +22,7 @@ limitations under the License. */ #include "op_info.h" #include "paddle/framework/attribute.h" #include "paddle/framework/framework.pb.h" +#include "paddle/framework/lod_tensor.h" #include "paddle/framework/scope.h" #include "paddle/framework/tensor.h" #include "paddle/platform/device_context.h" @@ -305,11 +306,9 @@ class InferShapeContext { auto names = op_.Inputs(name); std::vector res; res.reserve(names.size()); - std::transform(names.begin(), names.end(), std::back_inserter(res), - [&](const std::string& sub_name) { - auto var = scope_.FindVar(sub_name); - return var == nullptr ? nullptr : &var->Get(); - }); + std::transform( + names.begin(), names.end(), std::back_inserter(res), + [&](const std::string& sub_name) { return Input(sub_name); }); return res; } @@ -318,11 +317,9 @@ class InferShapeContext { auto names = op_.Outputs(name); std::vector res; res.reserve(names.size()); - std::transform(names.begin(), names.end(), std::back_inserter(res), - [&](const std::string& sub_name) { - auto var = scope_.FindVar(sub_name); - return var == nullptr ? nullptr : var->GetMutable(); - }); + std::transform( + names.begin(), names.end(), std::back_inserter(res), + [&](const std::string& sub_name) { return Output(sub_name); }); return res; } @@ -363,6 +360,27 @@ class ExecutionContext : public InferShapeContext { return device_context_; } + // redefine Output function, + // use Variable::Get instead of Variable::GetMutable + template + T* Output(const std::string& name) const { + auto var = OutputVar(name); + return var == nullptr ? nullptr : const_cast(&var->Get()); + } + + // redefine MultiOutput function. + // use Variable::Get instead of Variable::GetMutable + template + std::vector MultiOutput(const std::string& name) const { + auto names = op().Outputs(name); + std::vector res; + res.reserve(names.size()); + std::transform( + names.begin(), names.end(), std::back_inserter(res), + [&](const std::string& sub_name) { return Output(sub_name); }); + return res; + } + const platform::DeviceContext* device_context_; }; diff --git a/paddle/framework/tensor_impl.h b/paddle/framework/tensor_impl.h index cc4d908834..642b53efc7 100644 --- a/paddle/framework/tensor_impl.h +++ b/paddle/framework/tensor_impl.h @@ -16,8 +16,6 @@ limitations under the License. */ #include "paddle/memory/memcpy.h" #include "paddle/platform/enforce.h" -#include - namespace paddle { namespace framework { @@ -55,7 +53,6 @@ inline T* Tensor::mutable_data(DDim dims, platform::Place place) { template inline T* Tensor::mutable_data(platform::Place place) { - LOG(INFO) << "------ mutable_data ---- "; static_assert(std::is_pod::value, "T must be POD"); PADDLE_ENFORCE_GT(numel(), 0, "Tensor's numel must be larger than zero to call " @@ -145,7 +142,6 @@ inline Tensor Tensor::Slice(const int& begin_idx, const int& end_idx) const { } inline Tensor& Tensor::Resize(const DDim& dims) { - LOG(INFO) << "---- resize -----"; dims_ = dims; numel_ = product(dims_); return *this; diff --git a/paddle/operators/add_op.cc b/paddle/operators/add_op.cc index 8dbd47cf0d..b43c09d4f0 100644 --- a/paddle/operators/add_op.cc +++ b/paddle/operators/add_op.cc @@ -26,7 +26,8 @@ class AddOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_EQ(ctx.Input("X")->dims(), ctx.Input("Y")->dims(), "Two input of Add Op's dimension must be same."); - ctx.Output("Out")->Resize(ctx.Input("X")->dims()); + ctx.Output("Out")->Resize( + ctx.Input("X")->dims()); } }; diff --git a/paddle/operators/concat_op.cc b/paddle/operators/concat_op.cc index 0ebefbab26..72fd179354 100644 --- a/paddle/operators/concat_op.cc +++ b/paddle/operators/concat_op.cc @@ -26,7 +26,7 @@ class ConcatOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { auto ins = ctx.MultiInput("X"); - auto *out = ctx.Output("Out"); + auto *out = ctx.Output("Out"); size_t axis = static_cast(ctx.Attr("axis")); size_t n = ins.size(); diff --git a/paddle/operators/cos_sim_op.cc b/paddle/operators/cos_sim_op.cc index c033af3b74..7b856c9776 100644 --- a/paddle/operators/cos_sim_op.cc +++ b/paddle/operators/cos_sim_op.cc @@ -32,9 +32,9 @@ class CosSimOp : public framework::OperatorWithKernel { "Dimensions of Input(X) and Input(Y) must be the same."); auto dims = ctx.Input("X")->dims(); - ctx.Output("Out")->Resize({dims[0], 1}); - ctx.Output("XNorm")->Resize({dims[0], 1}); - ctx.Output("YNorm")->Resize({dims[0], 1}); + ctx.Output("Out")->Resize({dims[0], 1}); + ctx.Output("XNorm")->Resize({dims[0], 1}); + ctx.Output("YNorm")->Resize({dims[0], 1}); } }; @@ -88,8 +88,10 @@ class CosSimOpGrad : public framework::OperatorWithKernel { "1st dimension of Out@GRAD must equal that of Input(X)"); PADDLE_ENFORCE_EQ(out_dims[1], 1, "1st dimension of Out@GRAD must be one."); - auto *x_grad = ctx.Output(framework::GradVarName("X")); - auto *y_grad = ctx.Output(framework::GradVarName("Y")); + auto *x_grad = + ctx.Output(framework::GradVarName("X")); + auto *y_grad = + ctx.Output(framework::GradVarName("Y")); if (x_grad) x_grad->Resize(x_dims); if (y_grad) y_grad->Resize(y_dims); } diff --git a/paddle/operators/cross_entropy_op.cc b/paddle/operators/cross_entropy_op.cc index ab1e1c101a..10ba3ca5ca 100644 --- a/paddle/operators/cross_entropy_op.cc +++ b/paddle/operators/cross_entropy_op.cc @@ -29,7 +29,7 @@ class OnehotCrossEntropyOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_EQ(X->dims().size(), 2, "X's dimension must be 2."); PADDLE_ENFORCE_EQ(label->dims().size(), 1, "label's dimension must be 1."); PADDLE_ENFORCE_EQ(X->dims()[0], label->dims()[0]); - ctx.Output("Y")->Resize({X->dims()[0]}); + ctx.Output("Y")->Resize({X->dims()[0]}); } }; @@ -39,7 +39,7 @@ class OnehotCrossEntropyGradientOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { - auto dX = ctx.Output(framework::GradVarName("X")); + auto dX = ctx.Output(framework::GradVarName("X")); auto X = ctx.Input("X"); dX->Resize(X->dims()); diff --git a/paddle/operators/fill_zeros_like_op.cc b/paddle/operators/fill_zeros_like_op.cc index 9d51f6e3a1..0c9734892a 100644 --- a/paddle/operators/fill_zeros_like_op.cc +++ b/paddle/operators/fill_zeros_like_op.cc @@ -23,7 +23,7 @@ class FillZerosLikeOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { - ctx.Output("Dst")->Resize( + ctx.Output("Dst")->Resize( ctx.Input("Src")->dims()); } }; diff --git a/paddle/operators/gather_op.cc b/paddle/operators/gather_op.cc index 123bed296c..8883d6d5fe 100644 --- a/paddle/operators/gather_op.cc +++ b/paddle/operators/gather_op.cc @@ -28,7 +28,7 @@ class GatherOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_GE(batch_size, 0, "Batch size must be >0"); framework::DDim output_dims(ctx.Input("X")->dims()); output_dims[0] = batch_size; - ctx.Output("Out")->Resize(output_dims); + ctx.Output("Out")->Resize(output_dims); } }; @@ -38,7 +38,7 @@ class GatherGradOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { - auto X_grad = ctx.Output(framework::GradVarName("X")); + auto X_grad = ctx.Output(framework::GradVarName("X")); auto X = ctx.Input("X"); X_grad->Resize(X->dims()); diff --git a/paddle/operators/gaussian_random_op.cc b/paddle/operators/gaussian_random_op.cc index 3d76516405..25b0776a37 100644 --- a/paddle/operators/gaussian_random_op.cc +++ b/paddle/operators/gaussian_random_op.cc @@ -44,7 +44,7 @@ class GaussianRandomOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext& context) const override { - auto* tensor = context.Output("Out"); + auto* tensor = context.Output("Out"); auto dims = Attr>("dims"); std::vector temp; temp.reserve(dims.size()); diff --git a/paddle/operators/lookup_table_op.cc b/paddle/operators/lookup_table_op.cc index 94d40890a7..b3d15f1ec9 100644 --- a/paddle/operators/lookup_table_op.cc +++ b/paddle/operators/lookup_table_op.cc @@ -25,7 +25,7 @@ class LookupTableOp : public framework::OperatorWithKernel { void InferShape(const framework::InferShapeContext &context) const override { auto table_t = context.Input("W"); auto ids_t = context.Input("Ids"); - auto output_t = context.Output("Out"); + auto output_t = context.Output("Out"); output_t->Resize({ids_t->dims()[0], table_t->dims()[1]}); } @@ -56,7 +56,8 @@ class LookupTableOpGrad : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &context) const override { auto table = context.Input("W"); - auto d_table = context.Output(framework::GradVarName("W")); + auto d_table = + context.Output(framework::GradVarName("W")); d_table->Resize(table->dims()); } }; diff --git a/paddle/operators/mean_op.cc b/paddle/operators/mean_op.cc index d3d0e55a67..3e523d31b6 100644 --- a/paddle/operators/mean_op.cc +++ b/paddle/operators/mean_op.cc @@ -25,7 +25,7 @@ class MeanOp : public framework::OperatorWithKernel { void InferShape(const framework::InferShapeContext &ctx) const override { PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input of MeanOp must be initialized."); - ctx.Output("Out")->Resize({1}); + ctx.Output("Out")->Resize({1}); } }; @@ -45,7 +45,7 @@ class MeanGradOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { - ctx.Output(framework::GradVarName("X")) + ctx.Output(framework::GradVarName("X")) ->Resize(ctx.Input("X")->dims()); } }; diff --git a/paddle/operators/minus_op.cc b/paddle/operators/minus_op.cc index a4876feb2e..8a583f24ed 100644 --- a/paddle/operators/minus_op.cc +++ b/paddle/operators/minus_op.cc @@ -33,7 +33,7 @@ class MinusOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_EQ( left_tensor->numel(), right_tensor->numel(), "Minus operator must take two tensor with same num of elements"); - ctx.Output("Out")->Resize(left_tensor->dims()); + ctx.Output("Out")->Resize(left_tensor->dims()); } }; diff --git a/paddle/operators/mul_op.cc b/paddle/operators/mul_op.cc index 710a56a0e8..015e13de9a 100644 --- a/paddle/operators/mul_op.cc +++ b/paddle/operators/mul_op.cc @@ -18,6 +18,7 @@ namespace paddle { namespace operators { using framework::Tensor; +using framework::LoDTensor; class MulOp : public framework::OperatorWithKernel { public: @@ -45,7 +46,8 @@ class MulOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_EQ( x_mat_dims[1], y_mat_dims[0], "First matrix's width must be equal with second matrix's height."); - ctx.Output("Out")->Resize({x_mat_dims[0], y_mat_dims[1]}); + ctx.Output("Out")->Resize( + {x_mat_dims[0], y_mat_dims[1]}); } }; @@ -94,8 +96,10 @@ class MulOpGrad : public framework::OperatorWithKernel { auto x_dims = ctx.Input("X")->dims(); auto y_dims = ctx.Input("Y")->dims(); auto out_dims = ctx.Input(framework::GradVarName("Out"))->dims(); - auto *x_grad = ctx.Output(framework::GradVarName("X")); - auto *y_grad = ctx.Output(framework::GradVarName("Y")); + auto *x_grad = + ctx.Output(framework::GradVarName("X")); + auto *y_grad = + ctx.Output(framework::GradVarName("Y")); auto x_mat_dims = framework::flatten_to_2d(x_dims, Attr("x_num_col_dims")); diff --git a/paddle/operators/recurrent_op.cc b/paddle/operators/recurrent_op.cc index e826703c60..d3413d7cb9 100644 --- a/paddle/operators/recurrent_op.cc +++ b/paddle/operators/recurrent_op.cc @@ -26,10 +26,11 @@ namespace operators { using Scope = framework::Scope; using Variable = framework::Variable; using Tensor = framework::Tensor; +using LoDTensor = framework::LoDTensor; void RecurrentAlgorithm::InferShape(const Scope& scope) const { seq_len_ = scope.FindVar((arg_->inlinks[0]).external) - ->GetMutable() + ->GetMutable() ->dims()[0]; CreateScopes(scope); auto step_scopes = GetStepScopes(scope); @@ -88,7 +89,7 @@ void RecurrentAlgorithm::CreateScopes(const Scope& scope) const { // the weight are located in parent scope for (auto& var_name : input.second) { if (!step_scope.FindVar(var_name)) { - step_scope.NewVar(var_name)->GetMutable(); + step_scope.NewVar(var_name)->GetMutable(); } } } @@ -106,11 +107,12 @@ void RecurrentAlgorithm::CreateScopes(const Scope& scope) const { void RecurrentAlgorithm::InitMemories(Scope* step_scope, bool infer_shape_mode) const { for (auto& attr : arg_->memories) { - Tensor* pre_mem = step_scope->NewVar(attr.pre_var)->GetMutable(); + auto* pre_mem = step_scope->NewVar(attr.pre_var)->GetMutable(); PADDLE_ENFORCE(step_scope->FindVar(attr.boot_var) != nullptr, "memory [%s]'s boot variable [%s] not exists", attr.var, attr.boot_var); - Tensor* boot_mem = step_scope->FindVar(attr.boot_var)->GetMutable(); + auto* boot_mem = + step_scope->FindVar(attr.boot_var)->GetMutable(); if (infer_shape_mode) { pre_mem->Resize(boot_mem->dims()); PADDLE_ENFORCE_EQ(pre_mem->dims().size(), 2); @@ -192,9 +194,9 @@ void RecurrentGradientAlgorithm::LinkBootMemoryGradients( "memory variable [%s] does not exists", attr.var); PADDLE_ENFORCE(step_scope->FindVar(attr.boot_var) != nullptr, "boot variable [%s] does not exists", attr.boot_var); - Tensor* mem_grad = step_scope->NewVar(attr.var)->GetMutable(); - Tensor* boot_mem_grad = - step_scope->NewVar(attr.boot_var)->GetMutable(); + auto* mem_grad = step_scope->NewVar(attr.var)->GetMutable(); + auto* boot_mem_grad = + step_scope->NewVar(attr.boot_var)->GetMutable(); if (infer_shape_mode) { boot_mem_grad->Resize(mem_grad->dims()); } else { @@ -205,7 +207,7 @@ void RecurrentGradientAlgorithm::LinkBootMemoryGradients( void RecurrentGradientAlgorithm::InferShape(const Scope& scope) const { seq_len_ = scope.FindVar((arg_->inlinks[0]).external) - ->GetMutable() + ->GetMutable() ->dims()[0]; auto step_scopes = GetStepScopes(scope); rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_, diff --git a/paddle/operators/reshape_op.cc b/paddle/operators/reshape_op.cc index b7061153d2..d281702092 100644 --- a/paddle/operators/reshape_op.cc +++ b/paddle/operators/reshape_op.cc @@ -46,7 +46,7 @@ class ReshapeOp : public framework::OperatorWithKernel { std::transform(shape.begin(), shape.end(), shape_int64.begin(), [](int a) { return static_cast(a); }); auto out_dims = framework::make_ddim(shape_int64); - ctx.Output("Out")->Resize(out_dims); + ctx.Output("Out")->Resize(out_dims); } }; @@ -90,7 +90,7 @@ class ReshapeGradOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")), "Input(Out@GRAD) shouldn't be null."); auto dims = ctx.Input("X")->dims(); - auto *d_in = ctx.Output(framework::GradVarName("X")); + auto *d_in = ctx.Output(framework::GradVarName("X")); d_in->Resize(dims); } }; diff --git a/paddle/operators/rowwise_add_op.cc b/paddle/operators/rowwise_add_op.cc index fa8f0ff1a8..c6101685a3 100644 --- a/paddle/operators/rowwise_add_op.cc +++ b/paddle/operators/rowwise_add_op.cc @@ -37,7 +37,7 @@ class RowwiseAddOp : public framework::OperatorWithKernel { framework::slice_ddim(x_dims, num_col_dims, x_dims.size()), b_dims, "The width of two operands must be same"); PADDLE_ENFORCE_EQ(ctx.OutputSize("Out"), 1, "The output size must be 1"); - ctx.Output("Out")->Resize(x_dims); + ctx.Output("Out")->Resize(x_dims); } }; @@ -76,8 +76,8 @@ class RowwiseAddGradOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_EQ( framework::slice_ddim(x_dims, num_col_dims, x_dims.size()), b_dims, "The width of two operands must be same"); - auto *dx = ctx.Output(framework::GradVarName("X")); - auto *db = ctx.Output(framework::GradVarName("b")); + auto *dx = ctx.Output(framework::GradVarName("X")); + auto *db = ctx.Output(framework::GradVarName("b")); if (dx) dx->Resize(x_dims); if (db) db->Resize(b_dims); } diff --git a/paddle/operators/scale_op.cc b/paddle/operators/scale_op.cc index ea991f683d..35e6b70ba9 100644 --- a/paddle/operators/scale_op.cc +++ b/paddle/operators/scale_op.cc @@ -28,7 +28,7 @@ class ScaleOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { auto *in = ctx.Input("X"); - auto *out = ctx.Output("Out"); + auto *out = ctx.Output("Out"); out->Resize(in->dims()); } }; diff --git a/paddle/operators/scatter_op.cc b/paddle/operators/scatter_op.cc index f901edefa2..0f7510983e 100644 --- a/paddle/operators/scatter_op.cc +++ b/paddle/operators/scatter_op.cc @@ -35,7 +35,8 @@ class ScatterOp : public framework::OperatorWithKernel { framework::DDim data_dim(ctx.Input("Updates")->dims()); for (int i = 1; i < data_dim.size(); ++i) PADDLE_ENFORCE_EQ(data_dim[i], ctx.Input("Updates")->dims()[i]); - ctx.Output("Out")->Resize(ctx.Input("Ref")->dims()); + ctx.Output("Out")->Resize( + ctx.Input("Ref")->dims()); } }; @@ -45,9 +46,11 @@ class ScatterGradOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { - auto *dUpdates = ctx.Output(framework::GradVarName("Updates")); + auto *dUpdates = + ctx.Output(framework::GradVarName("Updates")); auto *Updates = ctx.Input("Updates"); - auto *dRef = ctx.Output(framework::GradVarName("Ref")); + auto *dRef = + ctx.Output(framework::GradVarName("Ref")); auto *Ref = ctx.Input("Ref"); dRef->Resize(Ref->dims()); diff --git a/paddle/operators/sequence_avg_pool_op.cc b/paddle/operators/sequence_avg_pool_op.cc new file mode 100644 index 0000000000..59a361761a --- /dev/null +++ b/paddle/operators/sequence_avg_pool_op.cc @@ -0,0 +1,90 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/sequence_avg_pool_op.h" + +namespace paddle { +namespace operators { + +class SequenceAvgPoolOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(const framework::InferShapeContext& ctx) const override { + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), + "Input of SequenceAvgPoolOp" + "must be initialized."); + auto* x = ctx.Input("X"); + auto dims = x->dims(); + auto lod = x->lod(); + PADDLE_ENFORCE_EQ(lod.size(), 1UL, "Only support one level sequence now."); + PADDLE_ENFORCE_GE( + dims[0], + /*batch size = */ static_cast(lod[0].size() - 1), + "The first dimension of Input(X) must be large than batch size."); + dims[0] = lod[0].size() - 1; + ctx.Output("Out")->Resize({dims}); + } +}; + +class SequenceAvgPoolOpMaker : public framework::OpProtoAndCheckerMaker { + public: + SequenceAvgPoolOpMaker(framework::OpProto* proto, + framework::OpAttrChecker* op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "Input of SequenceAvgPoolOp."); + AddOutput("Out", "The output of SequenceAvgPoolOp."); + AddComment(R"DOC( + SequenceAvgPoolOp averages features of all time-steps of each instance. + More detailed comments will be added later. + )DOC"); + } +}; + +class SequenceAvgPoolGradOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(const framework::InferShapeContext& ctx) const override { + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")), + "Gradient of Out should not be null"); + auto og_dims = + ctx.Input(framework::GradVarName("Out"))->dims(); + auto x_dims = ctx.Input("X")->dims(); + PADDLE_ENFORCE_EQ(og_dims.size(), x_dims.size(), + "The rank of output grad must equal to Input(X)."); + for (size_t i = 1; i < og_dims.size(); ++i) { + PADDLE_ENFORCE_EQ(og_dims[i], x_dims[i], "The dimension mismatch."); + } + auto* x_grad = + ctx.Output(framework::GradVarName("X")); + x_grad->Resize(x_dims); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP(sequence_avg_pool, ops::SequenceAvgPoolOp, + ops::SequenceAvgPoolOpMaker, sequence_avg_pool_grad, + ops::SequenceAvgPoolGradOp); +REGISTER_OP_CPU_KERNEL( + sequence_avg_pool, + ops::SequenceAvgPoolKernel); +REGISTER_OP_CPU_KERNEL( + sequence_avg_pool_grad, + ops::SequenceAvgPoolGradKernel); diff --git a/paddle/operators/sequence_avg_pool_op.cu b/paddle/operators/sequence_avg_pool_op.cu new file mode 100644 index 0000000000..bc9d1611fc --- /dev/null +++ b/paddle/operators/sequence_avg_pool_op.cu @@ -0,0 +1,25 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#define EIGEN_USE_GPU + +#include "paddle/operators/sequence_avg_pool_op.h" + +namespace ops = paddle::operators; +REGISTER_OP_GPU_KERNEL( + sequence_avg_pool, + ops::SequenceAvgPoolKernel); +REGISTER_OP_GPU_KERNEL( + sequence_avg_pool_grad, + ops::SequenceAvgPoolGradKernel); diff --git a/paddle/operators/sequence_avg_pool_op.h b/paddle/operators/sequence_avg_pool_op.h new file mode 100644 index 0000000000..ba68b5e4b9 --- /dev/null +++ b/paddle/operators/sequence_avg_pool_op.h @@ -0,0 +1,81 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once +#include "paddle/framework/eigen.h" +#include "paddle/framework/op_registry.h" + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; +using LoDTensor = framework::LoDTensor; +template +using EigenMatrix = framework::EigenMatrix; + +template +class SequenceAvgPoolKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + auto* in = context.Input("X"); + auto* out = context.Output("Out"); + + auto dims = in->dims(); + auto lod = in->lod(); + int64_t w = in->numel() / dims[0]; + + out->mutable_data(context.GetPlace()); + auto place = context.GetEigenDevice(); + for (int i = 0; i < lod[0].size() - 1; ++i) { + Tensor in_t = in->Slice(static_cast(lod[0][i]), + static_cast(lod[0][i + 1])); + Tensor out_t = out->Slice(i, i + 1); + int64_t h = static_cast(lod[0][i + 1] - lod[0][i]); + auto in_e = EigenMatrix::From(in_t, {h, w}); + auto out_e = EigenMatrix::From(out_t, {h, w}); + out_e.device(place) = in_e.mean(Eigen::array({{0}})); + } + } +}; + +template +class SequenceAvgPoolGradKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + auto* in = context.Output("X"); + auto* in_g = context.Output(framework::GradVarName("X")); + auto* out_g = context.Input(framework::GradVarName("Out")); + + auto dims = in->dims(); + auto lod = in->lod(); + int64_t w = in->numel() / dims[0]; + + in_g->mutable_data(context.GetPlace()); + auto place = context.GetEigenDevice(); + for (int i = 0; i < lod[0].size() - 1; ++i) { + auto in_g_t = in_g->Slice(static_cast(lod[0][i]), + static_cast(lod[0][i + 1])); + auto out_g_t = out_g->Slice(i, i + 1); + int64_t h = static_cast(lod[0][i + 1] - lod[0][i]); + auto in_g_e = EigenMatrix::From(in_g_t, {h, w}); + auto out_g_e = EigenMatrix::From(out_g_t, {1, w}); + Eigen::DSizes bcast(h, w); + in_g_e.device(place) = (out_g_e / static_cast(h)).broadcast(bcast); + } + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/sgd_op.cc b/paddle/operators/sgd_op.cc index ad267e7f08..7997bf6907 100644 --- a/paddle/operators/sgd_op.cc +++ b/paddle/operators/sgd_op.cc @@ -23,10 +23,11 @@ class SGDOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { - PADDLE_ENFORCE( - ctx.Input("param")->dims() == ctx.Input("grad")->dims(), - "Two input of SGD Op's dimension must be same."); - ctx.Output("param_out")->Resize(ctx.Input("param")->dims()); + PADDLE_ENFORCE_EQ(ctx.Input("param")->dims(), + ctx.Input("grad")->dims(), + "Two input of SGD Op's dimension must be same."); + ctx.Output("param_out") + ->Resize(ctx.Input("param")->dims()); } }; diff --git a/paddle/operators/sigmoid_op.cc b/paddle/operators/sigmoid_op.cc index 761c6de8d4..de6a1ba773 100644 --- a/paddle/operators/sigmoid_op.cc +++ b/paddle/operators/sigmoid_op.cc @@ -23,7 +23,8 @@ class SigmoidOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { - ctx.Output("Y")->Resize(ctx.Input("X")->dims()); + ctx.Output("Y")->Resize( + ctx.Input("X")->dims()); } }; @@ -44,7 +45,7 @@ class SigmoidOpGrad : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { - ctx.Output(framework::GradVarName("X")) + ctx.Output(framework::GradVarName("X")) ->Resize(ctx.Input("Y")->dims()); } }; diff --git a/paddle/operators/softmax_op.cc b/paddle/operators/softmax_op.cc index 7166b2f60b..239d3d141e 100644 --- a/paddle/operators/softmax_op.cc +++ b/paddle/operators/softmax_op.cc @@ -25,7 +25,8 @@ class SoftmaxOp : public framework::OperatorWithKernel { void InferShape(const framework::InferShapeContext &ctx) const override { PADDLE_ENFORCE(ctx.Input("X")->dims().size() == 2UL, "The input of softmax op must be a matrix."); - ctx.Output("Y")->Resize(ctx.Input("X")->dims()); + ctx.Output("Y")->Resize( + ctx.Input("X")->dims()); } }; @@ -71,7 +72,7 @@ class SoftmaxOpGrad : public framework::OperatorWithKernel { ctx.Input(framework::GradVarName("Y"))->dims(), "Input(Y) and its gradients should have a same shape."); - ctx.Output(framework::GradVarName("X")) + ctx.Output(framework::GradVarName("X")) ->Resize(ctx.Input("X")->dims()); } }; diff --git a/paddle/operators/squared_l2_distance_op.cc b/paddle/operators/squared_l2_distance_op.cc index 9f51d3efa8..ebe5bd352e 100644 --- a/paddle/operators/squared_l2_distance_op.cc +++ b/paddle/operators/squared_l2_distance_op.cc @@ -48,9 +48,9 @@ class SquaredL2DistanceOp : public framework::OperatorWithKernel { "First dimension of target must be equal to input " "or to 1."); - ctx.Output("sub_result") + ctx.Output("sub_result") ->Resize({x_dims[0], x->numel() / x_dims[0]}); - ctx.Output("Out")->Resize({x_dims[0], 1}); + ctx.Output("Out")->Resize({x_dims[0], 1}); } }; @@ -94,8 +94,10 @@ class SquaredL2DistanceGradOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_EQ(out_dims[1], 1, "Second dimension of output gradient " "must be 1."); - auto* x_grad = ctx.Output(framework::GradVarName("X")); - auto* y_grad = ctx.Output(framework::GradVarName("Y")); + auto* x_grad = + ctx.Output(framework::GradVarName("X")); + auto* y_grad = + ctx.Output(framework::GradVarName("Y")); if (x_grad) x_grad->Resize(x_dims); if (y_grad) y_grad->Resize(y_dims); } diff --git a/paddle/operators/sum_op.cc b/paddle/operators/sum_op.cc index 5805826ee8..7170e7256c 100644 --- a/paddle/operators/sum_op.cc +++ b/paddle/operators/sum_op.cc @@ -23,7 +23,7 @@ class SumOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { auto ins = ctx.MultiInput("X"); - auto *out = ctx.Output("Out"); + auto *out = ctx.Output("Out"); int N = ins.size(); auto in_dim = ins[0]->dims(); @@ -55,7 +55,8 @@ class SumGradOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { - auto outputs = ctx.MultiOutput(framework::GradVarName("X")); + auto outputs = + ctx.MultiOutput(framework::GradVarName("X")); auto dims = ctx.Input(framework::GradVarName("Out"))->dims(); for (auto output : outputs) { output->Resize(dims); diff --git a/paddle/operators/top_k_op.cc b/paddle/operators/top_k_op.cc index 38d2f0a09a..ff0e77a344 100644 --- a/paddle/operators/top_k_op.cc +++ b/paddle/operators/top_k_op.cc @@ -35,8 +35,8 @@ class TopkOp : public framework::OperatorWithKernel { framework::DDim dims = input->dims(); dims[dims.size() - 1] = k; - ctx.Output("Out")->Resize(dims); - ctx.Output("Indices")->Resize(dims); + ctx.Output("Out")->Resize(dims); + ctx.Output("Indices")->Resize(dims); } }; diff --git a/paddle/operators/uniform_random_op.cc b/paddle/operators/uniform_random_op.cc index b8fbc9b52a..ed79736936 100644 --- a/paddle/operators/uniform_random_op.cc +++ b/paddle/operators/uniform_random_op.cc @@ -50,7 +50,7 @@ class UniformRandomOp : public framework::OperatorWithKernel { void InferShape(const framework::InferShapeContext& ctx) const override { PADDLE_ENFORCE(Attr("min") < Attr("max"), "uniform_random's min must less then max"); - auto* tensor = ctx.Output("Out"); + auto* tensor = ctx.Output("Out"); auto dims = Attr>("dims"); std::vector temp; temp.reserve(dims.size()); diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc index b5afe2f55b..e61aa3a2a5 100644 --- a/paddle/pybind/pybind.cc +++ b/paddle/pybind/pybind.cc @@ -122,6 +122,8 @@ PYBIND11_PLUGIN(core) { }); py::class_(m, "LoDTensor") + .def_buffer( + [](Tensor &self) -> py::buffer_info { return CastToPyBuffer(self); }) .def( "__init__", [](LoDTensor &instance, const std::vector> &lod) { @@ -172,10 +174,11 @@ All parameter, weight, gradient are variables in Paddle. .def("set_int", [](Variable &var, int val) -> void { *var.GetMutable() = val; }) .def("get_int", [](const Variable &var) -> int { return var.Get(); }) + // .def("get_tensor", + // [](Variable &self) -> Tensor * { return + // self.GetMutable(); }, + // py::return_value_policy::reference) .def("get_tensor", - [](Variable &self) -> Tensor * { return self.GetMutable(); }, - py::return_value_policy::reference) - .def("get_lod_tensor", [](Variable &self) -> LoDTensor * { return self.GetMutable(); }, diff --git a/paddle/pybind/tensor_py.h b/paddle/pybind/tensor_py.h index a32a0b6790..95171acf72 100644 --- a/paddle/pybind/tensor_py.h +++ b/paddle/pybind/tensor_py.h @@ -42,7 +42,6 @@ template struct CastToPyBufferImpl { using CUR_TYPE = typename std::tuple_element>::type; py::buffer_info operator()(framework::Tensor &tensor) { - LOG(INFO) << "---- CastToPyBufferImpl -----"; if (std::type_index(typeid(CUR_TYPE)) == tensor.holder_->type()) { auto dim_vec = framework::vectorize(tensor.dims()); std::vector dims_outside; diff --git a/python/paddle/v2/framework/tests/test_tensor.py b/python/paddle/v2/framework/tests/test_tensor.py index fc6abe9806..8cd93b35d7 100644 --- a/python/paddle/v2/framework/tests/test_tensor.py +++ b/python/paddle/v2/framework/tests/test_tensor.py @@ -4,7 +4,7 @@ import numpy class TestTensor(unittest.TestCase): - def not_test_int_tensor(self): + def test_int_tensor(self): scope = core.Scope() var = scope.new_var("test_tensor") place = core.CPUPlace() @@ -23,7 +23,7 @@ class TestTensor(unittest.TestCase): self.assertEqual(1, tensor_array_2[3, 9]) self.assertEqual(2, tensor_array_2[19, 11]) - def not_test_float_tensor(self): + def test_float_tensor(self): scope = core.Scope() var = scope.new_var("test_tensor") place = core.CPUPlace() @@ -44,82 +44,66 @@ class TestTensor(unittest.TestCase): self.assertAlmostEqual(2.0, tensor_array_2[19, 11]) def test_int_lod_tensor(self): - places = [core.CPUPlace(), core.GPUPlace(0)] - for place in places: - scope = core.Scope() - #var = scope.new_var("test_tensor") - var_lod = scope.new_var("test_lod_tensor") - - # tensor = var.get_tensor() - lod_tensor = var_lod.get_lod_tensor() - - lod_tensor.set_dims([4, 4, 6]) - lod_tensor.alloc_int(place) - print lod_tensor - array = numpy.array(lod_tensor) - print "---- array ----", array - array[0, 0, 0] = 3 - array[3, 3, 5] = 10 - lod_tensor.set(array, place) - - # lod_tensor.set_tensor(tensor) - lod_tensor.set_lod([[0, 2, 4]]) - - # lod_v = numpy.array(lod_tensor.tensor()) - lod_v = numpy.array(lod_tensor) - self.assertTrue(numpy.alltrue(array == lod_v)) - - lod = lod_tensor.lod() - self.assertEqual(0, lod[0][0]) - self.assertEqual(2, lod[0][1]) - self.assertEqual(4, lod[0][2]) - - def not_test_float_lod_tensor(self): - places = [core.CPUPlace(), core.GPUPlace(0)] - for place in places: - scope = core.Scope() - var = scope.new_var("test_tensor") - var_lod = scope.new_var("test_lod_tensor") - - tensor = var.get_tensor() - lod_tensor = var_lod.get_lod_tensor() - - tensor.set_dims([5, 2, 3, 4]) - tensor.alloc_float(place) - - tensor_array = numpy.array(tensor) - self.assertEqual((5, 2, 3, 4), tensor_array.shape) - tensor_array[0, 0, 0, 0] = 1.0 - tensor_array[0, 0, 0, 1] = 2.0 - tensor.set(tensor_array, place) - - lod_tensor.set_tensor(tensor) - - lod_v = numpy.array(lod_tensor.tensor()) - self.assertAlmostEqual(1.0, lod_v[0, 0, 0, 0]) - self.assertAlmostEqual(2.0, lod_v[0, 0, 0, 1]) - self.assertEqual(len(lod_tensor.lod()), 0) - - lod_py = [[0, 2, 5], [0, 2, 4, 5]] - lod_tensor.set_lod(lod_py) - lod = lod_tensor.lod() - self.assertListEqual(lod_py, lod) - - def not_test_lod_tensor_init(self): + place = core.CPUPlace() scope = core.Scope() - var = scope.new_var("test_tensor") + var_lod = scope.new_var("test_lod_tensor") + lod_tensor = var_lod.get_tensor() + + lod_tensor.set_dims([4, 4, 6]) + lod_tensor.alloc_int(place) + array = numpy.array(lod_tensor) + array[0, 0, 0] = 3 + array[3, 3, 5] = 10 + lod_tensor.set(array, place) + lod_tensor.set_lod([[0, 2, 4]]) + + lod_v = numpy.array(lod_tensor) + self.assertTrue(numpy.alltrue(array == lod_v)) + + lod = lod_tensor.lod() + self.assertEqual(0, lod[0][0]) + self.assertEqual(2, lod[0][1]) + self.assertEqual(4, lod[0][2]) + + def test_float_lod_tensor(self): place = core.CPUPlace() - tensor = var.get_tensor() - tensor.set_dims([5, 2, 3, 4]) - tensor.alloc_float(place) - tensor_array = numpy.array(tensor) + scope = core.Scope() + var_lod = scope.new_var("test_lod_tensor") + + lod_tensor = var_lod.get_tensor() + lod_tensor.set_dims([5, 2, 3, 4]) + lod_tensor.alloc_float(place) + + tensor_array = numpy.array(lod_tensor) + self.assertEqual((5, 2, 3, 4), tensor_array.shape) tensor_array[0, 0, 0, 0] = 1.0 tensor_array[0, 0, 0, 1] = 2.0 - tensor.set(tensor_array, place) + lod_tensor.set(tensor_array, place) + + lod_v = numpy.array(lod_tensor) + self.assertAlmostEqual(1.0, lod_v[0, 0, 0, 0]) + self.assertAlmostEqual(2.0, lod_v[0, 0, 0, 1]) + self.assertEqual(len(lod_tensor.lod()), 0) + + lod_py = [[0, 2, 5], [0, 2, 4, 5]] + lod_tensor.set_lod(lod_py) + lod = lod_tensor.lod() + self.assertListEqual(lod_py, lod) + + def test_lod_tensor_init(self): + scope = core.Scope() + place = core.CPUPlace() lod_py = [[0, 2, 5], [0, 2, 4, 5]] + lod_tensor = core.LoDTensor(lod_py) + + lod_tensor.set_dims([5, 2, 3, 4]) + lod_tensor.alloc_float(place) + tensor_array = numpy.array(lod_tensor) + tensor_array[0, 0, 0, 0] = 1.0 + tensor_array[0, 0, 0, 1] = 2.0 + lod_tensor.set(tensor_array, place) - lod_tensor = core.LoDTensor(lod_py, tensor) - lod_v = numpy.array(lod_tensor.tensor()) + lod_v = numpy.array(lod_tensor) self.assertAlmostEqual(1.0, lod_v[0, 0, 0, 0]) self.assertAlmostEqual(2.0, lod_v[0, 0, 0, 1]) self.assertListEqual(lod_py, lod_tensor.lod()) From cb28428323a70e82ed84be1f2d9db507ff8f8a30 Mon Sep 17 00:00:00 2001 From: dangqingqing Date: Thu, 14 Sep 2017 01:03:32 +0800 Subject: [PATCH 073/115] Replace LoDTensor in elementwise_mul_op, pad_op and recurrent_op_utils. --- paddle/framework/operator.cc | 31 +++++++++------------- paddle/framework/operator.h | 25 ++++++++++++----- paddle/operators/elementwise_mul_op.cc | 6 ++--- paddle/operators/pad_op.cc | 9 ++++--- paddle/operators/rnn/recurrent_op_utils.cc | 19 ++++++++----- 5 files changed, 51 insertions(+), 39 deletions(-) diff --git a/paddle/framework/operator.cc b/paddle/framework/operator.cc index 25faeff0d1..27e7784940 100644 --- a/paddle/framework/operator.cc +++ b/paddle/framework/operator.cc @@ -189,13 +189,7 @@ void OperatorBase::GenerateTemporaryNames() { template <> const Tensor* InferShapeContext::Input(const std::string& name) const { auto* var = InputVar(name); - if (var == nullptr) return nullptr; - if (var->IsType()) { - return &var->Get(); - } - PADDLE_ENFORCE(var->IsType(), - "The Input(%s) must be LoDTensor or Tensor."); - return &var->Get(); + return var == nullptr ? nullptr : GetTensorFromVar(var); } template <> @@ -204,9 +198,11 @@ const std::vector InferShapeContext::MultiInput( auto names = op().Inputs(name); std::vector res; res.reserve(names.size()); - std::transform( - names.begin(), names.end(), std::back_inserter(res), - [&](const std::string& sub_name) { return Input(sub_name); }); + std::transform(names.begin(), names.end(), std::back_inserter(res), + [&](const std::string& sub_name) { + auto var = scope_.FindVar(sub_name); + return var == nullptr ? nullptr : GetTensorFromVar(var); + }); return res; } @@ -214,12 +210,7 @@ template <> Tensor* ExecutionContext::Output(const std::string& name) const { auto* var = OutputVar(name); if (var == nullptr) return nullptr; - if (var->IsType()) { - return const_cast(&var->Get()); - } - PADDLE_ENFORCE(var->IsType(), - "The Input(%s) must be LoDTensor or Tensor."); - return const_cast(&var->Get()); + return GetTensorFromVar(var); } template <> @@ -228,9 +219,11 @@ std::vector ExecutionContext::MultiOutput( auto names = op().Outputs(name); std::vector res; res.reserve(names.size()); - std::transform( - names.begin(), names.end(), std::back_inserter(res), - [&](const std::string& sub_name) { return Output(sub_name); }); + std::transform(names.begin(), names.end(), std::back_inserter(res), + [&](const std::string& sub_name) { + auto var = scope().FindVar(sub_name); + return var == nullptr ? nullptr : GetTensorFromVar(var); + }); return res; } diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h index b2d7908408..bbf9930f0a 100644 --- a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -306,9 +306,11 @@ class InferShapeContext { auto names = op_.Inputs(name); std::vector res; res.reserve(names.size()); - std::transform( - names.begin(), names.end(), std::back_inserter(res), - [&](const std::string& sub_name) { return Input(sub_name); }); + std::transform(names.begin(), names.end(), std::back_inserter(res), + [&](const std::string& sub_name) { + auto var = scope_.FindVar(sub_name); + return var == nullptr ? nullptr : &var->Get(); + }); return res; } @@ -317,12 +319,23 @@ class InferShapeContext { auto names = op_.Outputs(name); std::vector res; res.reserve(names.size()); - std::transform( - names.begin(), names.end(), std::back_inserter(res), - [&](const std::string& sub_name) { return Output(sub_name); }); + std::transform(names.begin(), names.end(), std::back_inserter(res), + [&](const std::string& sub_name) { + auto var = scope_.FindVar(sub_name); + return var == nullptr ? nullptr : var->GetMutable(); + }); return res; } + Tensor* GetTensorFromVar(const Variable* var) const { + if (var->IsType()) { + return const_cast(&var->Get()); + } + PADDLE_ENFORCE(var->IsType(), + "The Input(%s) must be LoDTensor or Tensor."); + return const_cast(&var->Get()); + } + private: const OperatorBase& op_; const Scope& scope_; diff --git a/paddle/operators/elementwise_mul_op.cc b/paddle/operators/elementwise_mul_op.cc index 1742925545..ae88ec1b30 100644 --- a/paddle/operators/elementwise_mul_op.cc +++ b/paddle/operators/elementwise_mul_op.cc @@ -31,7 +31,7 @@ class ElementWiseMulOp : public framework::OperatorWithKernel { auto y_dim = ctx.Input("Y")->dims(); PADDLE_ENFORCE_GE(x_dim.size(), y_dim.size(), "Rank of first input must >= rank of second input.") - ctx.Output("Out")->Resize(x_dim); + ctx.Output("Out")->Resize(x_dim); } }; @@ -80,8 +80,8 @@ class ElementWiseMulOpGrad : public framework::OperatorWithKernel { auto x_dims = ctx.Input("X")->dims(); auto y_dims = ctx.Input("Y")->dims(); auto out_dims = ctx.Input(framework::GradVarName("Out"))->dims(); - auto *x_grad = ctx.Output(framework::GradVarName("X")); - auto *y_grad = ctx.Output(framework::GradVarName("Y")); + auto *x_grad = ctx.Output(framework::GradVarName("X")); + auto *y_grad = ctx.Output(framework::GradVarName("Y")); PADDLE_ENFORCE_GE(x_dims.size(), y_dims.size(), "Rank of first input must >= rank of second input.") diff --git a/paddle/operators/pad_op.cc b/paddle/operators/pad_op.cc index 7e78b6ec13..6cf7bd6f35 100644 --- a/paddle/operators/pad_op.cc +++ b/paddle/operators/pad_op.cc @@ -34,7 +34,8 @@ class PadOp : public framework::OperatorWithKernel { for (int i = 0; i < x_dim.size(); ++i) { out_dims[i] = x_dim[i] + paddings[i * 2] + paddings[i * 2 + 1]; } - ctx.Output("Out")->Resize(framework::make_ddim(out_dims)); + ctx.Output("Out")->Resize( + framework::make_ddim(out_dims)); } }; @@ -95,9 +96,9 @@ class PadOpGrad : public framework::OperatorWithKernel { PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")), "Input(Out@GRAD) should not be null"); auto x_dims = ctx.Input("X")->dims(); - auto *x_grad = ctx.Output(framework::GradVarName("X")); - if (x_grad != nullptr) { - x_grad->Resize(x_dims); + auto *x_g = ctx.Output(framework::GradVarName("X")); + if (x_g != nullptr) { + x_g->Resize(x_dims); } } }; diff --git a/paddle/operators/rnn/recurrent_op_utils.cc b/paddle/operators/rnn/recurrent_op_utils.cc index 97872c67ac..6c082cb182 100644 --- a/paddle/operators/rnn/recurrent_op_utils.cc +++ b/paddle/operators/rnn/recurrent_op_utils.cc @@ -21,6 +21,7 @@ namespace rnn { namespace f = paddle::framework; using Tensor = framework::Tensor; +using LoDTensor = framework::LoDTensor; void SegmentInputs(const std::vector& step_scopes, const std::vector& inlinks, const size_t seq_len, @@ -31,7 +32,7 @@ void SegmentInputs(const std::vector& step_scopes, PADDLE_ENFORCE(input_var != nullptr, "input link [%s] is not in scope.", inlinks[i].external); - Tensor* input = input_var->GetMutable(); + LoDTensor* input = input_var->GetMutable(); f::DDim dims = input->dims(); PADDLE_ENFORCE(static_cast(dims[0]) == seq_len, "all the inlinks must have same length"); @@ -40,6 +41,8 @@ void SegmentInputs(const std::vector& step_scopes, Tensor* step_input = step_scopes[j]->NewVar(inlinks[i].internal)->GetMutable(); if (!infer_shape_mode) { + // The input of operators of each step is Tensor here. + // Maybe need to modify Slice function. *step_input = input->Slice(j, j + 1); } step_input->Resize(step_dims); @@ -54,21 +57,23 @@ void ConcatOutputs(const std::vector& step_scopes, auto output_var = step_scopes[0]->FindVar(outlinks[i].external); PADDLE_ENFORCE(output_var != nullptr, "output link [%s] is not in scope.", outlinks[i].external); - Tensor* output = output_var->GetMutable(); + LoDTensor* output = output_var->GetMutable(); if (infer_shape_mode) { auto step_scope_var = step_scopes[0]->FindVar(outlinks[i].internal); PADDLE_ENFORCE(step_scope_var != nullptr, "%s not in scope", outlinks[i].internal); - f::DDim step_dims = step_scope_var->template GetMutable()->dims(); + f::DDim step_dims = + step_scope_var->template GetMutable()->dims(); std::vector dims_vec = vectorize(step_dims); dims_vec.insert(dims_vec.begin(), seq_len); output->Resize(f::make_ddim(dims_vec)); } else { output->mutable_data(platform::CPUPlace()); for (size_t j = 0; j < seq_len; j++) { - Tensor* step_output = - step_scopes[j]->FindVar(outlinks[i].internal)->GetMutable(); + LoDTensor* step_output = step_scopes[j] + ->FindVar(outlinks[i].internal) + ->GetMutable(); // TODO(luotao02) data type and platform::DeviceContext() should set // correctly (output->Slice(j, j + 1)) @@ -94,8 +99,8 @@ void LinkMemories(const std::vector& scopes, auto scope = scopes[step_id]; auto linked_scope = scopes[step_id + offset]; for (auto& attr : memories) { - auto mem = scope->FindVar(attr.pre_var)->GetMutable(); - auto linked_mem = linked_scope->FindVar(attr.var)->GetMutable(); + auto mem = scope->FindVar(attr.pre_var)->GetMutable(); + auto linked_mem = linked_scope->FindVar(attr.var)->GetMutable(); if (infer_shape_mode) { mem->Resize(linked_mem->dims()); } else { From 1ead57ec6a990ff35119cda4a90187c368f388d1 Mon Sep 17 00:00:00 2001 From: dongzhihong Date: Wed, 13 Sep 2017 10:32:32 -0700 Subject: [PATCH 074/115] "better format" --- paddle/operators/name_convention.md | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/paddle/operators/name_convention.md b/paddle/operators/name_convention.md index a090e0b545..379385dc5d 100644 --- a/paddle/operators/name_convention.md +++ b/paddle/operators/name_convention.md @@ -38,9 +38,11 @@ public: AccumulateOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "(Tensor) The input tensor that has to be accumulated to the output tensor. If the output size is not the same as input size, the output tensor is first reshaped and initialized to zero, and only then, accumulation is done."); + AddInput("X", "(Tensor) The input tensor that has to be accumulated to the output tensor. + If the output size is not the same as input size, + the output tensor is first reshaped and initialized to zero, and only then, accumulation is done."); AddOutput("Out", "(Tensor) Accumulated output tensor"); - AddAttr("gamma", "(float, default 1.0) Accumulation multiplier"); + AddAttr("gamma", "(float, default 1.0) Accumulation multiplier").SetDefault(1.0f); AddComment(R"DOC( Accumulate operator accumulates the input tensor to the output tensor. If the output tensor already has the right size, we add to it; otherwise, we first @@ -51,7 +53,7 @@ Accumulation is done as shown: Out = 1*X + gamma*Out -where X is the input tensor, Y is the output tensor and gamma is the multiplier +where X is the input tensor, Out is the output tensor and gamma is the multiplier argument. )DOC"); } From f6b518c9708ce07fa589234d8a43e8f2959cb01a Mon Sep 17 00:00:00 2001 From: dangqingqing Date: Thu, 14 Sep 2017 01:47:01 +0800 Subject: [PATCH 075/115] Fix elementwise_mul_op.cc --- paddle/operators/elementwise_mul_op.cc | 8 +++++--- paddle/pybind/pybind.cc | 4 ---- 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/paddle/operators/elementwise_mul_op.cc b/paddle/operators/elementwise_mul_op.cc index ae88ec1b30..e37c582adb 100644 --- a/paddle/operators/elementwise_mul_op.cc +++ b/paddle/operators/elementwise_mul_op.cc @@ -31,7 +31,7 @@ class ElementWiseMulOp : public framework::OperatorWithKernel { auto y_dim = ctx.Input("Y")->dims(); PADDLE_ENFORCE_GE(x_dim.size(), y_dim.size(), "Rank of first input must >= rank of second input.") - ctx.Output("Out")->Resize(x_dim); + ctx.Output("Out")->Resize(x_dim); } }; @@ -80,8 +80,10 @@ class ElementWiseMulOpGrad : public framework::OperatorWithKernel { auto x_dims = ctx.Input("X")->dims(); auto y_dims = ctx.Input("Y")->dims(); auto out_dims = ctx.Input(framework::GradVarName("Out"))->dims(); - auto *x_grad = ctx.Output(framework::GradVarName("X")); - auto *y_grad = ctx.Output(framework::GradVarName("Y")); + auto *x_grad = + ctx.Output(framework::GradVarName("X")); + auto *y_grad = + ctx.Output(framework::GradVarName("Y")); PADDLE_ENFORCE_GE(x_dims.size(), y_dims.size(), "Rank of first input must >= rank of second input.") diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc index 96c4e88845..926e601426 100644 --- a/paddle/pybind/pybind.cc +++ b/paddle/pybind/pybind.cc @@ -176,10 +176,6 @@ All parameter, weight, gradient are variables in Paddle. .def("set_int", [](Variable &var, int val) -> void { *var.GetMutable() = val; }) .def("get_int", [](const Variable &var) -> int { return var.Get(); }) - // .def("get_tensor", - // [](Variable &self) -> Tensor * { return - // self.GetMutable(); }, - // py::return_value_policy::reference) .def("get_tensor", [](Variable &self) -> LoDTensor * { return self.GetMutable(); From c7db6e8d146df415ad0011afac7e4d2562f83dcb Mon Sep 17 00:00:00 2001 From: zchen0211 Date: Wed, 13 Sep 2017 14:14:49 -0700 Subject: [PATCH 076/115] cond op passed --- paddle/operators/cond_op.cc | 166 ++++++++++++++++- paddle/operators/cond_op.h | 173 +----------------- paddle/pybind/pybind.cc | 1 + python/paddle/v2/framework/op.py | 6 +- .../paddle/v2/framework/tests/CMakeLists.txt | 1 + .../paddle/v2/framework/tests/test_cond_op.py | 40 ++-- 6 files changed, 198 insertions(+), 189 deletions(-) diff --git a/paddle/operators/cond_op.cc b/paddle/operators/cond_op.cc index cb7fed7ebd..a3e4a2506f 100644 --- a/paddle/operators/cond_op.cc +++ b/paddle/operators/cond_op.cc @@ -13,15 +13,175 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/operators/cond_op.h" + +#include +#include + #include "paddle/framework/op_registry.h" +#include "paddle/operators/gather.h" #include "paddle/operators/net_op.h" +#include "paddle/operators/scatter.h" namespace paddle { namespace operators { -class CondOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker { +using Scope = framework::Scope; +using Variable = framework::Variable; +using Tensor = framework::Tensor; +using DDim = framework::DDim; + +void CondOp::CreateScope(const Scope& scope) const { + auto sub_scopes_var = scope.FindVar("SubScopes"); + PADDLE_ENFORCE(sub_scopes_var != nullptr, ""); + auto sub_scopes = sub_scopes_var->GetMutable>(); + auto& sub_scope = scope.NewScope(); + sub_scopes->push_back(&sub_scope); +} + +void CondOp::CreateIndexTensor(const Scope& scope) const { + auto index_tensors_var = scope.FindVar("IndexTensors"); + PADDLE_ENFORCE(index_tensors_var != nullptr, ""); + auto& index_tensors = *index_tensors_var->GetMutable>(); + index_tensors.push_back(Tensor()); +} + +void CondOp::InferShape(const Scope& scope) const { + auto sub_scopes_var = scope.FindVar("SubScopes"); + PADDLE_ENFORCE_NOT_NULL(sub_scopes_var); + auto& sub_scopes = *sub_scopes_var->GetMutable>(); + + for (int i = 0; i < 2; ++i) { + // Create two sub scopes for true and false branches + // sub_scopes[0] for the true branch and sub_scopes[1] for the false + // branch + CreateScope(scope); + + // Create two tensors for true and false indices + // index_tensors[0] for the true branch and index_tensors[1] for the false + // branch + CreateIndexTensor(scope); + + PADDLE_ENFORCE(!Inputs("Xs").empty(), "Inputs can't be empty"); + for (auto& input : Inputs("Xs")) { + // Create a new tensor in sub-scope for input-type tensor + Variable* v = sub_scopes[i]->NewVar(input); + Tensor* sub_input = v->GetMutable(); + sub_input->Resize(scope.FindVar(input)->GetMutable()->dims()); + } + + for (auto& output : (*sub_net_op_[i]).Outputs()) { + for (auto& var_name : output.second) { + sub_scopes[i]->NewVar(var_name); + } + } + + // each net calls InferShape + sub_net_op_[i]->InferShape(*sub_scopes[i]); + } + + for (auto& output : Outputs("Outs")) { + Tensor* tensor_t_out = sub_scopes[0]->FindVar(output)->GetMutable(); + PADDLE_ENFORCE_NOT_NULL(tensor_t_out, "True output should be NULL"); + Tensor* tensor_f_out = sub_scopes[1]->FindVar(output)->GetMutable(); + PADDLE_ENFORCE_NOT_NULL(tensor_f_out, "True output should be NULL"); + + auto* tensor_out_var = scope.FindVar(output); + PADDLE_ENFORCE_NOT_NULL(tensor_out_var, "Output not found"); + Tensor* tensor_out = tensor_out_var->GetMutable(); + PADDLE_ENFORCE_NOT_NULL(tensor_t_out, "True output should be NULL"); + // check output size should be same + PADDLE_ENFORCE_EQ(tensor_t_out->dims(), tensor_f_out->dims(), + "Outputs not of the same shape"); + tensor_out->Resize(tensor_t_out->dims()); + tensor_out->mutable_data(tensor_out->dims(), platform::CPUPlace()); + } +} + +void CondOp::Run(const Scope& scope, + const platform::DeviceContext& dev_ctx) const { + auto sub_scopes = scope.FindVar("SubScopes")->Get>(); + auto index_tensors = + scope.FindVar("IndexTensors")->Get>(); + + std::string cond_name = Input("Cond"); + Variable* cond_var = scope.FindVar(cond_name); + PADDLE_ENFORCE_NOT_NULL(cond_var); + const Tensor* cond = cond_var->GetMutable(); + + // Step 1: get the true/false index at runtime + // index_[0]: vector, contains all index for cond[i] == true + // index_[1]: vector, contains all index for cond[i] == false + for (int i = 0; i < 2; ++i) index_[i].clear(); + + const int* cond_data = cond->data(); + for (int i = 0; i < cond->dims()[0]; ++i) { + if (cond_data[i]) + index_[0].push_back(i); + else + index_[1].push_back(i); + } + + // put index_[0] and index_[1] into two tensors: + // index_tensor_[0] and index_tensor_[1] + DDim dim = paddle::framework::make_ddim({0}); + for (int i = 0; i < 2; ++i) { + dim[0] = index_[i].size(); + int* tmp_ptr = + index_tensors[i].mutable_data(dim, platform::CPUPlace()); + index_tensors[i].Resize(dim); + memcpy(tmp_ptr, index_[i].data(), dim[0] * sizeof(int)); + } + + // Step 2: collect data by calling gather + for (int i = 0; i < 2; ++i) { + // i= 0/i for True and False branches respectively + for (auto& input : Inputs("Xs")) { + // find Tensor + Variable* v = scope.FindVar(input); + PADDLE_ENFORCE_NOT_NULL(v); + Tensor* tensor_parent = v->GetMutable(); + + v = sub_scopes[i]->FindVar(input); + PADDLE_ENFORCE_NOT_NULL(v); + Tensor* tensor_child = v->GetMutable(); + + // Resize child + DDim dim = tensor_child->dims(); + dim[0] = index_[i].size(); + tensor_child->Resize(dim); + tensor_child->mutable_data(dim, platform::CPUPlace()); + + Gather(dev_ctx.GetPlace(), tensor_parent, &index_tensors[i], + tensor_child); + } + } + + // Step 3: run + for (int i = 0; i < 2; ++i) sub_net_op_[i]->Run(*sub_scopes[i], dev_ctx); + + // Step 4: merge output results + for (int i = 0; i < 2; ++i) { + // i= 0/i for True and False branches respectively + for (auto& output : Outputs("Outs")) { + // find Tensor + Variable* v = scope.FindVar(output); + PADDLE_ENFORCE_NOT_NULL(v); + Tensor* tensor_parent = v->GetMutable(); + + v = sub_scopes[i]->FindVar(output); + PADDLE_ENFORCE_NOT_NULL(v); + Tensor* tensor_child = v->GetMutable(); + + ScatterUpdate(dev_ctx.GetPlace(), tensor_child, &index_tensors[i], + tensor_parent); + } + } +} + +class CondOpProtoAndCheckerMaker : public framework::OpProtoAndCheckerMaker { public: - CondOpProtoAndCheckerMaker(OpProto *proto, OpAttrChecker *op_checker) + CondOpProtoAndCheckerMaker(framework::OpProto* proto, + framework::OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("Cond", "The condition, which is a bool vector"); AddInput("Xs", "Inputs of Subnets").AsDuplicable(); @@ -41,5 +201,5 @@ Out[i] = subnet_t[i], if Cond[i] == false } // namespace operators } // namespace paddle -REGISTER_OP_WITHOUT_GRADIENT(cond_op, paddle::operators::CondOp, +REGISTER_OP_WITHOUT_GRADIENT(cond, paddle::operators::CondOp, paddle::operators::CondOpProtoAndCheckerMaker); diff --git a/paddle/operators/cond_op.h b/paddle/operators/cond_op.h index b776f8ccd9..27a6e9e3c3 100644 --- a/paddle/operators/cond_op.h +++ b/paddle/operators/cond_op.h @@ -19,22 +19,19 @@ limitations under the License. */ #include "paddle/framework/eigen.h" #include "paddle/framework/operator.h" #include "paddle/framework/tensor.h" -#include "paddle/operators/gather.h" -#include "paddle/operators/scatter.h" +#include "paddle/operators/net_op.h" namespace paddle { namespace operators { -using namespace paddle::framework; - -class CondOp : public OperatorBase { +class CondOp : public framework::OperatorBase { public: - CondOp(const std::string& type, const VariableNameMap& inputs, - const VariableNameMap& outputs, const AttributeMap& attrs) + CondOp(const std::string& type, const framework::VariableNameMap& inputs, + const framework::VariableNameMap& outputs, + const framework::AttributeMap& attrs) : OperatorBase(type, inputs, outputs, attrs) { index_.resize(2); sub_net_op_.resize(2); - LOG(INFO) << "Initialization Done."; } CondOp(const CondOp& o) @@ -44,87 +41,14 @@ class CondOp : public OperatorBase { PADDLE_THROW("Not implemented"); } - void CreateScope(const Scope& scope) const { - auto sub_scopes_var = scope.FindVar("SubScopes"); - PADDLE_ENFORCE(sub_scopes_var != nullptr, ""); - auto sub_scopes = sub_scopes_var->GetMutable>(); - auto& sub_scope = scope.NewScope(); - sub_scopes->push_back(&sub_scope); - } + void CreateScope(const framework::Scope& scope) const; - void CreateIndexTensor(const Scope& scope) const { - auto index_tensors_var = scope.FindVar("IndexTensors"); - PADDLE_ENFORCE(index_tensors_var != nullptr, ""); - auto& index_tensors = - *index_tensors_var->GetMutable>(); - Tensor index_tensor; - index_tensors.push_back(&index_tensor); - } + void CreateIndexTensor(const framework::Scope& scope) const; /** * InferShape must be called before Run. */ - void InferShape(const framework::Scope& scope) const override { - auto sub_scopes_var = scope.FindVar("SubScopes"); - PADDLE_ENFORCE_NOT_NULL(sub_scopes_var); - auto& sub_scopes = *sub_scopes_var->GetMutable>(); - // auto& index_tensors = - // *scope.FindVar("IndexTensors")->GetMutable>(); - - for (int i = 0; i < 2; ++i) { - // Create two sub scopes for true and false branches - // sub_scopes[0] for the true branch and sub_scopes[1] for the false - // branch - CreateScope(scope); - - // Create two tensors for true and false indices - // index_tensors[0] for the true branch and index_tensors[1] for the false - // branch - CreateIndexTensor(scope); - - for (auto& input : Inputs("Xs")) { - // Create a new tensor in sub-scope for input-type tensor - Variable* v = sub_scopes[i]->NewVar(input); - Tensor* sub_input = v->GetMutable(); - sub_input->Resize(scope.FindVar(input)->GetMutable()->dims()); - } - - // Inputs that do not require tailoring - /*for (auto& input : (*sub_net_op_[i]).Inputs()) { - // weights are located in the parent scope rather than sub scope - for (auto& var_name : input.second) { - if (!sub_scopes[i]->FindVar(var_name)) { - sub_scopes[i]->NewVar(var_name)->GetMutable(); - } - } - }*/ - - // Outputs - for (auto& output : (*sub_net_op_[i]).Outputs()) { - for (auto& var_name : output.second) { - sub_scopes[i]->NewVar(var_name); - } - } - - // each net calls InferShape - LOG(INFO) << "OK 3"; - sub_net_op_[i]->InferShape(*sub_scopes[i]); - LOG(INFO) << "OK 4"; - } - - for (auto& output : Outputs("Outs")) { - Tensor* tensor_t_out = - sub_scopes[0]->FindVar(output)->GetMutable(); - Tensor* tensor_f_out = - sub_scopes[1]->FindVar(output)->GetMutable(); - Tensor* tensor_out = scope.FindVar(output)->GetMutable(); - // check output size should be same - PADDLE_ENFORCE_EQ(tensor_t_out->dims(), tensor_f_out->dims(), - "Outputs not of the same shape"); - tensor_out->Resize(tensor_t_out->dims()); - } - LOG(INFO) << "OK 5"; - } + void InferShape(const framework::Scope& scope) const override; // Set True Block void set_truenet(std::unique_ptr net) { @@ -137,74 +61,7 @@ class CondOp : public OperatorBase { } void Run(const framework::Scope& scope, - const platform::DeviceContext& dev_ctx) const override { - auto sub_scopes = scope.FindVar("SubScopes")->Get>(); - auto index_tensors = - scope.FindVar("IndexTensors")->Get>(); - - std::string cond_name = Input("Cond"); - Variable* cond_var = scope.FindVar(cond_name); - PADDLE_ENFORCE_NOT_NULL(cond_var) - const Tensor* cond = cond_var->GetMutable(); - - // Step 1: get the true/false index at runtime - // index_[0]: vector, contains all index for cond[i] == true - // index_[1]: vector, contains all index for cond[i] == false - for (int i = 0; i < 2; ++i) index_[i].clear(); - - const bool* cond_data = cond->data(); - for (int i = 0; i < cond->dims()[0]; ++i) { - if (cond_data[i]) - index_[0].push_back(i); - else - index_[1].push_back(i); - } - // put index_[0] and index_[1] into two tensors: - // index_tensor_[0] and index_tensor_[1] - framework::DDim dim = paddle::framework::make_ddim({0}); - for (int i = 0; i < 2; ++i) { - dim[0] = index_[i].size(); - int* tmp_ptr = - index_tensors[i]->mutable_data(dim, platform::CPUPlace()); - index_tensors[i]->Resize(dim); - memcpy(tmp_ptr, index_[i].data(), dim[0] * sizeof(int)); - } - - // Step 2: collect data by calling gather - for (int i = 0; i < 2; ++i) { - // i= 0/i for True and False branches respectively - for (auto& input : Inputs("Xs")) { - // find Tensor - // Tensor* tensor_parent = scope.FindVar(input)->GetMutable(); - Variable* v = scope.FindVar(input); - Tensor* tensor_parent = v->GetMutable(); - // Tensor* tensor_child = - // sub_scope_[i].FindVar(input)->GetMutable(); - v = sub_scopes[i]->FindVar(input); - Tensor* tensor_child = v->GetMutable(); - Gather(dev_ctx.GetPlace(), tensor_parent, index_tensors[i], - tensor_child); - } - } - - // Step 3: run - for (int i = 0; i < 2; ++i) sub_net_op_[i]->Run(*sub_scopes[i], dev_ctx); - - // Step 4: merge output results - for (int i = 0; i < 2; ++i) { - // i= 0/i for True and False branches respectively - // for (auto& output : GetAttr>("sub_outputs")) { - for (auto& output : Outputs("Outs")) { - // find Tensor - Variable* v = scope.FindVar(output); - Tensor* tensor_parent = v->GetMutable(); - v = sub_scopes[i]->FindVar(output); - Tensor* tensor_child = v->GetMutable(); - ScatterUpdate(dev_ctx.GetPlace(), tensor_child, index_tensors[i], - tensor_parent); - } - } - } + const platform::DeviceContext& dev_ctx) const override; private: // sub_net_op_[0]: subnet_t @@ -216,17 +73,5 @@ class CondOp : public OperatorBase { mutable std::vector> index_; }; -/* -class CondGradientOp final : public OperatorBase { -public: - void Init() override; - - virtual void InferShape(const std::shared_ptr& scope) const -override; - - virtual void Run(const std::shared_ptr& scope, - const platform::DeviceContext& dev_ctx) const override; -};*/ - } // namespace operators } // namespace paddle diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc index 3eeae856fb..34214ad2b3 100644 --- a/paddle/pybind/pybind.cc +++ b/paddle/pybind/pybind.cc @@ -19,6 +19,7 @@ limitations under the License. */ #include "paddle/framework/backward.h" #include "paddle/framework/lod_tensor.h" #include "paddle/framework/op_registry.h" +#include "paddle/operators/cond_op.h" #include "paddle/operators/net_op.h" #include "paddle/operators/recurrent_op.h" #include "paddle/platform/enforce.h" diff --git a/python/paddle/v2/framework/op.py b/python/paddle/v2/framework/op.py index bddd4d8908..1469d207d4 100644 --- a/python/paddle/v2/framework/op.py +++ b/python/paddle/v2/framework/op.py @@ -217,7 +217,7 @@ class __RecurrentOp__(object): class __CondOp__(object): __proto__ = None - type = 'cond_op' + type = "cond" def __init__(self): # cache recurrent_op's proto @@ -227,8 +227,8 @@ class __CondOp__(object): self.__proto__ = op_proto def __call__(self, *args, **kwargs): - if self.type not in args and 'type' not in kwargs: - kwargs['type'] = self.type + if self.type not in args and "type" not in kwargs: + kwargs["type"] = self.type # create proto create_method = OpDescCreationMethod(self.__proto__) proto = create_method(*args, **kwargs) diff --git a/python/paddle/v2/framework/tests/CMakeLists.txt b/python/paddle/v2/framework/tests/CMakeLists.txt index 6b22c00082..a2e3e978c7 100644 --- a/python/paddle/v2/framework/tests/CMakeLists.txt +++ b/python/paddle/v2/framework/tests/CMakeLists.txt @@ -27,6 +27,7 @@ py_test(test_operator SRCS test_operator.py) py_test(test_gaussian_random_op SRCS test_gaussian_random_op.py) py_test(test_uniform_random_op SRCS test_uniform_random_op.py) py_test(test_recurrent_op SRCS test_recurrent_op.py) +py_test(test_cond_op SRCS test_cond_op.py) py_test(test_sgd_op SRCS test_sgd_op.py) py_test(test_gradient_checker SRCS test_gradient_checker.py) py_test(test_lookup_table SRCS test_lookup_table.py) diff --git a/python/paddle/v2/framework/tests/test_cond_op.py b/python/paddle/v2/framework/tests/test_cond_op.py index 1fe5889b7f..37177ae0b2 100644 --- a/python/paddle/v2/framework/tests/test_cond_op.py +++ b/python/paddle/v2/framework/tests/test_cond_op.py @@ -11,15 +11,15 @@ class PySimpleCond(object): ''' def __init__(self): - array = [True] * 10 + array = [1] * 10 for i in range(1, 10, 2): - array[i] = False + array[i] = 0 self.cond = np.array(array) self.x = np.ones(shape=(10, 1)) def forward(self): - self.index_t = np.where(self.cond) - self.index_f = np.where(self.cond == False) + self.index_t = np.where(self.cond == 1) + self.index_f = np.where(self.cond == 0) y_t = self.x[self.index_t] y_f = self.x[self.index_f] y_t = y_t * 2. @@ -36,7 +36,6 @@ class PySimpleCondTest(unittest.TestCase): def test_forward(self): output = self.condnn.forward() - print 'output', output def create_tensor(scope, name, shape, np_data): @@ -67,47 +66,50 @@ class TestCondOp(unittest.TestCase): self.create_cond_op() self.create_sub_net() ctx = core.DeviceContext.create(core.CPUPlace()) - print 'running infer shape' - print self.scope.find_var("SubScopes") self.condop.infer_shape(self.scope) - print 'ok 2' self.condop.run(self.scope, ctx) - print 'ok 3' - return np.array(self.scope.find_var("Outs").get_tensor()) + return np.array(self.scope.find_var("Out").get_tensor()) def create_global_variables(self): x_np_data = self.py_cond.x - create_tensor(self.scope, "x", [10, 1], x_np_data) - cond_np_data = self.py_cond.cond - create_tensor(self.scope, "cond", [10, 1], x_np_data) + create_tensor(self.scope, "X", [10, 1], x_np_data) + cond_np_data = self.py_cond.cond.astype("int32") + create_tensor(self.scope, "cond", [10, 1], cond_np_data) self.scope.new_var("SubScopes") self.scope.new_var("IndexTensors") - self.scope.new_var("Outs") + self.scope.new_var("Out") def create_cond_op(self): self.condop = CondOp( Cond="cond", - Xs=["x"], - Outs=['Out_final'], + Xs=["X"], + Outs=["Out"], SubScopes="SubScopes", IndexTensors="IndexTensors") def create_sub_net(self): truenet = core.Net.create() - scale_op_t = Operator("scale", X='X', Y='Out', scale=2.) + scale_op_t = Operator("scale", X='X', Out='Out', scale=2.) truenet.append_op(scale_op_t) truenet.complete_add_op(True) self.condop.set_truenet(truenet) falsenet = core.Net.create() - scale_op_t = Operator("scale", X='X', Y='Out', scale=-2.) + scale_op_t = Operator("scale", X='X', Out='Out', scale=-2.) falsenet.append_op(scale_op_t) falsenet.complete_add_op(True) self.condop.set_falsenet(falsenet) def test_forward(self): print 'test cond op forward' - py_output = self.forward() + pd_output = self.forward() + py_output = self.py_cond.forward() + print 'pd_output', pd_output + print + print 'py_output', py_output + self.assertEqual(pd_output.shape, py_output.shape) + print 'test passed' + return 0 if __name__ == "__main__": From 2c8cbb8c3fd779a9993afd623e4ee53ed047a4ec Mon Sep 17 00:00:00 2001 From: zchen0211 Date: Wed, 13 Sep 2017 15:39:29 -0700 Subject: [PATCH 077/115] if_else_op.md --- doc/design/if_else_op.md | 22 ++-------------------- 1 file changed, 2 insertions(+), 20 deletions(-) diff --git a/doc/design/if_else_op.md b/doc/design/if_else_op.md index 7370c2a24f..954a19c073 100644 --- a/doc/design/if_else_op.md +++ b/doc/design/if_else_op.md @@ -1,22 +1,4 @@ -IfOp should have only one branch. An IfOp operator takes a `cond` variable whose value must be a vector of N boolean elements. Its return value has M (M<=N) instances, each corresponds to a true element in `cond`. - -```python -import paddle as pd - -x = var() -y = var() -cond = var() - -b = pd.create_ifop(inputs=[x], output_num=1) -with b.true_block(): - x = b.inputs(0) - z = operator.add(x, y) - b.set_output(0, operator.softmax(z)) - -out = b(cond) -``` - -If we want the output still has N instances, we can use IfElseOp with a default value, whose minibatch size must be N: +IfOp should have only one branch. An IfOp operator takes a `cond` variable whose value must be a vector of N boolean elements. Its return value has N instances. If cond[i] == True, input instance input[i] will go through true_block() and generate output[i]; otherwise it will produce output from false_bloack(). ```python import paddle as pd @@ -39,7 +21,7 @@ with b.false_block(): out = b(cond) ``` -If only true_block is set in an IfElseOp, we can have a default value for false as: +If only true_block is set in an IfElseOp, a special case is that we can have a default value for false as: ```python import paddle as pd From b3f6b5a9599c548324dd02e225db6a9e1a3e402e Mon Sep 17 00:00:00 2001 From: Yan Chunwei Date: Wed, 13 Sep 2017 21:09:15 -0400 Subject: [PATCH 078/115] design of RNNOp (#3727) * add rnn design --- doc/design/ops/images/2_level_rnn.dot | 56 ++++++++ doc/design/ops/images/2_level_rnn.png | Bin 0 -> 52666 bytes doc/design/ops/images/rnn.dot | 87 ++++++++++++ doc/design/ops/images/rnn.jpg | Bin 0 -> 44320 bytes doc/design/ops/images/rnn.png | Bin 0 -> 185148 bytes doc/design/ops/images/rnn_2level_data.dot | 75 +++++++++++ doc/design/ops/images/rnn_2level_data.png | Bin 0 -> 68929 bytes doc/design/ops/rnn.md | 153 ++++++++++++++++++++++ 8 files changed, 371 insertions(+) create mode 100644 doc/design/ops/images/2_level_rnn.dot create mode 100644 doc/design/ops/images/2_level_rnn.png create mode 100644 doc/design/ops/images/rnn.dot create mode 100644 doc/design/ops/images/rnn.jpg create mode 100644 doc/design/ops/images/rnn.png create mode 100644 doc/design/ops/images/rnn_2level_data.dot create mode 100644 doc/design/ops/images/rnn_2level_data.png create mode 100644 doc/design/ops/rnn.md diff --git a/doc/design/ops/images/2_level_rnn.dot b/doc/design/ops/images/2_level_rnn.dot new file mode 100644 index 0000000000..a498e882a3 --- /dev/null +++ b/doc/design/ops/images/2_level_rnn.dot @@ -0,0 +1,56 @@ +digraph G { + + rnn [label="1-th level RNN" shape=box] + + subgraph cluster0 { + label = "time step 0" + + sent0 [label="sentence"] + sent1 [label="sentence"] + + rnn1 [label="2-th level RNN" shape=box] + + sent0 -> rnn1 + sent1 -> rnn1 + } + + subgraph cluster1 { + label = "time step 1" + + sent2 [label="sentence"] + sent3 [label="sentence"] + + rnn2 [label="2-th level RNN" shape=box] + + sent2 -> rnn2 + sent3 -> rnn2 + } + + subgraph cluster2 { + label = "time step 2" + + sent4 [label="sentence"] + sent5 [label="sentence"] + + rnn3 [label="2-th level RNN" shape=box] + + sent4 -> rnn3 + sent5 -> rnn3 + } + + + para0 [label="paragraph info 0"] + para1 [label="paragraph info 1"] + para2 [label="paragraph info 2"] + + rnn1 -> para0 + rnn2 -> para1 + rnn3 -> para2 + + para0 -> rnn + para1 -> rnn + para2 -> rnn + + chapter [label="chapter info"] + rnn -> chapter +} diff --git a/doc/design/ops/images/2_level_rnn.png b/doc/design/ops/images/2_level_rnn.png new file mode 100644 index 0000000000000000000000000000000000000000..0537a75beb175c0c284717421f7aa908da2a5038 GIT binary patch literal 52666 zcmZ6z1z1#D+doVUB^^o+-6$y_Fw`g|4T=RwgVN0q64G7LT_OqyNH>zwpmazMC^d9{ zYtHk&-~WBib+J_FqT9 zw|;>bQS5j`z8wD^olYh3--jq1$a3OV{o2df<*@$y!xkp|??YH5SfcR^f~QFkWxxOW zSg~v4{_Bu(o4lW5JT`7?{QtdP^uMFn`~Q82Kmy#KQ`?gMzwbOJ?jh~JW{4RR50g%X zgyvH9Ul)v(*N=~lN1{i^a?|T4EJyp16+2%siua>|1F6^KjB%L|`*#=C_uRKsG|nugdl z?RV!1du){ys}5(uA{s)d6EFz$?)ei?a`vi=&+9$K{UH|r>&4rfmPM#B&SjTD^tn%OUk3t`hAsPNxc(6xfL=}9${XhiV!T)`hu`%Gdw~L7rNtsRv zDHAkd;yinRR>Y#0(S44EROU42@^D=9S~gab**}FchosNY?_hl}L446$m zX)%);vixmXeK+?OquZ4Hub1|)&H2XKrh_3Vg2Hy21V1UP36{-_JN(a>Ei)X0tKUv) zWO=Tq9wuBq0*@f5t%vCyKdJPdpZE3I#*-nA{kA@?Z2rcbhEiiEaD?!fs`+^4Z9*vP zFVCVls;cE57DVb9650zMD^b1c>@U-hz^XyxC?@+!;GZ}Pb~t~~nnGCeB#Wah2F zvE3PFhGTa;1`)=@ZOhOGlD-cXmgmjaGS2I250E*5W@o51L#4q}J@cZ=aoQWaBifew#E*=#HZQ;kl8?MdN51N(y3jpNG#B zza77q=xFO_f~4z~88rI4J&pbJ*k7gPvX=8V)8Ik}v!ibOut{PUeJ|9(9LsoxX0yjW z!vYgnO9YjkYO2d5%liy>Dx6OBEpI(Rj>09~{vutgBV!=E0di$0i18wIw)ECl*2iq^ zl$>g@f67}8pBlJ6DS7%O?X|^^mWv&}cUMQrYH6ZIP zdUaqnAHS%v;ki0L647jLNT)XRXjc=dipi98{q$Pqs%F-XPoViQ-uvok(n1{d?bnOP zVCI=TPv+fmZrAIV8(PpzErbj|a9&TzvKY9#@TT;O8jgF`@MT$IeC-0_F{fshO(GT@ zwa@vXkfK4EC>zOPSq>gOj9-hnw7jJuj4s^l@^n8Sc=r5Hxxlc@#pg|*_Rx{_bo;C` zj{~pY9YGc^7!mI>bKSOQDt8+G^ncXOU(JiS=ajYVPN7WYf1B@VZ!8O3t@)*C)cweN zn~SZj)mEb&pXYx}u+Th`pYOasB4pYVt-i_Fbh$B{Eieo_$=DQPlyJxqwHjfLvMf8# zbcGXxQsx_q=I{M|_0xZBeB zoGoPwwTclkHev9r;$bIqy3w-N z9$u1vxYlPivSkc39;{JqPFEE#i7V8IuAKHK@vZf;#Zb|RX)$=BLngTB+^oga<{$bO*CMFA;ib)?N88 zYVE$bU~{>aU*&m44#JWR5hbw1^N9$q!3QuSN!LiDOAd_G(R_CtrhvB&cj1DNL#y)N zUy8GfV1nr+qi-(;zJZNJ!@%o!+A`g=p^DX2ZMaIm;XrXygWmbG)|JLQ@-$D%CJ}n1 z!8n=;-TK-C6=|(1oQ^6R3r-)?HJC``(bd^EbZE!nyqej2%=qKzQA|_nTTRK$f>+6V zbuo}|YoofK;xe4#+S-<0<$pBrb&aYmr>P=7BFWgfMF#C$lzD99^wvdMY(OM#ft5NW z(Vy*0v6tu5zUlU&v76_$?^}yDq>e?~1hv}pugztpUux1)c>_1!9zUCJa2*V#dS$^T zeSNxO;lTj!5_{bbU2X@D4@y>m2OC2=)?e&24kpSSE=a;H(tbZP9#v8* zv|KmGID&|ds4(-@#9mX;=$;AMK5{pq>ESuss%;)V3zHZ*h$Jsbw`=H&M3}066_NN; zLvMfs(`iPyPCIlG>yBEn4M&DIUmedn9gZp$M;{Op#L78BGLFdIpMD~dUkJC3SUMl@ zOO;Al!-{by+Gmwx<1t)I`ibw59%Q4c7IQBQsx7STz28M$#0rx9wq3sHr16XFog)$5 zg9;oA62!a+1H3^rBTr9WRqle9j^8xsuC|yS2T!A<(oB{KZh7C;GXj_`wV= z_~^gc1eg3Z<=Ju>Ja*FCHcsu%qW3}6>j<3B5f-6Lm}M&A&r_2=rNj=tI+9dvhb zDVY7=5ll%M@yB?d{C-t(M1NvT%$mWM({npr$7EQ<(m|$C&mf3gATru=oMp&zUkY59 zFV@g`%1!@a)!9x&Ac?#k>f57?`bWj&zdSmRJj7N;JnyzDC4lcit-dh-6Sw^EES zb*)C|BZFp!{K93tPaQULC)`CPE4b;#&jhh>cJ-V$hVB!VJ8D~`2@zL(abwQixT`-M zxH_uDlmrq(;quUBz@z1Ko64}y_3LS=#gSM62_Ywo?-nH-DyWg)o&{uO$Oqz1!!OzW zzf)8YQ?(u{gFCr*{@Vk`P)PP{m2yR!GTNN1vap`YAgFF>lyZo9Nif(SqMRd&mAZR0 zX42=I%p?iX>{oLxk%Ty|y^UCs4lpFOYnn6Go6|7pcC3ymQcn*qClX=?v4R_9mJkYk zdKl1=Fk8Fmo1XRp7jE7c&(V!1TlSOi_rL=Ncig|!s|>%}AJ9Cq5@mjEK(1&exQ)}? z;J_BmvYa0+sY)WfC+DobE-Ou_cw2`2{CJyhim!bqIri?LWtL~rzDWp=7>%bwKK@R9 zTnMOBq>k`6tj1$xudR-$d;c0}7<_KcaGSBN^w?h(CH2sh-hZEp`;odxN?;A_vFw-= zN)Q^tk5zl8!LcQ92cmI3@(ct-64G9x^p{G%RB7&)J>}@l4P2?a&T(ZxKrL)M;`6u* zzJ|p3->3TRK>~*<#Cy?Uw&Zg(DkB{DVn$*lzVUKn1ZH04FJbN^2dO^N8tEvz4nM7LDYs;(VOHl7!s(Zu{{GO`TsCJKB-RG=$yUX^yl{rSU^Dr^qb9PYWAL4(8ZAC zAAX!+$(g(L3f@HR6ms<)Rcg6QK_{sS!%Jr_Scv(Y5~IDEg5pU!gXIYp#0}dKsHb`6 z$KRAmoh}95ZW_$J_ot+KeafzJyXdfi**VhFkn^)Fq60reo4B)5T_!)j^Q4gxN2+d5jt!jliWdxy`tk+9rEvF~avhj*~H zvb=V3p*)bmm4mKofw?XEn1qxmr10V&1a8QfqJN2il?W0x$yy8-9*^wcD}bxoM&!UC zsRPF$ZggNjYO*rYgX_4;f1vtH#Th9k2!Uhd-%V+a*|hb=9jNcxt9CZynmibJ8Brok zv)1qCQzmMf{~$6vS0AUjM~oSIc>c<<>$b%%hs}$6FiizT^HYCNK6&DV@Hq#yb~0(q zUXnvB2E;1iOof;(%MUI1S>8rHKctOrNVh^iKAT1VYUo%EN#>E)8C;ptxSNp`@aLfL zL2{YodWz{lfc4+m_us-pDRn(*mgRF&j)PwMP{w5RuqD+m{4p$Ac+42!vZm^Mk8%0Y z%kPh&k);w?hSdVd`7(+1T%*isk3Bwl+nH)d#`|>Q_|rt}48utlb*%0U8(rU0F}rnn z;Nxx$Mi$R2jVez=8;xD+&vC5tuupGn%gho==ot+{JJo&sq>5k*}ww}oV2Sa z@q$Fa6+bJU550Gm*v8G(ucS(lEu|GmBC{FS~-{&wJf+xAQlwj<58oN?1)yl$lYGrFvDV zWaYvHufzhaJ+Qg?5n|ay)cGa)J+@-!$cbG^>`J@;_SW@3XUfsTs{aK|q+{A;1E}y0 zX3eAcCkypXKQ`U!d-o*%g=tv{MwC^}zKV?h7Z#Pnst4G=|50$yzc4L@il3qmYeZ%$ z0ObFd0_c4Nj^7iO*7z5uRR)lm;`!eJ(SM!x9ysn4_uKKm24VJpNC6;ouZd;RzfS9f zhSCv4F#x{*UtWbh9&9-e??{QL|Le37;P@*ukBtA~LzJ20AYisCr&6u|b=p_p_!?{H z%D?6yj9m*BS3eZL*%d+kv*W+h>$?E&CO(R|LlC_(>}%BGU4IF%Wu?pKwx=S44x0E+*ed1xnGQfiA6l-LPUSaxR8H{~;QkqEe z+-v*W;QjrAr}ZU8(t38&=Sfwd{#IHa%6xkmY8x|3i zCF}{v!v~f_8Hqa;h*f})#aw4Cm>8KxSU%2661*oDA_8vKXvJF8W02zzedd^C2b1g5J1a0%U~m6QA9URa5EyQxnRkzlh(M;Ce4 zw&WAGG>RkG_=3-Vy>eEUm%=#5&SbOVT9S%{(#WPFASdQE@YibX9|Kz27D3e^kieyR zr*|9R5D2m3`>mg)ghZT3;@LaxC&c1&T3eIl_GQ0i-{EUPFMn@6`h44#-B=U}%CF=w zO0Kll7@6~48n~9spU+j%+PDB@$5CoZk8_b3p2hK7LM$sJ{Snw(QijhWwJ^>E3)%?}fR z&)Es2MwP{&H&<{pqm)Td_fLHzAM?XrY*J}vbV`b1LRPAaO@Bv{mOdWY?9`&e?1x*= z#d}jruDm9jcEgt$t;bq{uyrAO{j{BWSwd2xNoGmpVL`u_E&b$!Px5}UA$xffD#B#(?;XFt7FOy`{gDt%meOge}l{>!-ekPI>%6O0D$gls>&R0yL3o1Uj0uKz; zO>duaOU7+Yh(jxXC=Z-?Evj`Y!NXrYO;jaE zc0|k1_9Lc$gbKW^E_!3YqQP+OZX|R14fKpPP zgU@X^SNgra*+2VjdX7_Iv}P&X@ZFWGNPwvn*giHFnj1GiM}7K=$wrcS7hr71Pj^kE z(uC5!0o+kM!-`}!=~eeb#imDPTS{9SwJG!7N8-kI7^TRcZ1K*osKOXl+?54C_GH86-?Neu5#6kK>*~Ev}3D-pd%%ESr{_+6L2OIDMF@+SuNlP%M9de zEOaQ0vk65VdF_t-bz%d44S|yH8{SI>9p*&mmgNp@me?9|4p&*isTH^s#1)hj^c5@> z+!aFdb_BWeAM=p@{s9--CagX0);#-#`(6@+W8%(xLCJ^okKQC3xM`!o$=LX8;*QP! z!M^|ZC~0PaUP@H3bN~z$*%XM`*$UwXLj+Q9M^wJx>f#m>;2VnPcsv2vNn^vE2I_7- zz4<={iqQ-uhuo3LG2u3^5X!IS0eSWYz#Y*j#(DbBBoa#)M`Mo#c;>OGrd=-L1p5PZ zv=i`ZqwWYKKI0J%k_>3T>cn@&!^PgB*rIyI$s+g99prt*HnH_WJOt&<|!Jv4OA>r!L3M(}eDzu4pxL2wl zlfMmO6Mu)BbTl4MV21juaQy33CePKl^`Np1)F-f9jbK-Yk22-vYWdCr#5-l* ztEDS2*($~~J`za@eomD0bTEoH?t^#HtDyP1G3fk21l@rV0ME83h4FiQM;*4V;0x~U zJ5$^xfh3a3)+ZZZixfP$n@$51^=ad)0_dAGo@cnM57`xCBwF?iu?cyQb>4!}|k?)6h zfoWYUn?d~fGU)Kd?aRx3T^T2W1nM5+_81x=1#9Xoi?|v$_p(D7KEsEM<ddpoPal zYD}*Z>)0#FKq+^ujTh@4Jcj>zhS5c+XhcrD$CeJdmRs#K?5aGSdpwfwU9R zT*Ze7MzXKs2uF>Q^h)@OcxbjAe^k&ny~Vcmh6;O)B}HfdF|Im>i4?N}$Fu_X;3IB7T##cWL|QL0&SloNwuJ8fn~w(W^GRN=5Vjaq`-ak`B+E?K zUtx0-wEb{7g(Z*3NfcRi-1|-0So2;{RL7Dm=HF6CCh>ffBz>WrCw_})(}$3pDE8*F zx|z4-%>z{PeC18?Y$KSVWQu3)bmMvLP;;^*^bgs=52xr=5aQ0Rm-{{G#`=1R5`&t= zhTcu@Sl8blZ_n!g{PJ?rBX9qjpsP#SlRG+jQs0Y*RGv;{FAqW+%+E$j)DGeq9rApI&9 zzbD+aUQF}GXFb)j<8B;8mhr;{;4f?uPRv@yIQ{TvtK->!lB6N#?D7_)gaovTGqfEn z7>pzaB$?2n+=rg{#g`)4TS4d_45rY1Mx^dfSvz7 z6@OxgR)xN&mELNuYDXo5-)992md04)f=m_$tdZX%Ya^QkRm!F2*aU!U$_d->HKba_EFtVt9)lLI zmNo9g11=zZA?|@VM;1Y3Ta@?$H$0e#eBO%-oe^FScLj2j`Y*d3RS2%Nt7Ea`$3E%h zAL}QR!9+SCU65`Qo6KPHm1lOcZWl?{spMwSS-JFPzq{rsZ$gDzDY4txt=yNPkfY7v z*Wv^sAVs3KmOqM&%KBuOKo@Gmi!B~sq>;&sFcm_pU4`#%uEiKHBQY>V?PhB21uk}5 z-@Wb+Ly`Fh49C3}TxC(x86#OS$E6MW%o;^B%7T$eGEc%7)4=#JM;anuUv2rGh15u<^r?DX(`n!b zJj*xO_v5=`m;~%w&Wh%|4+dY!F7XH9`|M7XBv{YA@;zItH0io!q^sz;ce|V`4v&Y_ zj)Exly-glwbhXgK7>r-R@;&g${OAi>Zc^W|BhDS>wRYR^^19_))}whrwEHezq~(Mk zFckB(6}kCZvDez|JJx!b3Zx};=|9Ga+Dr+ewZg-{)8nbZ&wP}2T?<1*h9uxXEPNf@ zbTz=Y)~?yG(8ZVt9;#50v7Fej^nsdqvJxtd9Z81tK%URqx6V%rgWCz8tm^$DEw>TE zcz7+20+K=hW^wjCL`-=_g6`NsF18W4llYyeAL`uniRAsa%-8k7A3gdt>k$|qGN6ZN z!47l*HABQE2U-^t9w$p*L?rY-RHFnKq402XyvO4cs?-78V%(9;0`qFS?Wf-`;>s{+ zw?uFOk}Q$f1?F;%TKnO{$J-xhZv~5?LHPwYXM^W38rzq|r%yMWVpkS3bAP6*Mde~3 zRV4p|=ES%J? z-*sdgCsU@RXsPfye{`^EtM8Q>!&s1X%d`x-nmB|D3V`o{WD|=g%Yc7NL zCp~T(BzbU?)|(G@sK!fANmHMDgY2;SxoU{l<$2%6yYo%1m43SHKV^+&E0brVqawR%z@F1PE@#Jb_a^-mn?rNoM+!o0}BI zF?-P;ubL)3<-Kw7e!!-(k8=!YhTh3Z-es{QeJ4Wr*s)u8#~lQMs{Oq;hqm?m=8f5?*ejnkvm|y;hxJ+S;kmYwl*KNa zW4C{bkeikiWJJSL0g@*kxBdSmcK(TU^X68;jwR0Xw-{_{}!e%w&gXWps*QfRdEfpw$?rB zqBcr7Kv!O)#LA1n=}J}Ox zc?*mhwVuqtYCL^gHc|+^Pw_JCk18ieb)l#~v33805O9U3YVD0&r;I~9??!XBcuPX4 zSG{5yM_FP$C%9JiH6MuCl1o=<&AU<7;>7RH>mnbBkqF3YKjju_1xfMZq8}AqB~3Nn zQgJqTnK*#mb$L%Q%mCmmReO>rYKLXzKkvO(55(z?AVxUWo`vI*x;gxUy*W;lyxi-I zicb|Z4tP)$ax=KQT{Al*de4DO6(?NzWUO#!oPM=%t-ZAFD3SapU_mCF_sjwEeFM~| z`#p2gT-z3;zD+mR7o0a!M^iC!p1pV9CjC>T1F+J|x*TM4P7bi1*f%h2QQkGs@h41U zU>3e`LxBcl-o%4>zPDp}N^f*}hGZ^moYhV$i=sEJ%nA`in5!doP|nR zKf&U#Wh#_8N3uNucsr4V?=WK`3hL`W)Z=`G56X7Q0*ck#e`rnY2k4ZXWC+;_QU2sD z`F+r9E{a5+ix=g!=yOttS}L+@3~L-|>=|^{2}2wP)bi=@q*z_IvX;}=wL1v9+I|rDsI0ZEs|t$ez;lLb>}7iSg?d( za}na`U7SrtJMKE7F4zV4m1Us5V`o)Am;#IZ``I9f3tsOC1*C%@fL&T{LI(wCEXgZfRca)W6l{Z;TVBK21~l;^XSUT zwe=?%Vo-X4HF@>}pQMML6;9enN~b{U9N+i>D0uX2m2^Jhp6j1q>MxzO z&8R#Dilr|@dU)QdB!%2qtzHMu*RmwUs(WZ0NZJ4%DPSmx-mXsmP`Pr>>Brr?;=!=2 zW!7g(ImNSuzgQZ4#QMduNqFwt6%R1qMTBQglE*9_p+6#Z91Z_OLORzqH(_j6`QGeuHCx~cp4iw6HVEbL4ofv z29rXlz@QPty(ULPlRs@B$?fz7W=MW7t;>b`-dwq_FtEO4ZANI+)HyR&H9*-Md?Bl0 zU?dyxaHPUMDY3*jw3t}F(QD2-WQ*;yk%R~#uAsGu@1+d}$oZ9@&f&Qc!eis5gWSGC zC%2vNfog6d}pq{ zdz{M6S7`;au~G^f@hF&-X-8UbPbx89_bY;2Trr}8hm%U)V)u?>r(n|ehz+WNj1{cC zhnfw}ThS}kTt+;k(?Xn2pq1kv5qeT^!Ab(!BT3|g>!MDqd)?m>a70+YOw!;hwg?^V zL`&^_6gH>v@-Wlr8%!=!V_beRw`UfTGpSVwGnC4T0}@{MxO4%Gd6*Q5Kzjf2O2+oZf%Subh`AU?tZk*%kO-QI>`AzpEpfofp#h} z^x{L2s~6LEa;41#qR>+j)fUMv{Y*+N7Sz}MmuCke0`9ubwJlcI)g^gJA^@Z5638 z29RVZI5v)II$%d?Cxt)}%Fz$jX=r7Ao1W_jjS7ll&8@e;Lbl6-au6}$D2u)J+o&N` zlD!1Ar1YIjdyEA9W!4SCfoJo)-E7k{e)Bq5XkWYOZ-vJU4aq7+nQWe36$ zy%ROh|M52B(1w2MP4)FmW(ePx2@J6cgm02VTmjjZd^unv&T2S+Dp=O^yHcMXzSTh} zpl0ytgkO7mwpJW!EQ;~7-q~`6gn_#4&%6p(rrFRJphb66P5@cOvBR|_tpkrUK0Z8#b-uK8 zxl`YZxiw5DqG9161)c><7rc--?cPtxn38aTCNb!zrJp1S%voIKw!rlV8yTisvM5{_ zhP`%!_+-XpaKe6Gn6ty}GNsC>Mu!P=hCr`(b!$UKa8#E;r0uQlTKVX+9|XAGXjjUP0+2G`II$ z#@$eVz+GGtvY&JT4f(qK7vrn-;U_zgWj&4<(su?ltG{ZA{RiNQZ`Sq0dA1|eZu!%z zC}SitDcV*(K@C7q?hcTVk34JUx^}mCNY^M1H?Eyl+rKa*efs{_NB;izE0$NDUrlbf zZN5iIGMWKgsWN+2)gOn~8pRHCU8sg1FL>FDP^`%4H;rjx31WQJWSpuOY7b@BoF&RD(@xXf4>F28D;Q!f3!=KX+MaFHXPfDl@ zont2xzV$pP!mc2R#T#pnvjJ@!Lu)S2+MxfrjQOgLL_BexiSDN7*_R;TdwGg8me=nX zQet2x;$+xw;+9Q!bNDHxDQQ6iSPkZ!JL2)u`?Vt3Jxtpz+z~AeZO{F)?ir7F4wKKQBSX0AX4P;`s42;Re&3xOxYi{Z7MAWH$@ zF1kNA91$SBsxkl0vS!+Xeg%S?Wp_mPJuxuprfjJ;2OoLurHji=D6uX-4PoRam8TrO zYU1w2)}aoMdyfZ%rWud00IrXN;)}1EW&$COB6VCC<$i#%OlS|p`w>VglQNStN}-oFS^(ha}DWH34TrBcLoxUDex%!3ls1G^^Wys(K(-;G?!MT4fAAr zs=C^s(G6XIp*5F1CJx_&B){V{4j^Y#AiH+ElZ1+zrU#e3eH^lSc}P8%PQ7`<0mA$d z`&?~1pj4+JQhg=X^I$c2P4jV9>Yr2< z7|G<($j^~!hoERl`fq@ID|>hu8)7m;ZFi*3!s@_TSp2F4STak{HsZQ%0TkrX@CJl~*yl`+QqAfRQtl0ug2;%6c{#8&)+Z388MPJk>0NUU6 zopO)@X924@doK6VyBolz-ESsDaG4dl+s3%ypp3lx-(elA)h`7Q>H$;SlXNlLiE&L? z1K$G7EvrsABs%{o*$Lj`tEeP zFZvXtjYHqIK9zm~j3yuKBon@PC;ErxVIB#i7% zo z$^tEFEiNtbSec326SNg3r|zr7lD8XfOWtX?BPm|SrWDklh>Fc7qq}e151)u+e@v)h z99x4XzJc6dnxHB3+~@Rn$g#_^Y&(`(8*XIVr+CIqEhs9&I|UWX4W29Zop2cdpH%H% zSxNI5(+vX0VR4o>sxSOLzRiM&>{o8aDCxrqB+N8%A@K=X(%Awzt+%P#4RcZ;uzpx(i|uO1NUu3#x)C zy?i;Q9UtK98JBC4{7%2H;|x2KwgXg>?FRq_e= z0TgLBGq?X&!JGhE>2I-5;Z4yfW?o(tnJZi0Q;Z4P3C0PQ3V4HdtyYiLB(Hher$GT1 zRu{HkZ{&gO*C7}{5_C^pC%;?R(N_nW$406x9-rZK0QmKisv*fAHnJFuV@;mUh2mEF zK%js3$gL?yguoLHjniMHeJS9F`HUT=1%IY<_3JNZon^XjP zKj<5JKC0%EuVR@-`x8Lz5tjw?N}FF2pS2OGGNz60;_$xMuFdlTvHv}c(tNByCGUid zd_FWm#^+plvBl@n$$9v(*zII~oDW0ny@7Ur!wr=gRXj)XMX;K4=p{XWcpoB1fpa66 zuAly$5SJ_V^XkWM^Wh*wEX5Kh#vhrXU9+2HNb>o|FVw1Y4I8aSS@XqnlycylYAMVT zMa5%%0CoT#TtjM`9VAwZkDHOpoj}I@p!$!D%NfdG9b=#vl5;nJSm!A8^XJy<^P;<0 zSN^Sb^9?FMPzY4km>zys?^KAkjF;5lyM!8MTg@;cAPY#c>PKQTR{53LPr}G0)_!NY zFOcsQNJDhTKsw~@R(4`l3M5QA2Q0oIF}5g)#ZZRzJ$Y=U@<(Lb`9@+>udxrN0TnSi zZkM4AIcBu}*<35^x}|q;0%*dtVIUg5s-86&jVLMvu8pF&;NY8sVOfEO9{p%QnY4}3 zoZvnAB%OneK|Fuw?u&tv{nzT%O&7nsWQf>L#7>sC3d8$S@@Gk&HaWlRMo};Nn z((6w18(7qn%3H4`)kDxOj)3Sp5EyYclC0Td#^rNP+?}T{I+s?uK*SqrxiBfciek4U zIcm7)37#TOlFXRulUe2(Se}w8gi*v8;!&;42s)95Qq zHsnPkyg9d7+gu*Kge97rQix}$@U^O7yzp6G`vG7x2^IH794ev+Cf-6KkqY+DU;(={ zU*(%?&atzRv0ocHx5&M>N*s9RjgLYXBDmGx$z|speBUBPQe6Ii6yfIq+E=XTjI8`@ zL3dTr#tY3bp^dX)DXgGXOmocV&(PK*OBxhL>cq~hDGzDVYj$&0`i0*{K1WH`5jkvB zl8|=@SP8AV5;mRd&n+_Vyo0V1_+B$`gD5V_#XMsS*u}58GIqsRe3;9WQ*7e}Su{R; zdF8L!M0tdu827|V4Z3b)l6)_Cj?>%`oisq5jn=c7l_YK6)=N~xChXpUcHYJ{{iEU&8V`{r7y;lc`$)tj^U7`7oLYpP>A*Y8`$bzl30V`zCFu#}ej> z&q$||;HOw%GrdL7#1zw&r&-6PxQnyiW-=-m*a{iZ#lt2Drobk67>4nS%Q#TU1{sko z5%>E<-%VrtLuEmNu>#4Ls%z-uF zNPk=``M08THBnb1?(x0)hl){jU4!n<6`$UBipKNiQXQ-sgPtJa&bBE591=!B%Mb1J zSkE?u+;?6@vwH?6d_7h6`;&0#9|1aG_$6(oDl)uS&koGGRk6+p+C1@(SC$(JE;GU( zM4b5SE;O&9{R*JNQ)38pz?s->d=p{^K^7eJ)FvJX=C;y{?e{svq=WD{gHZ%iP{}eC z-fRxjy<|UU9}4LOp+rqTFV>{tSfNv;8~`%M$_%6$Fi-G>ELPhd+&NafTnZ$GupJU5 zX`*K*`D-5wBC*2#7Eyuo&;uo0QYinAWC2a(ovS`h1oCf0Z#I|UQ;NN4KZES|-&9^@ z>&fWvgz3q^yO#to!KlFkuMnmXG%^r&TzZTEKJ)Sxb8!C4yO|i7Gm*sQCjNENfZH3rlAFGUROy>{2W3 z#Y%cE)#z?d$KHVZ%2raw;Rc<-#C(#&1Fs}OH|nM|#r|8+T9>Gjz}W}70i!Nd{}K&q zNn=Xda%a)*;%9^KOwz}7(<}f*9>K{&-WZPE<75n_c-4(Yhu;dop&K&?KqH#Dz&~8) zEv@K#B{}M`&_@wtS)Y{oX5dK{P}k6%iCFp8r+dH6ds3_MN8pw)e2%-j1NPno5+Hb1 z7~zhPzgfy)9C3Q}uH2QMn3Qc7zz*=pf>FIKCi-XvttF28OM@=ll?5-r@_Zd4r8Gic zeqmL_Rp!y3$z-lAj@OBo(Qa$na$40Hvn4ssqmVS#+sWznd2Hp+3>Y+n2DlCGFK#W9fFGLjue9TPtioB58u9XOYKPp%u7ru{#O3-ZE}wPEQu3MCw=8- z$!M4G?=0mVkNr;$1g`^iBN@htJ$^f#0&yc!x76okUO!r58Q&8y28Gt+MM3tfw{NS1 z$a+Lwelc*gySV6K`@G_{Kdpr#=`FY&7ESOC;FP+KC)QfxEtQ!YAcDhCSRv?mr}}%* z6`PK6{E$w_1ak7QKPuFcRS^SM2e1gprd+vt{^yksD<1U&>6U%Z%pExTXjUrQY^5_X z(2jYnUc0KP5?!v9o7Tt4_r^-~1sVv3u!oKZ9MyH8%F?$zv8)~%YYQvJ)XVU=J349> z%-MKw&One&Ebq>|!If7|Tcr1`_9d)+sa=SNlRRL*A(l?(MvwetE=Xh+^y7Gf0(vmR zi#q~Xj(9W*MA)?9=uF1Y8CrAQl$2_5?J83sCht)4TS1HOu(4;+qjnuI*;YW z7QLOV#|px{0mxnV+9~zq3`6_szfKUY(%RiCuV1J01pD6_+7yI#1NeeoWGr`%Kcb59 z65)xTh09mxoP2b4#<~!9Z90hI1Q&Tiv2q#crduy-1JHF6V2%q|{TN#LHz4AU2|Yze z(W5t<9$5MU-7Nt`rGCrVI{oHoeh9kd{LEf?~`Z#tD)@aCQXO;#` z>Qo`qV8`7nK=Jg0E~}}1awUHfi9ykLEL><$51MORnkIOgSWz+q8U_TA#_DhUO0~FO zYScdd1ktl=**-4-+6edl1(&gA0UF_c9{#(~?6r1B()sBqq{30U=DcDkm{+pOQ&`;h z(nEv|Hl4@4!P!#V+)I43dcaCh7i+pp3FJNq-Mv|Hvv#sTI$8O&H6Xy zy-&OisX^gd%$bT9$WZ6Cm-Spam4D<5S6ehPCC4hwe>`l&Yz6l0!I>@K>K)~0@q6il zOW^5|oPR4Dcr*!~IBx2Rd87zNCB7zDtnP0Kw>#_dDzrq(W>nKSoA) zr~GSc+tgQ6XM-1UJwmngWvxQQasCkAIJx*qE)e0WiAb{zf+}cy4d*7h1;e&xS~1J# z__=?ny;`O@v%d*wWa0flQ!Wk>^u9Tswf_N1UTe2cE$3UMl<;5;dJ5f%u%VA&NBPCg z!I9JBd3|~21h!TVjvoV-L6pif$`l!2Y#KI;P{5MZhX(w>6J4@LVR3v3nnn8rDhWbs zpMH5^;dp}fwo$)w@NY9e1yjZ=v*B`Mu>WX)=vo%DQD(DlC7SXtwce&NpdFpIzfC%U zD;Ge%Un2!x)-e#S*|)O~Kp9KrQs>cHkZcM zo9kH*^C%w+Co>FO=9+sFTOaeykR!PkX^cf(BC&E0z;a50$D9=5qjhXy>oInJFB>A~ z&U$zuZZ!LoH+{lJ)8HIqphVxiyF0PtiVNAfGy;tZ#72EdVFiDx(xAu~G`w1Cs5(zE`x#NzSy>8yH* z^$2TDe;s_ij`M=GAIX-3k{}z02jTIB`i*;HOI}=`wBDrN@7irT(6DhnJ6(w(EII`w z!?^_~NWBIo>C~LkX^>A!BG1*H%n@XG?bKT^b}qFC;zZ&Yv0-GSK&((@aWP=N8$SdZY{RCNpp<5W<^HiJ2|@S!N0lssXO@B9PIMAZBl(Qn$S zU(3z=QGCpJb%Ia%NgU0fqUP3U)NsO zT64`g7pC6bLJ7A=TQhwOxBR~eShQYaxp!>%5|~@lvVskP*A`KSfr!dHW!FIA#N_BD z0L23WjCx)~NI=Djp_=0?5jpI^WgS|5o#X%rFL_YTFtz8)FEot>d~;{QslalT8y6#? z60;JA$EB{hExnQFmrUq?Y-Q8Hi-m6@8)J#jXI0RnfIKs&@8OYC(Zk* zcOh*V0ZJSflB7r=T_Bqv3GQYkb)CJ#yRjT@e$b zz%c#N&bjY5$yZs3o=uXxPmCsMhX#}ue7IkbsCv%!!S5F*<|`z$B=N85b3{zh)Ho?) z5g5N){!L=#f)I#j0=)Ml6tYXGgsFQZE1#DW3SCTPci`x=d#JYzRi;z`{bAt;vU5;C zdr0@EjBzj3Zja}oXYTs7{rHK@dwjoW8bb^ zU66b}X&vd*;j=bSUPpRPDEVki;uHPb{d$@z{ai^Ko(FGuB9-xq>TE-pXJxHJEZ6<% zI8i6;uEyVlr!Sm#pJ~Lb?7H12mQ0uX*2+QlC=?I>hyHSoTix)7;W)!jytSIDP8T#X zL^WURr@n4lf~ar#4V+lA`lw%h5DBBefNVDFemA~9@PUNK{;s7d-EgtD>Bm3HC|#9g z>hxfiyE8U|H}VT?az;Z^akY!4@%X`+f=<(W&+EyY!zhP_)zk;_@`xz`@!rD=pYb(} zfVp{SIpD`h289%E2awPlElml72mKE#qX~sNCZ*kmz7BPJ`B%s~$O6XgoM!H$>AEO(vegi`9MAGAw8jr<(>=D;60Mi4ZJ$fZLW%OcKfR-4gB6rgtes<^?35R8PRq>+9B!s z0tPKKt-qbvB0&xjpa~+q+UW-JhlZT~$-TwKbuPQSg#MLR3*>JiXtB;EhIK9+IIvz9 zrC;JoFr!L|$@avm_^5ty*iWx z`EpNAdRH!B1%4soOt(kf{n`+R(0E0`zvO<8Om#F4G0_7q-AB=>c#O`rw>h06mU@12 zv|d!5OR~Txj);C$eteH} z`7&(Hu2qN6S5fHM=Y1q{cz1>eJm)zyQ>j|{)bC%|$V=hB%lh5qR!307Z~76QWOngN zvh*9gCm=(O&ZJSy!Y;XD4K;#PU3X!~HXrCg4Xepx#chwN-@5gYm^o$93zsNB@!K=d z3QvNK%Dx>I?AnB*p-m;fU^5ETmmj4BVeN%Cq1qjrIsee;lzY`!Z?tby2o?ZQ0beUS z%M(YT&(i{kA<-_mxvT*2z|6Y$ixxsDu+A}wC1K9F<3*?RHH;!)dq%`~w^s|RP#a~) z6*0wGbFqJu0wpUyd2j&OO>BX^v`55Mm5}|o3#)X+>px2;lVb7l(v{&{wg>s&dIDlw z0<1*I?kA@AW`LaRC~NjrT+WRS;pzB7);S7Aogh%h%eS)1?w3{n3gVVix#Alp^C%9z)ch+ve{kI9w4fy#-UNHiDRRhCn&zh>| zx72n#v-ZfWF_Zn$Qpk?L+xw}pD?bj_I9hB1()oYX*i*KGsevA@#(U%4`qy!O<0A6A z%uwh?waEhOJ5p0o&kuHfJ-!L49A;>kIG(kqaENya!wVo0YH<>4Q2fbcS|86HWPWpx z3M0AtMj{6$ofh4_6xHsZl@Kum3A7J5i2)rJP0=x6*VW=Q3y*EC(Ukm>!^+-jrD$+i!F@S3*&%gGaNak@fd3R$Lf4=Z0Y$b*^dB?g& zpeD;3xIu)Hrw+UY82-|;FE}NN)TrD-dl~ej-o}Fg8(P8;^0Xui5oyg<_H1cxUT=8! zC{31K-0$B$nop|HH`niVoyQh%@_(OLw_rbxzL16Sy&DJnmJ-g6!`GjY%LUuNAEd`Y z&K4(`6FpVRTjO$^!@tcgeVBL8#`!v4odg#R8cC1v!-q38Dwe`wa9s1X4mp-+Y zC%I*=-mum4a>nirt%`o+qKL*G;Uru4m#bstZ`=>Mt4^`pUc;XB@bO_JSl_W0iR%21 zB>V>e4OE|=+l~B>5|FO_osw_ZM82tMKi z0lA9*k*Ig$!aE9$7~%b&+pg-u8itOh^6~ulb@u;%_w(P~i+^Cq9p61&uKNXijUGTK z9s*e&4krQzM4^I3c-|I3Rj*t70sX-*yDgs{A?N~dm^l#lxShMBos+wb z&{evQxVOD!F$MAZO~}TGTfn8QEI$`o*&PtH*UdlgdNIl7Z?9b!Odh2SLOmXvRKaUQ7EyzW-b>3}Rx1VgSe? z5Z9_XVKC0(LIPMQPz8PA`fXirk_bP_FWh{V_r}6+T%sH>D=3+&YX51fneV-sfMTY&Ay;q-xM&bBD*VhP75D zA!zG#zC8CX$iQ@NP3u+}2VLbmk=pGE4$A}dUx(2oxGw&0)jvEWVsxaX$V%CRtD z35Kk(ef$70qszO}VTf=dcQysDy8}{<;zEmmeq@l6kG?+CJxBfd$g@c@Gw^ zzU7j_h^s8gB;Fr5B8>)jvyHY*s!rw<#pZZY%KSyc=(V8|%#!_*Wjq5b<{r`s5kDnf>S`m9cpgiDzLaRKO1U4ASf* zAMXoXzEPvqj&n9uYQK&Ml!PdGuGaX5feKV)F`q~Hq`~V`7eW|7q*{}Z)ChpfpzM?P z<>U!_X}&uplJ(=_A0CT2Rj_5<~#3GV)+>N2`5mtLpJxP120 znA1_9G`8@`IamzP&fsR1OV&Anw1V10e7H>e0Ri{OXdn{{%}LwCTC0m6LkA;KYE1av z`bJE;0w@=aKBgV&ykaO-L4cG?r~Xa8U{}tH1cSnv@)10%&Dh$HT^-`v6DW1TB5Zj(>EL^I$1%gDol?n#A%X`l^Hl?s;J{e!(z zibF(aJxjC_^6|bi5mn8{9$gBJSW`?F6*cM|Xr6&77d~QP_5~JG(nYnD&W+c-UL)ss ziUAX&jBgoGNYsF?1t_`}!0AylTtQb~IF$=7XTK-oeInS)tBHkk*a-<*XjP8-K1@B zB*jcw9=qn6f2sVBqhju>zKg>Co7e2vWD1d;Jql2uVf}-uNjVaqkn1ha-zC_7@_D)a zL~O4-Y(uWj#J5Ap9d+H@-sVaCo3sm<;*QVh1wc1OL&p?@N+wQLwXxx$(R&7-lvU=| z!g)cf2?Oy6$npjJGm zQQ&NwXCj;ON9yG`mObKJK)333i=qlhN&PlG5O+tyb%LeBeN;7&7>^-i+F!sqt}8ag z+%t3jZ0EoI*r}l5iRHbL4FZK}sgM2-L^3C_QU{hmU;YJ9MJXTatP41F9sL3~pL0_0 zK|JLJM5N-Zb}kM*um%M@4^SG#4tlJo2|JDFXwNTLwF<{z5-%G`V9g1@(V%RMEY$^RC4&tYv>-94_RMvSZww2 z1>Nl9tAw4>3qgf?mdmS2xozVd8)} zJB1+abq2}bd2KhDow5ftWe24Rc#m?T3?f%`@li?|Mg6So-V>>wJtn_$^v z-4;zeFB4Pr#*)0C~?w}7C$;5S$$)@a~`WmJMDX_lVWpVUDT<~Je$Ut@D~y{LD4x0>f{L8{=|sv z0CHw5$@fz?I`kQ9h93ka`Yd^V3&8eDNZzz14iS!qS#&`a- zjp7J=plnJFcn?;-5~HeyJwUyBsoe8rEhT(SK`#~px#|KKf28{9CeEqn!(LGDjJ8cz z-0hv$a~ZYRPbssbG!d8m%34t?b3d0h{wSp7(@Kig9{Fe2T@nT-vos6?jrY#)JT=^y z8JF`|!9_KB-`bwgVo_3Ys=iO0kiShc?K_wrSqTR(Cr^p$RrOL0APk>ng$}kbY~Oe4 zorAj7B=fLw#;k{UC!@q2>556TJg(gtBkgvHsXn_SUicboEAO1>@Hy?s|C29dC|@Ln4_eJhsnwB!ksT&rEnA#|Ib>iF*mWpB|i(b_O% zE?Lhe`^(H3uP_y0?SZrGS!85%0IwPD>uWc$58tULSI^5jE7dkqEyNSH3$inZP?|U5 zY{xQs3bP1!;GbF2p5U}PQqc$C?mWo*HklFo&nG>%{RXeFSB~dw z5KXmt*t@5;+gIuWnGTJ{+NakQ=6K+E8QKrB0BkkUZCIV{qqkgjAd6I~#C~XuQim46 zLe6Jj8{ZIAkO|S&Q2qqG344dhdZycth9*;%7^~)QPZB{vtM3Ij-eoK74ysAK{uvH_ z*_H`%Yl}@{H}H@6td|>! z3vHXBcU~N?n7wF==c@@hGF=;{Ft%=xochnJQ+!akLK%@k=v4y1r-Vk+y*D=1TuU6A zS4L@lJJ-C^Xah#|^Sc`lJJ%Lx-;9N%1c|A=2zec7!Oit2l^XLm`(+vDG#i3&q640y z6zZ>%)yH`0PscVpL$dBXboo6KzUw!A-`wn=i_H|QkB{R^q8<&{ZZ<%&0GIDvl6PR@e z%vHmfz`H1RdWWq#@9j6;cZh~*I>CSI<(f|Wr5kO5lNPx8A1G5lMPF9< ztpbtHv}&|zP@&v1?ecF`5xI^XzAzoHrl-(0bUREF-<+zMIr&aFqJP)$xzaB#p#4Z^@-t=+(E!?G5$%tuV$RvL?7)AQZ^iz_+pNlRoICxS`uXp= zy5&r+eU9&BY*dt<+OtxN<6}JuC`rpT^+CWbGT2mH+oW=x%P`$wxBRNh;N^AM-63@s zub}ld+wBs`EULpS`onjxL*LvPD3v44S`r$Yk@#$o#&lWXd0rd#n+TTrjbqi{ncG>C zsXEoKs-mfWlswVP39(v!ZdNE*aWC@Fx74eMv1h_#@@-KHKii)%C51K<;heERrq8{j zNOEA$EeLU7?#t6c&z&1w9c$es;MGPTi@x{=F?P&)C(Eai@R~KyVWY^5WB4*d+r}`R zn$_!U0Tu;{(k|fcyFZyu-D25n`ZU2B!l}E}X=~EumN$oc_EGXm(mlt9Qy@-0@^+b{ zb>n>aJ?prA^1Uk7#NKj+AWgO1!yB|owQ;;0Kt;*i&d$jClQkW7;ULw|oCiXb)JdZ_ z!IzKMb(ni7bzi1w7zH~}m$!EcXKsGH>ShW$AL)&t^M?dG6YbZ2h0JhoaCz%?t;8~} z%d@WI^GgeHWrf`4d$X!A#T{{?yx*}N=xt1U^iHkp+4MI1PVpxrTF+l99k7QJRAEE^ zCc5uf6;L*F-aEumNUx8wctADoYg7F{0}WIWCtuF}Q2zh&1x!~KN1})0OX2JPW9fGh zj0CADf)pS94}6mY@Qr$-1^Uu|!#9!t^3&v5b^qI7kW3$7nv$D>der}OUoE&!40b`< z|4ey^=LWcEil4S3@qg~i3D;3Yy&3$Uc?>|eK)iGbjlp97|2rc9_+bp%7b7?9{s$f^ zL2MbtM`Nnk{~I2{OMvU7cONzU*Hx5JQwaYm=S#22|I8+czETvT_2!EmhmJM$##5X} z(EJoyB#~!F8@hn9WInK2Z*oQf{&?vp%MzO?xPKgJ(jb?<77zDPji&JJg08Hn>r7?z z=NCWRujWeXPk@EgOYlc42UFkIn%7hUm2c4f;y?h$MnT7AW+0p7fn;DBDu!gS;opz_ z6-;!7Z-bWPxphyJk!Nk|T?7q5{H7iHVISOAy6@O@lj5fA#W;E?6cot@6d943C z9i*riY(_0(sCgUL2;t{+Sv!DmDf5dM!?YFXw!WZ8L{{J&uw}iG#_r5T`Jx4@< zw#9?26r!|$zkvQ2jLwDE)B=4vXVE=16YEM=a*iOXuOB;0) zAl(sEOq>=QEoL&Oz^x`NguA~(!SWw$Hd2cc{#1*Wl#-Sl7K`18Oix1|&zciI()in| zp_LxjKQTl3I!ZVb&NcHB>@@goWpGiQ;$>ht=vf0$k^#GH6m}os?Xk=M33+U`*pGy6 z3nH)`*a-;P^4a-nvE4%A)>C2Y~jRc_|oNAVL%8V z$eCO{1{B`f5fh9SQL7IB#)J$Gj3>l+d7iW{OKhUpH6V`k6GRUuC;}TYEsS0y=HI{i zfNzFu+7D;@y!dADQ2h8|nZUbc()Ptq%>MTkUP(og?>a4XQ9Od3Pes{0FVV%|&;~US z^?6Bxr_N*1{{&Yp9&61^>5qc>gy9*=NMw9`1;z#tkQFiRqauQG{`uJ0o@3W!^l#nC z5Mbx#XGfm0j18UqgUlI>Kuw5 zCz0*biHDAmrdZYjoGHcSU#HxED8Pm!T1CR}yx^-uKvAiRp)vjSL*o|^%+&a%;{M+< zS7XDgspjhQS5JYUTKN#~EhQle?Rg3ehtXtuG209)?*D5(D8rY^i_5tT6Jc^IWok#?7t)o#v*pYJp$4ds~*4_eVlzH^`8?DTgMs<&>fHbf*Q>O z-d!JW6!};Oq}9lZ`=lI^(iD{y{_%Fbj~yxSGP^-t#sN%O(YHI+cq?8&#&`}=r?;0| zY+omu_=003AB17^ zn2gFAF=nUFkNZyT?)Qigd;Rwky59=BFT;{6F9V90E%rvfku4bk^AR3a)Z77zLQ6Cf z8fIG1b|hf01Scl&sUaQkz7!-P#16UW8H7ZLz&Rh6IqkIkK#K z!B_xxd?x=%6~`IL1rNIMT@d=goEG*47Z&i59%74lOm-%z=(W`G5;~lM^nnLFuoM0n z8%3pp@G3;!?mqt*l|9Lw>z=twg~U}V6%2(>XT6cQEa*LPWDJrtUiZSKiYO;Sv}+(7 zAnRYVhVGE2WhfgV!B0n9tgwJ=6YRv?L01q(tLqp+83*)O@gw1I2*Fg^|1_Z=(drLm>hzg$*VT*l!q~K@89tqPlq1hoyifm$Kd~D%cSub(OQsngrA!9Mb7dfP%&=QPA;|*H$P&EnFI|4OITwjaw-7+{5(_eyimk#A)3Yld zvs4T1I*1(3`S4S|S_EZbA5wJV+?$x<6ki7t?NP_+J||dt-$j|JVX6(wG*dHLf8UvV zbC0tFKJad`?oGFfxgps8k6nVUT>0!q^8>LQ3Cu*-%Erts4 z08=EyFS=xHg%~!4?d;))0PRWx?{kmv2c+=e*HQ0|ZT+sF2qWhX75hGP*K~mfW}FS& z=fHn_guTKpVp8y?VMuz!QpQ2B8^411kG5SBB<0(?Dva4t`qgm*9U^5nKm-hF{ip=t z9nSPZAfX=~t=yy-(k_DbkSH{@{K5bpFOAH4KxKgJf8mq>ARLNGNx8qD4C=Tnz`X?7 zEykcK*l=}WHUN=fQn^GcT^SR$KLN(73&oJ;GHf);;~@ybiaPR|sB%g=`Jnoj!O(U# z(DxEhkX+J64Rc$6++YQ9)G$uZ;sdZn&-2_sQ3zELq0FR?k!D|sdXoLY>gy_{b4KG3 z#Ixq+3-_DbLPw{G)R)H`pYIc34-)@qGLEbdCrs_ZqFujhS10x7`Mtr0`DQm=z(~ir z!+$h|o7H>1Dm_KGNbAoa%2n-2$ND{9O1xBBhE8fF>vrD-YDXEk);_E?04VB(qQflL z`8jYUwBB))`0LTj>KwQq=l`99dnjrvIEr5F=bkHyTyTq(n-!BQTC5!!*^&B@H5cG`Q9(^P*&O#?# zNrzb3(@7y7chy&m!#>D5>`-she10q0mgi&lvr0nN7oK@;f2+oLV($g!DRf*%p(pEJ z;E+<`enOZ}_i{V4)^xT~ZJu1yF@hX1Gc(S<*5aP8YN^`!ys*;gYuQlQ;HRr7!yu(% zMVTF{>A2>5?+1F}g}UreeKMFZ`h3cX1hdc6Y1&J)FqI58Xsz{B)$d;bsyfWp+!Ve0 z-MweklaWE(*_uHF$aFn)bSFdc`uho4KR(w@uLmFNS<@R;%Y-AqStkqo)<7e*q&kMbu zja@s#UfmBH;lJ=~hw_ub8F}-gIJqE3JZcAzV6DgDE7E{>nZ@|0X@d9bxYLaA&49(hYL!MVrABgh%-bNZrGL z%JJsQ1xhMxf2O~U;$4Gd3@s}Gyc?7JUS4v0Y|K*bO5-1mC&OVSS64a;nK67ylB$|3 zjW_drW~-nFmm?dqcE_KfwByCqfUobTX82|<2X*)J3d5AF{okKGZXW#pEMPFO&I(D* zp9Y698N4FeyN92Lt6l+z_e$5VLtQa_oR%V+7d<$PNIPnrm4N}+Oa)uDUqT|8uIWlA zTlv%JLJScLpXF7zo4>l(7q0^3X8Y0Q<#5fD zKUC?V?qu%yS=eDLhaDd>Ztw8N!8a84q+PVeDPro$8oD2O*5^zfVm>9^r>N!}y++DG zE1WusHA=>UnMEJcZ+Mk1M!A%c*i*b$#FY6OocUtv20P~K)pqqB(rUc)-js2ctHp6}*LvhWLdBzW@I5C;bZD;s7t(6^u5PhjzS;c{ssN0885oee|H zEULmAX#LfE(QzWu(UrGfb%jBJHQH>eLNPc`o6MDbIds_qvw->{8QfO4$feS#oS?Z* z#WyAHt<7`XT>i-!DR7XO*ca1UA>LkaJJBGBX4UCEFKJfN^w0pV;F{TLx7{>o~?Yd*C#9M zN7=~PUw|}}} zhj}nrtac(G)$tgjm?7|ot~8Ro`W&RnhF$OJ0(>^B%hNsn0k!2gStY#=3G>-1PJ;^IvoMB8b&7L`; zTrd0fv7P7mS5{CbT?GyOT_rE-}5O;@~fXbKYvk$D^Pr7=({IGi7)LaQg1h#C_V7H#%?xAdR?EnDqYzGpUw|= zu;W5tGNapIjhA1t@Lc^9}7m9JpBx_oyguN4eF{|8amA|dvph>=1xk=N+o1l>E(s2wlJltkoD8NI`~mL z=%pBT{;@z>>1AhJ=991lQ6(;ew3w>$jEqjSn>-mE7?9Y_GRino1nB9d233mgps!#j z-^T(j*OHGQFqQ`SH3E~1R({TwrdD3wPKKw_<6P!O{pZ3WxUCXCVZLr}>aEzw(vB$a z3I?HOIAgTmCqHo~v%^~2sxLZ)>7`lay4kKokAhsWc5;4Ti#kp4I|`%?#7Ex1i8M)B zUYU z^)5@Hqi_rs_ZqHFIVDkKbi-^J6D<&iW{>5`7!%O&)4g@{4NK$;qba~ddFB_2PI6kg z;$73yDOteqj3XbS#JofvzYjuZos!wXx+;XcdngEzW~SC$sxq+y#8I{#CBq2Kb0*;X z=NLlT#GA)82gYQdsv%T;zoxYS!6D&5mWX+Rtq04KDWYH+&6BYL1#_?wWQUQV?$_u6xO!tNPc#z#_x!fV63+cL z8XgWd(M5L-MZ*I5;Livx+|G*LU$IIkA|m1wK@EMPqiW4XY#|NnaRga)#4M0eQg595 zD{whuoySh6x>s?Q6rflGiSxfN#1B6$N-L>%y~DRs-KPoZU}wSa7Snxr={i%Xj4uhR zB2$`v{HW~SeDmlQRJCm@5~L?8EyX5PMpxk+z28Mbi$?QZ1>Fu4C)T*FE_K2!#yjGW?RStx;Vv8A3#oY9oVI_R&~R0L)Ac zK3E38Z}3~{HjN)@9cPRjeYqqb9xC_8-|TFyBg%o)V3!ptfmrVtY5cJL2d`n`(>>n1{yOnn>n$xau58bkU@!dC2EeF8( zUi!}OU}H`1`$Xa1Pyc|69j@;#8CcfW&pUH5uSJeD3f;QhJ>4PTpZxM#Q=!p4g<_8|3DU6suTpWP~lB}S&0IKP3GovdmrI~^mGG_wIPAqU5hu{@6`U$=-1Iw zH5ZmzzDVh(-bA_a#F_GMOUD&-A(9tvK(cP?L_n7dz>v{~4BgX)_?H{8XIr0G)?1Rv ziq>MW=cr18g-a2q&v*U(9S+)2H0@6l=x=s}gEK_6i#e~re7(*6K_86SJ;W!y*uHd* zP9QVuvguVN$p?`7Zbj{Z`|Ep4!-XXn_q*->8OW$fw;op!B6C)nNh%4AkMZs8En-|< zlSLeChZ8JG<3f{TCDl@%nKhIk{7(824klVV&j@<SN7)EyHSZsp7&S}dGbY8E+0k$T z8R;=v>M~H>S*s8aO>3{TIDER0jR%2MHorM;1=6*xrCAgx1^!TI!W=&jngM zn#&7Ru74t%-k_fkK`kK&4djQQ1ao6Fyv?6E@U#Ay;_ET_8a66XKc4vzKEdiv36}X~ z;qHzLG;1NZSeNQ?hS3x*p)B4SXWXu>7SN!TBotw|Sc~gk+0a#bC4r&$lUE3CAJfJpFi+(;F5M$E-t8np8`*J3nF` zQgGT?^K!SgKugtLiU)XLfngB8!T(o^=4VnN`yU=XW&n^qu}V6pSH(68vAfjy3(MmZ zOgG>%yFCLUIgxp~Gtg4?qe+7YB5(8#vL033On#ZS+@)cj@hgR|iTafiGH@BZ;k6=B z7m%d+=W5T=UClHW>Ka`}DbGC7*S`}abR)|^o%l4uR?qFVa|G5)PWBCS(cmG_V7Ef9 z&u}p$96i{~8$9D{I>SHZT^~e#QLE)e{*_>?)G*;!>T<6k0^c#GZ0ks_EDrJrC5s@w z2ib9{)lIZbv4vO!69=0#%&PTG#zHl(#W5_(>pAC-^~Kn+++aHDuG{uS$J=y9N{ES} zBJHplB>d=MB(WX0?>Hs*B}dh{0MTm=UJEbAMO5SMSiT!I1;8lf)SKuUM-ek!crM97 zJngH1k}@9^twraA7ws-|-D{tn`(-8vgTWh*;N8e-DEp4xe`LopV>I*bj~xpY^U<}6 z20iZreTa9yV`jOj$#pdDZG~@pq=%Xx?OZ&a zcH+DG^T=!a&mf1@Sbx&=&3Mh@>c*K_ZI7f-l&z#v`RnNO`#jJ%>Jdw7znrYuj@Iv5 zuYG#;&p=~N93zeVn#!l3N4^knzR(#Mwk=&a&)Dyd#Kn2Py)Xlq!^gM_3tHbbPhbGM z_kOzlgDL;bzQW(EO6g{1q>+8x(qcooQqP<$F~7&{12gX{(%Li=CiIB@dZ#x~^7 zJ6z#Jjkv zGA!Sw(jt>J@y3EOY$&ZHmsujM9gT0wAWja@30om5e+@z#FHMdm&$a;yfVJpQ1b1X8Pw;9yX-?xlg$yXYSzNU#58elK(nq7phdHX%R$e33RHhaOP zE7#4#&~er&`fO|Dr?upj{XtyDUt3#5XoUZ`KR%d1JX|Di&<*6?ksA z?$39azRrFd%t6&?3VSKNcFvyp{{CDR5Bo>Mm7*)TAUc0SD!xRDlDysCyzz7NQ$cpf z&d08q!Tnd3j5rrLnDCRl#?LeT8+(0(pR1;w4uht4GW?)ETb7#7=t`hXr|EWd0G+d& z2F>4!39-q)DKE?GRQi51??#LEUfC#C)*iUqS!pf4HTezLSD6n?kvR5IYaBkjW;o#_;lF)yuY~2^$gqX#B$BOhT)@Bg4j*t}-UYAE`3lSsx z+2OYy2ek;*`jvFD@)sRMwiM2gW+Dz;HwLpc@KlXl1tM$ZXkMu$Q70Y4JlLv;_1fa3 z8w%IBS>|E><8No`t(Lr*Q;W(|!KU^<101N$ocD^`9lia`)3i+7j#-JK>8vbO; zUA3^WlV|p|G17j;W>jt^kVr+~UJBpd(c&?)ANK#{9MVcFR)hgzhg}9X7Y}EQ4Tiz{ zAkXI6gQc_BLxo32#=IwAB)aggX&sJp6@TxY8L#P^ieqTR8KU;c6>~~OH)CzD5G%P> z(w1~_1iM8|Kjmk6VeN`*Ix~wsKG(c&I|nnxdhX}@h_g8|rxQ4BeMzy4T;IFQy7&D| zmgVWaeX|R@Z=l6yY=XJ;?QrAG-UGt1t>tMbh|BqY;#6P<6^UMDGx|)O5OaZ-ar zyWBN~!C{G;mGE3&o=C-VF}-8&W%6P>gcfJ_qT<&Jx7L2BDU_%<6L4P0eDy4}171N; z&G(x^-urCZ`PQn*pPHfCnY!EJ_NugQQ*DfTX?-fAP8eDL?<|B4VbAAO8-If!V;KG_ zqc@MKKx^fTSPtLpk+Jwf&ZdK*md-KDpWEjMXCxr|9F)oZWv{_x_B4x7&fQt8=(kGL z2a2$vD7U!OBZj+7MB287Hi@+zdFjb?B z`s+hNx90bvKiU#pUY;pDj=O&F4_5fW4Q}cZmo0lo6+27Z+y=@-JtL4YlPd8 zbacQKp{Beh&Y=F2AIqVX1Wd?HB^3=4mnV+Dy;L5E?cDI$3wokFQ0fy>R9AOkopMel z*e%l4@nHPZdDq~9^5J0cRx{~fhR}S%`4V?Bk37E(v+b?ng1wL5ln1B|`&xU0A|Kc5 zPhJAR?Rxm#?jCM5yQ&%??a3dCZ!TrW+q*(($t;6(ak>Fx)KgdU3SsLNz}D;COFy_( z<#y>nK4OyS2i>5$tfuw2M^mOb5{aZ%Mny=k>uIZY_g^;3&Ke-%aTSp4O$*6{`5ceh zwqi{Ag9@XbTI41ZcZ7_HlJ0Hmyq9<0lo`-d@0`9EB4H7o+f!NhbR~1X0#w>+3)lu$ z-e>qNtKJ75ELO}%WaUdouCp~&bE_feD)zyRMYkGcfuCME%<${?k4LOs4*8* z&X=PY&-K2t590B@kSP#YEbI$-nIKoX@mR_}^``C@IzPHWH1D=zDZSsdu7vAlEIa9i z!Mb~NHdAYvAJ8LJnsQ!Vcce*7x8kpP(zMEg>7>t?TH=$hOn-}S1 z`JyTI7=K8_YyBbP;1{NHPNZ0T zG81S&qiIP)6b|cITU#}MLtX=CLfO66jg-f^kKCGAw-Xo8lOY3yi(eUShD!F`QzU2B zP>a&R-pPEP%621oh9g6dT*Tk<4G1j$?j3ckGAb7@!) zH+tGvFi2mMX^=Gz&Z%8#HGGd-0RGu>>%`CPmP{(VJ3U6l-0Yc-ZoL2VbOThG^4&o^ zN0PHgLS1W-!)R6+Q2BT#52xCpURFog&k{+0@0s+AOcR2%&G|gkp7E{ax<{*Bp15-d zt(R4#34vz`)(VZ5RZpH>$D=K^N}sPQXsMq)77SB}D2Sp}*Gi8!bh=g%R201KO$ggA5MR_ZgF3 zVnE#-YX-3*uh5&+(LOBkox<1IGrk-06y=Nj8uN}i>?rQar|2KD)j0!gRMNurR;BDI zm~Qu{jqdhgJqq}p6kgv(1PpcP}?Ub2UizzvnWenshy(hK472CMas(#~MjHFPW_N22z=K&Xj854es4bdG$TG za3^-pyEcss>ma@UY9C&+Ygn>OG5A=BJd@mb65bwhWbAzO_7!&N>G=4O@i8x_BSL9H z|IYDzDok%%tk@yiQ^0bk+su?KA^HSU>yC#Z%=8s9u)D#z4jM{kW#;-=Ods@e0%5_4 zXxpaJ?5^&!-2`tUpTJvywob_Cu^Fi7F-YNSJrdcDZYxgp=8TfPJ4p0=BgT$#=PpT7 zX37iA{2OfC!)uwwEb2Wy5nyIwY z+wJ9WCJju*${&r1G3YTYeJ=nVXftQLLnlfg7l7|Fqk_SPtV~VmKe}0Gd`$eA+-X^& zlz(*{Q9(!ct1Fd((j6Yrff8QNoQKC^J<|9wr85w+MTnl*L7<5-W}3}G!6c5erep5|{py*vV@I+832|AL)*ndy5=s9bcU7r!~~X1$z!E%2Q39Y{7hfy7v>p z6cNER@-97$WT>~cQ%ZSE=vFb?)tfO5oTrY>U=%7M(m8`outaU%>{`CGua{h0pHue| zh+#CID7+50S;ndsBkX7iw|*16J+K5sH~XvzPEel#l&mhrX~HF~#AOl~TP^}(`zVnE zu@6ly4-nDtPwymcI?q0H80Ze}(o^^{r90Uah1Q2J_Tgi(UELl6h2pWD?P;ZyKr3tP zf_sgz?f|w|5*f`3wI|4M5%7o5f^E$Y1TtDMd~|py-s8JI#!mA7sw=cuw>v4nb*$7< z7F+;I3&Zof;h)N1h6`=*oStTwyA7QU*D9=~h8Sx3YAW?#7XTrj+&woJ)L(pVr~dIO z=!_}8U-6J$9T4Uc!nEV!2czceJ$Sw3wszwVDNS@e)UsXBx1Gt|Kz(^|*Hu$WqZmAFZ9++JJ@=Am~;x6_rvt&NX4pR~q`AVUufKOM<-|Hu{qgeUX-_E9h z$}5~M?X#TPups#WjbW)v%Erz zX9lGo*a@_!%JP>TY>GKp(l;jZpHj9bcyK&1D0~Dst_#&qNHz`kUo-+(p9wA-*iSNA z$NSXU6E1K9gCZMd!xN&u_ePVf+Xh8Eg-KWdt(%@do=;2uh#R!?(s7&t+UTjNm*vIH zXJL}ytk3l-BQ6o2$0oevW-cAFH?aS}fQDTsXB9xoOV(TmQ<*bm{2X5bZQftc#=<6a zYD&|&v;T}w-1FcL^~RH+9g)DfY}L&6bDlakT=U!V%6$tIia{9NvPAqhgsxX-WeMH3 z8joiaf5>MXn}b99{X0~8@BB&D{esNmmfQU{uGYE$9VS45p@U`hzGA-`v+-xV){qc8n%9wLlR24%aoA2feX!U(wc=7SowvDvr*84pyo*6NKQEln;PZyq&};SHXto z=W)yaXWEgmeXE9;>4X8i$2=qrp9o9SetK{t6!7j#{3?_UAt$c)v?7+Hu^Z*_4Cbuv z)`lT-ADUlEi8$ICeX{4HVp(5K##)y*vy0sY^?nhnycU}ZqgkDW%x`5k%oH}gZnUid zMny^xrVY{qZ6ebO>VC0?ZwHPD{D*_1|F69-4~O!7`ySKC*vD4LGGqNJWGRttC>l$Y zE!meyAtG7RjHQsB%D$9Tl8A&5QYfL#79t{)L`q4X^BO(B_xb()eE)ji<9NP@<9no- z`@XOHy3XZue$LOyQrynfUwIUUCuf8We8oBA-A_>nF~oKky8*=jkW%yee0ZWxVmK!< zH{bZTjIb?i%Ewt_k*C93hK{`F3x2}=gm>yX8_Wa^huX=)hM4o>rWl{d(Uwh|1najE za^VDL5n2>e!?v;--G*IbV;rLOq_#-)k-ThTZpxh_IhUzS$2_R>^Ij23B?oZL9>Dqj zr7a~U;StfadD5?k&G|EHbx?t8oG7-(%E@@(Ym}Ubf>T@sj$s-RoHRZTC`PY+&bWg4 z^zcrSsP>$I`7-;Xpu711TLy`YI*Bm(6cEf5^UC$`jK_ceRzCem;O3OHhtJn7arfjY zIr&N>Mzk(d>!}bl`0)wX)b;qvqa4#b1@G9pygkLhf19_g{>LBw3>V?y+ zi7_%RJl3Pl{aVC5nCNp1%wI!CQ-4LU$|oa1JEYePmE!z{^tYtsc(sC(??)(A zTTyj+xNO@(jY$NVCwxzM=R_Yo?l+Q1;0+HfpJbJtZ?0)!vLVGwe^HFCILhPj+S-|C zoJAC`&wpq%gYrRqWg)fqkh>P%kRB;qurwPdhLw$3NOU2~DvwB^Wv8@FOJ5Xe5)yim+|#Y_qN7 zN^@k=>WTZlsx;7Oc~tnLH~f5`VySSm#8|`9pF~%JU4h&qCf$7a5jdBxcG~r@6#W@x zc%CKLuYbj^#`QckhRH^w|9#~79UepTwBja5Ujq#Sk`mggP?!khv9$2jU(Cj|4DQRA zUxHcsaA530leP_Q_Eql$IAF8=SPwalAcLbgr%HNvg1U{}ns+VzVU4-$|T8 z#Qv|Y_eWe=-;r>^Nz(C>E80<%iSgX|2bjVhmblY)!AUZWlzFL3s_(vTQHO4fU=SR# zJfOcE!LVWD$B@BqHXT1@{1(kahJxdv$V}BWtAT=QAAzdww<5*Q8TehNeWeh;@@b7= z_$iXF`ljf{t1rq+4_AiZlr<>2D{i8y=Grmu&xSb(MxR&3Q^roQCNMpPIb<^%u$@K( z)f%W;%{Xrpl(jy$f_E}@l6AJzdG%g1JS<^5USdR-MQm`Ny0dZrWugAbuH3g%M%-V% zMpr~BMw>!M!Bu;=OPgJ>J^UO3N_!o7bvzoOBV#5+iG^r`+zjNSEa#Haalw%?aCL6LK-^BW7>6N-&Ix}mO$q!SPJ zwfd^DJKwatWi|Lj^^3ceb{tN@;gj{90#GWz^ z_AKE)L5vd?n$mODlr^4l@?$HkHO<$8<`h%DlHuMRU8W~yxq79!kS}X>aLc9;b#a78 zj4dytn0a1@-2qjDUzhl#4ngoDgSCF843tSkk1UFFYBVs$RIH-uwCtm7ZI~eD*P!is zoGWcP&*_x`2G>Q}h{)RbbPN`ov}KoxeeI*)b4WWyOfRl|52t2Pq6+r;`6)!RC(AOE zL)F|n`2!wu_~iBYngD;K;Z{q1qWh(aPr{e>W9NuuQit$rlT9+&j*BY2z5ml16si`0 zu>1^&Qzat}y94J+NAlm7D3(Ic6!|5Asu1-~K4>Yb8voD@`7_k(GyWuDI#%J>98M)2 z_Ztk!L_SwH+#n?qlVivxqP?MUs~VXQ5e{ z5aj2AI+L zK-C5t@|)}ZNw{<}J{<@*<1kvCleLg5naf0kxWjv-Uad$Ym;mIke>bhD@O)dV!1IHZ}h`-3|kcBJ?Uj!xNAuieQ+O-}JT1EG>NG$k!mnvD=l>^)}+R6pan7 zqH|OyYEmFz@$&-@0)6adS79V_HJN!6B+V>4ED3NB^T}_Ma;KLZHfYAtPj9izX(Cj>;@BX z39lu$8!se^XbRrElDQogA4{MfssjS~j7=M;HSBY2B%4DG&u8`v1-m0s0>UTobYn?o`>JoKd)>=q6LZjPId~=7NY@VGK9V~+}A{>GSM(-XM&j(B z-!mT>IRwy$+0&OUMuOqb)4i$ki_pY@kEODkJUB7Dd*gNXSmJgu+OmZ2{HPA13j{PA z1fh^3z}042Lf^N>nn8=)vKO!CeyjxHGA!z2xBC?@C6YC8{5dMYzp~`^J77Wg{33Fj z!nJH*0inN&g#Ysk?AtS#dXii3)DJpRv6T~oj+?V+GmE#5!)89YkJ@}} zyzH6v7*4Dl`p}L+4-rzHs{8H%;^h$m`+%TQ$}4NFK(l#T3`bi+3HEI4&;ze6%o1v} z*UiVc0%MzxrTs2;S`^oXQ;%lN1BEsP801ec$=N~yUhBnY(A?Nw>pzu9sqIWhPcNni z{D2EsxDQTtub5IvCID%(xUY`JQz!ap~#j;r*WGd_9(UT7u zQFT*8SI+GOqY@)VtBQDKRbl}Pc(tEX_NhJ2y)%P^iqj}jA-u)UCRLvp-nzQ-<1$15 z-S!vVyx}7##CUjp(OgoM;09vO(l?KsO**{AhqeO|M10Qf07ICdc+||KAO$mu zWITIIQ=kxQ037hh_Jr&7#>p_mjm%$tVgpaJmxfFTgVF_Gb{ZsdNC!*;RGu@3fFR-r zq0=oV90N0lX$JiUGLh*i9&xvYy{9YIG+IS+9@jgm>jbZH0m|wn%I82&MSnx{AD}e} zPT?cC>F!TL99~yP|E!lX7$6E-bP2Ee_G>tT9n#E-xQsy*5RG3SudZ+1gS3uoZ`|+E zFSr$+SdUSe72X|10#182r6DuSI}^lzf$8A*e0ucW3DBjPd9niIBT_Yj7bHXp)nFN@ zxG+NdM8<@xb|7+L7-pOfhPDIArXh+Uc^nZLp8~;4{O7_4R`T>GOQee*D} zuXRxkpe&@^NwMi2%~Wzr1;urvLfz>1wV4cWgj-aw!c19&dA>o_j57{FW6>|Kn~F{k zR9?@bZp>m@cdOil!tsqGjBv6f2s+p?yvfRwEj}LwuUAE#!&_-f9ey+}So71c-w~Kl zMTBGsMXxZ}2hI&mv+Bj20bc7x5TP`Lf}0yy{RQx?&mf_91M&rLvRU$bZThG3GrX%j z;MBJui`J1yBjTgDbG}22SrXl{#|JhnW3hQ`Uik^BhSy-BzHvymRsbl%GPk*PTSQ0k zov2V}2${?Zbs1^m8$EJittEP-Nc>(9rk}BYat2+2jAbAc)Y@Z`gILhp*&Bg+o69`) zLCbS16Q{^)Vjdje&8m{+NKF)5h_R9II5@h>?(p=k`}5fn{I^zsj=?BR#02~qVR!!Ou(0|;g_@eIW|#_M-7 zMk4Pw3sP0uj2;%BMV=Y00{H48#9j7BPP$n(>Qk?1FHypYI&Qol}J5%_|oHT zNk2R+{3hB-cv;rP%DqN}xNL}Zts^hlkacZIM3-DHW94QVJrbutgRRGj&4(L1p@R-t zI1CBz@H~u}(oQJ0CKQdzhpzouC0!i*egTGoedJ1R+TiKKeUN-tDqZ2$q9k!3JWKnm zR<5Eo{}A2GMj=nF;N%WbFBw}%AeUR5?GhmV0|E2l5{sK+_?9dW=HjEU`+D6Aks6+1 zl~f5&B6JDdP{_%<5Gb_A@lk8)-y2&%Hx~RvmWFODLJaf#0L*vSwBbFwdC#*{?X%&{ zS(msa4VTq5%MwBAZCEL6*%yT&A(*|kJd#LJ#}5Xm6bbCu6`(#gTnnERC-C2vz zYpaOPqH^kEj}Es1IRv=I6L1Vo&B=Gq%niF|yhj~I_EBwT+U_!-9XjdJ*B9YFOv)?Z#I?O>sCpLyh`RjG9$jIHk7awfr4L1b2Id2~Yn zlSsFabs#zxzUttHPJAKtqSRv=KT4|w;-5j^T$SJu0DVz}WUmznIh`M#t9m?G31#Q# zQ*ztatuQ{G>{9Q}R(f0G0$Lvmk%M{J<~-l|=S~-q{Rbu77%xuF4TkaDz>`(K~1{)hhuA0SyWapq|WI!GAiWl}kC z#D79V0&0v2?=oIdD9cxJi$mcOnx64J_GaRzp;~hkgl;=jmOl3A{Po>}17|nLgn~0x zg8tmft!99a)dIKsL#mOy+H5}r)3|aZdYG;3cJ#U<^rTMrKW>`E0 zBYescQ28Q&OH$D`osxw?1915d1BW0xg|?OAsk5jj6kcqM9K-QcmuLCckSqe(8ZV^k zOBM;6*Qo_~&JEf)NK>_;KTaSGNj823G6d?Ie6;{PO9M>K3R)Z_z)1%*^+k`jlAp-~ z=E%Fv$~ffvCmAFbiumG*LTXpA&LHT4i(rWo`(l9_mv)~F2a_=Yrg%Y1Hh5v;Aj53! zRNkxV&08%fYY6k!fDvjhVIn2CebYG4`EI}quIq9yL?2*bh+j7#w~ajLUqJ`Fi}>t0&phH4q0Q18{}4U{JT<6p7}VA?_MX& z0KjX37-?K7v_LIPKHtVJxMSXf=KB)vXB+f|R2_MCX+QAIWuIs7x_otCQpgP3ey$xS zKwI%t{Z16`GNhI2}W5bBrGld?2UH5&5Tdg4wcz0Kq6rX^$v5PDY{!HV=E+K zYghk1yqg{pMPo#M?|Gr0qjkpS-d4h0uctX`m8*i^RE#dkVw*{uWP0v)tADAlEMZC@ z;D~J}Eh^K0MIpo*O?wys7=`7kYPUM5DVs8iemOJR(-0b{(=gZY!vPv%Gk-XLu&56@ z$yZ_~H2U}Aiiq^6_yuc4KbJ0hP~!IzD2*s0J+3r=;9b?RSG*V1PJK)^2DSG6j|Qub zi662Lu#6k0{aJ)!U`aL0tglncY2ZgsqyC(WaOv4VykOvPz~%6^K@y&?r+f&Ar+F?1 z4t+W=wQ+EZ9Zy-*k{$&$3LHMH+XEe6@y82MgjO5?=}yg*hDXx8L2h|5YjGgp(ib81 znhB9BrQK!zsk9#Y)+99zJp4F!OWc$0DkTUESTw#N>oUnRW1u-$2fzVMNgaL~aRJh= zXW2fDHxn64c{X$#Foe;F-+*0p^HpT3q*2L}jahsWrHh9_LH+lX_S%bp-74y(TpuiZ`K{JM=j2b=RTLF)oaTxr(a)Ywe>{0+3LZ~*J%+j^M= zFCwxyf)53t|K-!$`=(GUa^YmEiZ*BTIFICA`BDoKB1V@`bO^f3J2{0SH$AcaNWEVN z&v-a`SI!O)^jsZjJQG?A)ux@=cw?APP9`tS2fTzXD$*Lv6u$=P*mZE5^Faki~AY z#D(1!k5F*oiP}(f+xGsTJ%hLQ$|vOjsjooX3hp|ey#U@DoULt<;l-^pDV!WU$a%d{ zZN{;`tH^6B26ngQR*M@2o}r(#JQ|%i_>V_?_ktN|SZ8LFO?l8)LcJR?Fa*ObN~8j< z+tNdBr`$2Vd$d~~qMtsMrj!kXF2d@|yYpApt-T8*qh?+|D^JmFmnwrs>yNo%bv-pS zBc`e7xs;=Bs^+=d_UaqkG9vAAILW<)g7EP9Ruf0fE$<9TWYUUZ18->R1_aYJ6lq0?6DcVUrnZ$;i(k z5Y8?+7;)GJtRimh?Ga%yr{M#t(Gn9$yAxm`veLb!iQ9VWq(Fb8>+s0} z;ti)&GX8c@G63#erTwkoaf+w038Yg4Waj!-^T@jmQ}*S z^q(zrT@BCXOCk_ZS90jiT*HNT>!Hce-X`NCf3p&Kk`qu;v5YJ@`)ok{)%1xvQRZ!} zoBZN;?QYW;VoFbnJzUf$pwp~4ds%3lX0o`L z=t0!lz5owA_!=% zJ+G6wo%W6=7`q-)x8sz?40kbmm$IVo0jFvehqMNA2D>WmVup4m4%bLyeb+xzc zbl;u)p`h^u#nR{pagEFGFQABcu7uZf4|=7{d%?0}1EjJ+{ELi@rC@CCL85L5883H5 zuooSTOQy1d+IK3edbH(wcJr^7A0rIOL4|8~>VG5|b`-!a|M%y)|95@X-R1v3`+;Vh zXi%{&PjWrdNQUYbWOZnS-D=?ZpQk_ekUDbJ??Za>yi)YtNe6_etSm?WO}bn3h(3B-7Qi_V zztasF2@K47{v}w04&K1O>PnLR9~|iTx|?oZX)y$(pM_?hzP!7!J6$FXW?b4rWt2B% zA#+{K4Fm8@Yv?)_0I_7Pi3-GUIM;HsIAuzlM#HZr^?!T)*Ar3)&5}p0eBE5Xd-LAmU zYP67eA&QLzaLsF(N5RS){(6=9ePCC#bu~=rcSQ)m!ZhSE@`0zRAKv+fLl|%wP}jW; z^0LY6&+-!$9~!leUO?uFD33n{ss#{siQe88bq%bfA*ym-nJWeLC=|toAtdG9YTF<^ zC2!$|supdcE7$fdlXGnGY38?Yy!h!B_TO+s2@KXc?KPwX(J9iVB1=L0=&Oo6B|CV; zD>|8O=(Ax(_KqU>YZjX7l3mW2Q8K@Z{e31F#3a>Lgz5fp?C-uAy^JZo2vboy8qcg8 z0r)WQGlV4*(7F0cE#%vc(a5z5&C{PUQ0+!r;r64pm9sT#U0FMCu^67d5Ma2R7+?tT zVk1)h&%3>f#ta!k&n7C$);<1#D1LVq-xBiv?$9b&cuDWR$}x0lf*=E40g3>oUk9qa zv;RDFZR6mJ^G)23$6UZv5G>ioTFwc~EFXPJ~v3$SKoxIqdYD ztqD)Y{Dc5}0!nE;-2m91x~>sa{kEZ_lD=hPr}B=cAddDxZqP`#ms$SAlOPmWE65|t?*upR~{xY$U< z_h)^n1*bu2*$D3Jn%OnOI^da=62p1cT_)KDs?O=ws~x@cy+y`3)m1~yG!ur&g`;ME zsEjocx;M6kX=s@T1#8p_4hNTT~vHauVaGfElCDyo+@a;_GVx@9|Mn7 z8ubGM#6>Byt3`zeLI00XzuHuK-HpO*@Tg7VUBguVecV1b=>AzpB&AsUTMt7O!;u?y6*V9iUg+&lGNO!MNe znHL+Ud>wCN?G0>x#K@rJJ`y6IMC231a^p}{7++oF8(2GDBJfO`WdbOvyF+7=m5#I%0`{{ve}rB*%R|GlJ&i{^HrORT zlHfiQbI%oaRys7S*_FZYnV@W75m43YmWr%+wH?qGZHhg`}(Yu}j#KJq+JF~wU{1~y2+3EH{ zI7M!#B#G!we+%*^A62An$5Fl`pz2!y(g=Id09646C(LuHw9isbq3|?zI$)i*)zL1} zk9HNakYwYg4!LAVUClr@Oy@br1R ze4v}^5q!_^!TKfsSrCj)w!opDRd&j5U}+$ZRGD)jHNF2n8IF;KQB31Qh-evg!q0Px z=JhAI&mvdaI5@jVr@|27D*nHy{^yytp#cCEO(h z7=GbkE0Cuobe;|DMZXfG$&mH-aU-WS-SMbiXaZ_l5kNbFMoc5!pkxx(t2UIPfg9~F z1cxoSavZGl%nOBJo9;{WGR`+t#?kV>M1N1y+-M*bDn28|K8&Yushe7GS|Bu?c>xNO zd^ux(a{pFr{7}V4t-6!Gz(rUb?s0y{{Xy)ub(bVY_6%*&rC6d9h!bYzV4ropFk=}9@UG%O4{}@ zkVJ8F&w(A)6C6Do0Yi}5fvq9~U~n4rlSz1jAse9VdDLRgbp+3q?3f zCI6I3YsrzWTn|g|snl^*1Unn%Yn`z;z_PHAY~l!mxj{@;f2V#=A~0Wi(z@xQ3eO^m z)jh?)g=MNMz7AcUA~Xi+wZB3WJNKxE|E>YT*2^@eh%4bx&P8H2e6r<7nl210pjQL4i9o z|H`{Dl`STm1&nvwQr>|mt{aHO4bCBRsTHHc=-&pK7&l`@>e)^V+|^^@{fCGu9|&%m zO^WB52m}yc3Ap{YnOsc0aCsOE`vHT!Iu?Q-I?|>}7sM+-MCAPfjEpb@(XeZvS&{Nj zL@<53107LH=H`R^1fU1_vNC9tu0h%5AWt$Ol&NJuz(ucJZNNub5*H><{`z*zK6?l7 zo|^0Oz->%N)@m|&`V$^r2P^&(_;+tkpjvN85LDZbbMzMIYzPL!VS%v&A(W<44xlXE}~uPts>FP_t~xs(8g48+uta0SgZq zTIoiV_@x`j1*xfihdu9^>jR)ZUldHNJpmQ<*xg5|{y0^4RKgsBVNzSaQ8zS>Z*b;j z_SX4H=sd9f+wi?Uhg+Xt@reCqY7w#~g>U-nh;jIg`gxKZ@ab*R6imMRUT#W2ZVsdN-d?0IMODZcX+(@57Kp6av~5KkP3)#+J$_@c=p zJN~{_x7JllzxXGQ6|*F!fQe8i+Ly@`-mRv;XPp0biffe`e898fU;$_SAE-vWwjDHy zT&r}ufT$=u-rfXA)MY43PS@&F7eA@YhMaPQ0+7a1#*SmDbIMuUx`Xx8Ci(r>I?VI{R2VGag`X2$|D_Df^~SJs-QJvxOiy_Dkqm9=Q;>w^VIje zt<@2c=RwD&s9*HvS>HSTD^g}|3W!q1O|inRX)8;3&JLVF-}*8$l-{RJZyK`L!yjuA zG0mUDh0-BCE`nKUdGdQ|pSEh^tar<=JP5GBW6^1LTU(um6CWO7++I_D?y`!64~cmx zQcXbUaM8t4V+NTV4k<5($v4Mr9?yj1qbR6$e|yy&wBJ|jPLAt8*ydu3Pq9vN>+{ho zCAx#^Eg5TINX;se_#C7H^uH#1T){vMxUQWso)VrL88C@#qim$B%(Gbq1|bmJqf#RS ztE{zo)WqkQW?M=`xl8Bo){JqtsyMD%5e}(jGtUvA!JaoDFbSc_3;=y8TTESbA9eEs>+3+?@Z{ShYn^;$D*(|AET;*l z#dAk@Y%ozQIO+PgO0Er2)!5OeLiBAVfhDMpR(OHzF3Dc>qaHY+BJ$NEIh$_ms)mZ> zg_ExfR!W-|!PVW|;x%5>lqOf-xC)_LM8Ae_N&;^4d)NsnAic#BwJ|91kz!3-HM3Nd z%4bN7<6$n4*1(ydp0+G;yte|$6RJ&qL13NVD2sIVLA&TSh*A&TIaSkDHXLE=bGD

oX) zwJX|4wY74-Jfk0UkkbdGc+b1;Fz2Wr8Vec!t<&&8An*IiK7qIB<)a^|Ri#SRSe zhVkYHd4WoEjXOnO9|cL9koMA*lbwT-4I%+MW~+q8&)Fq4k2`?m;Wuljhzeh|JQ-g; zo_lTT!HWzv_tlGe7c@G5$t-0ZeSdIT>*{mA(dS+GJtiP1b1I)JLP&hxqw&%nhy#UZ z4|8TdQWw!!fvVNhpAZ%1Lb+W1NoZ2jF_$J6>5j@CJx!z7{q&2C+MwX8?H*C(PK>+OUkDW=bas=f2Y zfgn!&4D#Qi8s>fVS>@J2KEg_1uz?>O+X!zFQpfKIo&GMPBuy4pA%#HoHr8 zblIdmQcHWcn|)ARKiuv}Fs`$qyBbpZnp8~Z4W}oJKYAgANC#J${>>aBXvFD<*}O5|(#B`Kk5=fsjrWj?pfcN1;N z{K<}#E4ViD~BENR+p{BqHT`&_I0s zOwi70m-pa~1m)q%EjFCD4DQ~GIF@y1iSk|b{u(#+`a`J6w}TF{W&aM7?Ya)TzWbE@ z)?Y_i{sE=}R`T&8imZwHxY2rs6IDJ9OIAp}?nJ_m`}}XD%3a3nnu2Z_eJ!L;Tq zBWk7}j1Oh8D+8Bgx5oLV;@lc{@J5t9B~!PK(^n}0oa%c(w0E&J8*Y==lZS^kN{we4Lk7xeM?}) zBF*0{X9!YFUEX72V~zZqfE826%4=|2%*LaL{Ey<%CrP4rn(e^JZJUod1OsX1mjewR z`V7}u>L-cF<%ccAdVv>j=w13WV2;$6n44x(OYRDyM@#;OK5M=k(T zG84HB$lC-ER8oKiMuB*}2>MP_C0Si#_EkQF7cK#b`hkRZ-~)=tH0)Mh2nY6~G}6N= z+HJkskAtkuj8Go5m7d!2(16qU6h={@A%v*De@o^N)%|p5wIwKI^L*M0OkO#{w-K`j)b}6$!?Jcl5lVx$Ej6TqF`)f4u zd{C?6*28y;;Me!k=#7<8#S-Emr@Iot_5SdfdVfoFAQTRvxmOC(drKDmX6FwB z^q1@pFUe(uv2-0kxQr3`#8q?(SeAA64I%dsts{+1N=gYTB=SLge4nfrm+M6!1=m$|o9?(luCTJ&NkKK39?Y|e1@{CPz=sLt zeJdMiAA#{P_#MzCBY>j5ZMYLbNTqY5A^krOnRn~7QFRo$V2i#JAP-w*Zv$_DC`7g< z8%ueZ6E*k%JKK;Cvr+8owF#RF^f~~*?VsVEi-#@*pn$f6nK%Mrex4kJs;~Cq_fUGu zbQGcrf?$@;&0_X7fBS!?z%T>@ij@n@Hr&;>mSUC*>+{eO4kOjzh znoG6^)B2|)U;AN*|D)b;M9zVfCoi0!@Fu)X1)8vD&Txt>6OTB1YR51$GO!0UwS~8B znvCo@Ck)xX_ILH_`S177u7&ILSV4h#f`+5rfwX_?M|;rWtCItHf_?tyLjD_z69xC* z(KqF#h_+r-E~ZL3^!ulSg~tGY5TUi&s?xqcVNqyH65VXa6Ppgy{-pVfU`}{j1nqp^ za#ro_i9sj@_Zz1?^gPg@U<{j0C{W}U9UlH_h{-C1geQ21HX&=2uglhGVpVw3`{qt# ztGB@3D&x(P*_Hk4vFF(eI{1|Ag$|f)Qf0&XhyCmS)7+}k!&m prememory0 [label="init" color="blue"] + memory0 -> prememory1 [label="copy/reference" color="blue"] + memory1 -> prememory2 [label="copy/reference" color="blue"] + + edge[color=black] + W -> stepnet0[constraint=false, style=dashed] + W -> stepnet1[constraint=false, style=dashed] + W -> stepnet2[constraint=false, style=dashed] + + memory0 -> stepnet0[style=dashed] + prememory0 -> stepnet0 -> step_output0[style=dashed] + + memory1 -> stepnet1[style=dashed] + prememory1 -> stepnet1 -> step_output1[style=dashed] + + memory2 -> stepnet2[style=dashed] + prememory2 -> stepnet2 -> step_output2[style=dashed] + + input -> step_input0 + input -> step_input1 + input -> step_input2 + + step_input0 -> stepnet0 [style=dashed] + step_input1 -> stepnet1[style=dashed] + step_input2 -> stepnet2[style=dashed] + + step_output0 -> output + step_output1 -> output + step_output2 -> output + + stepnet0 -> stepnet[style=dashed] + stepnet1 -> stepnet[style=dashed] + stepnet2 -> stepnet[style=dashed] + +} diff --git a/doc/design/ops/images/rnn.jpg b/doc/design/ops/images/rnn.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9867e404cf959df0dce6ded5222b466c788fb840 GIT binary patch literal 44320 zcmdqI1ymg0vM)R_l8^vFgA?3c2MxhJFt{ejV1qNbBoGMhK1hPYAi)NQ5L{*m4ub`E zcYitOy>sqA=YIFxZ@sndTHpHi%$ly6-d(%uSJkzC!L4(cilKZ8z>CJZf$Xd>S5gr^k|SG@cL<^CpaHY3t~|H~kow*fumgvQIz@`dYC- z$Sa5^`?wEq0TO`L@BbL_{}0)G;`msixyc3*zIyHO| zax7CxqgSlCXAKj~>?*WF2kUgVz1da-89q5cvVi^PhTN)S?=1I;Wf$=g}EDKJIj$n5Hc}H@HR+~F*cD&p0@>DaQmK{ooRxy zBxVy%lwrC5I<4aE|1>sX?hHpGh6jqC6=#T%uQsH#go;zP+U?ctW7MbHLWENSsocSqH=e$`w9{rU~9iqw;ujPl0mjD%yc;$O@q4};1`bD_z!8+B(_wYrpbR*FNX zWYRPBLTRz!RI|7#cJX+X`ZP%LuviLz%@R(9{L&sU@yo_18qLXeH1L6hl^V;%OuaB*5FVGG0V`D+&ARkB7XN?Y5UK`LMH{J(XX9A_f-x!F>DOsU90h6+V!HDjq3 z7Vryy{AbH|)20q88Lx81NLy|;<*0{(?4{}RmcONia^zZAEUIYI=H$e3lMcv*gy3)w zQxq)~be~Z5uu(behEwUUBg1oNb}knyha!QNt~|}tDcZZ2O--jiC`$O~DH@CJ05&u- z51p*(%*C;9?Uoy|pt82Ps5Hl{U7}B)VzrHlX!JbJVxRTNboZ1J(!|EbM5u2`8x~Ps z&?b{8EI!ukEjCOhVICIXjlhhy`P0_>xP0^S5H*I`Ahdq|$4ZVg_b?CVjjJ?>i&vau z4SVTOhOl;25?WdSW&ZOpZCY>G7ZIWNRUb;i$<1;erwXnjGN~pVA(i&%>6{}JX8gJ> z_Hrfsvmki7*_A>rpg}uPcWAo6dL=g>nK04#KJ!PMl8I3ngrt^zQNKBjmp4sv7#z!8 z%E;1}m_(aerB6dsMOpE#?avHzuk1#B6m1f59Mt{AeJoXR7UK(y=v+}OFtt&pvY+pO zsi6ihT<;wSSUtPSkX~&;MmbY2++TlEXl}sJ_$i+T?D~=1d#SHwdf6zsS7^(gLm+E) zu5+5-=%xBhtk_dr|4A=f$To&ebM}ZAuiCe9N& z@miA0;UyL>`D9l1Y`E2GX{>z8b%zRGGS&Cz#>aq1lGi^b(9X%}gSbZV@=DvhVh_B- zMnuFs+ASVT=CDqXikDO?$urE{E?p4vtDaet9D;~dv`>2%qm=1)QAF&gG9JlQ`cA7~h>|I?&!#yrC$~DJ5DtR+M!V?$z z;^6`GnKlc>o`MZIe9N&`@yX!=kWx4|en}LSvJHF1sz!Ss>0$4e+Q$O>nYlBGl&R|- zm-+giq#XG6%fJfKVGY zx)w9hnT16JKlsS_YL15IjTYJ=(Uz!lCNLp;i7tOR(Z07KN?TQ&lrZOW++akS><<5t-YdEY|zwwMgmxM4qU5>0fX44TT+#yk4h|e?0s$mC}*8MT&`(KO$0JL3Dvq3+^CiZVZ zu1B))% z#beteG8LU>Z`v-wD0Yuf$d;{fUV-O>Erlm}@p=^8*{%?+0F2^{UbK+-JNj}S9b0KJ zC_X{IkUP~0gVg$vNC2y_bWg?iF95(ka|k3ok6R@H8Nj3gBk9G7BUDp3h`8?YiIqkk z4z690*WUq39;f_Oeay`3w*>u3)pvl$?lBF@N~s{HHFj}saYD@>1R`8JDF`ZSV?IP)@@v^dIxAY z?R0pvvBYipW^%}`*nzM}OPGR@!wh7#=-bd83A=(b?BJPMWg=U#1KlA8M>RPR{Mdc+ z$1pkqpAHz40g-SHNb2U69zOOnD(y;Y(v1D(iSme~f4=6F7Ea-5bb5L)AxT3#f|G6( zF`Lm^1FV*6Ff$Nc?leqBuMP-9Njtc~>AIm}`jQxPc`p>@>ul5COCPqcx?Cr4_V6*b z*{Nzj>zv@8g~V=g)wxPh1dhAoH*k-+-r5!mL=O7!yTJov>fJ(ZCwc^Sl6n;_^eP}< zZ!NizskOi(yv0|4MaM?MXWOb&tb@X-mcaxi%RQF9Rwn>78ZRC1qX@)VA5~}+Rr2t* zBXt%qCYAKGLNGD7;lPC6qQv5g4ngz--9p4!ozX1PPG`?tRP!Xq7QAh-sEcNH2<*wo z%!~}EYp67`^96WL&?lQ?G}DIP+@Z;tZn>H)+}q!B_VlJR9Oji%$HCGePyVVCfLQ9w zMeQ4?A0l?7nYM_ju|Q8z{5{hq%>Sdc2%dsRRbj4q|8pV6dP2VWc}GoSe)X5kv3(sh zc{IfYNBG=bk2i8WyQN&j)NNEhjA~LS=zDBDeX331wd_hWPNF?vAd|yy9d92!hAyZe z?S=WY#u<&Y(;(?05c09lLU7^sQ(|frO(?D7QXa*y>8y*uxD@;F^n1JR;FR>rT(`J;SjproGup+~v=y(h{0pn?;Ec9kc;JWx8vc(;F3`-a#WWcpV@jDo!c`Eb-96Q_eUO5ia&1LFmbhTSE-akgzG!?Z4=BA#6mMScf* zPPJ~+NCv!a;t1;1ue=7Cr{e{Z|6Snxz{_L)V(UZP{;9pHy* zSW4=ARgt(J`m-)7K7NWLPJjqF$~bZ*C+__EZxK`cN5s6}xdUWFlT|i+W52l5Ls?a; z&^7KSO6J0!f3Rl225_~9xudy2q&%LV;+~txiKm=(8d*qVg;Cj85n>+()qmI z<0OI+3+5BG?6#)cdoP_4K(a^1JGwGxHU0n>4Hk;_f!^$?UVfES5D9-CwFL-iV{{Z) zACzE!FcAO2K_sQ3M0IC)Ozu3ZNl}06shX}p@aVYnCcg}nSx1b~H?AVPG7O<2E9+gh zwDx|W!9vgCIYiN0kCtBylSDZ;El(JB?(Z$_^>UGE#y67bDA|YmNcPtxEMR6b7CfiD znX7O0leC9LHf-vPb+F^bClFsq>*_Z{$}U;ilF+DCX0&mTq}7kGgN_VpX3Ce>ih>8- zW%gwF4!Zi=bA9t`bg13QNynxiTy`_fq6;S{U^fL(kwn|n&QX7Uh5|(H%vU@;4e#f= zUYMpPUZ%7`W^wjyf28I=PKJMB9*lFPgeK~1H5ul*#_c-;Kf@#gSIdb$C$|&vc!npa zH01O7L{FP98CpDYl<^IHNObH)L>)o;^Cpp+K^3(OG@op7u(kjYHt}Jgr4i3#LVasIK4$5&a|{>WP&p+K)^8|%?f3XMd*3yeX)ET#X~U(gVPlyperziCZRi27{mKBWH1o8_X~TdG%8q3TmmH0@`b zm|&kn@Efrl&5A6i_JW>T=>k2C7i}R8a+x*BI_srhQ6gyqie}oom{omtb3BwbvW*Of zxCcX8u6ju65?IJda|bR8JN4kG%@?CB#Ef7BKX>bwNK*BC)p1hdY|# z)cj&r>G82~9*e5rHmh0#x_PCU(l{Q}oLGEHvd^i8%GOw>gvZKoUx^3B@m~!1O+GG_ zHKiVapA*p2CZ~s+kuWko4`1z5?3P8cGoeW(ubl|{1|~&Z9`nI0G^s&`2iPI8QQ)n2 zA1Tv731_IrLtONY4A5&#W<>0jvA4BmiKL`n?&(#b5p~{Exu}v^2#A#$U#GZg^0c`^ zXsSd>;c@w(`8%kE8J?`HfFg7O_ZQFY?wYsw;3g%*oD;qy_sOnVuU$ z?S=x<;(4MwRw`4lwgWp;_-7N1;zyUR$Q=tAU%CP8&&%D?#}c4};LDX;XJ0tnEh=bS z;6_Ho8?8f@XwL)FZv3W-(X%T~nOdGdfoO2o^%d`DPjn%O)ANu~t*XB1>PpFAV9X4( zJzjA$&M1cNh&$Kn74&G@a3z|wr9tymw{(Nd3{4Z*l!(2RJB3>|-xrytRNNfsf-#r} z%=bgXlvzAT*#3a@@p7?(kb<wX?6@M9H-&LUOolNVg;GmG!e9=Sh$SHuZy?~q z?m%}al6_WGX`s>J^Z9CL#nt^W*MW=%m)Dz#bgbe7ppa6>Ta?RaiyJ??fdY76IqVn( z`hhfJbH=Bm`7qpUK0ZQ}!FmpID67#f9~oP=5so+bSp!wC{{}vZgsPQm*sc@jvdHjo z)fBEG<5Bp`N)d@{KH(8&=l9Z4O<<$zKeYaL3RJlji$2%{&eC;WOWghNZ5HKq%%U0no7G$-ntlZyfYQ(9k z&fLx9UiB)Qhin;n^}1VXi1V6e9|$y3Cf}E0JmOKZJsKMWCt=31=bQ+4GCRe`e3&MR zB`oP?**E$`{4Xl9$Q&5BD#++o0^5mxeWyl2VfS(-U1zYFuV*`)^V@oalTXw; zQ|}kv!ieIN$mr*BLW4zBn)d4WBvx9OQW;^$gBB_?tQJrnj64*&Y*rz5MwFiv}JPCU!AIyz{{K{i?HnSxDj>+yZd&IS8=Xo~A z>vBr3{*$7D`TWDU3YCx+@uVxiwh8Yl(7DFhH1$(@uYoF``d3T{*wTdqa{0}~wLoV7 zxIGd$agnt)+H3{<{x9eNC^5J}%uz1ya;cro(x=)KHIk`#iMs~-A8~L;tN{mCzvIMH zCEf18wyEf*{d1YdLorU$*!wtQ2jjWR#%1L71+rpzg?*D+5Sly@SoCIg?3Ns2x_zoU z;g*jWdvcklKC7T*ERKf{cE-6f+PZvhMjSV*E@$K4MC-3ncI2x(!Ab-`;RXyXX_%Jd z;oxMC&<>dSd6$-zCOQ6STn8}9K*wza3;F2vzv}&8@q}9E1zw15oz)$MANr})o#D37 zlOg!jW}Zr5Z@<)zSvh_7GKpkmaHxu0SWCu~qSi228Xv@oTi929Or8&vL|{Qvs}GA~ z(o>!0XB=7z#UeE(CMqje?B=ztzvZR1D8Wcn!)hf_6oN~X~NiFu8awcz$K#os0WELQ}4jQrqxt<=-D*ZW7@XLJY z4p9Ekm_xESrORJjY%s}RNC?a~PCxtM5BofbJC*Z+$bguj-Y6H#N6drt_#fhBq2frwaw5}kw+k^OwvnrY4XvpYbP-7KL+jxthK?$|98 zZk$7B%d_4z;3{q05fL4p35nZPPk;+$qvY~GD#oH;_bV_4g*1c{^po<3G;yqAJN{w7 zL*i>Gqoh{a^IucygbFfxc77?&s%{hJre6=(W=qEcmmr>9(y#0FXay$h*s$KZCV@os zfafEz8YSl6%#>O7j76!B)C0#d7)t~UfBbA1u^n~m7ON=V_2%UnhA8in#<9>S=CI!T zX2-Q_dbn6hFsC}CpjkFg6ZbUJDY?${Mv^-ouA34i%67T1?*MvzT28f&o&G^Y;aGYJS~NQIf$OKd>LCrb|+ty%o+1f#{d9) z-97XUIpyh*bv6DB6;U8ByPo@-#Whj(KgLV%mD6&fG|FdAl{WQt$$*?`@@0%hIuX>6*LmEYdQPE&fUPXiArwNg2=$nR#RhDwwfX`LqY+~$KI1)zR zx_Ht8(nDgB=Ss6W2c#8V<#-CMOfzNMzN(Z{&>-&s@)$0EnIM}y)KAvfG;+_b*>Tt0 zv}x2gZY+$G!K_gg8wgKnI3fXtKJCnYA4+%N(k+>Nsxn0UO@76a{Ab~}Xvr!JB#h32 z_jg0-k-zxE#w~Zubi&g-n;d;oj@t^?GL_M7h<>l46wvdLX@m<7$52xT?f9T+vWD1F z0B*nF6XDlgKT5|HOkb>Ewq2|0HyUh2zc^;MEwV~vw=OTf5h*6oDN68o(ucI6s4w=` zf-3gWP*=)lL&=$n;ZzULi1f@#icm!;AJ{uoUE08wu%c^qSKx@jGd-uwx!SF?(U!51 zOLzU2ac@Spmf#cBj{GVMTuu^v+er17lV#$O$#F8*8sSC6=TTKBc0{PBaj63XDmljZ z?5KZ9ox}mtU#e%XEVr7Go)ggisDV+4Wy)s1SVnPtm~EBbOo#BdevKu%M6E=dfjus+ zjZ%)GN{wLqHi0a?<9R-3)vDa?Vy)l>X?2rD*kMdqZ>+iYiXPvXf(D^One%Xfi{WQ@SNd+sD!taHfBa<*dZl<1>l&DR>&lc!EC>JnNb~3DOD+lPJ+pL z1@-OO;#)h1)Pk%X15M(8>Bkz{EZFM0g(FN}(4i{AvP0IJ5m@MUI-AdJkyoFmG=p^p zLNv{=p`FFISPKo`kC8OXHsWRDr1L^Wf`?6%R~AvdXF=?^WL4sIc$O=V?Y1+RyU9qm za`q-LbtHv6P4ZpR#c1kFORKyMz6cP+GLw3%*OzZ!e(Gn0)_cz!Gv`g%gv$*LsXNeP z-c-qQia|*NV@|S^Y*pm$50*5@tjX_g=z0hyhj#H{S(ia~oO`qx_+|uZK4oiU&y^aq zuVE;c6;Q~Fy6PUM=cn5fX{8#!Y49_@10<{cV7zqFyPc+Ps}Z-wwR>&)CqDlr_e-JP z4J9DC^kgNUiqCYXUnpspl;4SI2V;|E4)|E(8C{@7vTmMyFJ+tEd+{8GPJmnA_h<7dVrW+n&wP7o$N6_)#U2>BrWywGEs1TKj`rXP*Xt z#jyFSE6)cI2eaOPfv(%Ycpma~>xvx_3EzgLjXXgLI(ZG}@T-;U?#Q<1;ya^gjBg}f z;!Q1(Wc=u0twr{f8C=mxl&swQGRGyF+U6-cIs%*4KRO*B)z9WYipfW3p-0jNd3kk6 ztCCc<-&$*0yOSt90rB;Z9zyAcEdp;H%)??Pg;FCKCHpo8X^1jCHjxy){JX$8@7;r; z7iS_EH5p?`v#&m3LT@0$WlN@doJ>Xnng+A}I$=)sL3XLk0zH*JJ3}O0B|C?OHNv#I zsS`2bcYv^rk%<-->vHoPf}mLUVYJ=oCmvKV>n1M&p zf;@xQJhe-jziqEOq2$>>YiDDYL@Jj0jnoGWw>5Fd+@BRX06-HS*t+rdLgqFck8014 z9%K9>&GUAwMl$5j$h%)d=_5vJStv?+iUP5^>09=!_`fj#b0Nrl5usyT1KDWecJF z0zvY`3(2E!vxnJL2~T_`S(5JhOaRp z21kLuad|;e><*Sa3Tw~CDC`ZAh7S2X!(4~kM~1iv#oa!`tQxH&?K(zlqjRC;)>C4y z!`cRo^Yg%`Y)AgFT59`?cqDtJ`e-1#=aY`}@;gA>>4QtqrvIkj2u7XQ`C-D93+I9T z+Tf~`rvKYt_cH1SVS<0mH-x0?l#(k*tOK$ApdmkxVJxegq5;@cF?XEsT)IuKaDj4> z#nc|+*ib4&B0CZt^-4Vt(a~VR7PdzhRYr8VmMVM}nlbIPOy53$T?dC6Cww`-xPF#B zC*S}!uFx(MZ_xb0;U6Mqq-N?#`C+q1fQWeFX`4ZD+OXcTn7$S{!Z~Q(R!cv3`IA>+ z*#6RPO60ToS+5PEVJ)x^p!=D9%|cUdVqnHPOf+lOJqT(ax}P4w8ns zVZ#wGW=Zki|Flb@ilr@9EpB{sucl=pD9pJ~CpK*uynoRke|voari)Ud;~jL2$ItBu z#ri3%2t(;x8d+UFHOJAN&_aB(%gxUyqBpJVP1%yDnAxe^vYg3iP}QUCS5}c{kLRRj z7CKm5<>^nK`k3-svwR@yaN}J14xq{^F>Nj+P=GXLFMR&`@23JO{-mGs`SeGN!UGBu zze)XpY35@eF+8vctije2v_qt)m8%|^FJoN8keLyrf89cp#gdL;l4OM(VJ+dhw9wEz z!sYG5n5agNCZ5<#XUc5PN*Pxb_8R9cpUMcY!NIIhE2}e&%EP2haCdnpa%5l}7@wCe zm=I;PK`_*tAYFIygj_eWw;Yn@jPEzPlE;BA8!B5s)abi?)~K7yvvU=p@XgC_<<-p7 zd`F3zN*53CL%K!i%GSj?ojg=w_^dlOiRHXpPGgui6DJrl2d=hbZQ-x3UU5F+wOCdC zGRn`g(o*&62S=}zx7*C6eKWRiMB8$XL}?k_g+f;m6LkEY>;}tM22-}{E3e{f9HQa%UebtT^Mrqh>bMt&duvMVC|%k8oH z^Pc*=nkedloF5is6DsV56{yIOgfj0@{WHa_t}>V=f2T!|J+s0~S$|#@w@#kQBoXqo zl(tnnjkv8I1;tjK)y4}FIklI)NxG{@44r!#t-+74moHpz6A4?-P7g6A=epz2A3FY; z)$XZp?J?TJ=R1_pY5L_@r@J~kD6!?6!Ku@P(HI*08rfQyFWg|$&1&iA{`6^=(H<2|f+Kj%MS?Su2h}Db zaf9pKZ_--fAcelqH+wUBwdJFRK0$EQ(AG~y6MR{D2PMD$!)MNsj!3&JDl(~6Vr$uc zL~QOsB#X+?jXz6{pnNhn0xQLQLK)YBnoYCN8ZQ-TBq8JZl=z-T8TK6@Pip1m zC7Zl|^LYZ9!K(GzmG;>G!=!2|6o*1xq0>!zuMKB|ll~WJ*-Z9AQYUymtoH(XLOkmTebuBck|LXSjoCcxuGg9)x z0*gqIam!UwxK>c*bZys^*b+)k5m~nf3sXxw%`%X#iH82d?0PvV5S2Kv*Lqz(*_&qsFfUoRg1 z8kK*i30cqI`J>d5)i%@(?g0J{uk0eXJ^z`C{@I-{?f)jCNG)g6Idgr2H;8Y2)&;6? zPT;;paJPtbL)vQHx(prN>6ED|ZNCleO%l@U0dLPIp`x{?nWn-{gF`^8y(#C2jsa(~ zkf9uHECX)SsYV7iSoRQ1AS8bZE?XMU!aglvAF3!#mLK}$&5M-Zg}z^TUECiZ#N5v_ zq__hpe(vKu){VIXcpoqc)DDaco|YE>pZxkyqka?r?sn@TkB=1Text~bvl!B;*vkmn zjV>~r=@SMuRVZa!-i~wWk=C%#V6~09=Eb`U?&d57eBBEc#@`VnVi|&IaXQ;eI#DP% zL)NoXI(+#ceen+Ppx}2^Z5>UkYZFYkW1^s5uTEn@*O9*4-P$9dg;$1B^OpGRw57=1 z@)Vy`x%iQsm{ou~At!GV+eYH3z#_+nL)2`cZ;wB~>oZX09?Ma)cneBdo$JAch; zgNc2wPZz-IVnN^`^U{##O4c zBMlfV(t*wkW&|OEDtP2;z@vbKyxz!kGXQ^A{M5B~6+1V9ozpn^*f);xM@bHk)N&n+ zjBKlsb@pTKyDvh@+w=`Z;`9XAe;4bsLkBcrh51eaFQ&djSW~U?9hS0Zd%J7mQ!WRq z<%GOuA0K8pNS9G90@K14zs46TeAhrPhQ_;ob>v* zv{OWw$K=gmlf(EPVV7UEyZohg8;MOFS=@?=W*}`EBFhmvJB%xqYS!2OT@Yn>^zn}w z0)UD1(YXWkdf&TYK9M+)unsh&y!KS|txrC$by+*2E|w^C1fXWtkZ?*l?=;p>rb}vk z6%Cu0961|U{{3ECraRi@GuAgc=B;wNtLc#D?C_^OfqmMh3pT@gip;=53;JAR92xAX`AN@B z99P=JWG=4T!uT$pCd~sKayW)h-T%{-1WU!r>PPR2i<vE0K zs1Cze4uGWPi})iXlo0Ji|4%R=l^E%kkvTktZ69P~3(9`EUM^r+}z3A)xV>-U2EQigd&E%*UZ7eEl zf>;K7Ha zV&Kw9sAG1Dc=uQ{Z>x`^;Z4VQ(A%=#b%HYO$j{clD^4i7B(m9Y1%+_C7R%?uoHGy& zjj+PTZkx*_Bjw8|7oTqr>OcHP*8GEl&p#Ps@p4#vfApxFnpJC(z@3rTg;7!)RuKY0 zwwo`#nF-pLtWzXwt{kY5=7~dmlXXNSaPf zQQ;8Qv3JYD+U&BsVL~+6F(K^?zwROfn&RWVEu##44BuH-?1Zly$z8XxR6&Z-3cQ@$ zq~mimh9WuYqgPd>^%4>6&=A(BFF!}rXO~9p7_yhZIkR$OGn0ig%>~QoR{Q$rT?^Hr z?&&d*VqKYQDv92%x|1CG^#SP(eBlGU_1Q)@yPsA=5A~~U!c=}wudy%eXB!@}$bSi7 ze)&iSkKs*19>?DR?czQpB>&&A;07)gbWz}UE8hXs)rL=lYR%Y34HzWM)u^pL2Zt$^ zT#D`VZqS-y;rXmAfk>Cv>xJiOm!9|D0jxiIn|P~|T1~gW=kP_xP}43x->r8dTUVfV zGP4yeS)(PeUWifyB-vgr_9Pe^CmQY=NS`jM#n)FAC=hLcWgzUbjnFpbPl?hmEc0jH z5@Q`U*1Op2RSPF-EYP&xDJ^%0-2oU6#(lLa`6<6lx(UWmDZyD)-|aba4?u1(eJez$ zX(iI8Lv>fpbCc})tI2y{0x&fnBPG^?yOx`yC`k(PAmmDua={nC~xZa(? zDT0kG`Kr5}hU%q{Dj+1}WXjnb(28SjvQASbd-}e(Xo?6p7;$-!l-ni29q~+KI=)28 z?PP_&tZCxd<^f}PPgM>G&bJze9k23|Gj066-kUE6eF}a7_Jj(ts?N#BjuyNTe($7F zRlJKq{;U+hC9gG??(lgA(ld(*XqJ@CdH-tZY$cxvZ?lm|6{(t9M`gk0>}Lf_T599@ zeT8pt)awbNS@{i=bK8by^62EJl%x&PyK#EnC8V51#DbyaC2 zKI7ART2I(ZAl)$ttAbFdA$p-qaYVy``m_kg{!J?dkjl_!DyJ|gEr)2tnEX@O_d!Yh zuQY69o`o<9=Qh$lFh<)08k8xXJ&i)8{ZgJOz1g_>Q>Fg)-+I;M;3s2VVL7bn4{BtICVyP(D|;?^~-bVe3Y)3RclOfNQ+vYtL^R zW8>aN3CpHcB5uH9%p^ul3)stu6-tiqPZ7C|x`zU;D_3rP%F=`MFNPLO2P@cZ&lsHy#t zoc8?(0@bSu)*H={aNwY;E7C#jL{7n`?ogu9%1#BSQ`PcD`%%~XTs3Vdr_K9kl&eq- z4yrC0-~Mgm&eQ?2YIglS+wKtbASL<{DsWW5+_cfm)mKd zioo)ePm|_YrtRi%`z@EKGI}Xj$K)Zabw}ElP|TTKiVjpuDaLuqzh9ibkCBaT3fR{o zdf(}_H{BGf`i<}6#*h%6$A|Q@DDSm+J2)!SKQm-R71zV2d>I-&EWL6EV5)yqYbOfl zy&xl#pVVYeeHfHuXtXJEzjAqs)s~QO$l6u|s$Tek+ zlyzh6SzjjVr-rKoRXPh=c6!d2_OvibNy~sp(e|wtyDGtZGHUYFVIC$3vvd9t8#Y2w zSH5eP4SQgk%2p3#C@g(iItynx&)j#)uM-wa+MA<-tlKYC7w2WK)Rmg+4|JPMfsc}p zApGyQ9}0KIU!LJ`9geOCWhAP*W4=4sp3M|_Xo6?=Lxf+Aix&W}F)`i01Mr?D6~&B- zv6q^1>?`IjpG-f+lMLE3>914HWsf&87i^`N@kr znADerrhZ$|_m~S7dd&hUW$fK=FP8znmhPt`^*s3+t z!47wOIDH!1MBkKuFQi^mU+iUMl=;sX#>%XLsJP*n?Giyl)(T?;?E0$+`F zP2c~9)BS9UG3%-8vje70Yr7XnGrOqy0~0ai!1n$zA9$^wz(_9Vf(J4|F~e-K4eWMPr|$?c;p$aD{oh zM^ydzQ~4sAB%!Dz?g*0~^1JRv5EPZAZmeJttJM*9sHXF_pJPEf9WFf3arMUO+FzrD zqXs!qcq&a06JdzvyC6;ooRYa`;B)C-&9M`#Sv zy?Jq{L;#AJ|L`}%052k!TT$g~9=n+wU(Cojhi2u^OP-8K*(wliup7H@Cy4jWn;R(h zZbF~FEGi3&q?xXYhv^Bb7*etmsp&Uq4yU-aQofByxdTwoDuHe3NwRS3!3*de zfPM}>>cEjHTt-St)XkuqKOLyvp@-H|tQYrmt!8`FWoj&bj%<)qu}lrFt8<%usOp+3 zs<}ZOCtO(V#@@^-vmv%rBrKt;B`6-zs)GQ3YwW)1t8yq6<_$UPi3QcVPW^b64+|-B zLk;USj^p6kpM>Xc)_BZ5e##X4JfDG4A(9?jG|{+zsww2ca=RPm;tf#@4sF=FRmj`& zuo;g_bMh$P5F?2^gN}0z4OAZ~_~hW%G$_6g@+D54oO-GgqY@5|PctC-;g-6fyD#M_ z;XZ!{Xw!+o`MqvRbqClEk`+9x|H%Q(3fb+Rtr!wO=LPik5fuu%5-j^qIq<_yPfG>I z(=HiIsh$<3Oq+?cw0)3ie^dB1-eYF{#TtqWGT@X#R)r*mein5}`uI~FEPc=*rMNjO z+o`>LI;53fm(w=#Q;)_{LX=>mJ>d?JwU&N)>v_GvTQYDSG56~49{>IG#y_rASoG^s zJ|r!Dz*3khKkLgho2zWOUnl>s4EZY)3){-l%~w0ZZ}#>pJ#Bh1ct+$@%*lPTu*hqj zzvgl6_=$dfS>Y$$!q9v<^3slC2_LkT_H>%mlta)`y1ryOOD2O(N4hM$PH@L)pe-V_ z$DHd2RvP?0$ov!N$m`kedpVIU4ygAc^|q=~>uYtNhVxB?mB%ykEHrwTLI%giLRZ0= zAnUD%{!N#usmQyk*TFn{I9SeF)7tAWrf)t)JxTJL*+Y|v<5H4b!Tg*Jb`#B6QjV*8 zRs6oQp}DLAm(xK+?{4Ulwcv=|7tUe!GdK!Lp?)w z(r|@OAz5Vt3V{*tospj1X@7GGKZorQdR~TSQinb9G2Dh9&9CouL{gT`pr^U%s9u9! zfno@^gc?UE(tlDm&?NIpVLDseg`RczhZNdt#{%*PCS7 zWD&fMVcBSvMNFDwXnXP`w@KP(@L~W=+g0gEt#YcxqGE)@&8C z%N=~ zDpV~olag{ZC%<%vJMP^HC{{G87_n=jmC3I(4h2UnK|O`uBj~Xq4Rl2>w#I~D&{QC zBZ0fGUU49g2>h%-;0&W9YnRQaz>iiEpWl0u@!UM<_{GK@ATi*D`eM7)_M{QXc*Y%I z*uWU$phTFlRB$$*`0^^F*1V;S-N3+Z+$X$n8IJqb;LRRWX0dTmp;>0vSjvPE9M8Zk zHE5fYBx59zUDVxFDC+EM6T)UXTI<2JtRD1&NyPrfupq71MFH$E>e`O3XSgrB@0 zz0wM*$0*Wjv7Cs%pB24K@Z4FoWv?d297@(=zL9Q_lsE0>llc2JxZJ?KHpRnT%whiO zh;Yk!5Z0BDap_hbPwal}c@A8AIH=-x*z_f{5Ubew);dkntR@;v_p1Jiu)$vk1Q&*$>Rk8fV*ydz!XpLZD{VTZe_Y#!?5ag#c##mZ> zuL{D6icS6Pzdsa2d(=Ds4CM3KX?%1IMkYXq5D7OPD`5Zqk|2s%#IFgtI6Bp}OMv2; z4cjHu=TUpa%~P#>bgbx#a%OomirC3k|GYos#$WDrPyDc| zC*j!&)Mcdo3<&OodFkfT8MvnsG3u?YqQx$3MrSSVyk;2P&C2HY`~&6J>oO9KG-60q zU}08$1nnWpPnSD9-OEo8elNosxz*n-82Psp`{2WpaCn<1#bcIah z0!1Z6y7$D55aNU|6AK4>uLm38pR>2!jNu~l*Ve9T`z2GY>Kp}k0Igh!Uk}O`1x$7d zXfIA~&dPkM+~((|M%2D6`D)(%E?mP-@HYKV^rdbARJfIQ06QSnVEJAa+idL_!gXX_ zXkJU=M-})+x*_XE`}caxhsPa|)@fC_qphp8enGpNoeaK9Y{9WV$Gdkx#(6ptdwqF( zafqDu_qJ3K4~#qkU9$=}j4J(pi+#)*=N#Z8+kaEN>@VKh^8aa#vTfof&B2>>OdSX)ppP8FxH zs}ojX>2bOtNnkQos#S2R0ElygxTq&Tl~e>s7lK?$vSd%(NYdsyV5m2av#_0PWdU1L zB3sFBqfWfZ#v&0I^BaO~IJpzZ73S0p#HLVA$aiM4oT1f z26uM?3=-Vg3GOnu2A9DH28ZAd!3KvQ0}L>@6Udpo-+9li_x$r~NF9;B-+Kw}I^ z)}ms~)mzbVks5PN2h!D;__Qd z1YQf3y%GEiM~r5G8 zxkN9W^Ev^5*0nV}{@7pvK;$ulw5r=)E5jh-uL+r5?1WzkAk zIf^JfBNEO6(85YuFcHV|iI3fohedOSf4xU+8&dp*zu%_GK5%QB?)(KO_ z2_bL9pk?`^P`~gZ4NVtzm(H}4FfYw{7=?cAI|s6c@)WUv91ipgV-dBS0Em*a`022t zfVt)Lh9vhhZPZJZso3c=PRLcmE`hm14W}q#Tk8XdY}YAI#Q+%RE4@gPku=Fr#iagA zo)TQI$mzSs_kR*&aaIM++Mky&NVr8ZC486Yd>m%5YP|LqVG7$EArA4I^-Q zmVg@u|5^o3gF=pi7FM;7n-`j%``s9^fyCi9SokIEGjJO;GY!w8iX<=4a((n27DswPp;%-k z+M`WMY^?`u9$F$Xpa|-aKaBmO`3^7v@OU(98Rhyu%hHe97>;^_g_+ZG6GOPQ#F+eI zw61=)Npn?YJ#Hf7SB0F6Y+jdTtN>{buyPIy(EaDon}42~`Gg!H{Jl=%X4ciD^RI9bzm%H4=DGlZGh!Xt88>tW~R^5hp)9)W5`37GHr_nezY9%Jbn zRYu6nObrR^6FBRHinAm0-fFtBMs(#kSEL4G5l#|`@f9|rP;MNjs)R|Xqk=2o?dn$Z zB1z)pIN=*aH0;n?Vev(2j5pMS(BX}aQ(baS2Jx8eyQK6Hdu#3wr^ty9!3yoJr7etCIpLl3i-%X3h$c^8zsZAIdI~;IEK(2?_4D@z$hCNhygbsmTdRNvSSP z?NgEZay%J1Hnx3MOLZst?Z-Pjz0RK|^a)4FVVBaT z-^Z(cVt-Vw!j{RbQ|6-MQ>D`Do&&3d2zEUqKMgDazT+CFRT-)=l|-IRM(_83L~Aew z)d;Eba!75c0x25p49+X^GToaFELu<-m#2fxY1v5{bIYRJyF%=bcnvs5wX@%3a;YWB zTHzMhCk#-iN(l4*R}CWOH-9xRhw5R6t@XhaTy2G>ocJjeex6ZebC2y9)g`IvkFBBr za#quRcF;1L0(Fb4rFp+VMd{UZ=pW}-gg&0i`Lm*vpY zt<%>os@exK0PW)IY>1>AhjVX#${g8a|Z=r4iK53!$eM5!*HP~jfGT$zT4Dx$cg{c7G%2S2@$;YYP5{zow0@v(6GaANZ zlutq*BnciMRyKliHzk=3UTU1;mqZv=s(EDKJ&2C}#;D7Cyndo^3-RggLw|$eBF;CS4^A%Q z%}=USf^dWG&=yDAlhPN-KB_nEfqpGgljUK%QeRXb_5a-O!dm>*RfY7EZgb{d=n<6K zkot{*KJd3)eB{xNwdfyLdC*@sqAsf&-~W*D&!A~FVCMDZ{YYXjkzFYc5yC%Hug95j zA3kRRWRP+i3+lD74N)_Ym9W-j+dik**fhnw=<99Y*xZ=7wCTu*tf{8bKa5kg+delG z`-E^gZ^1sIJ~^%Z%xbt{v0>8IRE%`oUxHONIU4eZ{dcync{XE+`VQWa0UE}6_G#rR zKT?G9IKyJoR?yU1jiqnhZK;{#BiR4-9ngPGBO^D-pH>u*5jl>ax25E(7^rK~BSP;JU%zXM4dv(%$VbFVm5ti;)jm!P&S*WXG={hAYLic1dQ{T#C$Rpf6; zu8a`T0%|3*qT`J!QGGzIiI2YudC~?)EH@EQVa?$5?u_VZ%NW)35g+Bxad^nw|Av#k zr`R;*vabswr1{vZ@f*YY=XzP5FW$)Ars-T3+FUX(4{CQ2jlBN$tEQUiLBBQciX!cP z@X7z^Gx-!Iy8ag@cuAc~b$bY(-C{OB$>+wQzcJ)N0FunMgI21l^lPz`mE_LPGL z54ROMYV*}TJ5m9EU!e>MRt4CbFQv}wO_7Ql@1irKXvrGn5@eAdm^DuF?E~EWPLoFE3ZS9`%&SBpJxEZ!Tnx252oAo=G5*g z?rTd>J01(`O4Ut!H$z>yT4N%Y(&3|JNH5`u7#7-|)vFum7rbl+)T^#kyFc{i-ngmo z@f3b|WS4T%c`<+x{m~X8V4PNRC>8Sz?aD0xG}+2WIc>Pz+bUNBjPhC#+!~6M)7Bl& zh6Aj5K(x-ojtxnGRzN|NmF!amnIr%`F@F+(_h$%@myoQRkk!QqowCalLqyi4hC%%1 zrgXWO=3oG4qzhcw;P4=3?kw-#&08a!q@o2+nmDJLc|~;;XGlaW-p_>zGIzG>3?^4L zqcpn@R02Ap5=l!g1t*uGl~zo20w(RPhdvQQ*@9!0F~;;wEVc=jl;P1uaaJUDBo&|T zE12_|^8;+@ByGBr|J15Of)D|px%bCs(c$aat~Fu7eU`Q0v(DzUOPl?m1$)zDD%^sA zJt_8s$RHi3(uc6($J6isgOAAIW#9=K32m=`m_4}Ypq}!bDda~@g-!xc`pJ#UHVNt8 z$Y{S5d;N#NySAN#<3d9o-=tV|u@I*HIwev@nQE7Oe{nGlpMhBUExd^N;TKE+xh=K} zoW9Yza`?;{#mDU(wF}GdSc*C$IIE|u4snc$=}R(cT|Lmwnpb-acxk26z*>m2&S5oT zb9wBs?96}e(AxBa`}3lfZu!ccvG!FilY?RksrN-{C#P9EP$GqTKt@ZPJLN<}O`F%n zK7N+%3uYcL*2Xyh76twMW?PSj2RF5Vk|r2Bdxe`(YiS|c14$7aW+GRMaoR@Gm$ z8fLYUmG-bvrL&$$6V4~G8NOJa>2{ob{h{I|3={>-5(My5Hhtiw|N4?%nK|igQSLjf zBsObSMU5>oF08Y`o#)&O7|-M%*{mq)xMLtjB8qV%!N@jD{`tchHt>*uDa8@s){>AF z+ao&i#y9fJh-l#&{Jt|!ee_(g`|4M!?tx@H#FbXq@hG+JgC55mP)x(kCHpJHLKsdi z$C<-#Z!^V|>bnRtwmKTl_8ToyP2@X&$6M*FEaN)|{f}kk`KrwQk3ijdg2&2L)q5PJ zhpjKOsvR%>Q@*JGC!cvKbS|}n7cZXFZI=^V*eEZv_gFEdLyV=ql$7_+)GMj^vl6jk z(car7>r;I}UTn}S9&m_z21ze*YO*Xn)3rx-ktrVTTx4W=BM9kldCKR5zr)c#?>Y8M z*UZ6&wP;bnab&}DnF;!_OcpP_W1L{&%$ZRYDagCo7aG5_*`8jkH%HHig zde&6opEaGQ|5nsFaKBuSBuvM>?S-)lXO&T_tS4#}T5`0fqo@m-nATo)J|$j}8>hAo+8caDqrE%Ejj&B^*dRB(t<9+{ezA|2m!}%J>9UFxd9R3=qjFJ^BR! zr!m~;aVG^%ma;P(J>Ho9x5P)1ZD0yT?mg2mi2u}+N(iWDsLhx~NgIMwxKj8r5zJ^H zJE2lR>mOcTR-!0v7Tx3+pW#~a&h5`KXR)?-a3}xlY+&jDCsx1F$S$*#Kd)VvRW?E( zSN>$zV%DQQXcK%cSxg+VLFe%B8ll1sK7Zw(r*~QC8kfm3l-~ma%bavLz@Y!}tuCGVm%i zU6msflD4N`3|2HBx3E|;U;+#GZpI&*c&l*L{7`m~_)?>H|0%-{ZDoHk{8ISQDwGWy z7V0Zv#q=mUG9U`Ah2o`(49C|fgj}RYo5B6snY<@yB;_t`6Hz*yQ*esG00$do$yk=E ztH+wG%5H^bS80T_`W5@fmg@D)k_wxi^tdE3__jK+Nv)0U%ex6b9S2e6Qw4=T0hDsQ zzWxj#b(gq;m&R#a@h2n143o}YCg(&8K|*+&y<>-dJTrj^Z^0S9=*Wc;AsY+Wh9jM> z4oucB*t11i(}88#ts~$(HHlAhTxSB{(3*vS{4*)Hn(ST!fd+ zhtmastbWXiz7jFoFjXgTkR0y@` zh56m!B=#rC@VqCbw~#ZYUq9XAFWsDDP9;8G2lK$Kq%ID)9|wZ&S8y11s&?`4(e8RZ zMAydhWlOK%Puxb4Un0LT_H7>zakFXE5mK9{w@2H*F(gYUvmc*H-Tt}%i`(|>v8fn^ zi)!z(e7W!&Bjh*6FZ3aw*o~mkeB_p13YLaM1&43ux6hw05N=XgD~|?UcF{ zIHDU^4 z+@c>MGCD)xVzc1w?S5eUd@kiM` z1Vt7WQoJuL%dpy2DAU`6zjBC38oP8?pP-PZ;h%cvnpp3gL8AWbFJGX882pmqAo`Ic zj`#;tIP@Z>F{}EDlG|?N5R`5bT1p`WROX1xIA}erh-V>G{f)tx&slN_s`_tk5`{ z?B*+d#Oiw?hvzfSwef<4;-Q{?Pd{xe4KamS?$CBd8VQM zBe9-PQv=(AZj-DHoRR|eTnzzPIXvGN4WN5-I~@uD6Hye^M^)sHWZIQjX7{tYB@lI{ zbnQ;8XRq=)YApgD$xmx<+5KbhX_^Ee<6jA;N4_^S*tf9Mwkdh`LNqtGmNt}gE-k*E zfw~Hh1ZCD$YH&R^9JmVp4<6*}&>gpgd*>l*iZ25zC|1C1EAArm$oaG%?9@iD%fr$z zl|F?Ns9yjd*-;rPx6Ue+{Ecyd9~-0XttX?N)_w9oPhq~)0G}B!|I$3V5qpXl+d=Q| z-y)B`FC1dZCI`S0KeA}&zfs;I|4dx6VN6{(Ag45|$zM05kAGVZ-g+jYIbyX;F*90U zJ8lm(EP3BQXLq?LT5N+;PS!KeuFmo1oB0B>r8V>C&-jkh&NQ- zlBI}wsuw_&Q~OoaS|O#*2AbE;6ayU&0Stc; zHWgB`@9}42?{j8UKga;S%8iSA(9{tsagccPu(n8j^I4cW0%jtw3z>7Y)OJg& zpAu+pq!JxwVSGc@l~_k^K`u*YId3$bS|vQ@tduoCi?bU?qt@pbg_8A|YZJ&&*H+TG z2ZXGO6xLJ1tIa%~@GJcdmZpkGPfwTg*ko3f9Ni?^<&v#)cG7Ik9aOKzsP2(0o$@!bxCP-n3Lja(l)_wm z2}CUohg3eR=kY}am#jJZH`0TIJM(tYXPidt8m;;k9O^I;-NO~CW?4`8GD@TH8C85s zD6P(3O{pO%tKabJe`{vZ|Itih=-DE=9<5Ql>4J$;U$jfJ1rqMg_7X6#P(rNu;J4f+ zI!X0q_xLr%4)kr5g0xI;r8l#1tx~V}hrv~2K^sJNA`xpf^?|}X(8iE@fEF(;wo~+8 zHuzNaKHoFOxps?^_S@OOX;}Tsd&LK!pXmdEKr^l5e&B)ZBYYFsv@^IeHz8p|ztGfF zqW~NMFs?FA$)XJ#MtT5aB4#FlF~j8}bH=L}v&iITG-wyvKE}jeFGI^Aj>Ity4?O33 zG)pY~DonYD_moUtAj{AuQX=`AY5tgP!@<}`%(r=+h>$wX5!6YAuz!E=5P}3q8kJ5` zRRXPKTkKs^z0M06EeI6&1)X(bwcEGfuiNEHtNfsButDa~XGDn{8rC^HIk&WD1iLPC zs8R$&aLi@#O#O)56F(&LRFxa4tO}1zB*!y7ihFW8vRf-P`IsNFMVOD2Caf9kcQDZ8 zYL2XBluQ>XH&(R-a zOwJ%Z^UG*$nMG^p#XILpuLaitiQ%pl5_wh`IBA04n{v#tBa7Of@tWL5?5dQW=KJDQ zH=}F5t%yePJoEj>c5T=VkRvOty&>$|O6F)5n^!>G?7dCSGiQ~C zIQlG3{#fJoEw>HUMN?>Ry)02sfbRj4@^z`&Fqn^!XG9a9j|YE3+v3K(UBfkAPhAuO z6ZMOC*DgqFy_U)53Wj?IxS6CDtC_uFDlM(9e#?Vf=(3pNM(A0#bk6ZDicFdUG5u5_ zGB)HI-Kw~Mu0(!%bI$fo*~Qp zJzxzh_&3H5vCfgC&u@(DKv%?Vyg=GwTHlN^{F&ux&2J3E8BPn5n<*&<&?I{W7esgd zpg`B{(pSCj@MH?4UA^W{lk2?55=LiTwl0FRZE0%KMrj}-lHRwr==4j8V9u#S5Rh3g zSEcK|uJoGrHIlRxl?c7QexPxO@(4T*T5X_TnX6LwzSThra*fVzsYj=_-Zp#w((S+J zOuUo%jqwV(&Kete{SH~Pr%YRu*H!|(>++DgXGPN_D|oY$dl3{r&2l3@VrdaDChBlU z_~Kyq2wC}9v#Rd^@=>ZZ%OUP>WvgZBT|GivKBJ+RkVUSM+& zBm&kcLraalKJijw))5*jOo3JfK3OL6gW`uJlVd$(Yyun>@W?! zrg1YH7q0`=ePD{~W-rL(lR2yn+*MBxRFqUO*8@5B*LQP3M-)4IESYPKvxjU+-Urz)$}5H@-1Xhb6u2O z(}Tsad3)=&2-g;)s@4xP#$EP5535pwNje2>DX#X*{bsQINZwhJOsVvHoz^_4;5qxw zF2qr74Ci?9ZWa6L_mFaBw9uu!E!cf49x?kF&k(_@dw@`7eh=*S_xSU`Z zck#MXbK~-38tlRA!0>4Oh5665R$ca#h?zrZFn@`)Wh-A#P)rUXe;g{do}>BH(frF2QI$ z-`7HD&Wa-tiW8owz_D>YDA(9GE#yr313na(c~k2?s-b?EtvmjJeI>%Em^+a!7lWGn z@^r+e-`9UXH>rHENn881+Acs(jtdK%eCw1cuR5!e&e!W}zjx(?bMv*mnr*3YT`V=W zpQ@L>9@kMc%Rr*KBfu&~-8~ooYx20W9asZyn#p;l(}d*(rKx8X=J1UJtMd_dgsL1m zD~|cg_fny@jurYp<+_%$J~47?v97!VgfphJ9V%JGQZ@vA?odr>)l1GYW*$l|FNn6m zPNbudgW8PqXC;ilvsCI=Fwp&I{=)6hMF^Rl_YmPL!Uo+xAW17KYp7L`t?O*2JJWC3 ztudv&3yqiZJg*N1MY2+%Aa*MO^$;U+#vB0^1(f8@8DOK!a_ejKA-6O zp;bUDi_h{$(8KPl56<b_c_vN1J)7j?CQcpCf2X*PW?TKLa$<3fO6aaf>$7hoRR*Nl(%-4Z3`M0Kx)Q` zy_|;V*{WIGnKV{9gC~ZnnRaXM3c9_L9jNk}o!k9DR+}um1tmcw^4zqYt`0^(2G7$D z9Q!DwbMzC%!c5}$n3kr(RoUHFg^k4tMIt&Qiwtj9;!k7sr&68+J7(xD?^WI~tfoC@ zodad4sH%&aO+Y&YrlwF-w5QK}jP)CpI)$i7BSpewP*esUT=jk!mzSyhqx1*Zvn37? zqk_~)9d)Om;-=-=H3|0_7lo}Bg}tv{wdJd^NPVveZ_S)`e>>q0(g^{E?(d8(){cUa zDL;}s4h;dzC9A8jYjZ$>lt;Qcf0WD&faFl0UZXMIX1x4mq;i#A!vSaCv-cSIv54Sy z!Ry#u&BjXvX%?F?YmJ9pK785Zmh+mY_^6gg$1gx5(Il;8u;It`^EA2Qj0R$WMyuaT z?<>$_F}A|bQT&F!KTNW7Cpm(d+*TC+@Y{h@^T%)Cw%Ng$=_}ftbnRx?_*vOz{DSQO zv60qu?$7{f-@6?hC0ZK>xiC;^{pF_c_4yNuoBJa++C(S0f=hV8KLO9Q7m}7$*q%38 zC$X$jQW_oJmo$H>+zJEXq(cErnhvZXojI}w0(mlF^t&wOfb?k_@#oJrbGHvvs@5f_ zYdFON(7-XNJJjRWKZJny+J|B`vx=t;Y;yx(!jsInFLXl!9vtK5)`)xyj=17tGlJMe zm(wDzwblL(P^JZgis(L**`FO321b=vLf2nTf`MncP3aLL-E+*rWA7F(H7M-}5 zmse;!@#-T-_{gwH9JIVO0*oawgLWp%FCoq3FFXZbsI$m?YGgMuw3=%oW%sJGfyC8iQ1rz@gKDl75Hhi#20booBnh z`D)4cB<>`qf><*0v-0f9s0sv7ywZZKd_&bl5k$KEH-mTD2ynMvbgM#y)kz$NEI;C? z)2VpCa)P1uWe!0cId_7GzcGw<((p~A`LZD7N$TqySzj|Uf$1%Pr5(0XKmj}x9lbxN z4E92GuuG$Wl-<)cYr<<;yXq*;DWiCsPg`{%We%i?$`76dY&X?e%^B*3HWseY9!Z82 zN49I~gl|$w(2GETG~V^R=OGO%cz1``M7O~`|4z(#?t&PXHyn!8rZm6mfJx3v(2_KI zZqD)2REOH+@zBxng{M1GEM|^MF?bH;49PguQ1eqBD}oz<+Hb6#8qwuhhBp^RDzi%q zhD$&G3G~lmlPk5f>95eF`TDa<0Q`a1YHo#FIF3_%jNF;&$PuK>(Ic73+GqED9p=)ny-ANu1hU%zhrBVD3m|AhFAe3^052f6KqnJC~Pb<9c};fV7qO3wv?de_%4 zg}K~9f5aMRS@WE6sb3HkvnrXucR7{LW2dRB`Y4{qFaC-&`;z$2tgc`J^|P`X;^!n4 zOiiY1YjOj+N3KdR{i1F5q3A5{vauZs?JymOTDbmVoZba)qOeC*spndQ>*wSC{837F ze2?7zIZ_%YDEkF!)fA~00Bd9$>?UB(U^!$UW3quTd&~-I*sO8&hRmo+ZpQ)7E=fDF z2R*$~?WT-bCm+N#RIVo}(~O)9%A>+(6%Up{5@39X78yt>t>kcUVRQB91|R8)%>&8A z^h5w#fwZOYb4@}jrmSuZT*~Xif<x4W(EjhO6IG=&OY^E)jp)sFPf$q-86gG8mxtLXA7GJ}VSx;2O(Qmk}Z71xOE@>g)0ga4e~?QqE07}{*IK6(Fd zar)oazgr~kVXMeI`pAV%wliHNtw{kL6SV+b{eO#L6jkR2y~c>4MfG~jXR<I3d^jQSTbj{Wz)9E4-5&7(Llo%?O_e&+ zK07o$a68@|!<$w+aeKQv+^coj@vEt9EOC9?jf^A*a0EuJOWmpm#zqt_N)OHclYUP> z-AbGRs@p&9$Ewylb5N?VLa%pVbCJ>+n-T5i%86+zsvr|Jn}sm#D>{K~|DFg!n&en^ z7NXVeldSj;qtjrY^mq*bdAl9`RAq=?$z-}DD0qN7qZ(WiPa&!Rfw|9(Jm;YjR3}8k z0fY^!XcF}cDkZ3o{F%gG`a$BmTwSP|@bhnTJ#Pg|eCW)Jix}s{w7_wZd%X^OA|5*0 zwz8r?C32v9eF>W_GiL9O2Z2(-zLZsuK_TvX+b z=;$mjDNw&o$T)F^QHRY5TB(KMM+izn3>jS7hl?+3=02RHYv4# zQJ*4yjat`aQ$>2+SlW_7h;fdHmE7P$CQQ56koZhphJER%dTT*n;N6nqIk%}*-L!~G z5;u_{`NJVerLzG;|C)$yzCE?+1ch$lUR06q#MXSuDAcB$JN$WQ0@xy#gE-uNi+V}*EVE^IEyh3S0yh2I*7?50PWoR_I&oj91MuEDwk;`CWMXb{ z30Qxgn6!5v+8|8hdbyJuIen3~+7k(Wn;z|pt6w90PT??|mhCAYb4*B8v^pw;^2Gx4 zr$m=29VqWe@ww#)hU%}l#FLi{6G`ToIWm^J%d77K>A8&-NanUwI?d)JnLVOaE9Aa- z^RK&R>uxCx<%R^u%1t%joDVZUA~>H_=1K~+I4|B)3N@`*RSI)XSq)h|8n$AJTfx5>QOpqQc? zZlB-^H>8@`$`E{g@=lpbQE5b6_t}KPT)Uqg-j@T3U~&2RRn ztZ*y(tFWPYov1h>-e_GTUEA4&(vt06*Zev6YdzppkrC~-%wxU&hywT(y_&)0YD16Q zzqDX@KqmkIMx4TMH;~WC|W=8Zt z8?H5sfM zBmc7&to~mV{d4|1Esszhd>r+m9ThI-<;eM2UQ~a?`uUAkWVG_V5y|mX+t)wS~(R zW%sc zC!=22PlJ-#K?co(<733Rtg}j6g1W*J22wy4lLOnQw`{9Duc{*>Dy&3gl>7Pyh5a*g zy*u`AO}1CFUsRS=97-TdwgoXOWJ=C>t8+^773=}&_aMz^v7M@A-b_g6mms37@K=ao zkn$9>azS5r+(dm_oufB%{6LhHH9gcyh%@b&dX_f0^+}*@SoF0To zN>uXhTB`QFc#3q1MzaQ~nz+3sSuyJ=N<^0?Md^H4b)OmA7me}(m)JPfx;ZNRR%sJn z)(ykSA@Hr()EXPYf6>O~eX1H*;DnzG#EUNW!So6-{@ayg9S4-rBDeupAvo#+i z4r%Uu=V$SA@sC}wpSq`Q-!-b2YuNPy$c2dQI(p3dU6Ne2hrO^l+B~Kb^$CkFk#M`{ z4Su77Ro(HreHJ5eNn;4-U%gaVYCmU1>&!P z05Bz9t|(a}bEuZ4Gc`?Ca3R%ry}SsvwF9tJ-znjLmsrL=STDmN>&dv;KfT<2?=i!| zcX7DeHn~yTR$BF{ijJ?mPA7kETM~0m!{Y7bLV4Mk@?35O11#Kg=mRtarR%Q!Yk1FC zwA^Eer~-1G^L%=}oo?)Bq2WOKO5WB&y(R9t6IZ9jEw;vZ&!fMX58kFq^f3P%U_oyE zMPtJuZj5WEZdqIZUc?T)GirbxY zii+A=5p^7~e9Uzv>_HoBLocaGKY{lRW^pg4d2P*V?9BIMgf(S+#}E^!3E9;WH&nR@l(906h{}KRyTFQtsP5 zGHe_r@831*&>WjjWo{tWn9I$4uOvx4X7pd~$Xe20iIE#a{Zo+RV3v5@vGnihv1kH- z0<+ph7hA)%L1|-#{hbSMQG+3GbEtoF;PL9ocxJ&rU zyr{0-(5DHv^<@|{YxZGD&a|JV-9YzHHPnWXm*Os3!qJxKwijeAL*D%PbtYSm)C?;L zPxPJMcWmeGRZ@X@fAcZw50=%kGrLV|;BSrq-*$#3=YltkT<>t0sru#GgJL+4nQS6x z=(y&~^rG9W^T@J|P?2emtzG6h7P8yhvU4E9qG8-uDLx(Xt{CF;U z0gZkGg>H|OEYh08)aj2N3x@|5Cg!aM270=5!7!#e?zZYHbn~v1_B*7pK-Ae9O64kL zW{2bgIE5XgD4d%Hz2&|LS9*69_7Nh{9KCP-k22-2h zB0hg^9sf$hd2#aptdjrw^UY-mobs_%;Js0+t zQx2qKTk}m)cZaq+~#0 zk-OP^%wp;zf#hU=mx@z0IZW9P+8SmKoGK?5)t9cWZfckMC}q|&K)8!4q$aFQj!h78 z`&w2QTUK`pPRO_#8mu8}irRYr9lhBNZzVMi6l@?lnKN1Mx*40f=QFj&lW#c=B%j`a zV2@q8E1jm?0knA##4*#O{ztM-Dy|PpD^pv33i_>FTpH|thrl30iWJwu=bL{I zL);mkO?C7H@arY3dDZpzpG9Qs&R2>^&9eYf1pH@@Uz1_-5Ms z454B<_I}nEMjpXtiz8xW1FB>q5`D2Q4t$OQ+PdKzDu-Lhz=`HFv=8p(RBI zYGHdP1GOfLag2VNFv^)MVovnRuFt(^3fAEukIbONo+tpKRVM1dXnoK=bEVxaraXm> zS4aLWLwG0%LFrn9Aa~x>n$BvnIJYH6(O8sItWNzp-Ug>!k(8S0B&iS@1X@@))NZUZZL-cp{|A_<=A!Zq z<5M7C-@d#Iy$`i)L&tE$!KtKqHtvl_j_IPrz}bh&O|0pu0j zpw}7zRCyQcJ0i2-Zuc@Lrq_bkJp;^YnI#%&<63$@_19#qF(*l!j)59k!$v&u{@%oK z%5igj0L@Ijo0HGLSYIq*ugIs8`DV&w>&hd{628bYz7R)}y0MJ0Lb5AXe3^)qZ9Uk< z(a~MwTs`TkUA1Q=L1RGv`=Z0Bk%8gV>UH~C&l@ISwy&$T1qX+bklG^m&MiXx(M(rA zPP0%k&pYKb@(s!haw5dS3=QQi2(c`Bs~)Qe+9JDs(<4aCNKty*H4NY-{f)7Jb(wZu zTub4XAR?U$XU`!Vryy08WPO)`#+oZC&}25Ue4E&k93b%5)8NvB45AsFN9*B>U zv|XLdlOBUDR~#AZnQs$zzBkAKFLDiH%l5e`jV)R8y=d+`Zv&3Cd>u?2DB*)8+~2B* zxf~HgSQ3KukK;Ih>%O__hP}`O=6&{%F^=^)>v@-nWR-`AU2^)iG`)K)Rw%{`wS=V z2P9;L@hkTfn`sfkk$={{i*#k%U-9PYKWAj~z8A*;{Ual<@qPPFx3w%@g5{#iiK#Bp z&|-(ExWwXa%>pY3|H|?eqog&tjfU)If=7_F*W0o$*_nmS5 zpY~Txfa((O!|}=BrRj$i$ms>D78bJ5y6me_NTfDa;a8(^SmbflYc|Y8tTs-+I`%$O zJJ^k*HFm2K2VdJGg>J$@M%jQ~ZuoLt)7e;caP6Is5*So_Az5_xDwbjL0;gOEbLmG| z{Cu6SJ==1dP>dsR8lf_0Rin8qG-k;{*2^cJ4(Kh$ltTc#BewV?hx#Rgx3CI)mZMs0 zEI$3lV56R|t4ds%>ZCy#sM>?{a?_^$0&^X8ttH^i@XWI&l5=HcP7(k|&+zQFz6H;8 zM9P+kjN{2MW8JJ$i0G3-lR*&T0EIbfAideE*|7#)AL95Os@&Gi#x^>QP+9aeG16CS>JSuC?3*v)`mb! z-Wi%|EZegUEqVkb$#X2T&_d}oSPx&zd3OsB7VDZNTAK$ZW=<+t`t=`B4BA78gWvr7 z4gYn0M&MuW?YVSi4A|xyMUxVZA@qt=UC@|&;)3OKeVyo9Qt@<3Rhjk!#Cyb{sZNjn zvaRe2QdfP)P{&G|hR9Bm9f`$s*Ce2KP#k>z;gn^Q)Wy#Gn1)8n``+@ASh7XiQVW^u z)zNAFcjaSiJI%+O+O#?NEciom@Xl12iggIpqkiGF0?Ejx-#Pt4!F6P`##F)4x847k zBZf9~#4AFgZQHQ;Y8UA4H2O>TY@KJXyje03&YuR_@#Go`GN>9gL3!R&rVO>4bcBOj zg!zo9@?6pa2o0>jVJ-*w0$&=ri6SE61m7P={>H!-%I(}53G#It_d~t)|`zSE$Cznh-OzSV|xFM4*E}!o~)uXvRe6T4@2|bsg)EVJZa(OHg zLK+a|B?qPH!|BT*fD*OHY_id4X7>5bKy@)hg_(|;Yed_V0JGWNi(_8DV#guHu%bYo z?##9AKe>Q@U2NN?izDcI=AE0?5oU70xzXZWn8v7Tn*J#r=+*m)0ev!g6+tr;ZQI98 z9l9{%cb`+pr9SXM16M}{Ql0YAUB3ybHgxbJxCn@w+`V7UCF>~+#W!5^6@i}V;7Y_0 zF}X)0m)NV@c! z#~6;6uZo7rL7U3=>cl>x*rlQ?6%w;bZsme`K=IU;?d2ZyT@dr?Y>2B}#bR_3rHE_P zfWbtjZ^rEF|KMMB&8LQyKr#Okc^~)WqqW}{I=_UoqEiIKnUa3Na%cQyOOU79&8&bnqC>&LJsSI}H>gS~ zbE+2OkKH;%?sb481Y@GMbRHw!1iL;;W+*$i>Jrt>IgQN!g9H|{c!}} z%zF1Qv?0lEtz~hjF$e73?8mmXTvOX8%o)-} zH#K?A4s>Uh&Fg-MgVSDfQ48v!;LOr`jRgVHQeLp|bznJ36N7L-mp>3tU?q9l0NV23XH~j`zwA!`$i2Crj z>_=9{IDTo~h@_Xd&SJjPiCWISjTHY+RrsS9 zY}%&*K?Ei?kI8#MR&7+2Ki!&=mcD)e50%0A3r{kzE9nJH>W8n;Re$vdf5`{`)vccY zRoiz5HNACfW5oh0QUwGFp*LxX^xi}79qGOIs;Kl{r7MIEA{|0IsPqy@fY1U61Pl<6 zF4ZsR%=ezrbKkk&d*_?`SJuwVUh`WkYxc_C`}sYO#Gd;1u6=s)%OBa-@OV0b(yK3i zn@7ZeH1BD9_md!txaLBp;5sTd;2q0X$xD@)p9EO-CW-RXkJMIv>w^?Ff1v|^nR|t>^~VfjXBGkyIteM1szVHa`d@0%#SD2EP}XaD%}ODg^!<`u1) z*v%g)`{098nKP#8rNwJ`?HQl4lC%H-J;P@+Z0U@Q=8C2FiBf$q7`1_l4@xv-ubNqb z4Jm6Xx1Y%A$?>z--toC*o;7F@=lXna4vzNg0%d|7F4QOQI>(bbtEG0RIv~_44nqX3 z+}B)+3^or7rGv0m^PJ1Af)NAF{Ka_`g1R}tIv^jOMI~PFFm7@_WVgIMWU~-=ft6j* zI;|zt7~e;YXjjv2DGJA>XY`S4R`$)N#-^&y8zFoh%1SsBBL<&Twld@5YAHR@Oq!(p zP~w8g%zfM)0j8w`(>K+-!qM}^nzQc28>qKyKQwwI_kC7@=(Wc-n%z3=ciHvX$RQ+y zteo0~ijV$=9pkfhUG(JiQju=@CxO(r#W{-B8{f|k_XBIl&UlGMzIr8sL||rrbk$XU zQ*329%zJLGJn7}#E!3LZ;X`V!Cs$s~hi-0xl>^^r^4S+)#|_PuZK?9k0-9J}ie}j- z0w)#DXtJ>)SXHfI1dbXP5?h(8Vb70r_axax_zZr5R@G+KssQRSp5@tsx`jfq-Z7)8 zo*55Bhap}X*cvA496bE9d`#rNnh$xzPDhnOxq}@H4r%Ss1)1hm-F4#pLVF_uOn?e!maZ%Y5XsZO_HyfwU#jUj9uCQcCOq$V3EQCk!Q8$waz)|} z;hq^BuV@ldi!Xo|NSZEjjbzm!47}jL&3K|EbgoPoW?Xp0|H!c8xq3171`cv$YDT$J zn%az9FRC(ET&qJ@+j{tMo3bJ~$Ko{=pw1?@5#FeYfi7bnhXSKbJL4y(NGG$)_*gbV z0WK$9H#)V~5JFZlL~{@o?3+3hJdPD84DluIGF3BIk%9F8ZwlWjKZ2Oc@F!{W2q0Z{T3 z-))Y#w{LXirx`vrR~5IrL*eb5b|Em-VKp_j)+OOGyi&4!`a_f5V#iN{X#I_`9Tr>D z%k%QvXY*c!EEkMPT}7wz+!Bf!4wNy%qBzn%JLdN;7WO%f$9hc*XE%9eXt%A76qH6( z4w!pvPjv6TdX}QRQ(xMT)ehdP3@K z)H)@Z-|sR9mG3WXUW^tHrE*QDY&4~yIz@eMPYZQPYkfuhX8d$5srY6Dn)yM5-%zz4f9|_Fo*sW@j2(GI*^GIhEnaFH*2T>1 z=NO|%wOFF>FMo;V2bN+RIXndhlp-(;%xJ9Uq8}-~&kjkDO%loK%Dg?nv)XE@PiR%N zafD9iBhObV6Vwqu3CK8NybI(hQ7NZsXQ7HROK*$`%hI0w8W@#KNms0&)K3CF8uQag z@!R-&S4s6xFV8dU@r0QL&SBbz76QFH@HEkH(V^cGV-f?V(Sn+jVSVuUY>FD&nznsu zdWW4hQM0}TZyk4tMFO1@SAm=4$;sPZV4y@Xmiv~FD~rHgzmpltHF z$D{zxWOH!O8zt)aID{`5f3VJFTRr}q%4pS$S(=h6$+> zDj;dP`fIp1?1R)*eI&?E(%+xBj-M~qouUdrHSAmiV*1q$oK_sZ7w^D{O$x?eeTf-O z#ge3Y*2Hu-bZBj$wjs-x_XYr~xV?+4sRjo@H0J!WyYWoL)-{aHyUL5A8MB?Uc4;xQ z{phn#)9wb&eLcTAkzfVxB8X)!!xwK?5ddYnH;7Kw`kmI$#H1KRoa8#m{QT{~FA_89G0 zoIEu>c;Ne^5^|K=IpqcL#Nni;dUjt6w&=Vj%)lJED(xWouJsSl6|v;bZ)cmDt|~50 zMhlGjquWDkpX}_1b&EIo2c?Q>JnhKpzYt-7kM-4JhL~kQ-8=ROp9C%7zPkDQpv_h% zQ^v}?jPZUG^BP|lLT#L{jm89?UITVRae3CKSn38iFVK8}GnpJTer6hbY8OgZ6ACw9coa#2sx0u8U zVOE@6Ob47@s4#Urw@q@Zs8!D0Bb)~dRsZ-k@c*#6es1@H9q!x-mb^4;j5MFK@F7a{ zXI39fq2(+>MDqI1{$W}#vZFh7b!jaR$rV>d2>>p%r=fE9n zdAI76x{P;-UC>9_;6ztP*p#0z1unjtN36gPMGD~-{z;(dC0^eqxm77hz3WwzHSZld zICIajwqZJOO@Fv{*<~MeL&JssQ6#(GGl*Ka_4Qwa`P0TL;F0|#=#_|u_Yfe?1L|!~(*w!-2oJ>TVh-+VddgT~A1^9HB2GI!%ivz^sASx6d9v;qf3i7>uAi1<)?;yM!NGb(q&V%R%+f?sVRbVQc zVisxlZTwq&f4m&|^}+tL-46&Ay3Eae?hq;(=E?FjRQPh`*sYIn`L52TKSuEdyel+p71* zcNq`q(zHea&(Gw)fA6Qi35Ja=P3afg4m>EJ>*~@)d$@kP<)iC z7f%f>%2YFHDN+NX+q@sL#nr9rtg_uQvF_!qjk6LOy&||yXV3P-*g>D2GM;T(WJGyH zC0jxb$MPkzNpcXy1nKEfF(`TUsc%?NPFqkoic^a%Jdij|J&s0*?PUp==KFVp^GAQ@ z;cr90<2X^xJ!#t@jA7e~s}*~T-Kc@$;p;|WCfPyEu(O@%PSa=ihVH8tYF6$wwS|26(i=O>K|g zNH$J@WbgI9NQ;P!sbr|gSD^dMWcg8X#>u79gS9W))<`S%9^VFAahKogcIxgBeC)Op7USB>{s~j7!WXZ{n)+s|R zsVV|5zVyF+n+J4ufAOE@<_h_x0nDu>z%SdE4!NvKUwtGOtGy{waxgu0F~Y>^2n2OZ zM#N+^AT|XyygwVZWlW@3bBOMVW*G@HkV15$n2glN}hP%+o@-;vWi4j#u04fRvB6v~FpxPt73Kb((r#6b~n(@<~PIC={C` z-x#h=Kpd*nc{wcqkxc>u!jk;gFKXpEDD9#m^CC{8PRLi*i)NI1Lv(<#hGnIL^~ti> z5E&)=BksA%l+z^l9+ulwF;JN$cE&-^Y**vuXLC~!Y523Xh54z`)5v(JHPouM3{rlmWU*{pXP}Q_yfg)|H9*(zIMa+cMX7JmR-g{ZYCaYQJ%dh7DrKyR*sO%8k) z7U{SH1e#Q8Oh0<68n#=rG>T=pD2-%S?C3-U1T174a}|=XUyQV|=M}F|qdma)GV6^m zBY2TIPL5qUZY}}{+h>Z2Vb?BLo4MtkyY4hAhfcH|+$TE>T|xPz$ZFBEm?o~$*ktQF zY6Jp~KUy0%#6(o$(PkQPMjtwvf%O*Q4kUD2vFx^9u3|Mhv5J^EEf^zht6St?v+lC< zUY(qGr4E@Kd2Vv2>ZXV%=hs%EU@L?+r}Vl6zty_bz?CVkV>c(s_qo+{O z^o&IjN~4#F>=E6hBG~9sMUcHo0aGtu;hSEsf~5P1BDV_jjme=AilJ*>#l~N)oYOf% zj0|62SRzfb?9*NgMzo5N!K&?^nDu=?e+I|v>f-6DNKIiefcN(-egnG$Xo3US-fa;d zHwW8z#*m`6cjM*n)wn*FD6i(Dwv!s)IrtVNfRc3KaNZKuos(m}SzvEl6u7585#&(Y z9$x`WVUq{|ezFvT^xY+?Nxq{D1DKTeok&Qw@KfzS8S>+v ztLGaaN8lc@j#DORx}^rW=flo z?nC{o`t%0*Y)IdgC~^9P)%2FV%lutj2x^Y&SjT-RFLtOyvNF&lP8^0vzqrc+)}2TJ z-ECO$b;c}=Ouf}!S}rV#+x9?~v9;=R(#ZB!{n+e#9HPbPukWIUwY}~=zmo%Y#(n0i zO3r&SzL79)i?RIyu4dlk?B0DSR2yDqt3nnrswFX4lydF8vrBQ6_o2fi(q+q$t{VS_ z+=WezmGS05%#gYRxk`h7jSC^ zVp`aRd5@#=Kfp1kmq{4Im_>RvyV)^-3~4a05< zD|RBU5q<$VRkSy**%=8=s8)iEkQMP6^}+HDSSFTAo=SM0fJr&)H{^S(b zJxg05#RJoq%kYG=06JH$7};EpOnsdFc$;lMb2ckrJ{l;U^tSg&(AaNr{dWUeS%Z34%u4Up?7MBr z{=Bm^V`yq*pK}IHvq(=rQt=7pKEWQIuSDMyOEH4*-k&*bW-H91 z^f_1^v=344;VBQ34j53{7!6^9m(p_*gsbso&Fnv}$7I$w-9)9p979xFD|2JXRSWHn z^NtoSbB416BDynQp#)=S^I`%%4TJh*%&B3CHPQphhSrX&$J(c_6(>a?->dwI@uGCM z?eKfh1FyxO1V0Is7TLOf5|k_#-3iE=*&PpM2)cJjdVN`L;q!V=8TB6bie_5PG_^^f z!Pu#S83rxA`v9R_?ylC|XsHyT#hI=2G|;)pdxj9GORE~t_cqy-vf^ulSqc)S+@dpN zhJ@9aZL|OiwMhV7gy{Y?4KCgUHiUmFv4t@nH>j>7v zcKD}Sr+R;nzw@4&1o?X4RSQ5?7sB#t{TVYE=s2gwQ68kbq zMFBgg9{ZvS)_yhKzXqK&BFZz3U6VH}ZRo zdXOxeUc-7vB#$ZU$=4jP5>Z=VEvc%0f@d;sI+`jPpv@YVHS&5WZXFZ;f_bFPypo?8 zXY}FFs?D>)CCa=!&|1qa()D|WQl?6fBA6Ls6}Ts>sIK=^>pc9XsJXE((8jzYXx|%0 z!#rE)O4qkImQ>}VF60~%?NdA6T5&?zFFflR37M!cu1aY*XDQnn9j}p=78B8}LTGL0 z=K>YbTk{=<6LQz9md$PLMhFakn8GZ*@_299)j8r!`F;$#mqdA+H#$ zG8#GIR#%rdEBf*$Qn(gMX_pSSDrP9x*sTwczasRPFC%0OW zy`2rMkHUa?WHzTV6(F=!`Kyl8Qx8yaS%d6A-!G6+NH7U$bWPy(;bvEKjyJV{6h$?4 zVJcp*7b;8B?E3Of3hR>?xo%C_X#9z`N*q}+p8h5Hjlz>(isOH5-i-E;pd8+#11F-J zdn(--TVRBtJHAhM90jwPJhS&hU1?m=`OdZ)LZ+Ch3Ei#a-B?u>Meh7Zx{pe4lbcBe zP{5gO?cOeoUll*wiTCAk*7fo2dV`)M)|Nq?b*Z?OPYZQ$urxg0qT!C6<~;*cYUL%# zclKoJdX8nTIJ?V;tkkivNKHh|G= zfCOd}ye-Z)UxcxuD%-sKm|{cBw~(z&CjrwPot}6t|F_7m$@=fduC?kdBK5Mkb{%IS zFjA;rrc|%Ce%X}$p{oCf&Gt+3)(BpPx@8#)gSRptE4z!qyieXM(df!1duaS5Fl39| z=jVq{fLkPgb4)-mPXD;wT(J9m2C|MF*>zmpW+zgon?-^(t2SHos?)P_Keln+pr}kw zP4qqLw$4vZ4P#*7;Q}0KENU_xa1|Lm{fh$KCf;q7vk|3y09Rmi@?k%QS~PId%of(N z3A$)A$GvQmYH*)2sF@ILg(`Vz%H8p%`PyVP6rSbg#SA36JDqFSC|uyYmwv<%G|nop z4O!R~d#s1@nx%7kZn;?ho()V*q%!1^UUK+i6fpg1vHETwpeI&NBlIize5-#pJtQC+ zY5oZ)@K*ee39Drmgsep<$|Na5oHx-hMuv8V!K+}Y7vj7^e>!tXa&7n@@&E4*34hM~ EA4_WE=l}o! literal 0 HcmV?d00001 diff --git a/doc/design/ops/images/rnn.png b/doc/design/ops/images/rnn.png new file mode 100644 index 0000000000000000000000000000000000000000..e139e373fe8396782044cfd936fdde624f8c66fe GIT binary patch literal 185148 zcmeEuc{tSV|1TmMiHcO#_7IAYU0PK37(0`MvS(k${z#?JQ&QGspE1VH*ajuSShCMx z2xB+)!C1~c&pF>GJ?DFVzjOXP*Y*9U=V`94na|vF-|zQpd*3f~wA9!jCn0onbZmF; z+`Lam#~e&Yx9>H}e(=ik{9{t!KlE<*)l}(nTY0DG=v3(L-n_2wO+Pz$pi+N%w_#CR z_`Yt_ePPj}KH<}Wv6;tJwoNRHa=3mu&LS8@$M9eOA#^HF z`yMY7cDzGqD8woch0b%d;vh391*~-ndRj&&Z+jU;P2zknKNj_-|G~=>LN? z)VUZI8TQ+c9L&oC-&$-izy@sdD!k47U*AimlBp-x+}^tB-3yA1k@DaBOy%VP4u;IW z1LH1xHc4Z4O`heKoJZgPl=YWuyYW9aPw;WbCG+yG&D9z1FH!} z{Iz{m7=uZ;c)cf_wc+9a^NsJM1<83_{_R8P+!!YGBPUM%x98*TMeqrKTnIB){jc`% zU+?WI4F0K}Y*P5IXZb(;-`NzpQ!Yo6I6V*icL#){F&q3-t#NPuznAU*@ScLve=z(} zbJzQR-?_in=dX7Ut~{0JyK3h*ot>GZm#Ni{A3>Rx;>(_n&dyhmTYHuz#1cB^Y|Yq} z&-}g^Y4251y5x8x_y6M=gTJy|Y)q80e?RC)t}^(f671l(f|DT@B`NxuO#Mhl1@7+j z`#R+<3^^zpF~bi3e*W+41>GC&Qb6-^{r-Ja5}00ci#yE`N=x;$WX};y%a@u&=F;A3 zHLWezCFjWy*kSC%e?DLmSeofXy-1edpHMKEAq>LRy4CNbX^BJrfura9{ye9i zqZ$V3NxOS=SE_L03D4W}ngwxE_9pStjz2^T90}6#O8(`zWMv|ALo6%+6C+|MHYm}t z1-ABnuxbi9c}bX~pgib$hGIHpwTW_;E)abHYTDUoN|Kq1>!^^?F@+ zd>AWt$^2ji0UWEj4_1-+h88~O2K7puiB7n9nNsR}MceHten)O|_cuagY|lcBnGoQS zu&eWf1kNJitI*2{i79h3_B}0P)$0!{=1K?-vwitfYk^?1fVvFKu<(l#w0iUt#<9pl)JU#`)*Ho_>sIhl&GZhk*> z(UHFTAC0EZo?dmh>ii?7f3&{riwoevOwkrqaO?R1>YOdx$*T+9`3RPzYfnF;({Qt8 zUbB6cv1YC>&_Rxa<;8riji)?kP^Fu67`KVmwbfo&rlKP*PDe zH`|nV*D4HzLTf0O2$*(&kpg~u^zjW7kWsp(RXEX zWS>JAK2gr2Kuy5EE4&7h&c(01G4^)t`-SA2ZWk|w{$3Y2!ocC+ymKk!_m`u089Cc` zz&EM3e(JK4ndljZo`!z-*sT+91?@@loz9**rt4?TgaD5R$8R_A`{h8<7K6KHpH%2) zdQkDZ7LM^!_)=%`KH`@{@-5`%S0^O5wu$iN*Sja@0`0o8EPb8A5An@TBnOr>#fZc+ z*s&GoSd_V>BnIwmJVELqABWX&o4Eu#6L@ipS!_kO!E$x>q;4B5pze);LxRuJO@(t6 z)_Fkh%#=6{%E3Ig=1Pefic!vcsyD5otZ4Bp44y}4R=`qs-n zu%feMVzGTJ?Mt=F&0K;FQX9`i^{p?C&gD1o7b0`Zo0QE_NnFE-hqCb53Uc*MLzOH) z>XS*aU4SAa89H}K4|+iIACLU=g339UEeB)C-_NEB|1llwT>MGo%VOKMq&2)f`Xd6W z_UicI#Vk-7#s!ikZMDkBg$;5Z$DJ{ndsBFlncw@9bH7>oJud8Xgsqg9(_py=%9&W5 z;W75;qDGHd6{tl^r)IkiVUFxd>z`mT4CA0We7S1qY-8mdsWziWwPWLZYoaS|T5<`+ zRU8L?A)DKw!4m+NOEhH z4M-aL_L_$<8l^lhptQ3-y50NcjN1MPDjHPMg3=-1<~5Rc(Pwt$t@i1zY%72v^1&i+ z)ND*;CTo?VK>eZm1a}U8XaC1hmpvxP*m{Le`{wUm`;Ij|jz|Din<#9&Ji4tmChSY-U`MK&X@%#U z#wpY@VfWr#^O=dc!Lp}TCOQbu4v3bNgcFhYaYQ-p#(F5b(gH=e&#Av4oe#cHyEAc2 z)MVXm6tshW<{{9Ci)rUrUvwbHSW;l%wx5E@$md%TTfVCTIOJBpZE`x4Z>CRJKWo@j z4=G`c$|rh{vN^kMjK^mFKb;1Xi-x&o{khnkO?Ne;&o>Y7NIZIO?mLy9@vo@cB-r>;&KrUo3FV z^6>SAqPaoe8PwXwM6xL;4cp~cW687qg>%8o{0olhTt{q6wu`v7c4eq@rOB(49bd>W zQ13NU79zx}Ck3DpHqdMKk;7mYZJ37cXWI&x^f@Wls>l}wYu{E&@yOIltPCaT1%UF| zsp^Q*7R}~TrA&7>6C>s4ulbF~7+(6J&|F)-u?P0i>KsR+3cXjl>!$jLR6l<8z2Hve zB}FkObI7`H)XXVj(4w5P``XZSN>HP9R`vv0kHtJo=M*8& zGF0d}G&-?g$0NUlyJcU((~ox=&cxCx(x>a8@Zm~Ojgb1;e?D;+9pOFnHF<0GJI?#6fYcs8ho4 z;#~DMg{a(2XT+L)T9zefwJ@Y}ma)b+lHmaxR7maQIraCiR`S@RDDDa-!6sVBmvTR{ zjPf6*h5H=*{evEeGqMCz7@{&y{{s}})?_9JOpA^3*gVSeTBZigW7-d34(6` zWq8qLWBS|U|C_fA0{EOIc=TY)^gj^vKNbc2HTWikgWOTwm@0an{kN2Mi{m%~l{owp>=kq^D{y#PJzw03XFHK1$<)+iC zf7E&(T|rJrKTr!CN58!W&=K3;gEF0qQr=mBt@^LG5lWp)vAa8jos9{BT_`b}Sd(0`f`eMW$PnRH zG?`_}Y%0SojnW37^Oj*^GQe9zA|OW7#2e#HF@;~)l?Rf!{oD2&L%|q;rSzs8Trd5q z)GUJ0qV!CFBQ1ooAL8Vn%;3{vr_id;8JHmJ z!xxdCjDlg`9>^9t+A9Jiw-TJEIx_6pXBlq1u{4eXYq*1~HsnYjKY)rCdTsO|P@3WbU+vBst4!E{&yjW>QixX$tQIvbX{p{`Gd0Dyrn8TPN?ok! zrrzbYoZMmzMz_9UXjpJD;(mGYT94yIYhtbNmfvbtYNW&<4Zq7zrM=D3N|f8-%@F-6 zV+?6rT5eM=Jt1kczMM{Z$1prhNef^X>FXxnOgzJc-9!*Ea@@Y`Q(ie@B}UkQs;L2c zUibFvJKK7NHk$GP_U2p<-M{1Y_`Q7HzC8QGQji8+GNo*So?0r6m6af`Zro zB~$wg2n)gQrKe0^W|Dh*m7D)VRwE|_p4F$fq5@%1?$rrrY zTNK$hkq)f?0+)?wfG9UsTigcr{1jVQ6=8T|4*LvLH=BKdD`W~8U;&`MiN^q%jcMSzhuZ=a`Whndu2=7N`H{;?hZ=Q3o&JL=m%pZdBj2nV0uFtk zY}a+s50GKU)&$vV6a89?&J0ceuv~cI(KVaiTV!Tv;dGUvWgVm5dd|It!dAKF-12E* z`)&j@@=MF?yt;(wE>Hrgmv?BwHBDSF)EBw>E~3s5IyyZtc6Aarl`lN3&r}f7Fx8n! z?R_Qopu?}gXPMMf;%JI>mNgHw@J|wZ(SW)I+s%VH~3wSfLHb?lF71WAvjWj#4a^&#~1 z{uCFE&ofBjDClB_@W@eZg$`yNgjqKQA#C$V?33NVsTeheG8u<0}|evNuBn#Mb8f4VLhd3Jz*jcL3xkqF_C9_qqC0!IPH6K^40i#zFPQBog<>IlT+2j ztGeQ@KKV(Iv)##AA3|y!vHJEZpWDK_wiJQ}1ai>5Q`2*?p1WZ+B(idiB3o@6T>MMk zdt?X}r{dwIs9zu$7)WQS;Vp>9`%>v7-EPehG=m0i*}Lw*h4r8U&pG^#9~1UP?oajn z*idwW!L=M0a~bG6gsSfKha+`iRfYKQnG7FWC|@iw{MCN8lVgN#gm0l~sgq~Fb?k=h z?YFqwb9K;^tu8(74TB$sW>3Rtigu}@R*C;${I+iNHHW^-e)E`#t*62&`hw`5J|EOH z3|g0}7FBZi%J6WJ`E~S%`)bHoixC-;OC+p1SH7# z;%38jtt5s1w%+Xb2|&Ked&woT*{)lgin!lh0*%4GT4()3^%U$%4qZS`j&b!pT`2k_ zWEdRA2lqrxmi5x}7`j1|9(=|w;B+C;{m<1oLa#}Q!<4Cb!*0&(;IS?VlkR+)l$e~k z_Ke3VZ)cl2vEdUcMWlcRG9e-j-44V_^%{XXkGh|J&yV>%W19i@A6`WN-XtH=3D~)u zL(E=-92v^cV!$EN!}^ySDn`p65I^sC!?Ulsr_=%E z!EWFFg9|C$!?mHbO6Rkr!*vN^LQ^|e938t`O`t74j+0)`7<7wg6t@<>XkPyrV`%Yl z<@!Ud#8`3IJhBbSmvh<3a_knj`Mb{Uar|=E`1N3PouB#5jzZfu1+UhtFc-@Fpnn6z z-+wrm8J_vl=+pTx#0eu_=9yoxp^*=;_<^Uk$i3h6wThE}V~%tixpFx&IaA*ahB!I9 z(y2`m)uO4a^7Cb*e|~$;*IyW<%FDC?mNN-G+{25rJApb@87H$m9Lheld~d1d2o1)V zgSDtWZBqLL*deZtNk5ESvjW^l%^Z zp{vDZ1~W@rm#-S4#BS?Il_M6Dy?-81=JqWRs3>R6f|%D7f?LS1=CT;xsO5Z{Jz;$-N-*54Qjaw=Lb})}_=wH|d_z4=8DIu92 zGd=ylZh)UVPVX}0Cv|as5?_pbEucJP=!)GS#aEx!JN2Z$z#4Tc;#l<$P~CT1{g^4c zehSu2oJixs3=|AI)TW8TsBkG{)Lk?#NMq-KzCdG2K~u0sxM+~_UB~YQjyyLpM#kyS z`E>8dVi{}XWIzE+bDlvFk-p{#%^d*l93aMTJ+hWV%bBva%R+{M8925;1*vK*b!NW zu4bu@=x1}{G$$VQdv<)jg#rS!wCmU%nzo3(ub*COa^5gYr`AwxdSD`bacj9PUbw4G zkzy7vT^YPJK*OUGGt}vk!~^E$+laLcdowcD~Q`5Wb0iZx*FA%L<+@(55>N{ zpuLt-B`gi?PSh9$#DXGTcwr+DvLI&ge z8=Et}=zFWevPLKTCpd#80NYB9o)%hkOHmDjuls@JTgQY*wMp;WQ(Xr*fQ7*3QL;y0 zRC&qzIG{txfAa(4^HuH7q?6`_Hm}+EOy<3abTgnBWiijX~;d zHkec?z=C(Qam5W+i4JW^ikjD30q#z;r@onFL#8j$1o0cxX0eKvtIl3d?8Qe6dxL1! zz^;W4$WrKK@OfQw1Vm?sXlloWYGs9lx%p*1q=MhQ)R9oKaj!o$Qck$kOK6@zCX}Ml zI#`;)`t|wYr$m09U)4lKFcoaT+lK@8O0U7Ztl({wtLF+$zJoH1+}XDPP~!(B#Q4Y&z`gnG zW;noA-|c#H9cf^w8vRp`$?3~8Hh&%>sCzA-lfIwqw5;-7EdXX{OWUb{9%I{caH@|- zX!&KeWjEyZQi7}-&6hu*%n9B0n!|U8|9H(KqqV-~<`=rzr@8zBd5Z!dc%jF2{hS@8 zi4uCkFS!%cc;R5ThD6jJl|A}C1LBwhjUwr1-!JiAd^ppK7NB>zmpHP{-vYRtl_AW( zSQMmTTEUqY_%Pt|g{{P@A#&a6S{)vW$-+EM_91;_Q6G;AzuGVbx~Wavs3~;&1;3p- z%IBj;3J|T=R;08SX3*IAFR)^(p#p35HG{lZb9n_^V7{2N2CEnBF-D8q$0Gr z6wdq7pm2u9$Db3TViuL&pJkHY5kz3sm5P8b?QFpTnm<5j048vp7)v|KpSpH{;3VvL zhwnt!ZBz0&rh*r|G!#LzsRG0nZaT^Imv{i`yD@dx!h3`%Fh##$PJsH$1donmC<@iW zEjQ`z**_e`Dp~|hCqfBMta3rs6d-9@|CsJZa<57oa|A0MfbVWoGMefW<-PD-*?O9% z9-V$>qDM12XyzkeV)wqDUv81^>RAJzPzom_|Kvy7AG2P-191e*xcar&+&Co?SYsnQ zpZT_1mA6+qDNfe93Z{CDT?lA1&CdXScf`*V*;UVnlsBvHr>HV@hMn(tm9XODt z6=ldC^mq-U=X~2k;|||n9KB!y&gs?w5z;&=>)zT1c^|?RBeAzeZg59uJ=_i#2w=eyrhC!JYZkP%o$G}Ku10}I&;{#y z=QMI{>oTx%>-IfsDXR32c>8+hg{rv@p(BQ=NcE z9Jd>!oJ5~j7YaH)5%|ulX!d0*&&xR%pD=~I>;!ot0+*qgB_b_?;=R^yi_%TKT~bP) zx8}C9wUM#m0G@=CJgC*T3Es)hZbK~7JBmh-4((&C$$vXf#A5vc``m?ST9t+k+W+t~ z;Hv30bJNT*+(xhR1+0%KEmW=iQ1?%F13&9r%|7;(<*pgkgM(i> zU%uv>ZFj#u?)V&*Y;U$~x;Gf`9Kd>)$}r_Yq4S@n5wuhdEdT--IovEti@uP= zpt;gM@DXvuYOGoXpk>FE$&L(27oZwnJ;H&LEso`5x}ATVmcmI6U#(njji{e?t&F5) zbsE}m>tDe>XpnQCd;n;_!ME^-n}xzxi9Qp2pjew~^zt;v)Ke)pFSVJv41iEf9?gFH znLq!4XA|+scw(WTS&S7cFGl1B>KyD+nxLV%a7e_hmjDrZ4Ll8ZCg)C104mT6YDRPk zJ2S6K5H!cAgWqrV%C6L0@U;NFG8fB=^`8;joRFsj2a$VPPz@^w!?Nn0HbocgaA0Mo zx2=s84?vWIqIf1N_a&otWvQ(%)P!7-xCx<~I^!Sk9uj}v82)-xd&P3t+-uM}u29Cp zYJLC%3gIF9zC6no>8GHlG&q>c8ov=`CsZ=zmt-RgU=1%~*E28vOZ@6NZO6M_{m zE+hNa*`M_h1X_+JS*#8Gked*%6Vt1%G%C=WXu*9FVbb*!QI`+eX26N!V#jVo3{4IT zI05}005r*fK?ue@SAP&FU0S=o-7{*}KetA;E=#s4j7Djq>#O)OlY9|@Y|ZNE7v2Y} z*t@D_M*YqnBfMO(c>YZaNjw_P>w4#Ya%qj$sWcM8;9TLFEdOX>o6<6>ax{NFr^n~} zxmcqpennbDW;LcZ3^KRVDDt7ggv`$8U8%q`6*Lv+)UD5 zxI4gT7S9Vp1iz%4F)MoRgLa?0+W`!rWDVC5qp1%Ri@>@mit!+s%svcL$V<)Q>=@7F z6d>eZXaTF{)yXtbhdzr;WNe{&6EW71)-c98-}pK%8y>7|DidzdXi}7tPwM znj0rs^mO!FG}3!5!^PA1z`LHsuFcK2e4q^>;lLzx6I{>z2KK>LU)?eak!?$W62$A0&M1h*QKzEmDTLd zn&eQe&F=8J)*`TN<{BJwyIadQa_g>)iQQZvR{^L(-!}jAc{Ho$4Y5Z{ z6j7r*!7-BJDxu5XSh>NjOnPuf@fdQ|+APX!CE&&C^A!u0lHv=!EdknakRI{*Svhqa zWO?RbFwn>#hh#~x!CUMp1g8p7(#&8dWYtTREZdu_;HbOZ+D+U)NLJk`q7H^#o7#Dn zrpPhjJWwwk02`r1~({Ae;`m&J=c`+|;P!$?qGKmw@xUrreof6ze* zGJ4Ksc9zTtbKG!psxJaN5}|sx7a`|(!haMm7e1g6t)%hV=7gf!shqkH(4{SuZR`)8 zkocF={4mbVO#s<6jLxh9At7b24g!tL>LV( zXAKJ8qLHerr^=B>R-%N;x(Xl-p(A)YYLMi*tS$^>Sx?vz ztNjP0GkYUcdyUv+0!vC+RUe>aycNq3?cj|&seO79)QD*vF&hx+g4A?4IfTf(e& zs6eJoAIw}!vlHTJ$)0AgaD&G6RQY)Yx{bV;^05o#dzFw&Ro>l*dg52RTwDz*nK7_` z90B&r=)v0s=ZrAu5KlryABJKa%x`5lA4d7R@6OD|^2oZTnLn-P+VR_YTO4F!Ys(98 z3l^(BarSs_UB8)!%|fd4K9qpjxF6m>oX_)v$NWHlz}BLgvVb#wpDz6E+g?{rB+f13 z=CVcO{Pi#b!oPI?glNUfeAAtr!qx%+Ir_i8IEwQq%>y|-Tnm08^6aT*SB|xZPeR1Z=QFp$rdxzXU z9>)tGBBTp3SH_3&Ce&4p50`7nkt8uCpkjv z(i}x9dmHDKzD*bNRy&ukpWdnqS!j5;SY=Of!r#uhT|^F>brL^GcqHd6S6x#KC{w@U zKvt~RT%99u1a!p(kW8x;Egshv+~(~LJ-|8V`V4ts?XmL{Ej`YpuzXAHvge48|QKXzL8TMRm87&j4=;r=1q27#zR=3V?E`ou+MhPU3D*CE!&9L zZu5m}-#GOWF*9J+Kq8e($=D(=m=wxN(Fdh!AW8|0$pEt>V}=c8oPI9`p6<^bK%>&bb~ySI0%L1H9R~ zqNx{{Q?$OdUmbjDwH_Le?hSw9G-N~omP)x4(%j8Ee%PJ;_C}~fi9`QSf@Zj~HNfbe zlPTdQSFb;hH|z)G)Drxr%g|bg-Vw!>IK4Xr5V$UL+yq=~71|GM%9jM`HJf^Yfb|sT zP>T$W6WEhXi(t@GjB3wtz z_jOn)Thzpwk<8x*NM(l$C`Z_dq%*&mzKWn417KDw0`o|m&FOHw?fx5cF00o!fk87u z{sFH5`r*nc7w^ZLiwy$th{1CAsfQhxQe$F7O?%LiEajXxZ^?6(*gdW$K5!T;?+)Zq z!aBEW$+IkUIT=IU;@Vs{SEiKm<^W-FK517itW& z1->Wo?V~2rR_c=r(=JeU#Ahm~)Ea&kAZQG?CvwB!9mzNRp{zqR3JI)@>DbO09}r63 zsU}xb#pcX1QD>QnlMDx;^vrH|CmwM5!JA|!5=O-vkIOg>@W;OP3#>rd5OS7~>EnK!9L;<+)d zcbYN6YJthf+pHKLryFAL*jA@HFN`6+Y;AAiE4;|jNQssBfp`ylCb@>(mpxRBj;MP* zm*F1+TVDbO0p?#;o{<_B4!yCv(H5vG_Gu-`daJFt*8*UMS5fsdw%pOr%!*wbp`ZLR zy&W$YkpW{l{=cWg%$a$#(;V{__Emv3^Fm*nm^q@5DB^6@J1%`Q{&wvgK07Eg}@6EViRIb2QaJ$ zC5967l2h*OUB`*XfrYe9o(}gI2tn<5buEzP>rGc z5+Ysb_gROv3q&9A0C_nfAUOsod{WYpw+&Ee2FCvkR44nP(Q*$Wqy)aP^wG&nyLmG3 zZI^`p^6+`rEWRSdHK_H8spD=1+H&W)uTgv2?P3tq!?TB=cd8e^@sJqmXO*PXnw^I# z`%9g-fy(L#tha!GQC`$V9&KM>&A?;E0B%PC$)exeX7(6#JXhG8ndU;epn+3iwh-5Y zP#Ar$e?p*W4eEg*U1t;Ut+E%>FV-Rwt~~vD6S%7%lhLKXHT0_}f8N$Bt33Kj(qU=r zdq&f+(d)-VRzFwVy5H%KZKe-0o%RI+?FDNx!5r1FGy{|xX94&^Fosq0AqJn-n+~CZ zl+8M<8EI1OKJ{a_T=}q97Q_^l1wCgDwoQ4ho*TCQz6H=wch$&x=!1A< z(wdu`cA}hAvx;($e5yfzE~3YbsT3nTXoOZmtcHcZ$X!xIOq_jqDl91uX~<2l5qDMS zaC{Mn7f~a)&Bn3L73X2Fg)Si18;?> zhY0uHbTH#8^c9_9V1`PZKYgj$+YABk(K+WX!G;q-P49w)(^-d7GgkZEO}DuY+MJ1c zpQl?v$|FT*xnnpX|1WGZAM8=ddA&vUp-y~q?w5O~hX6($pCGiputzAUkp*y91eBEh z5WGAMzsg>80{ErA`&jge>^TQZf7F1e%Zze+E~xgav;CdnbwKf6M%gcarN5c~7O=Tj zQ||@B5f29kQZg?0;jZvdmJucOzI>RC1RRd+K$~65WmFS2DOzyM8mz3^SqJ7-QeG!o zI)9r=p%CGdvAM<4|oGo_mWs-u73|6jY zyWHX3_p9ylb9~}+=f8G$_VnMc(J?G?K8IG^E#9dF^Mu=GDIg(M!M?07+R{j1h*Q?@ z&M&XGzvvJEYzw&h9FXa$HJB%z8H;!`{h98Bb`UaKbL)sae&sY`;_Jf{(4>uYbWi4J z$B>vHoGbwv>xNbPPnO&5!U8tx!++Ml=#4uTxV^^Pc-|MN{V;ZaKsf#%6QS70yzfxJzP_S zVUdbmHPe;rL!8r*c9LOi+DaQ~LcEq^=D{EiRxd?o5b-1o1$yza z=qDg1J;d>3CF;2H3FUHgq|3NHFPE&9uVfTxp3pOxL^v zI{k`hSa*gWC@nAK_;j5w^tZ-)!hxpd31pN$W&!vx0Woo#E@xZkR#nJj=u;6EobwG- z3*f*vI_uB&9|&5GKiUxS_c7z}1B9b5zTo!W{fJVj4PlxqpUEw}6=Bf#(RQtnU$Lj+ zGAiDArUK0P!Ds4%Gr9XPZkf(!ERY7G_MI88*@I)jup~e^D!E~=rYJ5HZQ9VHB9Po^ zDGEOz+e(_A9Vd4x6r53uECP)xPHd$Sl_E$}=c`eRv|Pr%%>jIDWu7_i!XTIkqN!@^ zI5#V&8JA3&`~|4#Pmvj@bu#Rk3En<(kp^?RS`&U9En3X~!_jMR@W0Yepki=x8Rgft zc${1Xp&(rKRzaHm4Sapm`|So?;CeMOt+IutTUSKZ{Av8Jy8yP9n*Hl-ou1tl@`WsY zwL}>gek=9usq6rfbipA$xxUGcRG^OA-L2R~gmjzL$FUpM##uCMSYFKP-NHgF)Lx&A z;q2~50P{MGd$#=s`zmxa4sAkj3V{967oiuM$3ZZ|9w3%%!2V!sK!O>!^A?zO2hQ>w z(EAG|Td#dF7j)040yNySOIMAFJ#)+u7Yv_B^umJ##CGM7kWVx(=if(41I8cy;7>4Q6LAR3zhPDV4@h%ZKC$vdQM)veBLtF}KZH;C& z0c*DlXr1rh$tfNBH3lt6W&v(@qFa5^UfWKDK@91({5fvW{VispC+Fq?w-_@UCp=8X zFy~`)!|Z4j4yqnU)ghV@%nC~!al-oR#Q9_576YX(df;e;hWuKe)f{LWUi7Xf1P&pf zBoMUPr7%`EcQVk@c~z@Drb?Hg-5J6G0FBPxps}1z+TEgxr}cMRLGQ=JiIVkMib#>b;70&Tu6AJ=(kq1HQ-8%1ZDK5 z51E1i9JD4N<%eX)qSNF(@R^_xZm-F#OrOtS8OC5o-UPt>R;k4Mdnbdtb5~5E z)u8$O6%ICxvYT%UH~YR0gjYuZI%xB_aVDFj?x(=+sW6y;;Vr}YNFqAShj=GV=fNP+ z0ittpc{MOh{3nr~RWSHs7<=fK*t;cU5yRZs0C8Laa9joqV`!#dAoQF~zzNWJ5}gat zvVg%-bZX3>ew2$fBQ3tIo!?^$L_ZEr4wUXCdA=BY+CcUE`C`y~y&OH)XX(_6 z8w^Iz^gUUf{s_WZA4SR{2GlW49hk+O%9QUQd$qKx8+Wj%a~MHVPyel@>I=-TV4DO8kc_pg68v}0=H*c$bB5L=>sa&mMb!B2$SJR9c(PsVFc5#^giAq{nikP2 z^wqMc4y2`nAGZtgTzU9MvZ-M=mV2rxQ9+ti_I(WP>2_f9^qalUi|+nS0O8rGxg2EY zG*W2>jWey5?a$5S+X20o7JFKaiG`d8yTcwU46PPg@8&H)2tuvB0!{!bo9j$L$f7}q zeKgaV8CC4iFOiMB9O_mGTngpPTv9J|W`f75td=&ShOep!Ap1CM&F0#Cl=sRF^q>CB z=#L)NyX8xRL7>Ivre051@YLTOGRJKoT}(w+4a;F9x%1Fez&VE?$1bgtFO0qMbI;w+ z&Ob{q5}@UsLl~L9eCOXU?h&ZMRgEQ))OAGWY4W^ouhB0<6tC`1qxdDrXVxpbAtkEK3u3Wd{dc7j!V<|;xsH;D=3J`hs7gFLw z?m!PAf828mp8n$H4?UiZdG=QE+MTuDWL0mbYFdV=@*`yT(ah@=VBKrq6Lv9w4__-DHfK@OYyN zb}c;2S0X94B@#-$ReZnK=f`5!v>`X|q!*DE&DV3klv%E{U4EH#wzuHAOmSFi#EoSI zT2gH1G{hsMJ?sFmM$>0BMoje1c6Gg_F_PI6m*;`S-2YCxHjDx#72r2l3QrwDdiD zu>O@Cl{;=M&R$1PA|=XzVC*^Yw4wh>1Meudx2=3UX83}xxGvKa7(_;~!?`vQiG{qY z(ChIbML<@kwj0*|sd^rY8PB{&Y-z0rXK#x)r{D*?!EX0t3l`Cvj=u@kZ6@QBky{A( z_C2k#6TH1zijmu0l7x;tD(A2^sO*3&juUbL4*UV#@dC1>?^%y{59lW(ULtF$`<5V3 z4ZVVfW*~Pxny&%eEepGXqS?@3Y-% zFl_!+Qw$CA>8rFrj+HZT0F^zNHhdLio{=mJKxH1?;o-l&KfpVvD?j-u6GV z7LWc#IKQ6)Tby?yxat=#RS_gGY;F;`8aGQ?#dw~kWMM$CpRPg#9@<`Nqv;lG(Sy_u zWB=H#b(Q6cl(H3vO)iKXFrSz64HLXBNQ>?Z1jK+F7((Jh^7T(bBK4(?2k5aHGgU(Z zL#OZZ6(Z*KYZ3SCiu(-&Q_+|cX6HvS$K~9!(3>%x;06f)J4yWkVA{LB^f|O1h3Ml- z5OL|^Ek-20cT_;^5B2Y$UguVutU@h#ky<_~UF_Pz8=8B>2MD!V49a}~ojBhijaW$p4m7&VFeckV zvOMb+`R?3X3wt@~;*e^f@4y9fe9E|9+8hg+8Rj}Da*@4dh2PM{XY{qqrtpqNu94jsZb!95;MjBFG^shg;402bbeU@S8fR+1vw+80+aysLE zLoEeyv8Mn`WosMVK{T1oA%%CrEgmvJ22RB~;){d^DRDf|k=zg%G;sfoQ)!Xv!LbYr zX9go~%Y^+ztaayKE;t5Kj<4AfSJga#@#RnL70hbdG_DZP75V8k?Kfxuio+l!4~(=_@_KaX22eKrgPAJ#yQ?AUQn1{*2TWU#s#L!6tQ!PTy$p>a z?H2^s&A|bnNDzOR@^lPzfe=rVtDkQo7Qn>ndq#nUS2a~YOvpbEwVci>>E7uhr!y!1 znHM44>(hbJIfoS!{2{wBP~5E(luH@MOeEQZ<-5Yv)=^pVdB9yiB7KQCkws_p>t3A) zXxcdV%cu)`5DSMgee5zw7fTuF zAlvVbIJ#U3u@Hbbas8MoPU|Tx7xLDBoDo6qu6N6RK@Qsdh^^`v=XntpJ&^}YpTSHB zp=v~73x*3Nq}&L9uJ(x%C+6C&&?DwSxW&2iMA2>onPMO6l0K$}-ilnXRz_*rxkKo3i**>WP^r=GoJPv%0shPYACo zW)mAA?c+IaX|nHDp;0qGSq4{qg;OUuNG0e4%% zzYd=Y0&tbgrjKLS{sj%_8J8-9NMtru7!y`ZWGRJw;KBz?h>&52t>@+xMTk*tP#h$ z?NzSl3tyM&5;KIp$(ExnrLY(^4F(dY>aF3Mdt+;1wJn&CQG`1Yp zcfF4X-c7y2^2aKaY#NqOtI7RXAE7U##?>z;U9~@?9oCS2*@&9CD*U*$MXmV>mh;*S z1LJnm-DfG7LMYcZz|Rejyxb4z{N3TSU&L=hYG6~e@Q zxF+~XgwP7lBOZRGZ4VWd`=w;43krixSVeaV_ZY<-T9lT;clt6dw@I;uSAO}m< zPrc?$nc|y!r`KY(7-v87Bm*+sz%Oav z?@E!l>qa-r!mx2u#VtgMH1tQDo)@SF8OL%$nb{-NsS^o*BGO&HajZW-6S=Vb9gI0i zXCgJ5CbAtDX; zD+*xfl7V)CiJ?XmNaA$99GT4lhJtXkL4?z9nT4?=m9-u6DazVg(LJR$dWFGh`g*)X z{cNPXXWJmMy_bs%+~8&t*W1fg*uS}2F|q0Y^FX1rW6Ql*t<5hFnGfH25)8%>$|EGS z7U_*a)@k6@378S+5`WHrkVG=}XdzB|Cs5C&3d*CSz=%sf%@Y7(XNhR8M;+}F7a-EF zIAVZjZKtic`i@<^`?IlQV0VeU@@tyZ(8kO+^&vFctoh>QYM{Dp)w6*Sy1Zn)GH}02 zPs$Buq6r)@@izuBqYdE9lEgn<>o2an(=EKpJGyZ5UuYWK`EZ1Gx1NfF^sj3&qD>f@ z%t$y%zv19Pw{_ta#R*=?-r6pl<&(9gW*g=0M53f98KL~9I~xzIs~H)nXdLa zX!pi%gUjLyXty_oFtNfPWMFQ+eR>lTEaT8OjEsHz6r0+Ap;!_mDH>>lnbO}@>jQy4 z;0Q88*L-eQvbQso9erpvDTNKk=lA@ih#kIQ9?_fQn&{5jb!$W^#F-* zmi5t2;GupP`)@)n>_!1Kn^9z+Et`EcK zCk%8-e3oteXvB)QW`yhAckc(rVKs4NsACRS5}3LA_`PiAFy+bTnW#XZyyL+nmM!9I z^>VBH8H0*x`1b7PBv6k=4)?|**b(lGA z+weU(0Tsa{27QJGy;zT5+j`6=7#1i+vBVgcI@zf4S(6~hSyDW%Sa@)7ZKGKn*OM#7 zqckv(5Mjeu zMCO+(ZA#`6hBAEufjnUcZfsj#?g{|%=Ng!Rx_vOHSQMHkoY#U*1#yw;AJPf~P`aFB z_~nTax@YD_0VZJ&&1@+*;+-iss&tU|nt!#@Rn*XB zV!}m$1Ry@lVL zlwm!&X??j-hb8A&1(>6uV%OFmQq-pbe9V(|BW=y(KdP_-(!Y|o;p1!_KM*ghez^gz zG1Dk9*Tiq@j{gs;t~-$G{rz(yCnLv5$T-KQvS&udF)DQ3`aJX^d0D{L+6#b8 zhbS6AchdHLax*(29c9N50`fefRa~G!Sb9Nv;2;`{^0c88H+S?SC%h zgv)^KlZCyK``jb^|?Jr69;4X(`tW44mRJlDII=H!)iUP zv{d2fUyN3G9W&d0G+%uQ@n^W6&`|1_rX(Ad}kF8S)z5}?efc6c7 zdw)wLZvF&S;_|boc=wY>MuYYP`~d&+cTGKNesQLvc=}hLe+?^TfK+E$)8+CQ<6u98 zvw#QmaN(<=xtkGbNm?abe~9tW48W^^sTZh;HVp`$+VLMs>rDL*)=z?J<<-LyHQX2I z{r>xM{mH*P9aJnUJAUxykr10w(RPa{ozrDYgSP3J9&u)I&EA|zb_<+AJ1_cxl}Po9 z`Nz>2vVZjA+;k@4Tqc#Q+ds`3{7-Ot0&-qq|8JxXL=9J_1KYmH)KN;(KGK_9F&1d^ zB-emlaGXvmYd~fmM zkgOQ+$CWf33In~lrqH;ub#y$0{7;vZ($#`To1sQ-JBV^`GYqL0NTlMC^jlY25Fa^!fg{kEoxA~<&JH^#D2_@=|`G(mq&m4ed&TuKu z7@Ul$KJUSR6kJ9=`?{;;f;OcC_#aE7aig>dO#}#}x3u4{dBuZ`oK?pk|KSu?7NPGb z%fIK4*!9d@rUw4Cr^+FyJta%W9OF^M&$TUI)9syzPTtP|3iV0ghZn@ zUw^^n9-{ph9wMrcn(){r^QV9He^s@4y1m#?%tWlRrg|e$v#oA_)bZz44_wzG<{^nJ zZI-ncONcg^)g0KDf2Iu~iTiT5W`Vp(2qUxmW|lZd8zCe3U!`=#or)XrV=ohT!j91wS$!CdS0r`83cXvpqpuTr&-bSwBefG+ykPkw${vr6Y?sh zUB{KJjuZ=B)^JA`3IzgjiP(+?1)pS009nga52Ld-QVwb5&b9Jup5&T~Vbg|8 z;{zCFEM}nEvm|?UgRXzla>4e9B!YPR$WGN4A1Rx*zAI^XBmjI7gCO{_r(nOSm#^-z zGXN#!nn>+h(mGf;O#JNk#>}0oK0r81+N9^o`vLU#x5uM1=chIG)PF-*uD|>Yw1-<~ zt;k;jPvKt8)cIoY<)-JXD*4yfoc2AXn_+$S)d)`7XJlduDzt^=ceL?+e5SPj`zrw3 z#$$5mWl@L!Q-*jhH5_>WWfU&Q{tw;l@;9bWnN`!?_l~M7{ynui`9Aw-MYOUr^0j-wa zz`xOQsjsn^$7OD>VQzf{?$q>Wr+@#K)WJkok=$Lqs{4`h&!31QHJaUlpSl;u5ipP^ z#1TMK*H3Y}Sesy9*o2?clTx#(Oe^JlTFmy6b?do12oUTR77>@?QJT8h14UYW6vvI9 zJ_m`$0GK3K%hJC*5=z(FF1tpXmENc1ej2rDdbtth;lYqryBe1YnyHP|MfvIcQbP(CN>RgbMdom$Zi8s0~G`Lq2ehL4x@c^j)O9WAFcDVYMr z#TR5iR{IkB$@}}~#+$k>Z+~i(=sgOB;AO5!F1Kyd{YB&7CF!UuwFQg@h=UN}k!&4`*gYno@aX$WJca*l@&>l#hv_Dx(F&c;fFBw+IH`& zQ0w0^E_vU&`$Mj=UyJjUZIy&Va4^GXi#mWr<=2ds=L!UGdfZBLps1=@sdT#rX}vkn zBJ2p~6swn)2h6`{*%^~--yep*YNul&J}7POewnL@$}%pBE$MO0Uf|rAEakBK&@U4w zzonWbC9B4w%@lB+2O{RQS0jA1^R>ak4-F&M!P4vR`Ls^hs%~W8eC2hI?kcIjv3Xbh zup#dy=nkJUo$(F=1RPQOJ8wnUK|uJ|oAJC`6uW*_SC%2dc*kKoR-$A1%JWP0ypo(` z|L-<-MYkAeF^6PZTnOA1!psv;Ss1sZ>@=165 zet@Ce_mjOqPH$AWO7PKepGK2_&Z&U8B(m#cb}y##gM=>0+5 zjpbLU5v}`g0fG%X?Hm!;s595~xzn$h$5<-7$$=(-IqheO(&ye6*QFSPOXy8G3D>=P z_Q2tHqU6aih&`Ra)oRz*`OT=SIMSHFAsEgH$*~A>wof@8ri|!(naaG%_y=6_`XfS=UNK0@e3e+9$hDDH&@6Z@)1-UncjpsUD%>orGESFSxl9)buJzS%`d?n$NUgfV?-f;NzsvUR)NP^y z_7^27*#uus{oU%|GpoC^ON$UK?5}<;zrN3HIF-lpCuFnxUth`+4)0lXC`IC(zpO0@ z?hYRxdK4s~u-Q!oK<{tPI_%1OK^Trw zuM3z7wY^lAWQ<7X#ylv~LHR~__|)6QI5%h#NnD|50y=%Qr2B27{q<*5Dr+v#fB&az zz>6rO*=moR;|f+*V)R*Um(f}6DB|LMH4IgN`33ztKo;WUPOOwK7?~jj$!iS^OICL; zp4o<${5NeVn5@h(xBCk9xvxg2(Vl0CVpAhEa+~wyyrjr+NvAYH&cL%7gtZgqyFWIo zi;@&~N>o=MmHWgeMPse9;swR-fs^8)obQ~XNe<%g_Q;eEb zfF+eqgqh&ZySYYclE0*8WwhOqSyNNpf24GCGw{exb>|zz{H=T{MDRkv!#!tY2}3os zVW6bE#+|vzhKuCicpJ zaN-_r^`2g&>Yps(+l!SB;t<52w0ybFt&dFP}}dhvA?S{Q+lm( z*dg04+vC@=pHlY5TZgir0YMI#Qg7>LHWl#(nkzqwqz>GMi7I+{M0(g!f|D%Xcsz(j zT$U>WZXwqE*sup~v2?9E+k<@NuII63S0CYDhhtZ<{G0qgkNx)ToH^F2!08@MPJK1F z+YyH)yUiK`Lf-~GjgUeGA)lBW)g-|_ynfhvea9*rR}}1r$TH(&!{41)mb}sw9y^!h zQ2~kM9CVbeS|1a%I|;G!$5$fW=3#$mAD$yN$)JVg2&Y;GpWyDc0u+72KM!xOBs>0S z#~lo|HDP!@ii$V8K=lb|c`fT2lwDFZdC`hDupK(@Df~{*6=oRSXojj-2An~cMagSR zKCvXRhfVgy*CVpc*Nhkx$8JLYaeJNj_{q0T*z(uHR!H&=rZ?Sf9z2cn~xwpIz(`r%*%4+qY zVmNIpzj~!x`h1Jn8&xq}M7*@*o{wfFwH!v}L36)g(AMYsw0QB@eZJhhstUJXQ~Mz< zr?kFl+vyhHvTzdl|Ic%Ujx@1h)8|XC4K5)HEs9ZfwoqsnB-%e#nuz9BC^NN4A*7xq zqR5&d(Yw+@Wl`#aZugsA`#R*6XJH|n{$$gFbM@^rx`E~z}ec$lY zPqdD_Q^E$BzrLZLm()U>P?vriMELL(q&BCI-G6sM_N~gnRb6}2=~)_fIETC^DGwSH5Z| zm31i@QT;i?4cTaA5)sSl*Y^}6wRD9E(e#fQZKF7D719>&EvtoL=)NitJ)G@g@RQHC zXdfv!zl-9uTNo~5Ku@gsVXZh1%;+<^<9Rft5^Yc1kr+C^eKC8aO&or$70wX;ET1Oc zsKmFtWAnTz785_DSL$u0h7YXdat+?GcJ+~`kwZ>JIYG%_h1Qg0&xiej+ zGq07IB7SF&pAf#sjcKYMfD)M@)?^3#;v&gfd=3M9VrQ46lrJ65XEL_1 zf#8d#m*tAjOWV1RVRl2b<8o){pPkQC$UJja-Z?uBJ`d$747QfScZmOdmLoWz*9$M? zp1*!yEeC4A!2)81NIWk4T-FpAe&kg&0{dVr9}}+ro}0_{%}qEWR^3vdLrd!2szbU{ zgi)+3D-;ID18 zYmy~sbbAtHRhjP#wq`yd)qeHA(}o-#N+$SV$^Gp6#Bd2Lqz9FmAX`BHBIcD_Gq&Lm z_dP?1_@zXchz1AKX+GPSW1@greLWp~xi zuSO^zLgk}=^p{!b9cEsvI@P*e10(C9EFrGo@Lw?6DjJk4)TD`Bi%fjNK~JCh4Ox&e zd$cfcAGEye225}5@#&LoXW4aDwPj(XR-`|5SSIUqY8H*#FmES?3xSBp*s@Jz8&>93 zGY`aECG=T%MSmcj`<@qH0Wy?9FcNx5+sm;)eA8M8`U+(~nf`!N)2DW^Z@f|4iCI&r zpL>w+;(re?#9tFW*Sc(#ax3(`5EA{jW#I{>y($P;DLyZKiCBet#QHER4h}MO!zs%tPPi zPA|ummFJYVmA$`X+pI+d;BD;s(+jPW-!}lilc1+bVD_Bt6};aQXI(CJG4or#oqh*a znZqmtUG{e6tT#{i&3W+2HajTUvSw4zgc$O_+ydaKW5A4<9rG&yT#t@m|9uWzR44DF zW0E%#+-fY$qkB3)DCFQ@?@HTtZ*Cn1a%5T3hx<|AZ2^|A#rMGH9#Ec~MsTtMNFy*1 zi9iCU`!ggJ?9$2y-v_pwRZkj^O~q;d;C5fQ)!Lf{Ci@<~gYhzy-D4nDr4i!Sl*4JW zQa{Fi49|LS=+7*&MsbC#{(M^0t~wrsR{p%VJ+|)^GbO7Bt0itB47K_JPU>)c zPp{Uysh9k>Eb9Mxu0nCjXw<5fRrFYn9!nOEDfe<)=&9TY#^j1tz9VB#DzxHBcy#d0 z{QcT$*wdk-UI)#uL>y6|xIQaHt+zUY=rRqC-rtK9ZEQTw6?efQ`o_VH>#8}YFwBr^ zkr;*qq?8E(|kM9;f_;gnO2c#QL+QK4AhRF^JpCB>Q8eW#kyfcfaey`pM1+zZ~85)=b zufsnm6el(|XHH`7Q0Jy#>WP~)q$Mxe@FWx5_Z8v-CrO%DPj^J$)R;(GPo^5af@glE zo3T4{dS5Je3Y)^rc)m0>GCVRrMq*TP6zY2S!T?3Yq_L5W)Z)W5t8Fxv{alu5?m_O> zE8WqAm+=k2`#uB3k}$#LTQ5j=zMb~gVi!(jjkQFThs>d@Ge0OdlG3CZ?`d67R>>SG2pA}UxBm zXFzhFy@>OEf((&s7x=@8!%Hpmp#NoPq4UkKD0|V&swB!;bKqp5K3U!G2`U6iP}E!i z3~I7UFNWiu5hn^+1eLgv5q**CnpiGNOhe@O0-%A}$%f^ejAVK!R+y{l=rTI6@`;!D z(wO^jv&0|e?+%1(^~?H!SWlH>^Q*sD*PGW2zB`53sS&8vZ`wWVl{S<_BiVSiSS{WJ zby4tC+)+REaPBcM)Zc2ahHAD>_ns+^Y{tIY(@r?&3%H0v?y;E&7#6!*FKc}RkZr6- z(eT})@^k;aHc4>_o^VcW*$o!-(C@h$#s%eW3HlH&EIYScJKga>*wUVQO>{gbdQ|q_ zKOZ+XFwvJoro(a8kcupAkWTX+e&?XkJIsBa*l^``tTN9sjNj8lF`l#T-z?GesTrAW zk=?CgI58<}&(}_pISE>*VqkM(VNrK%H zz(sCRB59i$=Q79%0oC6kj}5^vI~jLJ{ebdG?|WK3aLu)|$~Zd$a7(?hTlA$az>j+< z#f75uS{-*T{ca!-voxCVK#S%tkW%82piJ>k#%6Yp#}!^z97XSmU28t^N@0`2y$!5f zkCW?U9f>7K^aA^_ge8h=T8@hjh2r0$?{J?J6FhbkGTsQ43ISU;Tez({9n^>KUU(1A z_mM*+1`qdXvs9wUJ5TZC75Nqr{#TIxT%^jSTv&Qplg`*7BT>Zf@MQ5M?YBF&fn48R zCyvD5JZ5mmhu(IjK!h3FOZV4nA)G?}fNRr?Ihy&^gOiE7?OV8VHJsiY`riFnl=}(* zbguL)-LgUna@uOMhuLIBD>(5LVCt>Qs!4Sc(9hDBzS_W9wJ6IyMBi#Yx?w=o5-Ezv zu_vXyCysH^9(j0^95n$(yx6ZCgD#`;pr*Sng7JYju>F)$jzZ8Kp+$idQnXLs=>a~> z`Pua4yX1LHOs)a$0j}+?!buJVaukqdecAC-bTzqosZj>jz@g)`dpVwOB0&^rrp})M zz`C3Ni4eyn$8f_xUj~>$=uuH`h!o2rWWn65WmPOU@a)!zXE}b>8 z#WBSK8;bLtjMe)7pz+QDl$d3RKaN$b1HdnCZ|PX`xN7RS7`I5`$;5bp&&pW-{{TTa!2{>;E{Zl@{w?mq?KQ^?oSQ#PWX&gk{rYyf)$f*} zY*3fvj5EYLz8Fq!VR}imxptGBo+);c+A+~0PU3+I&j*&V!-y8PJumY_gf5SKzHt8+ zWb@D#lsC7l;zOKH(`>3+2HlDGk(h>=UxDD1%WJ<~+^ioy=kuyv`p&{eY0cuSEC+Cp zaU#S^V(NPZ2O{SvE#90~4lSsu=yozGTy=fefD2ngGcHoenqj**5}Q&N$s=yx+2Z}T z2ZPJR934&8@oe?c7osSN+UuKLyykCcP!^=b?b~dh-&SGzyK6;<%qnAJYiY9PEwEYs3%a8f z=>qRA)!wR-6w~iOjDyI0D=qDF5E8}8ns!6=B6k#pd>CG2t4d}ZjqVO#iYG1DW;Iu@a+x0{Ws@TG=p0^zyQG7i;3hC$J;|Cs z4<^J#=9eSQ2psUwz>Qp(FQZ|GJP}%CImM4Umm<9WR?%mwg{2FVP>-q8lKaH1a9FVY zC<^)9LyEB&jx*ABNi6SOm}RIBYuG$9OU-l*Wl8G%_|yEz&GKh3A5E!~pe8GZomaI^ zb6t%2Ef3Vn)Y`>MB(Gj`2;t#p!t!QXP&jZ04=Rd(CSK)puM+oy@_Q;(yH}R}YlMx~ zdbbF{87+ezhaq_^x|_!=^M;4J`6r<$Bof9Ba9|0pFrZ=QJ}%@(S2R63_=F67<>Sug zs-+Q$tp_nLhTge5KW6cT`c0j}lK)&%g@QJLFd1b|TxS@4$VGaD?ULn+E zOME4y)DHCHNH<~HcR@k{<<-8}=iQzk*P`11ayI4?V^nkl2xfLuR7hEF4(FWQTK8D6U5N68%X!gd24Q2S*`Zppx)na*N_|zv60I8;b|Hak%oI? z0El@zp9qG&JB+Z>q1`cF@z-+vQ_nA^{4QI0k(9_;2v-i|#&o+6NpwEE%X014_dJ^` zPfy`P?qTX%I2;Nz4Fa>R$}gc|Mk%Y?Dn=aburKsV(Dv}yd~r$XOOUh7Tr*jzXvf%N z>hL=&PEB&-7Wo%&T-i%-Am+qhfppg6@mB29wFI1+Le$N=1RvQoB?gWhyk_!ns-)r% zYI^3}qpg;?g^6{be{-%jk}69=pRjoIIp!m>1{1cQTDI2Skihsr#Z)kn+AM}H;(F)1 zj)xzRKJeNRIzeq`_kXUtKYtI*1ZWtjj)y+@(r!-1;WJL^2OjjZXT~7Cx5N12m58?s z@vOpRe?ExKC}plIkaRh?yJ4Q7Cu#ojebabWL`Jxc({P&8bCtdIGbfp#((Th-1dMbS zxYv!Hm-rkN)kP>Kn`18&W>vTpu+SDLA-mNqiXVP=$bCL%O&}$?LhZJFW0@y({An!0 z_pVN=N^ih4FJ4pl%PBg6XO^WqB@Mj}mV!>+EC>O<>M-)g&GA_Do)ORW6AGrLB)Bn1 zBCUZo4RN%Y4nFa^nl+dHlS=D~eJ`o8Re-1WrN z;VbL*#<+iG;L~1adtAkI9pPo+j!;7r+V=lw}OZkH#YfYiANPKV#rF;DP5z_om zF&{P5FQ-^~wa(U+zoI9L-U)&wgrO4OB+E`CCEk7vT&5YAxacd3Q~wW&fHH_cVb+q8 znLTK5dW%Ry-?$MT@jFw_qag7PI<>4vSH7J>+r9dLea2rX4kHZ;;z$nuIM?fe^qHQ$u@y0N{L~(v=HUs3$!mplI1O?MsAASD?R3!7jUT?TG zB0rpid=O?nhtapbPBO2)$M3^feG#6^{OaQ` zLBN`6kO_!?=5nqT7;L~cLvfzW$yk9OU_-BxTr_idb{#R`u=^4dzkon}IhKaj)U*Q|Y$2w52%jG} zb6v|g54{6U?O(7x)K5~{-i~o`6+Dc&@+tUyQK=2~$IZoEJ$YgO44w#@X-tPXWKps_ zH$J5vtYGS|owRKOrA2tj#_T*AZSwKD-9K3Xe;$@{ITxmR#Ic*_mDshmtiU11sr&75 zN<*>HW0;Jo`jT?ep{hT{fDNskd8#i@S0B7Q#N-7?ba)LnUCOm{&yr4(X2n3)6Uui9kN zNESnlKe8R>eAle0lE~mF`=-MmT^MBBEM$0ewEkKwuczc%2BG*H4^2EJ!%1ynH#W^Q za06G3weIT<0^m?`Q>6HE&+F>Hj`iQX4h;-JZw@wodGtUNG;8D_Z|)2aS$%>~=^B@g z|8qx%SZH8>^G1+*zuI1ufYj)w;Is&pPWp*TonusC4pk$!%5|XZUsWZM`wDfA%=Z2n z-43kW(~L|hKl1UmEoB0&a_{cyLxuF|q~1 z#|(V+HZ1v~^=Ifv#M<_)@syhHMHAouzYjlTM*{$g!0h_B4vM}3KjCVyQrg)Zmxab| z9+qgh7R|h!yT^LsuNz)HMA^rBuuz|Os^|PkjGzr9$`ce_-dxdF&e4VXm%xuc0SXJ= zsF>Ul&?-8D+D7hT@#U$e|4RKU-vcK${l&z0EUzc zEilDQvwM$-37E%f$vK<`X)?k}^w0z4V=UK#FFY+gLl`YmJja+V~Y9BLahp;BDWapzw; zc}%zcToQ8MwBgQWf_Rvmm+l9GAhbtVg{D=fV-#*i?g|IsjioyFP#A(412iJ)PR%yqJXDKJg z8~2yt0rOVfStBhytTHhRcc{x`>1jX zX8TvLer3A~i#oQF;*vcpuC*`q<>EVzfHi&j*}}YHe`bOufe4vLw-#@B{T|!j>*qdz z^KVtu*2$Nn%ffHKGM>j?4WtzBIkv&oBC6$v9s_NAHR@ij+O=;&56J;U3=4*lp8|QhAFnr^RUQPIK@G3W9{q$9Hb%8WX zpg_k$AUm=r9(aPzdHAOSWG!Y$fKAR+M9+I*9on5iF;xS=QLIHhB90nWt zuH55%#|l%ON{yC3FZ~{82(ku=JJ2d<*Ha#{krr+rFMSR?&)XgO&uK;)BuUo{wU}ss z?!g-{TNT@KX}y{L11(TVr?+`33bKmqqF?Wf@*Zc=woVWzPF3Q$v4}+Wc*y`6;enkW zl^}}r9LY5npBk-MZlVG#!|v1k=|ru2dm43#Dt)rtEt|jxDXQ8$zHhveb|bZL4cdKJ zD@9*fb#4+DAMdTO(M3$NBOd98O%ekOc60l?WBVV3`$OB%;h&m;H=WP&Ys>QQo-YfM zlK)0V74{WUsjiP2pZoK=%I}isdlD2+gQ#3s3JDOl^`Gwb@}pY+jogwyl|mRCL3-f` zR+#dnl>#+_QG&68;ipu5r%FOJm8;_t}X(!|{sIaQ;*JK|D2Y8&Y!?()baa4M$b&=(aNWkwRfN&(QxKp*-jWP;#2&3XP2#T zUp0phgRN)7^YC_Pgg-xXkSy_c-g6!I01$)@!=i5H%BmSbo*>I&l|t3q?m?}1R1j=o z(8#Bp(~MC02wRN?S2@K+k`rIe!c9_HmUOmYrRGEw6M6^)9|Oz;En&=H*OGRNZO(&6 zqc2a`_1i0$4}5}TmgukQknFGkn-+i7C7F~#FhEwiNSa1m(za(tVy6q#mn_ZYvJg_< z?91+%Y;=s3@%|a-^F`jfpod;!ZLUyoI)WvGi7oX6o{e}d9z+YET{%RWL*3Js?V&V>QkIOY zw862xUzM*n{S$-gz%1kR{qv(m#+LPp#)HSgXQmsy0A{Oxl|>Ag#Psi2lW84QFLhtE zB(@lD6V+v~5<$*O0}_d9Qv-L6>7XxUr9;Av0+ukjZeEb@#*D{y zM$ej+>;#KpF4rd_=R4DT~G7W&ZIF?)8@ef6l?AXrm#u` zoYfF6>kakZ1NJ6OqBF+TcQ}kT4Y#o~Iv&oM8X>|Ae7hs z8u|nmpi7XZR;BYw1?H>lF5RO_fc{g$bUic9U9##?#U3Udb#Xg9RdZl-OVv|4cCLQf zgq^hR*l8NN_e9oaoRHAYbovd*mwZfN3anWszy6tRE(L8U{#a0NrZrFUj~mg4Z# z>!dRZXoWzDi*F!Nqs6(5d^4WJ`gE5H-tEa3JVoM@;~mV7%`sSata6k5;3aC`F>#t$ zS_tjtPW(x<&EYFYAcFfV&5!2UQO`1e2mJn)^nh-`Z|etB?(_*QJN`=Jo^8XO$$QMUd=1;=tXu%G$MO$6=yKtl9M&0Du!?6R=jrrlXfn zzEczx!Zgd4;mM2a4A56(UoB^T+EuXpc1%F%qfSQqtNeg}k8XJc46Z|}N}6$0hm-@K z_=R!sB?f7&K!H}$G<#x$4rx*!ugOtAhdf*CyJWKnx*@FZ!5Xr zW#VlhYxyt@@~64Niz)|g(qj!A6PWQyljWV`w-{?V%I~v9q!ORDfTV=3r>mT^@ATfA z0)~2SO|xjG=Lun@;lFvuT$Y{wV*gf9r1#jJ>o#qUDfm1uQQZNk1C+PbRrVT}ZssVn z#)S1C;@NzWSvEW=Y~m?5z9)>8^l|P;lll~6rDdM+1}mVqH}1{n>#Ak7vZwK0WGP#q z5_2TBhiP|0QM}I?REm>Z*S59oP8n48xZn~Fhxn=L9%egKWh@~_RWNM48m?@FX>g0& z=Ez@$IJj>s-G!_whxw0YSS8WSe2>NyL@G?zgq`wD^Odbg%GLajrY*z*5fGJTCbHUh zj#POpfah}6;{4kkaS{*#P&ka$)oLF6q9<{a8R-tkE!*}P3|bIn(I>qRo){4yYj&}N z-;z!Xr$+K{vtHmMyG=^!9;_ZNx{g5gzPBg0IslLVme6TK3{n!hC!r>H0*?`S(W7hL zCH&?x`|y=0(c^a$ZDUnf54zkN#nLFJ0T!eD4BMdS1aq?PDb;05*GBR`W5k%*aT=lS zFjc1I%05(vlBgfu2vky_WCS#_5rgY00d@fIb8QYey)qWepF^jL?Rs zQTYQgY_Bh5mcwmj{w^{7B`hcqAv5AR7AKd`DBGu}NnZZ!jcS}1VH%pgv#VCTTJqR5wxuc5pLt}J)x_(ZLSiVci8A;7{ z6_P5w2b8tcr@n!hEd!MgT5`Dr&eBK^G$6vYIJP4WiC_Kz6NZOLGYA?74;@%ke0wef+AEHL zR>d>Muh0t6*?D!TAKjFgSW8d~-l&>2F#h&-^#qq<3#jBL!ksN#VhqEywfJ>zbN6wm zY@ys~ie_&fyb_WxOV@s+96}6-;PNMq#S=C^{yB%xRPOUmd!L}K~B6+c1`)98nvdUWc--+6Q*W*1FFMJZF!gM9^ZeU zm=O5yXRjUwpM9?UlnpX#`Nc1R;-oz7Q!)CHNs0$E=|RgpHq~C(x|@s`7Aihy6~;4* zHA%zj&esv7?_TB}zv^9z)QWud;D)g?$tUn#<_YDmMMbT+HnhhtgdMb#IU~QC+<=up zv-qxF;P=}AZ~9Q{`0Ha)sUyb{o;SxF=$0)PK7Z3_q)rNiIUy4&BrPw1Vr+=f7*#~J z5zoPrvw9OoTFR=f9ypisn;@Vtl$Yj7jMC;USE-04EW$55j0|@C0Yy-#*v~sXpDixg z+F(fR$aJ2T9NjQ3$JtCQWg@{l6 z6t4UNr~Dp#UhLD8YpCf52F@m;@uSTC<|AgIR>&*^3WfN^*ys1RXDoOCT zrN+k1acLg%R5dRgSMO|zPCWB;Rta?zW@g`tcYQ$^n<2=nqmTpHsOwwD192Cn!|?1y6r*i zWEI1K@MPp8`-oFE6ti|U1V^?>wr**v&D%eBX3-QPrTU>zkk@o8an{t=?4t*zf$T6Z zS9M1z(l9`rNU%P#<^ed87Dah$y&z`2B}PLqsM_U`2hiI;B;bg5Ql)rUQjFmA*S~ zVm&1MEr^lwc!=6m$BdJ5r4#S0?gfpA@7|5N10|ml!UF-tMHsSGmEGA>alg#j&x8`_Xn$lP3%{I@Px%6 ziI9J5=>&&r4LwROibSrLSu|S9Dwm5(HG=IH_4}WvaWIo$>oI5*H?J0UCih)4S9@{! z6|GktneB(=W3_g7X3rIT5sy}76eH)t6(~-ga>Npgo!%IGy$V#=v0X>Ua6Bif<3n@|XPFuvuJcI<<6MK5`Vw#@r zL=VOz?Fww=xmtF!0xwS$oAP()C3Bld*f9t=fQHrGQ<7Fa;ifv?ovj*?nIfFmpfVJH zX}zb91F%=*tqBR(M7a@<2#gLQYW{wp_(4b1;dFMg<-Hw1SQg-*xW?4HsipUH0~lx! zrh8Mfih-+C+0DnG@`o$$VS~B7(Q&UHskIkqzepHfGa=^= z4KC4w;6|O;mV@eXI~ddErzYf^!KTkt`D_|c7CkpIY!C%xhx%Sd{C`A|#3wJ))Jd8= z0ax?_GGsg8rB!`kR#??r`b11QF>K7OgMyv^8bMdve`3I9-QEKF2YIw^aG}Pj@wLo&Q*%#h4X%vCHslh&&*A{4ZHAIqFTsw-SPLJ{ z$nfl5M&jo3ERV=2z#~Z_U-jjY7fgo26%s;2AEz|f-6|aevj)R15J%zyUsW7n=8K;4I{u2O_yJQ zSm4D=9!u?!Vm{O=ez0Wn`ZaI6jSy}+x_P1`hm5nukf-DYv>_R3-2u1Qs)^gjk+AV2 zgNAc|_|X-5QmxB`@tYC%RPqyYmTMmJ`A}m%&Mm}Ry^`|f#2Qd?nDL0CBAV826{EKtQc~2e3JU>_eBZsq z`jEqd1_DuDVN=8l-G^I~q{@|3pmlxt@tp{Zn5y3HCg*cJn(XQf42o@%2gtMnng@|~ z7w}onCRwSphhwtK%8rC^Wic)ieOXw~u6ET27Px!0iM7aX)<8hXQEs+!*o|>fUz-)i z_fP5|E`Ddkn7f7>nR{cUzLZJ4&L?B1PLq`CUkm%$yi#2bT299LIaZ*@Zng|~bSf(5 zGI!UULV$r5hPZHETL!*K7HzSTE?8* zQ=VH8f#Du1H>4=UEt{)?P8LUzC-pY@=O$kN3+d8(2oFuTZMFE)p%7N}V1gx2#+cud z;=a$!0gf`~{Ke?%Xo*Y4M;bYz;-;xWnpCmB1Hwsv)33+~Tpp_1PNQ2tW^iptsD#nj zimFOxq!Lp28($mrQlFg%F{(7kg)gG9;ISZ%(z4L^B}v?g(56};Asr0^x#?V$O$;CJ zj7VZV{>!Ig3Q|K_ghVSA6KD}dlHq^*%~+~j4UN9YlXK?22NdP%NnE~YSO%s(pTQJv z&*Qza2hCx4*WveXU7-H?X#gbgVABSW^*CQ1+@uro@1#{4THo$IvK4z!*U zd+Pj}DrjE-|HvTpS~F}a(f`eEO#25W>x(7naP{|AL0`zVGkk=I)5%+87wehr`SA=g z=u3mLldh+sveysUml%1Jl>7bF0Tl`ZMrd;MrO}88-Kit&Fty{#t1yM4DBCRG0)|?` zcN}e06i(ze;XMLdXw;S0?@Wmt87-4WZVFcnT5wdM-({Re?OxGfD}PEtJuWl%X#*x0 zt!8k#b3EmRWZ^LHp+degvF2?idwyh8Ia#=&UcdPu`R|InhzJH-I%vXj zktUIbQ)%Qn@e81uYz~joE}TG%zG+t86Jk+agXM5PIw%wB81vT{+l@|`L~jX_P0*p0 za91#$^A*D<@RX*}ksuSn1+ND%$D50tY@Ev|yh=%5)Fs)~2yM!(6O2fJ6S9Ku6S4lb z`(=H#X3^*q45+>s61hPi6aLNnF0ftE`(P^5}BE5Sh^(ejlEzC z_sSfySO!qarx=@?@6dE;75+r2I`Zh~X?bNnvf|q$`}O#j$jJ3AKtEj7=x6V*Kk!L0 zS8(xJJZ0Y7P}EsNpZG~j%6C5wKR(h-mQ1DVPIl*S@nwz|LcP}fPJtOLiC6fF7i_bZ zE%It4KRN&)mI*iK^5eUo@%miL(0jH-6M3^)Hs^9#Y4-t+e`I`p;r2>+A)r$6bm+-R zj&;A8OeSmD_Mr-?xi9UG*#RqWF4*7F#Wh@h2lV-fgW|_9bx;f+^5H*vIaViI=_|KP z>z(_hT9Bzb3qsxBjIROf)Gcd8h)QGLRO&H&~4o^$KI7>}zpIK(Ti z$-=H9_j5NGA_72Aa%%IF{PCGkf|b@c*=+$LmvS6O)6t0(Nj_pfcXfoy*}8gc?rPun zF?ehXUENSk%*(yQLaYo(f;l&gX;zHoiR3{f83|R9`svj`n^4x(&*nhoRK;e~H5AFD zGTT_t@pDk>)-iW@jP&c%4vJ}oBnm~aR@v3c5LX9HhT(%J$RHY}6&54|U;$ue+Af1i z-}k(uy#)JbAb>6(5ofPs#vmC7pPF$~Z8c zve(?{R7tx4yJwd?)kme=F1>|xhk4*tMBN7;v5{<=b8vC+l7gLpGsfbseE}72Ho=dD zu0gg5lX;2ZVCpDLhbj0de9EDaVy=ca=xbb{aq*nUDcfZfg&LPilQCyp%ZVkeki90Fq2daTxN+_7PPgsP{s{QZK+2QN>5~(_6R@iD3OF0l zH_jTYmDEBj9}YdZmH7%SidGSdi)OSL51F}=k?8X3ILBeZgE1c}V3Dzx;LyDfkD*Q1 z55lduWqN*j0RaT}gnRIdIA#2Sjrq5>RCBXYPGh(8sWbYgw95XG$2_)^ z#YPCq?&1^Q2~^|5MWu=Ywf2z_= zy`c#|1;oU1ion6*sKe}OJx(T*$dD&OX26?0T>c81ZC5dqwb@5oZj3f$A|(jqvh>`K z4vra?v2F!pgK@PFpsk+nbfJl%`A!8H1nLB3Gc{F%=u7z9Mh`5>3dnuKko<;2osgsK zYY23ErcFkfg`~A_qDetODjOByinip5d=cUj_#9qqdn4ko)oD1_2dih8{+eML2fDAAa34^xP|lbj4R_HJZ`u#~m4y$X(xA%;AJ zI+2Z~>NU2B%iy6TEzuxW;-I0@-tTMt@ovhol-A-s^A51ZDt7p7&Wr>BfA6^UBq--Y zuYJnOS~`l#t#*Gv_frE;7Qyx|;RX~6=Mx?HCSA15mC|e?id5TB-o4NA^JM%BQ1EV! zvyurLd?}I&bL{Ku@8$bO4e~AeIV74q%~u9?FqBd9tx-&*)lFbS?yW+tzj$`b=C+xx z;-wcmb&ze8DM`_vCdsC^nf=Y*jWY@P$X!7h?^pT*hS8~~2Xu|L3m5*hI;vQX5TIrI zdY0Q@s>}_2Ex9y*%<*|hawFu*Npue+##tlQ#j(@}e|WjuY`{3E2a*R={_@9GpO+^~|tU0bcfvN}PcKQF9dhbB0!}fpNc4QwivX5g6Sy9M1 zwuFkvs0bOE**iPqI70SLA!YAb*(0)dIY^m@?D4zbJgc+JresDrda9IU! zf%T3VWC!Fem6fYZ)-%CdF`YDuoQhOjG~=?0Ne)|fQATTL$eVPYA z{2E^B50;lX31+z?aa$nOZXuEU)2rVY=-N_*-J`*u&(>EArH3s2VkKY6(BaO9e34Yo z&>f?vgL`Ir?JD*!pa(+HoEsb%SMDr8yeA;Li(c(2?)+>!K^%f7D&aLelp$MLCDx{; zaN{3!C5M+5CU-ZotwM~(X@8^ET0?Q$HrCc6~ zp@%c!#H$r2&D@7onU_OF>lO(b0YA#};Lf?(c$7r9BKQ#bSw}w-Fpi`UJWK1~KJ1%p zfm}j4T@%c7MVdbDq;CKKlfKp$lmM%8@RWD<8K-%$^>=F1B6pF_>v8s8`1)N7;SryK z8DldT(lZwR1=p-)e$-SM(E0#oKOvt#S3v!Iq@tR6=B_6gV){5eh-d9bsrU89*2~j^%cHIlX(U~N$g3wlJaqE;#_){iGcQMBuClpmC%D6)BhVTqoeT*Mw&dE zz|_%Pg=Y7|=aXeXW3`m=fS^@&*(RW~DtKPD0NxkC=zyRNsSIn9Yk+!Y15C7wd^x!u z8kUaujNv&cZD8<6!GK0~P1}l$uo87c%vPn{H})O;zwb8lSijMaSfc?{O&|d==QZ();t=u9;xr?&l}!5*8K=C7uznA3_SPaOXZ}$_WRvB zaAzS&V6(w8a2sGc9?>RrL1NyFL<)Lv!-`sd5!iYr#goE15iy;0r-2rc7ggLD`3bM< z_=l2`QbhxzSMuHTEDe@7oSI`ZVVP!aH59dr*(;e%UAe-O#;z=%sG_{QZ@zEXE$JGY z!>>qGqyq!=K5|!pzbn)rWIc$UY4@fmux@y+6dEZ1~N`n9G&r(IvUR~D;ADg+MPbU zwEs{h5@AiJu2I-s%lK3&hX;T&wD*cj>jUq@T+4r50_X*i=^z1zF2nTs{_GQAuX@c7 zwA1vN`759(HQi5Z|Kv`bmmz2G5{~rByzxRy(<0J0FGt7@DrcU#7Sttt;{6SIJZ7ag zh(eH-%#>F2P*_T(u?L@M`-aLT zKM-7L9{Id``P4xcFEtH%Xs>e8JiiZY{s7ZgFOM1?$6)o~DjIcJim8%Xu37uV0x{?)Mn!8+^k-uF_j)2YbUwKLCeavw7+r@AkL z#_!#@6H`knboWA8SqY!(Jh$fmft-q_1Gx_-)kAbHfX`TF=!Xwkn1|_GxsbEkGLwA? z+l}Yv+(G$gU{do0Vq}!aU<2bv3IBWXIuZf?pb%s}snrMZZ**g6+CetfCn%>~gyZV{ zp=pJ?Hz0&_N*O#b4CpU?%pY?h=G9)Z+`URXJNAu`_UlMuF8}x7uK7E1O6T=6@#Hzs zwe;k(zkd&2HN7~&t#ey0vtVLl`O3EVNtdqFzw6Jq?B1b$KeT|Ipr->v)5X|+i3lko z7asz0$iEum8bhXH1s}Ow1#X3>yW;i$qhJl!>#`9_5p1e0oyWi#(32|rd{)Njjiz~) z+-*TjTAZt5o)<`6F2kV8Oso#U+E{|)J0|03qn7pgbzoP_0a+ObUxU2ZUYu+8rnl zhaP9B_5#$0;#6=j!|H%!zg3MFeHQ0W68Y}zAM{iSg{D&IC>5&}h)T^Y=K&6^lQan6 z9XissvIDFZc_ZO3Ii<%mi;a6hgjAn^Tb57OpWinPR^%n#_;hncOagl4W*k%M#JB;F z?gtZ6%YAG8P+#`%CE$bIQcM>NN{lIxNB@WTGRyp~<`OBh4i}Bi@9Z&ek}7CcR1pg% za1PwXXP4;-Pf`4nxQU>JolaBibEGWn#n< z-MP6T880Xx$T?~Z)*r_n+_p=+cAFd#)^Z0yBOp>Kr?d$nMdU(e7)!wKV#bOBA#((aFXZ4 z&(sA76OOCP+|;7xvdL!ob5BVHo; zsXZ|*x_}3KgOTg6ra5Sj^arF-#n#j8Ti$tQOY6XIr~=rw$UQGtXK;$0kal@OcEmE0NV+u& zP~e!<9t_Q5K4nyqMvGK2x26#y^LjB%~URA1zID#S8yYZZS{~ zTYx4Cbj~i0L3rweZ(ihgGz?4PtWz~^veHZ{AY$Jq7IEcBI9`B}EfW)J`1||6z+1`^ z__|w9uLJ@$Xx1Oky>kJe z9fWpt_?_`l@D;N+o~)frhjHy|Nvrf<#NrbE)fS45Q0*Qg(YDB1;X+74@d@y24=I~s zZdb_+(I{dnKuKK)7=v59wr_j?-8J4Jii?Ts^S%QhR7|*_go1k;MUM@7aUgyGlJQ#HA;sC|z!~jptcIjj`ho}$hY#P4BGz2n3vn=d#-IXeX zbQ|6PVbpkAt;}z@&))rU}`2*EK**mx_ z=Y0>0>HlhMI7wG-cymeq3++_?+YE?5{UOos z|86=e)#urreE2<%3V-Tjj^Ts%nB*}hYSVKaff{HAWYDtA#6W!W=FHh5uj#i8*f_3e zXaRxKj8jDD3U-3$cS^S>b_`v6f7EFs>#mi5&jES6Mw}$J@<^@sbyuUuL(>d(zAIr{ z99bpx@L+MtrcdqwJY_5Ckr$;SNK%4qQz@V6USZjarYP?FPtgE=J%YfmRS^a}V;WdS z)TPMHkL4P@s4>&FB3V+^nlO41aEe+EZh)WNAiM$Z?P03l)nNJo{SB27`2=J42_d?elabGR{DQR>h1SgAU7C6M8Cbs9f-3aTDRnl;o1 zxg9UtMW%y7L||WIBGl?D*3I(K4Z+#`{57B-_zWq@CO4{~=BEP{)DA}+c~f_hRRT-` zg( zNdTjpCgH%$`1;6?|G?d2QJ`|5cjux7JtfxU96B-zs=CYh`W%TOFMVcV8hYf~Aq5a4 zQaSXT;yiQIoqy*b=K_GY&7{3Ph5eEtkGsl{k)#D%e6yEYKKFDn#`HjLcjPh_oiOvN z5^yJEX+Hs3;SGE|GPK-)e=U%8{C9G%oI{U~V^+Br%PKpA%(k?Fazt zBF-mw5O;$D`yyMwTTA=g~cnnrn;*4pIs3Kx`Eg+0dC>MKY zAIy2};fwpbz8oV5esX$uokOPpGGf`daMM=Jf)Nf&^z;N_oRy#f2~6$i>H=e1md0j= z|J0z-cyavL*KAUDuj!P#^AkRv0CR;=U(@^TAAas@xNy(J(_atAN?^A@Nony_-hMX? zl>_2!bt}|X%X#B7*B;1Z@5CviWc9C=;KV^yaT%2ZjBEA*Iy)ouvB{wQYW4rLRKc_0 z-kJp3?I~zBSRl5g5wna6-3GW!77OKlA@Ce^`$F$PhBER>w-XQi8u1&G3UIMj=nZOBDE<@-h$MW;cK%V@gTbBh;p`xNWs>#SzKRS;ceL z=Q13?+#xwTy?WfF#Dk?M-{=zly_+&cVD_n0So3i(R?le~n1oQ|T_N+L$Fow-LBMp< zr3cs;Kr*N5O+eWYtav~EKQkS)8;pia$3$g*4vN+msCC9HKYR~vg3YFEw^+lM(9324 zFMD<1y26nL>e>o(-g)hbHV1$0duIE!$iH~+a^@Hw{uB0UW}Z(=k2Eh30oM|-|s zeYj69dS)|~AD@_y>;*DLsu;=Ji4O#E<_0TX>2ah?b)m_QkCRSVY* zkJX?O9`OJ)gU%jRtH(!IOXP{r-s}wUeux(bJX3zJlHjhV;JU}zZyfnsoi70EK1c0j z;U3K*QLDuS+{RR;jowW%S3Us8C=qF#Re_1A_aN@l^Ww4pppW1%6ehSh8VXzoub>tP z_ZD_xOQk(GzxQw<79oQy$WGW{W=ssMg)1HCrs}=Tqs#77C_<`N4p)sd%tK5&9|@^I zZ%MCC7Sfh*_n|3;tetOf1g1~~><|FGq zn(FUj8a!!cu2Eqre&n>`F1~r*b2X5oe%~0}DV&&^?IQ&>GzWojv?D zg~uL#G}NX>bvCVO1WUhm>!^J7ZUJ~xjB{U#Jm#r=JKspYTaf4pXys)}`bdpv>^n*j z-T`t!ogmB=q8aCX%^4UfJS562~ry- zy&YehEYEiYN00H%I1eOPf*)LZ?np6=J62N;bU9})bGLP$ZT^jnuY?GYKoy)aYoB2P zed929o4g(H$%2gkeA@+{zND1io)vOgM3*cBL2@l{ye+)yV3Bp|0VeAHADfTuAlEZ; zGBE#zUOYJg(0@th`7@&-p|m`XW`wa`y!mA8+!#9(@e3aY&kNAck^!QHxuXhj9aJwU zR_B<|a^kXB-N1mNbbRUFi&ejDI!cJ#^bqjGiyLk_TA*OgbLbn<2Z!JoZH@FPEBmj? z{~CAD?glVWrnrAO1inA#X%X1fmAeTEim$2Uw?GOplHfcz=a_RWeGoy-x)5-OA(6_@&_5riJGzueo9ey?jz|Cnp8 z59o*1dj)0ul_~yHC2{IWBDaA7x10sVb#=KI&&kK6X86Wxj{q4{w@l@95MUr2azI2E z&*m}t*t|R5IIFu(__}r@Sk#;#+Qlp?<{j|fHy&{7Pe6jo3Qw_(_XaS?FPPou7gr+)m2p(t7M}+0JMsv;s5)TmS!HlCs&*8M*UJOp zWUVYaafy9uWxqkWJN{NT^S5`5=Zs^m>GQJbWEGgHYY$Jrg33UI4MZTt|M%CgAwF64 zz#9QCU;{unKOj)Dp~aJb`#&r1YwbQF9XpOK*W`=z5*q9mCuF#;?W{X!moMd0A{8;ws;x3 zfd0{9>+xaY?%UVkNqIErb{0PBSukm?CF($VW#IsU6z|Ubx@%vF zYlG|}h#2H%llb;_`ifwcoe>q+HsF%hTk>t4%VqyxBNfdC$X_kgK!4-t_kG9zTcI|i zWvTfB>DPc4?=HSZDz+!K0sEW3r>y`e_1x0)4maEsxB0dONF4i0p>^_THywRWmtVFm z(4_P*duZ=TH>X5B2XkruDVS86zU3s2KGn&fZ6;4>^AGFh=>_ihCJi?*{S?2xF#q{S zfJry@%#W4E@fVr8$$9U5h2svEQwFGH3y?U^t>z`4!Xv1H4W3pHQVM}E6>hC;pWgx} zK>76f><`V7*%eokH5=m%?jQ;J&@Nz}xB*m-(%s@&jabW0E-hXn1W}zdmCPa@G+W6x zmNnbeooaYt531wjn@mErUb@+38P8Z>&S)&OcK6l(Z+`^QI1_n&R2KLZcXQLZkTdTk z*GmXxDS231rU#~EFY*^B@4O~xFY5A}Ff2zXjQP@xc2X)FL z%~%yeS<9uE)4OLXGB#BTFL3Sves=(dr7r1>eDkua8A~%rCr02&u?q^^=s9-W-r%T0+EgeRaHAEW!CHuN{#ck`QD~BxrBLlNL`tvd#9xMZMSxP-Tj2m^JsW&XRI6S3W0saVow6 z@U>51Za4rn8sZaUmy~lFXnp}o%0g>#+V$5b^jTfl#g)KD!0V|2^}eA|W%Ys}C*F>B z13f~M`zNEQOO^yhX;I^X(GTb8DhzCU^BRzR@&)KG{J2(;PbFB5OiR*m1HuFo|IifXs=RH<=Zt7exsM2W>FxeDp^y50UzA20(DiM#jT zR5e2wGPHL>R33q-HeHh4SY0u7fHr2*%!70DQrXAfXz(q42)%DeF*8*`cW|{msF^rU zV!(H``^g2Y0o)IoR)Q4=@!&nXini=;Vjvy-Mon7k1x)dji&;H5=Vjyova2s>2RvvG zubic_Qua!s7`zFW)J4axGyEU$Nk{$O{gKTTi}3)(xD=fdQUn zst4Zz@sq#c`i-B<+6A0onxV`-5A+JQBi1f*R)_F5dH}`clBNG3M=VpoN2cZ)aTVvR z!Uwn2o&C+mjgFwxM4(r^`uVK#lnu-Nim(kJfF|d$Rc~p!5d=Vf90x2by+uA7kImuq zqbmDAjbRUnsxIEsItPWt?0062DDX3a(LU_JLh~l&4ttv$WzPfPZ#Fa;&8BH@bUD~a zStQtZt9hdN&|BReL^JztEv*}7dasvEZ<@|19G(!9e!zreKfL#bQKoBKH@yYb*U_rs zHDp{9I?=Ygl7MmlJJu)innu&6=HlF1-dBVQ>C=>`Y?^*(fo%v2 zjb8^BhFe4^gmI=7n!xU{5TL7MegORAgI3oY;QzAlxqf1aHRLY!pnz7NKF@=arfXF9 zAT2qd(M(1KWEILBDV-u(iz62VRWhpCas;n|IYgk*lQfNmyLaTrxC56L8-)9RNKJ;| zy-r!owzdd()~^4dfa<)h&`R`An05d&`Ns_1FSyAtj##bVPAionGo)ob_HXZ!_l1aB zd9|9iDDtZBa=pGw8NzJ9#LYo{$VvPX?mXX1Jx+@Ogbf^z@Q)X#Rj^>v{2`+T{1H4q{oAYQ)7@ zE|c_edF_0Ome81V_da5B;KQKh-b4N4?25<{x#meq>*-fAqu=io+r%F%fa38#=nOtN zp(~GUpnfm4z=3d#e3)t1lnP0zYqtgBaf8Q3S~Hvd9c4}ue1u022s!kuua#-iA>{UfsU-zH zg5n}=?H_?FkcNEk&*nJ{CVnbJ9v5$|^HSAn@5T4q7s7ju0sfUFl>O2(r3JhVOiPVH zl6(c4z1Nq_et}YRZ;t>J{qgGWu@5Qf%l5w)ApVu_LE(YK(gajxeJ=#y;xR_-qeQkl-c{}LSV@@MOkY3vi**G

1$LU8?y-k)Ene&IA|0ZbjYE(B8YS{a81 zQp0?noax?*Z0M;IA=Ar8yM!en2Tmxw-NB&JTe8K>LA(pi7&rM(vvWeL1} z^EABA=YUrlFdmCPuDp7sN&$FnXdRFXXl}NMcHDwTQ={_K3{x#Du0B9ZBwQ3hO6Niq zm(UwPeq?%lYx66R^*6o|Wm(&h7bA@aG{8XPOP7HRI(yg%8*UX(wP4OsnD4 zKAU$cW)*{Mz0Zew2ELwU+e6Z64-i{;cp-O*@9PtE9@l;2?zJPnDe^V)ALP%Xm40J3 z6K$MmL`}lC!-%drxjXdC+if|~vz8p+A!!Gm6T*+5?;XHBU)KTL zhYqL%bVGyZ9oUplG?h8AUv7t<>O_ehW=?`tqckFM6xzh>Vsa zN-Q$l3m7BNJ^se2KQ7(3rRoL%C1B6jaweT1b8(7#QauKbBB6vw(Q?D1E>2g6@>@Y) z6NR=PxVQlC({J()#Yay7CF#JMYs)7OOctWhUBiJEFUIb-=rSS6Pp%}ubXlWlj+S>r zUTPq`o*eu9P?e*Fm7$nIPj(-r_1Y61?{B34T4)VKXyEul4Ac=aD~G$t-}Ji#N~y4y ztx=1t=sHl1!GGin(dIq$OImz@hIwzD9>WIlqr`rt!mJz-6hX%=%2`Gh@qfK}AwKtT zSu>UEX%sLSyGonEptTAt4rP?uJMiof@01T)I!GTOG=`4@ybOs+i37V}MCG-a_6pee z+s!3ZnlL26vC^7!c@26DH$o*PD_Pl0jB>0C>2dfht?;m+Fz((~DCFzEPe z%m0Ovgm%!1SBgYN{-0trhX6*z=X~K|W6JqXod*yymX^jZPlg!+M3hsV29_Ok(T@6ZwH8S*mDIA) z#4j<)n9x>wSx(U3JhFm+jvp0|tcyUh6qut3BsdUg|1j7a?3iTcDok-b{9VpdpZ59% zKiC#+~+0%j;b^0dXf2hg|XdJL^2Z11!xC@V~iR4bRcd)|1*sJ72dDppoav+?ja zJxL`LLzi2~5>A$7)1;0f$?g5in5XH_KqN@QMMV<$xc2px_-M@Wb$X7>0cN#B&;V%m=(VOdM?*Zjwn!FElN+NcQUj*K${dryF zG+a|{_We#q_aHFDLg_JVw-}IKP9P#&8B;=!dSvgO9RhX~W$5g}*+qgq9RXPv`FX^p zDUcx^*cAHBaY~$aQ=bQktc`T6(L~mTnYpMUWQ39M?0_`bpg>rR+Q!~hpX{7)B-Oc` zZpYFA-Qnn$c2d3$eH3}#bd}t@byvP9wslqLaF&v zgmgAlofR(Cvuk7I5lN>3-&80I9B_gn59O@;nNq$|yi6C_>ATb;K7ys;@q7{~ez1B) zd>9*qVo0^rBgK6~lT#7hTRZPLq07Uh^bm9OC<70P>4{fm3oOs;+6f*59p4L~xq$SX zpPN?xai?mP{kLOqj-CJYy2y0y*z} zQpDuMdxr(Tr7MS@Z;DQkjx1oGtK~_c&ac4VYrsgmFjs8DsPt)x>dx!-j}}x7*fc#| z?~@pOUG5gD_~>VR-AW$jT5m03{hA(`sozIgoA12!iBrnRAR@4p^LCbKbMKZ1!av^q z7VeAE6xZycaXISnrO`ajPwofz86#%`ZL9vF?c2)M=zHELCfbKA~T?@$j$fO=!I3;D!`{f zmDD%=1fbJ~0A~n6(2(d*C9~$ms7svtkf!Tl(qF%;jyk<4?=0UWd28>ifu}#rvpgfP zA};+odOzVOjHb>?IK*v$+X6rnB7`L=2327Se*OZcHi6%_Hx<*~LvaeJ{a2lAdYd*rg1LtMdREOIscOp_18x3#La# zRQh*^%yP&ADP_;%^S7+s0AYa*|=`)EQ<$rgPDK`Hnf_^ zEt7bSKbE+fe&}-lBiqO}sYpwpywt;K^8r`t9-a&b8PsF_0m&^3IZKsZGVUQK)tG?9 zkMY4cnOAX_$4ZT+EY<4omicbd zjc=ihr?RI4lzeB~8oko8G2HOMI+_9~G#y{FWmV7~96#Q1+v{`bs8NB=N@klU8MT~+ z-;L4w6h|e0j@)ET%*QKbyE|vFJ&9Kf;8D^$H+&S-q$K)w{miWSE>864SIyl^-<9=V zOU#D$$J7#9B~@;u>5eMZqq5F z#glE}I1W{NFeK1pgT0Q~yx)~o;*yOjc1|Rd#mOm4Yp@zF4cmE4DCxF`AQJ(jIWW%Sv8oB%vAr2tEWE+hIx__ z_qjYaCL_fl`*G72(GgGlO7xtJa9b|L%fqis&S1V*MEynuPf<6_w5~o zov=-5T-w{C{_Qh$^x&Qb*41&lHL2LOAG;LftFgPd71y(yzvjTkH*Im>4)zEAqyuCa zc?R-*`IGkREs(G>4}X_Y#N}Qks1xM3@ti>)+>oK3vj?z2oq84Bazvsa_6!{Fj!Gos zoUbnZ2%uWK?pH@#1h3>&pc#erA0O0iw>pj&#RHNE57j-T{_e86jt_TKj6vj9&U^g;?Kz0Jc8EWP8P4p*GFs(@-HK z8{5X>^hV0*`Hw6E#4{%PLtq?+!QY&=^FlG8#M`UKsMxLpZ<&o(Me7%po;=P*I!=dI zG54bR?JHiDP9X{qxvC~SS8IM-^fDL=8f)iPRFPf12Zu+&E-M>rPer4(o<`k!bFrJ@ z(z4PxcjA8#u#o9%v(mD9aC4y_l$|RXHY@3FEB^Bb-aw#Xb_4(}9Gh21D4E(|g8psc z@=l+(VT*F_7(V~L#2>>@C@e;at#MzU)MvtF$ezCDHnp(HNi_or)}L~mRdzSMDk8Xor^ z&}pT&c$0*T!gs%24liB3AjrJ${V@~db9Z`w&StP$d8ZSrNGd>r#%%rk!+4ilPk_+0 zmJ842Rme7jOa?52QDfHXt)ovemX`BcC;y8Q>4Z~J^7%w&(O)pd1q9|C>CMa&)79LT z%@fc1c2v^b<+i)ba)AJ`uBP+5slK-0P5GK9s_m=MXUWebfv*EMw|bo9F#l_wg72!u zJX7VVYlOdYHm>Si{c;f~+WzM(>yP_y093d3@3x(oDOpNnAC+HXn_uvmsJzy~@Hb=L zgYTuU{@`bCs-!#bKD||?)gz+ES5*X`PE~^5N69cgq!QJC+nCEOF(&oi^1Wt7#8g2&x0Nq)I{N6DA*oXu6iU}>ryd)>(f^*GIu z^9)zhuK6{_CQ`S5hJ=rgbU1)2QlXL(8FIu{;K{Zb@vE)L{{>GIOb%3;D-A!FyOppx zgx=Pq*epWQPffZz600|zoqK0Un4gl;s#77N`=yy023#Kf>YrH!NVq4B6c)&zgq+gN z>Vcz1|9Ll${qu*Tlx`Ci*ZNAE_g8%TA}g7ths<#?uw9Q)A?h+oob ze6uOP$|ar4#lp4~!oLT7zPO1!cc&>_QPk`V-m@)ODe&8)U|Ep?)0ao4dA}-`SpBP( z_BJ;bFXr3b_+xW^=Zc5mWe4l&xPs`rw1i!r8$BF>GF~a38C@KnV*7Omlk>if2TWU) zmoBpwc{p~h%}%+9t;;$!FU$NH=eaRGzLCMr#TG)SJRZfiTCO!bH&L)*Lqgy zw`?MFI{9b$sg`BJ%N#)R@6icCRmX^ly1*WCysiy6VeG9|>QfOS$Tnu{p;km%>Ks zA#H=h{`?&COP5r3zF&fZJz;Qf?R%^Qd7etYG^QT82}F|uq{Fe+EC>loe6@VxJc$A6 z?nnZ{pdctmv=SP%_p8GgTC06R9S~vw6Ss^h`i!?ljOtpk9|Y-yY#M^HfO<_tu^gZ7 zf{7!HtQwdZ!n8>WLo;?x8=sqzcpkQTu|}NS>zsTaKbKED^qPf86OBC?)bB6%H>>Ag zi+x{%&cN2MdksL6QrnE#U)_J(2&rYzRnn~+6haD!ob@}g`Wb3I`{_v+e8>c28AkqI z+3OCIk796g?6EtM8DB5Xsi_d}`ekWUU$4q(U@}74DECT%$wPZ30rjg3EhYcT6MCB- z*+npt`hixAIIZHM2gbs-D>vuuA7;4ya{3;R$0m9xSCoiDGk#uy4EgDTdE_Wh({>=leCE(zYC1DeH_k)R45lB86 z1k5w1EVA;g5@hfitMbo1AIW4YZoQLUH;-t?J5A-Ox;kj%n7QgU@Fw4n>YNDaT(nKp?tZCKFfGi?D})RpC*k<;q6LjT*d(LuLh&QxkFjBE?fL>GNW0cZ78 zHkJK8I8QIB<`5kO4ePw3hUvj~bdeZ8ihBQ4ZkT8&pz$*XdNM-QidEs8Z*@1|_GU?bzQ}2+R z$9hfc1%tpl5@sW<2R=fhzG0W&KiEy4e{sTATMk@?TrXb=M^nBi@ywiQ%BGbRXgzPx zD!xC%z2K3htzAbO6l|cVn3nNTn#0i{9&)j2@53bA3%Mq-FwsxhCri9~MNA{z9WJKJ z*Do9W>rw7KNSu_kB_m-oNWZd|>-R*E7*(B6wc?ejGpM{(p4t(-nt$(}V4=+FURDHr z^vlAE#eP87U-!(2E^o>xecB}J-^TVDgNj_U&`|E~B6I2SKCSE55CDd0JboBLDY3iJ?fH)DFL&A*%@*OP$Z8d9tFMHIJT`a}o|W|eM&0SD8||RdRM<-@c<%HqAvYSg`bX_n*tdV4 z7;wY@v*pt*-kFJxPV2NgZ_=VizUfFPYT7yBA30dj3hWNq1cY6>tkZzMr7-{3%W~IM zLl8k=NOf~vY^zN3gN>!%)!(wsY=RV?o9Vy3X{sGpZE0##fRIdX^&=+u7bu33flIaHZpg3C?+HCMRzy&H zZXF&p#>J zh@#j@Mq@EYb4}UJe|pdv$ud_RG?&Ww-x%^M$Jv2!qn};=b$ewqjvhT*O`&z3^2v&;p2(T{OjIM!D0zeK{8l9MM>;=S zxX6Rjgw+@12VMtP+46H!Uo%1xeNkdFOU`6M>doEg|!Dq`xt`M-$B<5uLBLbgz@ zX^&)v``u``c3oI#SE@*>j3raR3Z zv9GF=vQ-(6m-!RQhFB!=-lhuODbHkejl9skZVs1Wc6i~SOH!D(@p9CFfQQYv!Cj#2 zj7#-q=Y*K+DIQcx`gE7K}EBqqCa0dHGyfE?X+#>*-a{Mpr7)JisGURgCe0kD&K#v8IilpXH@}u8 zVq;ysPaKsB3c5SOLRKrf9c#%l+(RZT;bYRcY0T$j&YGF#Z04^6zI}~*WxF@$)jhH& zx0on(2^w=cd|w`*o8#9T&K$O``BJ<3u1g2Zd)s(6lE^cJde|)}%@3aj>3@jQ0w#u` zy-1DnM|SiYr6~V_aSwO|`uiYv0JBRdYpX$PfWHJIMAExY%=?g$FKX9yQ>!A2wpCIG zeJBrrWMcR#@6TG3YCBCApSr_m_$~~y0QzGd2qDMan0fs!nZhbJGjT_?~STW*dH?e_9z?Pmm zOf+XVC#=pszr}}0VQ$ySo5XY^bFSFTFPV0hjPJB>b}j5Ys5jxitjS!sv(!cIs>|X% zRO2|K9LGw{ct=S@S>?I@W-5{qm+I3pHkOBIv$g28Z6)x=#|R*QQme`EJk2sr5cNp6 z*B^CT+Zu_-d)nZA#=jfGI#LmE=40u#n2{tU_(e3|ZzmG%vV>v~WteihMJop{xXltw z^zhhaj60kVA8RF{rQ;sZom64fbERA57DYt-nAaUgqM~;a=)s&Amu@7aD;Qr`2zS|L;Oo z{0-BSf2CE-n$H5ti0(&k7l!k|Vy>xkenN`5{W>MgED|4e9f8{tX&j3GOfk^*v(0uJ zlbhc6ZA=`4|Xv*-$gUJ`?Srzf+#Bl|@ zKcR&|*2K{fUp+FLWD}IRQ<2cdz_P*?v+7<=_ISBmmVCp>J(ZOJNwzweY5&?>!19i6Xy>Uyg3EnRLWw0nK3 ztdcou;gtw{&vRz)QhW20Y4KK?oZWAQDdv|5+#NeK?t?JwjU`;*1uzCr0Qw%SH=_Hv zKBq8~&Ou(EH?&{elOKKMUZB6>fT6F!eu`CCnH4vGyy<-7?d=#}Cl%|#KjhHeax_M@ht zzslMIqN)@I5u!;ZK2*9ehVmw0_ot z3-!td%VOW1)Kw^bn5Jb<+#=EsFQa7SSvgr#h{mORKkzsbI^J9nF3JMc?>aia984PR{5+RO2Bw$X2%kJ1_EM|?Txl7~@5%J!_-`A2oX!i< z7$@aZV{Ic?JZ#tf*F z8fJm}>|0m*uidyFn;GLXMJw@mY~>DPu}_8$m*Ns-#_95(kHoG{M?yU5j=A)1W5v%4 zb@Dtj3%0U+qs6w~BrkFYdFPq>SEnB^*2uN{4WoD4o$71E=wRPuta={9zw8Kd2d;aR z%?zDpzM$BtNm6^HpdzgAkWxO=gxQ*n2uEJYtRf=BhZ7hI;=n&7<$4dOBEq7IdDr@E zEeux}3b>55#b^&1FAAFC+&MD=BeDq~TfBU$0Zg;0DXG;WID&Qh+>A>)HQhW~Z1nOg-B|TURYWJ31#g^*WB)biJ)l4{;A?6>yvNxe7o5_xK^Vadu0uhh-o z+20U~pC5iSM&$uqzVTPF53D@PuF0YI?H@hW-T%eqCH=VF|Gq0sbGdv1+Y(wcI!H0?FK|cR?%7BMJ7Lnw?A{QYWuy#t{ zK7mo3_tm$hr?mH5I0?_f$eOpPAaV!ycdbbMVDFa3v5%&a z+G6Kh!;0O}qmRLP8>O)!Zi^oCpPZ}-!Y7y&`5CvC=q`n6&DO#K$7!fQm z1`+{pdwtswc#StH@+f%_pxS1jP$It5Dv!3ew=0Oz>bH;t)Ke5(P7ve^DabLEq>(xR zhG-!Wu?s9bI3`r(%!F+h_6dEpo;dp1-N7i|Vm18f=Lbm;1a1`Y(c*yd`1&Q-w?z_t zlEecLiZJ(-s`!f94+FIZ>SV>Y?cJEq9`gH@v961lg|BL|Vs^o5qF3TC75CrVJX25( zj@MieLc&tZLXy^0>ySs8c6$eUJ*pFMt*ldqVs>*R-V>l_8lSK3G@ zSZkU4P2PvqKpUc9L3&EZe=6C_j{|+&5?FLx`|r1@Or*I=X%1#pzRU=-TRg-JZu*%u zt;mVcwbPI-%B8!oX&vc(P*aHyGUIW(MWZ5Cd#MG)-j0@MpPF8zFO!GVW?v8r;V))# zBB`#Xrz|;V*IiPe4U;|yxAh64F(r1kjlFDL?=S2vO{v{>e4!mrt(z9*JkPFql^#oJ zAIQCRQZpd+-}K^}HTXJlZ(CQiXFG5Kc?%n)DRw#h1qVbj$a^An2>{!r@}Lvh0^3pY zAGvQ<#ivFRd$?jSRTtt#@;O;BvIMLpj2f(dd10^l9PwrK*5k*Dyg_nB!!5c7jHS~o z7wjOU6J@?vr$P#0V_L1yHgrW zX|LwkQ_4&CjF^SL#`Km%<%Ow(*I!?9M&6`LcmsYb-3 zKq2A~RnIi#61KL}K-v2&l$W^KiM2D{q8%V`BSuXPKPHrBZ68yy>H|xp2pvvt$;;#c zs_i`u2FK6iN?V+~Uc{5%mtD)fkrBVYXSQ?tTMg+!8NSZ7w+=E_h1Y+imwCRvV>GNM zQNOz&<_{02DNf^`)fl#_6xn++wC3_G<`h4+D!I&Th2XQRGu^qy4&w?16|S0SjeOvhet-w3!zhSlt=#=4e2RmY{MOz z$V1>DIGE@Mlc|rFTF+9O{R6Hh{h*6Rh;I+wkEP#3Qpo8uG6Tl+(RvxRM4s(r)o4|B z`QWwGlKb+w5GKTh8G_21Il@SQ4pUTtMEh5-SP4r9b%M3YB~VeQnVr(zos@C$e*6P? z_aR_*{>Z(3mnhCk?b_bQI;5vzOO336NDHB%^{G7iXO?=Ey?Ls@XwFhb)%9S~v8J5? zORbNJ*Zm;DNx|>9Xo)DvVHHEcDfXZE--R#2N-d%TQ`eDsdj7AL$g%e^)wNA{hl6#| zO){7L@f8B^x^_q4U*_}Q(6W(iK^?Ge$c8O&iWH8@j;#%9KT0#`jbo4@MVqsoPq$Gp z8xPX68AP;s-m^qnAK9A&48>t~=|8C6@QzKagulO>3cBIB$O9=dZd&|Alu!m1@c$*n zwlUKuB@Ne-IpR2BV3uW)qFa# zF6qw|+Z}{TEx1g1E}{xk67Wt~8BLvneS@R;T%Te(()`^}=Ao96F3KVad+T zj9s0SxsQkw`u2mw$_Fm$MB$@kv*W}OAM@qMZ?-^`_>=tn4!}%$F9#CR!lV4Fs zGwQsxO`UMu*K1np!rQf`Y90>PmYUVh{~;HXv{kYhSiza*5o0zB>!+=dluIH2h(Am4dO?gzI4qfjuh^vP>|np?6uk99 z)(F>^CggUZb?c8`hxLwHa@JL1--wQ!hk;#%t`@scFG{qKAr&(KqH=BG5KKJU@ZuYM zKRS7g5~Ia3`j@rE>Nd^I;&yt!XbKto5xrqnU)iKRl`5YTv}B{|tR(^6I~Cy+O(yNT`=%Fz5kq%4vXk-ovxpS?f-tIt^m#yoTVPf=Lkl$(nevHftA zaOYp>K5f9GF!;v@oU(}LJLX1BrFo+w*{Z=YUWn!lGsUv>2 zD?N9aCWH{dwiq@*+=%fx7PtlxaFp2 zO%oqzZokXu>c|vN&5PHjNKudmTHu4P2o2J278f<4?4%v;IRjF3^W%4`v+u<}$EYbG z=JZ4ISr~8KPudahgjp!*bvLdfnRPwp^2wF`ieQDIA^<=Ut(?|Z{AmW~=9!0$OzyjM z4itFv_n1WFAw!KFW^D1F^HxJf@`%OlFX|9_wkDOLE**A(E$Xm#iB|(UiG9&8uUMP2 zkAF9TYJRHPNL;|}_d2Ewm66@9D4|_z-v$1dE9?IEL(#K2`Cl9ysf7Xs&NhYI5#DyR zx8Hs0B`}Z^yAkr&;62{d!N6-wOpJ@iC6q#`B}L}gDTvL)pdondGrg~0&VAJ zOGRd9A`Uo8QtK1Sb-~es=lM0O({adRetz7Qb(pqu_pXt>mP@}iuqDRHM;_V zMiI46w!vb9UVhG~3oT^B{0xpVHTb&xkxf&xvhoP(zX@>!eNN_qva_FLt|9cXQGTRz zwgGsR)JA&W+|YlMW1$_SDX(M0)R1G)wF3i?2<9zr>O-gIX$NR+qgKY&`WEvo+KLnS z^ouHjmYVAiIdz?~zB|7w+S`7Y==QTGWDxl}uPj=ig!%=>t}WMiO^QNsKq$v%bg@}c z>O<$lC-&P-L$9C!izW1>goQEMX~rd5G!G3a!URnz_P_`U+Mi=`PhK-)MjY`os|y;& zy>@>e{qob@NKMW?ZuK`yS%0Lgj5Or1r@DND%EyrIV9FJ|I}vVXFyUsmo@Z|c4os1& zidW;$a}@Dzdmm%`LZ8u5jeOwCS&HVO{VDT=CL)HNC>HxunnFE|*6nK!VSBQj`R8o* zaVD|?j%iD!zeE$$lpOZ;cDZ5&UGHiRbsfL-J20j9TDN^eLSx{@!$7r zQYK>$X2|gQ4s+kI&OFf=H?hlG$1D>jX4>{+K3M||+i8d1D1Fpm-9h4GroXs0XSNsK1>g?q-DcT+C+QQa1=1~_ET}{vX`z~IverWz?(!egwmJGO zSCPa`UJvl;Fn*KX&Y4&pgK5;BUCFC)@2zZn0z*8g627 z=firsyvStsNZG5^2OPcz$p|9#9l3^<(rU>=<}?-$tS{v}`}IY$Q*ACZ>~t#NS%oZ1 z-y3TeKd_;>8;ep??>=cH@B6VK`LcP1Fgr=(pyqq5k#4Qyv&PMIy-EXN^^Ut*6h!dsMr-6P z*0-85!dMUINTyQ48CL#Z+8h~$;MPKADsC|>q9#bC@gZsWe)ZV65FZaM|A^j+*7I!g zC=hH`RGEwE5d;_6zpB|DN4!kh)t-Nsq+4CWeeD|kJUH5AhVrLpufP7Y9@1~1-*{FPTzm0WPKuECEA_dYFe$?bnKFJNM-fK0mEyW%f1IEz}isYCgWJ^i8;@yR(IybD%+=RJd0b%NtVQEwo?65 zbtXN$F$)FP1#61-4lU4yjkxSxi1IId39hVY-la+Q_o+kFu@#~jg>@A)A6Y!U7p2ZL zgzn7KUjOXil=-VCBZ0@fLl;~D>qj(s&P8A^i5U|!>BTFoXcx#md#xQU*Xp)H8$RY> zMk;%s?VPx#DG>+4)54Ha*cdXn!ItipZ>|?@Oc#^I4F_%r4Zo^ld}-d%4#{b5#glh9 ztZ(>=40(7v95c6X;5xVD0~8vut7fHz;@wp`Wec@C|(-`bJ9{hs`S zYmPa!CZ~B6x7D?CG=q8Lwys^^Jeu+{=8$rdxJhWb>vbIai4Ay-nP3w&pCzi_s-J@_ z_Cer~c#ED2@%CXqX5J} zBj|b2_4kc_Z36>@H&jGJ#_%5RDT`+BV?XEkGE*{?xiI9GGVQgPppKX)a^14Er*P+W z_87J!LUWp99MeE5HOQ_RUB_wXUZpsQF4+BQp`66=fSc5PH(*|ZqAEQzqho+~Iy zC3cr&I-+NaD<%R6g+@%0kiL5&?yg86m5fJ#&ru~sOB!s#)e=oxerK_W*4{+ei670d z`0kWYYDW9)6ST5o)#mrC>``$LPeC$2{$Y#>DTOc{U%S@pgeW%GUA=UB;){*E!%1=f z6A^_7L)oA2*ngIAp5VHRCX$)2?mVjS9NPnf5%k#>{Q^Qs(+)8nac}=W3&2kr`|YtZ zA3S%2ci z$(rJIjc3@nF~Z!^ zoeHqw4=u4`nQ7HLUXcv6?BM6e#4RE z@50|Ky$_srO6U+?vniTX9P=+#Pq1+IC%P*Dz$;n9ia$TF%UmJ$F)^Dbprw#~KxoJO z10~)$)(g~kO8h&oe0s7KJS!#Pr^v`>!cAQ_H&6kWp(o03FV$kiej{57n&Q9*dC=~- z%J03KGa*Fv=Fa@z9T%YOlvo^{qT_ZTj;`M#bDM5|p8(kaPWc;S*Xi|s$$0O<4_n`k z|MO{3E^Hj`U#>^`C#VP&Z z265sYbvOf3cBRhIf{f@cN-c9Cspv7$#ZYUwRd(VZ= zy*h>a-|7kRsC<~9T}?tWjXg`m$5ee|ZdaX!@a?MHbMjrb!R#lAy1ClaQ)NjR9byEk z?QYSPC3wj9!imIXJ->8_j!eU%0f#1&e}SHGeu?}$O@Oen;%PhC&Y_1Xo=SoinN_Y zuYlumCd<7+X` zS6vdDuAGkeg19$^UM^YaoJ`5D_EWnA;iqS0eT{!8&m33C`Gwo#++(ut*y@VVYq4O8 zx|7+b2lH{$6M?}eugoCKstoLrX=Paqn`@xo;>GhYaW&p>0rw0kj^RNupoI1pN;D%U zFwKc)$H%&XgNFeTuZ)}4Uke(VbRwdTLeo|SBxRO#~|bt#x_ zm;a>ADL&(?4F(JnEny_oQe(10=B*$)nCZ-!`E=cYosz-wHAesvZBDBzfybqK0&}Ik z8ArmZOw{8zh;#8(D#kST#Na}KGDh*CrlQp9Z%t-TZVBTquwF&dIKf0ML8kxmPm_*SK$e7R-y$@ zl}5#9A6NaiQkQX=t zxCtm1_Q1wIVX)etkIa=(hx?!J_wl?Ok16L0?2^4bU`~i7m$YqHH_Q1a zt{a!h2qh->0E>_5cNXb`q7NOHK_$q>%B%;|e= zS2*b;M>GHhAzdbCh)l$>K30Lfj{fu;SQ>+`lM-TgrQl^-b5FS8C%n(1WXBTnL4 zpR9@GWKSi-Rq#Z|P|n%SbsmE@IDxf9M&Al*-*}R_p8c?!qZb&n`WZ%;I<4q*KB;`B z_2Xrr*Gm2}N8zj2x4}5Mlsu z`b~6~e6{N%i*7LCRTkQI9`V1gQgS%~?|z?Fn`G=6@{x!#AaUd%R$4J=T2N@DB2k%5 z-AG03nk9o7xwM;H3;f?8<>5?R98OT$W7yh~c65Na<`i74QFZx_Jep$}n2t(X-)UU@ zD?_zc%yT4|CK$5o!zs)eE&PRq?{9(s#1c&)(WiW*hBVX>gq#MJx&Q zBXpi>o@)2AHpi$8fwV*4#wD4J#c|#KsJ^0L9@%Ad0;aS>%ha~URTr=JW*U#ff8}H8 zdCf&CwhR;`{dvy=c_`PGXN;c}huKwvTVfN&F3gkFSJL%5DYivz+*F$(vHLjJM3MFx z_(Uk<#%N9N?7-CJ%^??-CBm$}eHWY?^tGEvAK;wH))D8&Ngy zD{3*uWs}>6Rgk&o(SwS$9SOwH-#EEciQShY4RW=qj=>|+($uBkCqIW-FVeW%Mm!^? zjK{~9DTyTWw&&8$-+lHPLgV8_T|-yIIL>$YQex#YVX@(3ShM`%RXkw-W$7b52uJ_41Ip{@eEj;*^I28OKo$= z!xG789Ar3}bLA`#9;rwMEP5>$TshPG{+GaeCj0Nt>*6yYfJ3x$qDe|T6-j`9TB07ksJKGK+;o_1ZrO4k~T^VQsj`{YD3u(SFpYt9i zi%uiw_<&)(nhu;0%Cj0PzWPGysS{ZzAmbK-a)VDQbQ@yDzln&4QlSQ#54K;RJ-L zM!Eg@JqI>><}j#(WvRP#g&)&{5eJa$DeWT~XZyGjkZw47$!)Mz2v1d=N4+Sl+Jl9T0V4qB3tBE6=Z zcKH*&0v)ZR#6uc;VXy(4#GJmxhz8Y=`n2G}+GM?8eAOU-KUP^XWt*LJ;8Y)y5-``; zln8A~0W2abS!ZAA{8`)%*P%79zXRuw?Oy#hJ%>*V<*9UVtXn3}Z&_YHBSVVm@G0d)CItQAcf;^13tMhM z6B>Kl-b~LEO@Gn6mq|jpDg&&;on5V^zgyb=+G<9gE;~swbU%?c2cJ3(xW7|N!l0=d z--+~aBC47N)mNVVAw23+g2az1uS@}fS!{o$^>;2cS~;k*p9k>5P6|`m3dYz(jJ2e) zUO0`s^3BnR%Rz>wrTT72%vr`N_Y6KY>Hnhoj^$=cNixOvAGGYf1eUPwsQ~Vl9Lo*& zp9%yH3iPQ+e+cgci6kDO3!_E#UugkhMb)Iv4Km)T0M$1d)@j^$Pyn4BCXt~q6y z+P0nHm{RXu*!PosNE@V(>ROZwOh2T-zVYiS0t*4}Noo`vL2WC=opie0A~oONir&J) z1oOTvqWqULi1!Y5uPM>u4BuZ!x*aH(57*(?A4l{L zLR8zva#D;kiOJnz)Rg%`H&VnKYCi5tJ4oUo`EEUyeDkD4hakWm#%>%u)ryTUJ)xwT zE1bbPuLSd}J1Ku<5e3zr$d3mEHhLjJ$2RMV@(6-u96zP9WEs>Sl z1tQ^*aLN>_(013!#%n5UW406Td)C7rQ!sHx)kh`9Gr=a`ceH|AtM9|RA_vm;;=3W6x9fn-bipXeuXKqdhSi+DP4Qa#=GfJY0ZJa%%mR5 zub9(GVd%(DU$VQ?UeXNusM!-=pB~?4K zq>QDQR09Jt+0n;TjEW$1vNL1YARagO#>O`}q}?8ucbYFq^12zG;dX8g#D&!uhI<1% zw;WOlhM}OM7&LA36c9BzCp*4sR3vH|_^5+0gd(WDSYQ5clmh?s>C{+xTDkC01kdSOppkw2dNp$;Bp32oI^Ifmgyy2;eiPsKS{ZfccH?FW$;KIE)sea9tgb z_7)r`ks^yYB24yp<<;KJhf1iz^1L^O3G%G)yb2NUfn$MM%~i61CCN{K0ZnC*SCDYM zf<5KZwCosFuGh+?Gdgu1`YZtGzi0)4Z0#2+c@-MI&2h9$$7& z7M)7WfO+3J5D**2_R$m}qF*t`Kle;Hsu>7fhKhTU^FRq%m!Bk#zoGipNG{+loUE#- z1^WH_P1j>`}@ zL0te{4ME9w-~fAC)F#i*8+_HEhybMB{7h^mx@0E^i=;HAYe&kSjMbW4u!3{b!}&Y3 zbnYr9#4qfmlTJ45uAR;WzhgWsAYl*@LRflZt){3Jjvga;5#i@<@*M`8N= zbMW!Zn%_fvy7-h10UvzRt^;W#qjXH?4%_Tt_KhP9jn!enS=j@MBQ z>V}Bq$1$U7;mTX5(?$d?Ri?b!H1xyw(s?S*3{w*TGI9N3nMT4pqgSB0!T@x-=n zM_vD&#eMyx>?aw>?^Rr=HOm6OtkGNWJa*=jGy|VD7d5<98`^#dhD-@ksj0!_2oPhdF1l5=P73WEIKxBITTQv2|vQ0PK5x zziztHJ27iVy`VUKpaA~^f)D8CT$1`&p5R7*9KUX61w7}l55Wyi?o&`>h>Kt@dV|V- zbN){H&y~x%!$XW()hhHXIg=tM?oW;W1KEG~MmNGa{BsFUux`kC)mXVXwqvKCUhj~$ z3t!&9yr5X;xdvUo%XPZbQt^EI(bqFfzA7p%<|PA=kfuRR%1hgLb4D z826-&smEP92v$F49X#w8zr{Ke=Kl}1BSjpmSp?@I{KI@Dhl>@O3}K_F+9^l#FFxtn zs&R4!m2XPdarf;GfP{A%X{@4cI?eBmdav{Xl-x{4|M_c>ho9*1KMx~JI z{kMmOx>EY#D7Hv4FKfKQof~2+H>^T+Cjz`zXuSPhr3*K zi+|TCMTAo_Y#p6cG^NeXS9Tx^-3 z?jl0P?Fu7zO&gj%N4!)7`xhwyh{=87GmV8$9&mdu87rh+=;!CAyXo#atM%(Q>whAq zlZXVb69SJr2kx+yv{$xma+BkwoodjYh%QMWc!K4!83B)~m4`4Pc32h4qmAd=Ghyxn#X6MBCmzUBAegc%RAL28}LfKMVanJMM@ zzd8M~7`QysZP(Pu{uc)9SDhU0r-r=0{@YyN*Aq+gItHEQ z++E;8Cuf2kau!aG#}0{BqjkE2z-4Yuhs*>-$+`s8QmwjT0kFV@Y?aPMn*{5U?v^O9@8LHJ zJVXN#P3hJF+467(Vr0)<3a4mT7J<-B*PBqHJB)uCaC`WBtP^BKe}GT}L{rD8Hh?mT zZNllDtiO<0Zdhy(Qi0byz9Ds;Zf5kYy@bmq?M!mP zjJ*N5UHVubPATJI+sq3}*FrEpLW6=Xj~>{M#MSOLc_$5yO>Gf|(-IR7%Kuy3qX zmN9Tmr~IQ*Xd<2PvC`of`+Uc&gT+%yvMUPd`0|{JiW4k^x8XfYcnsd2=WP4C*Mh*o z4gh_LGB_kCJiu!Ax_jIESiS=W=rrYLDR2>(L8lZUs$7v%amn5z=0?Oq)WR|{sAch) zwm<-lh%`<{7c6hsB7}Gk94%>z#KTOPP;k`(+kP~M1YB_ea|C}vrJkgAqce5hIus4U zHg7XKi-L+AYXfX7<#*uWodFIl_2Mt{1>4r+z-bX1S@5aa>`woXjkY=Yh7JX`t4;H& zev1p@PR-O6F3(Jc|y_LzV|p zpCnWU^16i?W;v1{UUel4mmv(ua&w(_m~gPm@zEt9giihR;aH&F^Nkew1JA__T7J4K z8b;;_!#`7et@z8C3+$WVhjuZRWCp6my^!w>#e7RiSrEi+yZnBB{K8AJi~I36BMhQL zAGf79X*YGgV(_w4+mLw7)O~oz3viYg3;cBkYxcdEz{hlwCeSsaUQXMD^S(|h9#z}4 z3n>Bhg>#Y1n?Kx5zXEcvuUb!*Q&}1IGXw-Vy?YaYR;k1Q0RSG3L~ekB;2u-tBvTcV zjY6z*L*9uut`k?g0POu6ilvd=!dO(_0*a%}^M5Vkr~&w?lrNfLKQys~{Jr73<-Q%itX107k) zDBJsN$ld6L9bHpz81~$9rz}+oKLIMT=ahaxVFe47jy)XrKaR6Z-qHr}_89oFxOWq# zbB($*9`)dcW%`qYXmt+d89kt+85HI>tjP`8AMxy?DL4>k`hTY{1JMeH8 zn3+;zI<~TsCMiLm;<;{$1R{=|A9pw(?~JjJUn{qsLXxS=RR>bweNG0k!a|_;YJY|e zaa&`FXM|f$biJ_>Kc<@rQ7&_qGsYs?DfpplaIZH8PEO46`puBr_riCl;^Kq1Zd`)3 zh;>9)Dfy}!xJVMpU~qThT9lE_0@@~c?rXgB>%#JC2(J5&FY4hVVip?6+dvIT1R7j{zu*P4BxM?moiwfc+5c|F_pqau zuNje>+o>qcZ&0j~0PdBMos*8|SLa`7;LLNjaXdlc38Ms>f4&OdFw1FuI< zed^;kjHVDIjg;{F<1HoV-2w+bjn5iW#CURSQw?8Vu5tMY zN2g!*>l_S5LzsPk+BzE2U<=G1gGRb~9YKvQC5l;F;NVXJzyoHuF0sAj&Asn3C+0eD zS51uRUu0A=rQm|SclZG_IEH<^?Nx5w{&IFl7_9$MHayu|O_1;(ySWgznUOgSWC{Ou zR+XW!Evgv3FPJM7tHuLd!&CV%2`uE;LuXS(8npJO&9;@j-?6T}t`Fm!1l(G`20xUnpa-H|5#D25^rzxq&@47L*?}hc~nr!tA zMv{-O;+wJbJ?+N}<-g_)2EE?p&JoM0rm8Mcu>{~<2}Pnu+h-kl_@2rL!yg<;w~g&} z&Y_+jFzGu9xf$Tct?WtFb4@=2By30So=lQA?$d9%&02U91lK?aXCc@D>p%4(>RG2# z%w$NOnFdm61d^axl^BPHr>@)a;tzKt%?j-w~ zIY`*=Vap6BG-N51O_%BUAA9w_Q?iKlZ}q{%0SqHeV*#3wlnEw{atjrc1pKQ4_IX>n zXRc&l)9w>pE2|P7cFd(nEzxKp^&dd2Fn=u=`E4$Dxc}Zzx)Og*&ieSyg?&9-)RycL zhT?=Xe8{BY_z*i-u7$8!`s2nb=y(B76ZCRjL{k{yUaw(*uwr{~N*>I{JdGV`7b22v z+9$Wf5PZY<;f7LxgM;da$@udh+hA@0VLNi#0+(RI{&a~vu!`g_svm3m1B(VC(Jn2c)yaBdLH>6+iZCjRFL0Sr=gh7O+njZWLXPLbAwvY zG-l$=49c{z!uL_HUv9tTGCFWi-qCqXypRp+773P$r)~iS*|aAkA!X-V9pYq?+wZji z*DC*T|E~x{d`U&HRM1Zcnc*YrPlLbr?kc!X4HWANrXEtybLqeHLL0}MDL#e~Am_l} z*N9QZ9FQWBkoROdfbO^l$52RphcK_6F#?lxh5@q&%@N!y(IY}m{*QSL=8i*~|9}E1 z4YGK@WbzZ2-m-@y%|c9JnAlW6fqB8TiG5k$~idB|w!{^|<1Z1R4r{gBGO5$5cuw z(LkRPw@r|l$dan>S@uYQ@nYan9Z9@B-+g{N2SkL&w&u*oPWU~em=wfIK8EzoGh~I6 z5m1}EcTQd6<2(*;MjP${^mVjB@Xua>>P^G1FED&e8USIcB~w3;ltnpstzr+HGRkm`9w67d%`8MI#SC@0ym zEWkp2wGW8CRDx4g1OfI`UDq^(Qtu~mfd5Dd8Wt33vBT{<-&8BOmR~?do+1l-tXtyQ9%dLc+d+-}tIhP zjJ_qh&5l@G%A;<~KuZQ9`_ljghj@&+M|a+cTiQ!LTKUz$;|D%LPo6R1hqRT=d)XO^ z)^kvZZ(JPCjU#s-`v2$3j_l%jb!6G;N~yI8hWAqe3Ne<{ZOb6z?11O7`d>ih6(rv} zcNV$g1B+g+@vU>n3D%1v9d@g23r9_s({!kg++%=mXEqjOfscwiJ_c|qXJgxXkD?Cm zQ@>pnySCq<3$e>AtD`O15J=k?S*kmWN8JDE!8vx`il=^FK=GH&!L6NCJYH-_S~S?z zk?qDW%>se#4vPGv4ovEfnJ5^LADkcVy$8y)e`I0%-Z@k+7FRfAN}Ojnk2mqKedaKe z%ZsPCiu-4zw4-DH=k+Y5*u5PB&Su=RdFlvZ*94W7(DRD;jCD)-h^I zFPmzm>pVb<-j?=gj3P9@arA4S&X3{U5ZHvNPaG`iOCZ zOibdpK(1@S*7_^) zXDJ4e_`~N%uU{vQ4V>>tdw^Or6J#LvlUBRsY)sDf-g{ zE)$64%E&M=_-^>$^>3D{`yTZhgaHJ)2sLx0ctxs5sLVK zGP~UOIaTt*nR=bx5QjLXhYzay+#Bk~M%P2EUeKYk|0vAuVs8EQIbr%h77>UPuJqW_ zgm4Tb+n6_ly|}Z>*uV3ErSBL1b0`@4hOVOeKAr8+%z_`^Y0fQoU89tL8z&5%LLk^W z-b-mWl7UOjvE-#rruxSxPm=8GZzW}+Q3DFsR42xzWEqr}Q>2=n)FXspwNqf~R<3XLcw8hA1(BbVUJk$P2(?Fs{N*Z{~T;Tv_XPGw4 zA9tOSeH>Q8n*}+w<6kwMT8v^CZJI9u%|bfoi^i%JHE6y+(wZs*YQ^J$+Y^RlW3SK)g3o`EIn0!v zUQ$`Hj9uOmo<9NMM@w`zq!5M?FKgvvoto>X$J*Y~=GqMsjaBn&d%LHUNN*&Q=uQaQ zWH_=YARl|C-EAz_;S7!b0`uG6Y~Ry={Vs?TV){5+4#!0mW%L%U)Nc6V!H!z=b`k3V z=Y7k}DDnt?AzKH-pw`}6^Tpt#PqD=#Jk*2+ zpYGwB5`|GXd#rWKPnEv_d)XdK&S0#V?fb{l5(HY76-dWvAS&$#o#TuVUEb3pL1UR4 zN@@uJqfwG3J(DAig8T>jjRj?(P9IL!-(#Yq>=Y(i4)6cu)eL+Z1~Jh$LLo~<4KkFNzP z*jth`+|82A_EIdgGZ3|X8z3kKHOFMs;?RT-`F1+_bbQn8INupf0@yAC@NY+bq(w%W zu0^&dJ|i?5d(ZH`&$lRtIuPl&Mri}Du$O~N4Yz0H33wkESRvm?anp}7@ytDWKNL(1 zj+a(5SN-2$R>ZwF8C2!!THevG&#-+pX%w@Ak+<+Nt*>(a3^uCB{$89-Y5Kb49 zk8JQc1?dX!*q;3FxG)MjK7=~*u^JYo_ix{GJz<52qP%`r`0k0UHOqoH<>Xy|W2wbC z%iqI_?+jy2kMLE|KiDHI($>eNm5dO$h=2c|?eD45x%7)Dl9X<6e-g1j*F?ZkmJs%W zkf#D;X4;V`&iYz+R|k>8q%M|=g9uS7wY>%grSwa+Bb^=8hkv5KJjL(9EMvY^YmPcx z1QcY^QwSAf62yFmrbPAE#iC~jrI$b(w0b#+nchujC#XJ=?=Q=7aqvSvcbbJh%Xw-x z^gtVkQYYdoEr=B6*8X-VQ+>iwt1A<5#M~6NQJA@stbC6^48?=QhR=ADl$P7k&B)D? zl)vv#5z#otTa3|}K52FW3{ga?dq&yGjOVFT{(1N|=viu3v*nD*lEvIwz)yOvE;U6s zLm~Ay(ZYx5v;2km1z{{&a@o##TtZXko+pOvui=}p8i~4V{#Hd<%dHM}iljkiYqA&$ zkfGwF+lrlv=Sq4&UuZaPL5uv{v0 zya+*E1V&K8M+z*pbgZdTBK*hQ?fEvC%qY&dtW?5+u^tzKH0!dn>;5MJ z%)shxkH?7Cyj7zSiw8gQekn3MuR5=&NIeEk$SYBNg#f7r=EZfD8oLE*LC{)Axtt`* zv({vc;NAwD`^;ibiu$8Rv~?u^EB=ySToFFjFskt`QUUUsbVLfxL$LVu4>%T+XcXje zty%QHA~G zFh99@WyDx6=#y91=QAc@+z+~GM2}^t5+Z)-=2_crw%0SbyzvsD6n4`3w%cmBif5y; z&?4cELf##knbZ3l-J(SaJ$GGpvCn-RcSa?5n8&^sJ!?)bxWp@*)H>fF&#;S<%!zBoR+QLON- z;~R!B28T@jPi3@tj-_(vnGzEUS#y?2Nqn zPSTW_Ou#iY!VdppUB%xiLR9_Zv_+E_$hnIx85my|Lu+wPl82pWXYdR!5|2` zyG-58bp5<})LIqvmrZbBrU0mV;n^;R?#_={%d(Y%3s8bWIdmsaj(!`2kG8 z&HrwEiy|lfIpUNyZaf&m6JGCPKIWX6#n#KPH2c_?wS9v#{N1NptXjZKDA2mUy&B8z;KUT+_s zzp>it!V|XRjvpvKJ`x`PM(fzcA09onMDH5X+59u~IT_B-Q?@H|QxMYHIxB^+!WYOwYwIhKA2gnP05Y>BKeC!uCh#oLy9j{s9Ez%jYmZr<_nHIyXsm)( z=W6%-k>N9!OpkNKbcK?0VfF1>rgN4hBZj85TUpl8CZG$L?)Z9}Q^$Ro2FT?g&nA?M2Y>t{=!MBFqXEq^4BVss8I>7rAI#8za%|Ymny<4$`_ZYV4 z&D`OGK~ouIVfXYau^v3%UoT3*v}TXv8}Ep!AD0|mP~Y(i?-Ee7Bh zf6V&?&57A0to^S+m?4ysBPauM)3+{&BK|%>*$(o+2y4^6b|s=frg-C@(YP$Qt;&O$1N4 zx7=RkI4?FhWfCy!7HomCe_w=eQ53&M+>vuZ<-M0X8E%cW9YXBWt_~1sgZ4~NC#jY) zKhxNksT7U4%JB1t|5`DB(fV^-_`&%Rc|T3mT2W1#A5AWyMAVq!`|tu1zQ@HzBTA}s zxgqg9ApKN3z|()a)=d5bxce=St^cp&C-_rO9zAU}zm+AMH03(jMHViOuohuI;2C4C zf);D>PR94_a1V_%wTWoq@}%zQ$OM>X);yA6L&W9?0+gsl!j?f1%QoXPD7ER*k%BpM znOR18`u8MOwJQ5#Z&aeH5fibgi9wd#wP}~S(qfh&7^G~Z3HMF7EVU|Y@`(hnVLFYzVSM1y6L!D*J#fA~s4(XcS|xm-{HQj~(%kh=C-y$eu{hJT1*Y*%0~C8wKHq^O~rm3&nz zOB{aZ=xR`^B~xcUx&;!4IHtsSF@jwMOkhg+?I8RS0(lQ)iyCi^-y|P{vl*&k3Rz5! z#|?q_kU%e*>d}{uy>h99=^T<^tR?#A3B1r~;ProlVbd>){Y8wH6oz@LiR{qUVW; z%JU{!>vTuvz+^OG5=~v6d*amI)&hAZa1ls>OJ;uxx%5g5l!;DS43?{t?`?|Set9ca z@HS-SWzY$-$88+)2ALNuGw;ct{!`IPa}sGVD8}j}9{9m3<`8$vNpC^GgX3n>?T(Nb z-%<m&%yWb694Wz4(zq9Tgbd)~$NYEIf-|<4`)1)x^F6{YEP~qtZ+; z<#)Q@dBA3>`8B_(+-jG1dH z>!mJmoG1w3JFo}Kxz7sbvXU37B6t5NV{6I#cqu=`h?y|4Gm}FkV*j$y}S=cnSaIt zy8T8Afp-bG9+Lasu92lcUS#;(E0IjJ?$tKE9kyFxu9|^O}jN|cmI-4-l9HM|R8ko zo>SJjyMd3seO|v@HPZOXnIRm|_kJi<7#>Sd?lDhHd}BZs(f858bB4Z)&o#ySOIG2PGMdGs)8cjLETLlHX9ji0)7RrV-)|5 zd1-Bv(%O~rWd1BK5uerZE>SO{9Nh%8+(rG;4rQaB^dR>TW==gE8lA?m#L#i{H+3e^ z=$IvIi2BU^JmGJS$HgloNkkEYle}>6+kRJNK_eK4>?a`vp08+5)_Y$Nh>m)rJe25R zcp^ocg!_xB52=aRecEW!i6j2XNlY+E;EPKQIdx-e>~+h_OAnMGDxM{h++MR?V``x(LI&WKlPD{ zPE}Kgj|^3Gp58i%9UzM}=EypS_U8iJbY=QE?Jh6bz;9Y!$v&(^;2Z4mJ*o1`C9WSU z8&fV4UZQ)$0g1}QPO?j#9r}5Uk$+n-2XuLi2a1jIkiH9fK}L%BKXY+aHsM5aZccdpj)_Aoy(#dx*~x8rK-xn92YZn@SjY6+Vo)-qW=3T-9P_t>^Py~anz)9x z6{C3OBobF%OMJ@X-}-g~2AGdvxq9YBZ?T)ITS+<23?%Qdlf;`EEa0q&6Z`Raels#R z`&r}ca>g&NA6eS=a2=kgS1{soirUG&DoTA#r~Yb+CLc%;Jk0-F(T*lz+i1RyYg~Qj zW3MUrEu4*e_1J-u9SNJ`Ws6*R^X}^;-U(gKRB$RR)I))2i~jywcM_Sa+?*c(D_0i# zhh=oK$oqW6jh>ar<$}u;$U)c_KTx6-d&=|8HC#us0l)gcizr3f>n@b1EcnMne(D28 zAd`Q9jM!swsmp}tYK7v7+BTOXTn!r#DJ%UGH0y*lpd^Og{}Iti&e|8x>L~mWyZQtb zV7R&TW|&hET{@zBPSw0uVT1e|n`yAa0Mq$&;8$QQQBQ%+1oOsh7vtJ;KyPJ`Ig4)Q z3`%cU%?qp0Ok_mx%<&a3y4ht4Q@Kn9l-*~+7TURz3w&qk9;yKQu^(Rz;CW%hG6+i) zhW9N0!C37L9z(^DU#lu_bMM;Ne>Fcx1}?WP-WIW-P;SnwGcsDll)N_{yp30YCAh&C z7GITdQ;8tuk?+}zA4>nGfKmQ=y#=si&jcn7F6EF#PR4j=?^QcKlL`ql>|20*nyyY% zdbiwBXm=D{SsHG;eu^?uY^h&}{NQsxc?s2 z!6Zpq!Qs;#ca&H3n|xSHL^4~b=6oFh6y_u?91}3i9NzQ)20Clf6~eN{F$DUa_XSeT zK>2(dd15=QjUQ1ac5obVA?CTI$h>Kp5Xt^L`dsw3u{)D=dKXNaEU`8~ZITC;+8y~T zF~Wgy`4IIYb)A8w7YNhX*9A`#jEhxm8|DcSyd#ej>BNvX)dXc-VhY2*YA_F1hjcfF zXdG5|pihm}J?Z=8|2Kp~ts0O8l(5=`dM~SN&L*3aV<5n_)h_|y@D8KW;73i<=V-SR zWFF7h<@6UiM8}$H&=$GSf!D?(nCs{?4g-e%6?$cS^l^yGEGF57%KFVHg+b%zqGJ8^ zs)5u)|F`GvpnyrIV31^Oo7KxMb1828xswxVh4*~4!EVRbEZi;>x4P@K>;1>Nd(>E@ z#bfj-u~|yzdM`e&CPjr>^iTnRrbY0>_uuaWPu_r;EW$vwR~XH#4!`WUDIKr!j^Gj*}It;Y(v@5W?2ct$qL>nML`+p z$Z|N(wpyh5^cE~1=mQc2l|o0z7o~I7)AxmFg^&^pT0=~aA#S!_?W4aeRri5_%ao^7 z^_~z9gJ<+eJD!SA$NyOduvL40sbJWyCO9LM?W9VR7ilPG`P5P*yX-@dJA!zR!0>&> zc5!RaVNBvg*|>!;k{rMJ?kpZ%B`w95mj58}>Hpqu$zB~_NQNFX^HA@eKYFh<&v zARJ_TW3CVs&Y^g_T_p28VKu{64`J(Q9DbsdAy;K%$jd_eY9NPz&^adMFX$`^TGYsI zGkJT?0#~%O39v~EjkmQjf*qPU`A{nz`j-_+xj3^By;dFD7c`PAt*4}$zNy-0!d3Ti z>TGn_Y8lS(3JiP$AueBT*P4zo;|qAJ@G*a(+eq@I?(*A0)~5~Lu17k!D-8Qqryf4F zdUiVpY^d8O-YCnuW0cs-;@lesXI=&^0n!3YtY(j9k#GVp#yREkAElYld-Nwi>WCsm z@UZV*@*tv%5^t*Z=p}=9}~XKH~rvHCnLB^Kh<4v zUHn!%MW@z?H{${`1WCGZlB|XUog*dH_e3PAjp@py8A`nh&R7L5VD(AaKqj9~DHz&z z$rq5`QA`v3BI&W}R%8WLfL3HOP#TA|8?$gQ!Fpu#@8eG_a9jQF#B%tGDSWjpj9O{e~z*LL^h&W*&_3HPj13olgzEz z@pJKPWRH#P>O8YZ{Q=*4yrNnj)=YT?Sg&BE<}8NXc8@m|8{?xI!>5>alc;sHsC>WJ zcSRMXjYEoeO1{yEAO*56cp~N3*{$Z-`iGxPKL$FEK?=%|MC2eJYH2AD9}*dF2@C4` z?O5FZUQqP{G*oT?;!w7Kl*sd+2rWqUe-5tXzmr_1AtZ zk0@`Kr)bKer6rcYtp3V*3%{vafr5K0>M7A$q-s*ucUnGs^$RqFw_siDg{*!T)Ga)b z-Q*3Y(E9|n`mJoUq6v zwZ{6V6<;lZESkI;bK$Wudste3Dck~eyfn+5wx~bbG7Kev+_c@iJf@S#O>B#rjcnLt z9p&2VZpu&OS$!I3#$y*1nL?)HweRx(yZ}m%$5~M%DiOc!K3G2N2S!5SxFCc)Mf|Ac z|BsjU?{7vAwFZ)+82lUvEq$D$wF7`!H7G5s{E_ACC8QDXIp>BT;r zW@1HyFS%?uUaDQZc0@hv`<;+euhH8Z^YeXM=ZaO?!-lg~E&?%-5qXWFg{IgtvY>>k z4gwe!$s2VlhG|JQprnuRS76$xQ79D=BjG;8mZcS|n?P^6d*RhR{DKD35~qy#Y{5ah zrZEwou+9HJO#*#6Kky4p{&nsok$MF~R3_GJz4c>oi-k*{nPZoM{43}NyOI_kyY8yT@~U_l{rcs?|p0r_862T34VbhU@we6?{`51VQA>>{OS>GNBR zrk0va#5H`t2P?c(>#5AuQPt$BR)){&D!(_)@9e2pODK3x(1c#D+ac;N6#BPiL32N9 zkO8mDEYa_oqGU5#x+LYPn9x1(z@^+NU!$DibG5w9*B-+|UNc2Yf4c_bS(D+G zE18nc8St4V9)*#X93qc=Z-P(czU?VB>&S)L?|W{pgUOZJZx z1oFYzas)|Xp<8-kIvpIuBR-$Xn_i%~Zjpv`AeCbiLga-olTGGv45Pl&N%B7V zj1Cj$D42-KL8$YR_#D>N35k;QagTT;@hrwroqJ9qlCPSg!c6`mC;J(cFrR$Hu20zU z!MCjDi`jXJ>qw8LPOeY8^G=IBoN|$0xXw6yAHxrsH(tp$fHM2Wzq>kjKUPt&5ZA9y z*}d>cH!f0=srDUD5JA?Ua}tdw;qgluB75-VfoG)6W4miPEo3D{qCS!KTH&Rj&x`hky$*@XH zWJR~yonuF=usQ@HFUser40cj>;L|KH5g@+4)?HVbqUnJxcXWvJ^1CjlmFRhmsB&?uU)t4AvNygRW3yi01*`~M$5Mu}m+m-mMJ*n4Ugaf4Tk zgmtA86VE<8yWriXqMsvC>zzIba!XuVudCwM36iv!m%wdSoP@CZQKs8XWb%3 zFhp=Xdq&icK4($14;90^u|#@O`DKb0zu1J~*|_`Y!gEKnv3-q*3&_y!?F1)#cZu9T z_F}n@+~)O|?96gh(rCZkI=7j9|{#AVICO6CgCOi7DeWJflyop0cy zy#LoBG|XB;43AXi56F1+inMrQhbJpYK->UDm4=A#Ag3sLizTA*2)*O`I(p1CdJD7Z z$!I1HvZkH*nBB{-`lDbe2Oxq%Z*;7047-Ci&|^CdxirT|jRGU(AmigvHLmX{<6lkIJf-SfH##|tH! z@|Sc)GSr)7c%!b_9xf4%bU~)yM5uPhBT&xn2zmkFe4oPU;$+dd3MvUAznsIGFcwWi z7I#MZaI@Bo%P5axf>!Zj7g~Pwt*=d>BLLY0d)dFFT+a7}t(<944?GSC>YaA05O@Gt6RY6b{`xXaUdL0wMeaL9-})1_`Za8xDTHBvFOC>jYGb~ zjW##-scaDPwhj=dbij^<+WU5bt?bD)2*a`NO^VCTmVenY-G`7~TbH1yK3qm>5k_`Lp;_HWu zWQ4G2I^ybwXM-T)i2Z@E7ewmf7|<0p`UNLhXa%Z1)9W4|t1!%Z2&K}HU0*%3QWA@@ zzI&sCnUN9Oha~Hpdu&{Q=k;pQ-dr>8PS-T#+l21!;^@2BDZ=M?)^WpgZx^*QF{dK9 zb6G$;+Qzp++nh;{bnY8gZ}!nyTZsMrB-uS#a~<6w>-9x9t3O-sj(mSB&+Svl_J7CC zMX+yX!a@)5SxPi^la}la=N;vs|LI`PLj^WJ?b6*Hi@5mp!*kQ;Vp?y7wT*Gsv(w(G z*S_OdT|~-@s~zyTv9_oP+8bjwt;jSS7wO7;syivR2Bq3{A3R?>g~cH|@zwA2AGBvd z+rr2$jhk$%-Wk(%rv0GbWc0No5}UXA!{~074(scDUBx}iM)06O+ofXFJdvP)}Onzs+4T7w-?qv_!b3;yU39{ zg#xY5X=;?B<%>3LPIybyJOtqRx{(^Ya)cfv20e*Y?Fr?OvQwBzuVCf$&lOd^H|S%4D_bjz>fH)U1Xui=>*ED*NEH;TZ8#K%piG z8-wB_ZN10F6jxIp;1K@j@|+}}j+(L1E8Tl#a~r)F({XqJ{t~H0;ZOStE-#-yF^D04 zK*MTDquJ?sH`g`anr@b7yqhWL>q&?%m+iGF3s5)6E5ni`wTc2OW-(+>QU?_q&xPp* zi3xN4%NJEP#AUs5Rvu^eJ-n%3Hotb|7auf(gn95u+37y5lu^EdDG>;GXCiV^7ZrI{ zlm8pKDCllrN;#r?Z+Q}dEsyIEF^mKq%LNYAZjO&u109^YI9eoBRCjU1*I+TYfzFlc zl+~}L+6&BL)_q$`gO6YoLC*OJwuL+_)c-uG@MmxShI^Z#pD;>ta{KQ@X2MWi)uv?I zWTT*s475myFTXlpx2`sV{!Hj?&q-YzKkE4Kx2agZip?W&h=S+!-Sd;tZmEbbl)>vW zQ=-Vg_v-fBhNu5U?InBN%cJBrVvP?-zxa@1lD41GRQz`mW7S;E=|?e|5J;9PUJTbd z3V?XG>^!?-FS|KP7?rW|rsk6_&Kb4v%NMvSUrs!?V-C1H8QIT=XdDIh(ruFG1#ZX{ zkDS6I2uJEiTJ6rANLV};(9m6=fkdBTM!u$Edt#=&u+jGS+(K6gFb(cU(^freJ6Psa z2EbD19d8K!G~g`SiwSVA5OdeR&a66%;4{ULH;x1Xx8LEDMiNJ^rCOXz=Z~cwGdQ=O0F+ z@xiT{C#%f!d`+1M@Uc2nr@gEMS{k1@6X_1gq=;=?|V<*#@m^6r8ml*B;V*v`Mh{b zHTDu@LH$Doa$xKqB4!X#ARohvfcM-{(6&IR6s@^W&0zg;*I2h?0U8{stqFT~Kepgx zxQ$_a(*AR*P!}|jrkS9)YHeA)ja9ev=morNhT+;WDqGH=N{-CizL4Hdtrlh3a9N zzj~;yo0?yH9t=#o^DOX$ojN>nr%$CP!mDoRDxg~yc_EU2G3ijhBy?kL_3f4c?UhLf zpf5+X7rGvi>*AhZ*lHGF!K6(`Rka9yK>`Wl>IV^1Av(2&%>F+-e06c$NatghHEF*h z7>9lj{H=^NNbdmhF+aHY-iq3t>h19~xm#$Otm%jDh+-m07sF?_e~nbK+!j?pEzDX4 zTjcl+Fl`w69-6Z~=b$JLOze|rOj6x8V+tQqXW>T%P||KVffiq?=krx#y%>Z9G1U#P zOan6yWD$CrVAO56cRd!Ma5E?<6+AhFZk8LxMr^d6-o-xWPt!cl zCtZ<2#Z0f%wO{jtPfxS;v7J$(Se0;UFUOc)tvyzMVHQvuU!HsNIZf#&W*(>aEp+}e zJdVRV3!Ig_6RR@2dK7?l+uHZyu+NEeL(VS~J1 zW>Pp$Hiv?&NT;e6zT_V3RAf-r*9^vY_rjSO7X6dl;XOXob%uqETO^ZzfU*V)51>G+ z%*#%dd{utPBbB6ShPJfe{qsyd`b;iE%pI}@Bh-VNhzC^X?wsP-GLtW|=>FBw{50G) z_sVm+77U%Nso+w+(f8$D1fXtK&v-^W}Lp`T~^;*BCv68#VqKyZt*F zZ21Pv;XmrnlcGPOquala-jDT5q0fUx+zh6pF6}B0;dzZewKBq?A@UZwgMVP) z-fXD-=M9*EN@`pY=iRzUC~=Goc8uEjfHYNy9jN%+uPM&q*;K?-XCE%@WW2QZ?WFa~ zpKLk=sK^o>5GX-Ymd?2jN}-(Y2qG-)n5C>3?VN$eBKll)@&j$eoq~HVLXPi&mh=c> z{ur!fE>Mqp)sx2~P;Kl?dSj~L^%@+%SCR@FkLleYnBg~fKH?kaM4^nq|9F1Sh0lYU zi|FxIccJU=fS8_Rn4a7AO6{h|^L9y&zyIm#B*|dG`fz3ZN%q#@?}{$Y7E`5z4d_x2Z$Ydu1y`NE~3rQTw9cv_G-@}DAwK< zrq=o0jk3LsLl)o=T2hMb4r6ANtLp7Oau=D`*6JgA4iK15#!hN(~H0sub$`LRNklPPHY$MatSiP>O^ZVB$==^8=CCAl%O(;GVqoJKqXPnkrDGSBU zXF%}~-Ql7{F%otgFeDq#@pqj zy5Lb|^VUCMVrRN(RQjx}$54J?YC*nF%PFKXU-bTa;L+EnpJ*XFoP^jygEh0ZU2m~i zZ27Hw{JW^Y88y>kMrdKy3M;B}M3pgtrNNsv}r@ai}W+76%D;f@Xklp}QuJ9H^bMCuPate&gT@+=eF9+$*RJ1uB0fQ;h< zg14#!m{LnUp!cY1Vr79&LMgp zRhyF8eqB_p*21*6on*BUWh#0mjQ4~tlZ;RZ9iDzC3j*zR}=0oZtT+Tl>#+t_(AKkSIIi*AHqAbsV zg#XDzs{97tBs2cph`jFd-|h21H3Y<7|9zC+5ynAH_4n2_YHpJ=22C8100-8>%`4+l(laXbM~TmOEN;%ml2VI;u zYKABC6XU=uBnu9y8x4CkFBgl*df)E*cbr%BE#}LtkrY-Ezo3Sky=Jimd>^alCLb6? zqg-!(=WcorIms4>;Q0;(cb0O{vJHK_EtC5M2A64uj)W=fkL8T@_5C88kK@b0M{A&< zRBz02*x3xuKn;pN_QVCu20tjA!k^{kPCu^Ad#8)wSp`6ECe-G`c$LJ~VmAJ3Px5$q zmJ;s<15p=UnsRam*2aWEHOKw{hYe`SV@3uXjDs_&@>=!lMUw91skj^?87|=OPj?pD zqd(Z2<#@z%Wr%bTK>E)jHUcm#a#*tGSOa%#8e4E3YM0~cM4kk4hI)WSW?2Y2barIK z8lR{qBgmWH^oVW_cQtkNkA6Lh#ClHEJ`%+b-2$zl^;TDm|7>#-^J@q!x(L}8(j zl~roER{q7wWKC^8Y19i&-@2@gqd@9mx=mA-2f95Py4hJ;7ZyS;sM*;)W4fGNpe5tM zhqUE~)Bf*!Qchm5{K+3qHl>Z;7mkxyb#bmJ0J_U0&+J@SxIrn2y*dK|OzYRfQF${# z#mSnIXc`XrKGFGfb~(YkFX9$o(72wdx2N-766XS5Go0lA@)}xk**SJY?*MzY4IYkr z55Erj#fPnI>4HMKC4*Il!K((`he1G~oE zy)N|q^3AJ6YFX+>pU|if@BqvOIma>)#3x*x(gbb|Lvk749`y90Z1VM>BP{;P0JWaDidjS?5+liG$%ao5F z!h8u&!qYzFJlW1XtsNhIE*pGr(K8GX^2=ZK3^TDKorkdR$*&FdTqRWvz~9--}($xttXx1kN#ti2vS~rCub2*%mnq&zYmS<_<(37h8Q`lO|z<9Oh4$Kir_? zu`5W@tRl_Mh7#&kz=kiVGG>V?sbe~MI>yLQoDTA)6DloG9#Hjc0H1Ss=Ocfoe_%NH zoXESz&cP4RWp;2p)pV*_k-cRTM+Y&#;K%oMCgdlb>?_~L7~PwHDrVXHN7*(^UZN%z zQitMrEv{jscXt}S2+bE`)q8nB`dr;bgO;Bm((+&Y_mqPy$(MLyY4-DRV$4axWNb!2 zMr+TVM4J^(uaejM(6@7diElfg1aheO~XZX$fCwbenVi4^OnW&6o}dhP!}XloggirR6T`>*|=@_D&>~ASf_JuwBDSgGIe2>=5bM*P>|>Ia z8YSEIFLhEdv}GqrQ54U569#`dmdI?{z!3SRTK2ps-Sk$vI}> zyA_uO_bc-^TlSOlpyevy3hd*1nkMZ40`$n8t-349BTL=KEC-dNS4No~xm= z!FjVzU1z@SHU#Xt-bg*PUVmR;hE98X89npA($N`fQ!0puHbh(l>KnBKK!XRrUi{Ll zN?`kZvXSwNAF!KtISN1QXD!9Go?H0)oWePsnd&RY^k)p0>xovc4}aHoPS96mf@?^Z zUvabg>+VUYpM*>b6>|km?o9rsQ(1HVyRfy%#f98wi&j?$Cw|+O>sRCDaww#Govkm% zESxA*RH|lQLDeUTtyi5MT^KFl7CP0byvwnTvg=Ol=U%^o)-N)fy4zXwHM~2;YPxrQ zIyYnse$OA~6Jn#{Y>&Zo#R$+W(W0yLRFuPy@;OhAeAYzNk-Pjj%MtettcL0mn-kn6 zSGoj7SryU0T)DiN^XZ0Es;)W*^-_MhG?FoAmZwskpw;&+u3zb4RK*3eODnU@jc@FD zN-Dvh!j4R)!Yx44Capfb@QYtUj9TpN&7%Y8Io9qh58KRZx(3uRHQ#o*+toPwj&T{g zU039li4^`>@bGd(_ zrX4;$bH6 z@%UR%&8GCs)u+w%1Xs+Jrj~#jy3u>Bdvo?s=PCGM8E8qa)?d1 z7*l^T4yyz(ulvYoNk%=AxO5tY(>is{Dt*mza?gPLhno*x%NebuNY_GN(ynn#{U9-2 z9Q|`seIa`6#BkHMy=DyRoQp4`w(KZxJ_8p?XbbKd_GB7=Xk571oQI6y6JOQq(lSaV zbulLYX)ykYwb?enaK%;??Z~W98_Q?(ua+^!==m5fI3QkR1lqiwA@bfbK8AtC5ZL3R z{IX{b9B9{2Wa12QmFMzP*hENelRT3*7MVJWV_-VmIH1 z0}-A6T8Nm9F&M|Kdzbb5A5g(D$)2AU_ouPd3PESi570e7Km7A#`_72Qg9t`O%c+*? zzFYI6;&YDm%jE4#e`4zC|2h%`w(JRfKTKK|8xJ2w`Fr$zvYRlY{O4t%Lc){7@CcJ4 zup81GR{cGs>OIh|RB_5F{JTH_OnKFCW<+AaQ13uOGzuV&fTm9t8i$jC)QH4hLQ zR%{mFJX1N=3wEft8X^`O$Fy^-D7NfvJ98~Ow(KvdiNjq_j#L%XC8QqSW_|O2zg^4m z<(`=zV5u1!GKJ5r*@W*MvK&ZAtz;frf8Vt4Uy0XVaO;a(AdD8?3W!w+UbXa&+68ph z{)fI#2yghx{oQDIt}KBB(-Rdxq#J-iYYn)K%k0R93#Z$vFRf44KO=b}5kmRKc63H5 zwKy|Kn24R&CC$-Ou@oAcui25NXnI4nPHnar7n_G&AGqmk7`0C@P7pB^I@c3jf$}6F z`nhR7h@N^)Xx#04O)t-j?Ihk^8yymApDH9CdA|tuQ-2QLpj$p=vx_*<2%ap${x(>ay27XFf^`4yY4 z)H23&XI5~3*R`f7C)(0}{>0cvB`|z|$yO!)14xA`3%^6(NHy0<>J7S2G}+OM#yg;m zNr&rcXzZc2=(WHIeS+BO8MBl_>%g@>e}P`XUCM(>8~NI1!S-Bk$6qf=H#Ma!dGcRB zB$3Z=6%HzAuAXYoBXKYqClIV!Qo2q@^?EKQ?a>+P4aPcf6dTQ8;7d>UGucdj)X*J%9j1++^S|d|iGr zIxRt$AjY2`IjBo`Mq{^S-8K+#$?^! zswer-nq6?QQS^Pnd?-Aev-$9pe9o)!wbyfSZ`nVq+1wHxIJB1K!kCI=mQE;8aPGO7 ztnQ9du~{+-RZFs5vN%6`Hy=3HQ&f9kKkZ9F$K|OG)Af!XuAYSZXatcA^%ax(EcpIQ zkP)TYnUz$zSKgwSvq)n{^4C+G_Tuw99p9y7+C43VUfGPLMT*u_H2h#-vLr369H(s* zft44jQf;2&Ce04d#vgp3lK#Nax|k!|Qt}(F-j-F6`*A`d8rQntG?s}tR$H19O7#l!o%*CR_JU`}dP-uS!n`%3~r|^cT2$tQ*b74eg z&l_rdT!aeZQ5nO(z|48k*S8OEhsx`d!e{7sPLtr4eePAhb}RPmeHGF_>wd4x_fM$Z zDnao;Vxv5R6h=47MXs8de{E#A*_GcB*8!Uz8g7^MpJCaKvN_#lM^%`wC7==CF?vTC zd_>mRI2C<7D+yQL6%Zb?O4zxbJM;ZiDra2h*Pe8_`03Ge^Ua5W(d-r&#O>UbLqd)l(-Ajdu`?3m%S?u zJN-XwMXUcjUJ{IA{+!#b2UpCyQKYe%l1Mw0GW*nh5zjV-#_!2;Siszd7sgs+ay+6%xF zvS%g2T}t>1xP5DI7^kJvAqM#mu4TE4T1uY7f84I_lB=RY-bXE$LjPuG%OXrvFL$Us z$-}(Oio*IBD4;d##f?)QoMN7@_0?98dj(zT(LG@b-#}|I<(6$y5icIM%W+b1=I(Gc z$p%u!CRTyv*}JBh363+b9}L|-f!tS|2^^I0AeF*~lGXVi_M4Q)nKJOCKDOSk%D%Dc zXznh6FnYiJ;!b>?3BP~UdhHJRqZ7g$cY`Nvm*Ir=Ro2QsieVw0A~`K~b%<9!M&j5F6qsFk)Kc&`Gl zs;=r4_8I%HSgmaML*%JmWuSzeNOgMixrc-1r`zK$KEw!x8ImZ?<`*diRER-M)~b^S z^{B!5Ñy@4Cvi{z0N)T+HyX|*Hn0*!|QxsKw_j4#*Hi6xuMt;fw4vU0o*_gFH zVKBnj)^(Ru4)Vy|Hb_H2Vr|;2YJ?BASVqp5ryp923prxOksV|w+0b!^hDoQXw7Nk^Qu_=TH5N555w&>{RO<$;fgUlmhhUytL711j=He49 zu05B7SAB;9KA2muF|3E6^Jt20&!*o$K7#OI4Dz3O!4uCtIdKTUOpH`S&=eYQQp?-k zkKDKcbQEh%nb7r_$Y)#ihfwePXv8w;U*|(X$7=Jz#UipRjwU2IfEWm_$!!^3Xjb0bed97x>K;=|zDtHT8+J&lv zsT1*NHL!B5|92{5SKVnJ*-{m6Hq=5fUCB-vChQ#Vmw-W8dIo8xmj8?2L{M!Sn_L znM(l?5N90pa}D*JTBKnglVqqI@w8xYVUYCy`vib}Pdd-03xN=zCP-Tl1j3RoCf$*Iu$; z>9!&OHvm-A6g2`zhTv=)8wR@=w6B;mX)Kpq)OyH!fF{=PQSQBgC(Y|{)MM&Jc1)@# zjln0t80r$LQM*;a5}C#i*7mkq&X!-_Jm%rM_gDw|ECxMf4V(e9v@f|P#dVzfC7Jt268oelrN-*k@XqfC;gM_W@ zU(DhW*F8G`^HIOb0zkmNbWl*SE@1TeS+-#jDVvSPWPlRpI?&imK!WfVz$IVU=*Ht& zMk*>`v7T4VRvl(#fqzEn(&^V0=u6}gpKDLE3d74S^<%088v1Lz#njPQkrU*Cn_6au zLjrzLd~qG{K(vhEiwyl*w--FM%AS}DFG-K_^o)&Y1nI=yw0&|L&h=9{-npyU^Hdu% zx{odc?jdXOZsRiFL8Y5#UFbaf#4lutL>U5xi~)%p3lj*b>%5cBO&GJ!ccBzNOIo;L zE`WV^osD?SjG3Rr5yJ*Axqf`6d8Si8NZUDP{{Ie)v8{$D_^;?7OKRN`l@F12CJc-14Lt)h1wf81VkYbqX zTarW@1U_v*-+CUlu5pJ!X48|XH)Khyrn&faV*v1UL8##v(nIa4{hp9llk_x!Bz^y* zJV(3gkSVi$tS8hvEzoN}u+Firt_8hjOh`D0!(xKHzzA;)8j+ZWw>>Rdnyu6}-3-BU zQfIk?b1LlW2MSZ<%Jm#HsXbR z?}~Gz!mwiVf~$MDg^0I^C~91w4!{n%kn}_=_xlV?sU?{ERV9@Qm+w#0KDe93e|EsP zah65*J^7WO)}4tLF$>_LZ3)I#=Q9;yX{+r^s0k{NWa3i=h9}5`IXnIwf>=8f@^<2- z(Zxn>8QR=4T7+i|r&_Sm@Bk0*As`L88k4g0Lo@z^O5vgNN`D|1bN%jctk91>obVDq zd@RzBu#R&iwsj6JPQ7 z)w?6l2u~WxI6oc0q<`<}K-^AwD8q}L3Hw6Wr>pomXil7f0t*zE35QUJhmw7A92NIu zi2eTV^5jG}d^x_r8^0j> zx18D!_}YK;2Effx{w{Fm?xA(^xyZgm6T)CSB|K4yhQ-FM*YEuaeH(vd{^0f{sEBf< z%_L?o%FUFX+yOHXx5`w;+hPUBuRW^V<2$}p}8 zW7wIIXNpbFf7yZ4?YZUzZ8V#uC|D(?z1<-Eb!dN$s+(xPapT$ME+`$EK9OrcUR7GO zRe%#J)|CG;n)DD)`oKf{Vn`4RRXtixJgk~eF9Mu7=9?w_rC~hXJ@RBIKW>Z)!6jEM zHU{?o-%FnDi(P1(cx9aZL-YPH3A$!^2BNr>wE8cwKtl+&S!aTUr74N(iYX_x-&!-T zTX&~n6SQkj8a9|*eDzq`ckZHW3!p7qfPfl8^jd_ZdJ8yoU3LxD&mCu}dH`0iDo_op zEgy7^&}g8w@izgRAD$bBR}Y|O(Q^pDt#J&M=n}jmwR^BVMoi40<0V*aB6|XXzz@T% zy?!vB({pezf{tx)@XHsdO>V*3>khja_}%UMf zd*J)*`6CiagUSVMhw%XtG3jse8E~9;ePaGlKfa-5{~OTIa@t_yqk4&>P;jqkbuDUk zUjo&mdhVY3EETeFSI zT1^KP)YIq&>17kesW&|_A+D6160Zm@!$9X(umyK2XOmqqq9pOZkzzIIX4(d!hr4w* z5q=q+@t<`L|A(vhfT#L@-^Y*R$gvMuQ8p=i6J=$q$jm6S6xl0;BYP(+QfA4>D4QHa zR!EV`-XW_pvVQlg&*%I5{(t}X<2>G#a=gy#^?csX`@Zh$zOLizv}-_lgix)GCD8n{=rwd7s%Tx_3J?cQyddxtxV^5y4 zwfY6&Kxow^%)V@>iFj~Bflc*0aTM)MnQa!X&Zh4ow(}PP=&2V9$Ggq z!$>{slXM3nS$Nx4k}IKJzIsE=2&aGj50qS#HVbDY96fEvkeQciYCGSJcBVLT|GQ)L z*49XRh2TGiC|g5jd&Ba(@$##(pJ19OFT~R09j!+4mlz7K`*#;qNt~+>!HVVMVn_M4 z3Y)MGFIpMR)9#q0L)Y@_%9~G@tA!Z$zcCkPxA^uN*4kWF*tmFO`p44qhPyXN<{3Js zh7|AIzP)T>U29^*SE3s1cGy?Lg zH!m$4s`ZIVyF&Z!^11~jpAj$ewMll9^jaPk3i=CD$6vQL#w}o!L|rKN1N6$BCmtfz z$-Kc-nLY_U>-HPa>QxUj>1fhmj&Tkh2{6^sPq;&q`XvYotndDJ3LMsWSv~F%(TEI) z7=05f!cJS478kcZv2Hw@Y zuRL)^=R9f)rD}=#6NPsfj;HSrn};L7RY`*K`XK^<2LM#a)tvHFn{ENl5#KBLePbIx zH-Np}f@1c}JM#ZtS{zT=k=U-8rrlT#T18+U_B+#u#x^IYw`(?Yie~K(Mkcg}%{Wx7 zwuxr|&$%>QrZ>(m?_Hu_{N9~2Zz8K#9Xkqx{T`q%v@^_GB`8reW&b3YmNLey;6&9` z;pP{qZ=Qp2h&FcPg|l{_EuEz?uhhJzdRsi_*Ji65lU{Np z1A3;y{JWvVYXzBc8Z4V->tB)rrYddknqJ;C3!6PKAmjI^=^yAf z&ZYPT;J4kOY+(ChW_;=`2{Y?3t>W?IFu?Eh$KSz$aOZQ6^6t@w@6F28Ic?R)0W+I9 z4+;lQ=m)gMqEJdGt&7Tre8gzYbmMXuSI7-9f4N%Cx;K zbe^)FC#C$h4HCF!j(>;ocB7jjm(|uP4K9_RZdk1s@Ei;8(N?sbeMuOqH}diz-Nr_K zz}9?NWwR(ON>oF{3u9D=5J)wO9lDJ=28khYP z+#T&#&V7b~=jG_{3~HH?R3G2mHt^}GzxTcgz{0xy>(7}sZ2R6=RPU}|ZrHFlm$nSl z$P65$`3sZ&6wVBZ(Pv9W)}qLN33!Sy_UpcNs~gmNK2b}arL%FxsrjK$k!Sc@CYvS| z`7S!-!Ozx&OdUY?0MrJ@PdBRt7IUfD9kTD1ikHy+JYyHlg%m;B&k~UE@vnJSMjz!- zNCaWP^C)Z2Jn9GcqlVFmrCZxtGyfiZs9$A|LFYkfm4Vl#DmJyTZ9ldfpJ}NW=lOYm zvu+3)6>EMb$zX5_D>z!`er2pf^7AqmC#j8bNbp14FS-hEyovOgYfXj3UaFl@%)*f6 zxD?!>RXu^$#LnS-j=P0D9F?mMt`DpdBcYYovAq1<^!?JftaQwaFPCMhmFw8a(3Dll zkKoaig`m0;s*{F(Ona#WPj^Jgeylii%JQGkA>ak?8ELCV=&J^bT}8H@*-6gG2<6fc zSjimiozOyDc;kmBsp?-h?M^&X`DEktz4m&!m2wz!-9~@Y4m&vusC(xy8S$2?3T522 zFsm9#2BLBouxUxU$%NaTdkdL}@K>YiA251JtyS82#us?-x6VFV56m|hCD2$^mDhxH zIem{l^6_*ONwjn-N8xIi>X4V$nX@d!pU8!6d_1%bz`RVJ_ zY`gaUdPG*=z!xyap?JHuQ-L!?;;sU++bY(w&28N+nJjll&W%+;d$or%=`TD6sS7Wk zh9Agl1}>Kweo9+Ev0EBip5LSeTleCs{OlJQ>OcN1dwD^l6GgHV-IFVCdn&f4%i1pAT6D?T zElpOjiZkE`S)rQ>@%N6_0^hxN?pv_DjXxXgEmfihJ#5QtMhVB~cfVJonL1e<#b2XI zI!S!`%HMsKO=U29u&#Jx4sH72wu1U{w^X$|HW+H(M^~{YGTDu<6!hd zNKZL=`Eb3Bre=sBC7I+Q=5nO9embz1tkTz=LNe4P(Sv(Yt3WKp>LiP;c}M&y$Dq&c z3Y)*_UVYEWckSei7;&K}>5{tadXII(cmDOO{Jb29%-26DkNluKP85h{{JwJ;lhWyu z!ugM{k?SM+%u7uFe#JIIxO%vsfplvJ&y(%>0D)u6pB!>H{rl4kvo&-x)7vufd*%#o z3x~#P3N}Z-G8sR@leD2N3%C?mEYA0MoU^(8KuANN!k>I}!o6_E=du8%BQ)Q`=<(g{ za(#;eZ3cwRSt-O4y${@rW-q1_(S7@0$V?+!ES*=vt4t2s_EpmrtITh#DSP|3DDE+ppN6|H z=2Fbusm~ORwrLjbQNUh!?~_A5nt~8p0Bao>5%%?_`bG#3+q<44^e(#tkIA8(zDBrv z2(x@P3&d*_Hm0hDm>%nQq-+Yl<@Y19E0(XGJ?C?8y5IEEx^SyngO0GV+#3VS);)aV ztBKkK@xD0O!w59lu+M2sxOowPDHLzj1}=y1`8#~!vZn0&cJ5>PT|Szl#?5X4W8y7P zg8N-Q+--#P6f41Du#qA1?3c#BeF$*-Dj#)$#JT;{0f&jg)AR(-30V{WYRTj-8Q~k} zBI&Y-G<@lTjkkd|yk%Ea#*b7^lJu_5^!_H$b$)S0D2Yf5~L{Y#?^Wwp&lvwpjbXHjqN4AL&XMoOw|<)mMXb~+luByZpT$ZPDcd&YKiGNxIrr0aQOKD*x5^KIYgv6p zk4!eHUzM5kz971A=kZzzht8+j{IWW*{363-BtK1 zk~Nay7n#oWUE{{qFAFS3NlT|o(0s&BiGv^OKG1KII&vj9suRy6VOG zL!$Y%She-Om5Gk;-s-#Y?LD*e#LWB&vdRT%Kb0fNJA@vG(9WNJNMK#&9cLJDciN2Z zvt8Epp_+QqkyQOP`yy(d<67&@T2`85!6)nIq=PEO-p_aaVUY~2+lM}!dns43d-?>x zef>_rj8?A-czrfg>*nVKOwM^%o#dpc$4-`AY2o6#9djzA`neMP4$8DDGb*fA$F4{U zk*)34oryXI>3rzZnHH>ZO@$Iw6Aj=oer#yssh^2k{ne2EYJAwlXkh3KJBqv zCf_X`VxIGZ=OsUz^wGw<%CIksZ0$y!VLK3D&Q3q<3fg_e`HHG}v32O#;>xW|ICVI$ zZ$KD{_I-B*%n!DQLq{G#0u_`Id*;QrdA81G9+Zx_SA01WM(B*B`T~lc?Wt2IFKY}H z`2C3SR0)oL>*x0`g*t)1pm=ZiP9m?W8M6lLf2z{*QY)&30X>{2TMy5|z+4Ceo)pVp zqp&mifW+}QmGxyS{n84~N91=XzU4^9ktnO)uD4^?2n6`CFH}vYX|Ub9_>Wi$GOlv=hfeH8bNNEK8ThY^_J^_7vpfX7LYLV>sN@XCoEghkMzxKdQbn^$ma}0@D*$+1G51c@HPLIA)`aD$$ zfSVqcd8~D?0ltS-0P2;CHy1eGrs(fbCjk@@m?(f{>CX2-eyEshpul6#kuePm{dau5 z?L)=I4;Dsoo$cc7RgyP*nH>vAe42DqXcq?yV%lE-9Cv1{)L8iO*DPcR#|bdA;k!S8 zAeDKIligiefLiRbb;XwkAD6t4f<|-vvPo^`aM+01Y}_gN;rhuB4bya`1Q!V8t*3U4 z&F|W?NIF=-NZW!lsW&up%I?Ox{=VH3LW2LpeXUJ-pziEiItV;}^Pw32W}W`RG@$$` z$Dz#daU-Xw`-ba|B^B~(jj!7&D*W2gL$Y&pU9s|6)|5ywsZVm2_<{qlTpv>wUuA0+ z!dpI=tvN{-)gKrH_l373%SV8FQbwqru?yCgxIC;z$~_-Z-F?Q!SOlm^o$&~R`r*c= z`NnI;v7>{-4q#ad-=(i#2>g`GO7rh#-aH%L5C zL|Dnj%Rn#qmFBxLJQs z+&tG-gK6x6+wKCr?TIHd#l#URWO$^w%}!JRvM$1Vw6;{e`bTc1e?M0(HWNr|uh}tK z5xerW0lV(3gAx12^S|l;Ek4mtP%`P>6!p8e|F;96O}{rd&x8Z`CLpsqzh-?`Bi=iq zylMXRlRmO{McQ<4DQ}o}x#ofVtoXH2cE}0iT%wp-dy>m=%=eML_RpJlKu9`(*3e&W z+@?@xY{d+33?oyjyiUZ`NWL4#KxK^_Pra>S6fs2Z@tkDnV&hVAkc`QUqi6yW5tD+q zW0JE`eOx-W+&H4O8zeJM{grJ=0ZNw4`YG^Xia+SvrL|SKfd$s?tvjC!tyyH8oe(9J zfMVO-V4UwlmQOFte7Hb|^^^zT+w-N3#=ywp;BGcDZ3Q-?Y2#Njcj#>Mz7M}rsS4x_ zZ(lJVXK0soQ`KA*0@TOw)`fr>zGjBEa#42@I$hMf=5vSEIDpNdsgHd^`B-oja_o2H zhcK;@s`m2twI&=okD=CV42Gt3VFW+6BmB~YsD~X4k6%E1Z|k*A0q+)XB~YSe8;}X? z!cXL5NSELjTw?@q$4#iQz{&=Ar?hw#h&EryO@ZRGaPrPsr-vsbZkn15vAQ%?d`v%? z6Bt1rqw89FA81erxyjq|)6NgRUQdk6$@eEcV!aBec+*7dnQ5H)+Quq7ysD?jhs_*? z91kcQiAApFgb@}cfb@P-*z44P=s01>C!Z=8-bw{J${uBeZNNXGhyD+e-O|Ed!;S4n z`^FpnZaM0jfyH{7Y2NbIlQl^nU?S2AoBjN9Rz``-!+Hdx&!mzW9q|2e_(gzIH29+6 z2#~&qoafnLT52BJaFO(Ht$zyIUl2L0TX< zAj5F1Q!qT==TtYB`uxsN(Q{}F9INopO`aUz2FDBuqas7GOBs@42^ao4B-h`NydUQH z1pHgS_o+h0_5t{a%#SOqPko0z`AYq8oyWpJyHPx4BFQ2@{US$USWgeab&5KE+V9&P zoQf4?@P6uxPo~#l@w@Z;{v!jRp9XW?dAe^*d+BCm1Q4*_Jv!d{>fr?WRDQBU8dRf; z@AgEY*Loqi`>X&bjxVtI>cU|IZ6VGVAxD*G2!@A_#6{MSw^W;S>2Q`We}XY$t1tmv zCCXng90o5?HF??ArXpEDTe%5p^>s${R%doz1Ocn57ks9zgcbpwxrwxt5qci!h=6n! z6&iFVq+dL7kN%^s*W~Br0R}NF4s2?2_o0n-46RTrJOQo35&_434*N|eB7UmLhG5?k z$edpqxsts$d0^5-d-KM^u7A-qd!9K+nl;>;jc}^$PRF5;?L0Y?->O?%&|YVuh}yX zp-p~icAdJY4FO-i0x{sH1;qa;wHfc<$gFJZJ1XfBf?1LZmQ=YLl;mmYKRd~g0sjC7 z9ZfR8#bA7J3PjE9fLA`O*RXs^JUO{$q5qi+aM|oE(`4>^?nF9HIWbjP?kL)OvcO7t zuqI|x!4)Ve*qt+CNJ%z+0vvyB8OW{fo&$~3Ur4hjv_2#j2ENt_6j0nZJr{&A&lRL z6I@gv3_JpHV}`~M8!%*Ka!nx7E-PmG0EpNb5Ir&`f{xCwt?uz+yic|9DN65woT?*E z{x>w6r4h4P|6UJDwCkELFn`mLnHpy+)qB=(IEu+dFVMLuLc31bV?deo+7xSvPJ`F- zO`u6#^;GHL|L_fBYw4s9WFs|_wOxFfrWC)g`rbkq*RTn0WoHLhEkR>W>dN^)$z|w# z{*UUy_rr{5*QeU3N{%>_AGlIi+8#;wz)o^v=m|ygU8@XjuohMPChzV)2%+Wsvmm-}J~8-otqg1clWh=l=JL zpSB?r+`YoY39~r0dVzmn`p3wLfBlxtqz7jCSuDeI@*D>H$SD{ zEq@Z$&<|@u?Aj5R4?6|TbeoFOLzhi+?8@H=*(W2E8Ud!aawPnIJN$yfu}+ZJKRmm} zW#aA4yg2DfH;M}Uc_5G`wGjUf05Z6l&qvS(c^*gm4cxRmfSZ&78PRtuORG&%qLP@% zD2l%iNVvx@VFUgDEZIN3H~%<^J{XbkJLYHrlWIb2xJBiGU(j&QEUXvWitwZRLY_e5 zGk)&{NDvkntrKdNptCFCdgJO@GUo{Jn!f`LY35=*l+U4SgrfVb<^%Rz-CfRg=;WKS zID+t$+*{Z8R3r~nB6P8ja3eX|0CMW@{+lbYFXHco*@P4~0H{wqa#~HC5SQ*H-Ys=1 z7f z94WgYAu;_s3g=x(`)D+SKOBwId zhmWQ(yi|>8u6W7}^m#xU`gzB0lRnxLsD8>t?rg|1J!_!$6&^%a;>P@i-O?3T7}{YI z0Um?czd};?DI-O5ic^W>B;GmTW^zXYy9g4$7dr|;o`gG3{D=6_%8a^$)MDS1eC+{#8|7Z9WS8m>!)QQ|fTi0t(~K?0voG{nB}Ef%ZoS+e}r%Zz9Z&{Kf)44k0F+U;iut zj#={t`}ebK7o$ei=9^npEW-6}?`al+2~1lxz*H4m9pUVU9hM2WWrWxHe9i#c2v$g5 zI~|j{f0TW~tJJxfXlEX=7|+!f8U$;Wy`OE{ED=U%hy5X&t^Y(Kq@)|#gc!ojSkQc)wquw zH_h#tp)|^ZJf`41JWjt@_pTO%HBykv3J+#L>nIlaVr3T)|0n@-`KouR9kvV<^NYVM&25k>?hE6=^IPl&r6CwKNvQ;JdD)y|W%&+P=?E>*XYhg67B zE1wC?zazy9DO(w0{N(+>qO^BKp(B-IM(VnI9Mx@^oiRS?P(Do@i`UX;E##mHOS4`H zGTjTML8eqoa3mr0SQ$u$Eq9m8n(f-CWRg#Lk0=ADnhWPzp9dg;n#jrFo$gIA_3Z?% z)Q!MKXya|d<7lIJphReR&*pFH*LGs`C@FR6L}ksY;t% zZ-t-_S@u2)MY$xi%>ZMv*eHIRS~B9#i4a*VL29e?RE-%C>B#p*snS=EOH_z3Ph~%Q zWwbM*Jp%+de!r^^mxzZ#M9n4fj9+Y=xL1M@ZgZO8cOmF7U>EY%0-!6TKIfo+pR@Xv zWM6yasv!i8+7xs%35g27ks$oB^!UM`nf>V<`asiI&E5^5s zw{xaKsFwL%I&Mi#3W`UlVNXujI~}t<%#s=aGXPkk>ctR26c=}nMViZSJGO|+6DEg%Yj%wk>Dze9?iLMg~< zuzXd85J!xb8(<21JKo0@y1KIk!=n?lr%#gH=WO}L@*5$9nw-m4U}{da9viWV5o-Kt zms+Kq9pkkG&4NzQprWDOzv!4d^`~{hzb+| zZ3EM1lXK|qXS(A^qW5p;%l{I>5{&0H;Z8i$Ov0wEdO0#Y4Acze`!=sf?UW8lk@~2? zJl)nPa&!xuJYGeBN|LeP_Q;{F4JSRYXyz0<3IhClxuchE_%tQ{;jXPYSp#ZgWkMo^Y`5 z5vqB!fRM-~17DoaMg=?8GQC&l47JDInT#c-04vPm616jZvVjMGOz_h5*IiH64*Rm@ z5;fR&CGl>>I$|{w2f~kEZgZ!|9AUXNH0&a=$o?kJ&BPucq`|Tqiggg8?ka*7Pm?O@ z7YPdOIC+vPu-7{NKA~98k=PdgKtB7@EL|vXMXM8*E(b$IgrIaf!(kW{&#iF5qD-ElQdGiYSxO8lhP_gB!|WyI zR{0gF%Ind)4+^%_S??MK20vd{9Ii4oQ@OXT7`hi9KYDKd@O=%fWLTBJ7Qu(^4N1KZ z>EW03f$AUtBRSD%rvc`^^$}E#$YBDL)pr%z9Ro_0N#FFmRu}6y;!`kXRF$$*x+As` zbcBmM+dSu6G6I$6(c-)Q44D3ja=tzEb4SScrgG=m_3N6GF$5aU_D%E2x>2ITto_um z&NRi@n(E4Cri0Om);01Kk0}}6t9*OrZ*`>wE=59$9a_GAlZdu6bfUEe=Wo6gT}yNKX>gIzIN6VSE!nL<#iiR@$~)M0l<@kkd|q$KSZAz4Dn?{EB`|%f z1a*1WUP)lCw%4DFa3%R-+L=xsfm-Y!+vLo5Weu+Qf?yg2r?3t%b0jlyE+P(d+OC6? z*7N%-OL@m8CV}J*zeT~c`^%I9!dSHFBP)9fmQYc~S8HP@TA8q)IP88Utty$1q=zV- z2vK>T{QfCtGI1QIh=C*yMNDPS^I|mQPn*~`)3)`yAp?w(5ml7r#Mk+pqPf%?@=5_= zOLYl9r+#WeJwSj}(m^jd8uJgn9W=Nhy+5o-k_~Fp_?Sb{~KElXm4;0K|M_23* z10m$@$XiAsvyt<{t1$UzSDxAEK1(lohR5l zP(z0>1U#`y!SIoh298cqQt*6c@F&ETe|i%-_af~j5S@qJe*q!wjj%|?!>AE2-7sa; zFG{A5vf^5}CzHf8)TYtF-b&`OHf0-%7;d=j6>YTsRF0QV|876wIZwAppX#>5529RD ztJ3{y;V8^aSFaduBeZ3^*@n%})}|5S39Zm8!3&fFkL8tSaD8$*HUlgMAgBFd{_heM zij>Z#fCj8N%A<(ed3{w}$ygtoHmsj|hj$D(8fBRu7JKp#Tq)z9kDASgzU}+QTg2E% z&Gb}473HWsvLobxYa`!5SgYiMlcnjcVM@6DVQ@=1Jz4w=((3$u<61o%mwgqE!)>{*oL8fb!lok}lIg0i!j@ zpj0CI2RU&(%_QH(fckH@GEy0)8&%oJ00cSOyr-21anf(t#?;2C!0y~ zULqDlhtdtey%mrmyMA^I5ysIjSDQX&=OgOM3jBgUlhIY^JzdS!m1BtXJ0Cr2=HC8c zs=%W~;Be4_{6Sh3j(M!3gWA9ws}w2eyKW_5q2zS_`H*m?)dAyk%$JWN<-l%i7)r{) z$?Co~s1O6ZN@XgUwF~rmRz!UlF^%dd#Vvv(L(;@Kpn{Yx|2lqI zw#5$@{ldTOedphOH3CH-MYmVEjmi#FZ3YNYcj1D$=%tIZH?A*M30!;T+gy$%jQtwO zQJsUd?M*9{X?AUBTqmL|UEMtiD0Ya#URdyreMSc+<8_ePYGA-@uYCPf##ufdOxqbg z;l4+uankJ~q3Px?YYjwy+E`5HvCJ3UEf|VZ-zwumALr9egzq!I?mIwE{sJy+o85M~;r%CBLG-6DXd-hyYjBtIj!^a-gPB ztVFt;H-b@1lIWA=-eN3^Qu@eU4t%N}&=3{|toG?2LBrp~^Qb5k?K%bzOIhP7Od8e! z6c{azNj}ZNV9kq2V>Q=VrNrFlfAWZz|H~NA6T8@c1=4aVak}F9j*BpiX1BDXL!3a0 zjL4zPs_khMRu(hKMx1#y?YiH~hIR>XD7ctShj!f?g+hN>nPZDnTKtD47bER;CJA); z&2-7zNmYNHzE=yWcG3T`yCo{U&lr*_C!GtbO=mIX`eY@FXd-W@EV9HWhUaE$6~RgOm^sISci6DsX{FjG?5vhC+1 zPoiR7Y1g$u9QcyJC&JnvWFTzmh^a%cRrTf<2_+6m6|36&2zm9GWxR9hC7H3$^-ST$ z0ye~;pUZ_4!XQI1j;E-X`D!U)1;X3E<3=%s4-5w(`YXfsD<1qzEi9mCn@o;|UH0@( z2^vp5Z7h)@jRy0WWTZ@ZqEk7j{vhv4C;|ABXZkPfl?;sC=JJgGHU8%Ir`gE|G^r-n z@3m8lJ}g#|ap!-;F^W1)%ddvPU6_Y{Lyho-4S8@n!4ZL9B#|$T(xL)(H(i^Ch~G#W z3=Cg#Fe@;$TB}&~8nR$R)v*C%USGO^6q7!9v5EOP#`dDF{m%4z8a70DH8%`ph8#3&i*ukpYG#^Rl+E`972Lg0 zvg3pHGam4>v@t}Y1MmzgJXc4rfrCN6HX|X}8kjE?4d0tKTY zIx^^Mxpmt#JHH0MfXOR*{z^+w(?0VNRrbzQ;SA@rDtz$hlSEjza2j@DQt-ujn2_Ao z!+-~Mm&<+746%rLZ4pYlI8>YlJa_RatQAS@0=BIEB62S{&|IS~91QuYR*?VeriJzE zpomHsRKqFH)ErYkIf-b?g~v$Vm*J6ezUcrDo1 zGxwlj5wp)MheEzD*aYj^+#C1KhOBpgQ!ib>7_0^{AIMjy;C|$=ohI^jVw5Kzz#^X*u{ugNe`!gQy2Yh**60Zx;JCz?7Mtu%}uHzo= z0;Pn{Q6O&*XiBeHy}@j%lV1^(w#zxzVaU4^rf_ds=E*n=a0rgsDE{Z2vz4%fKYd86 zf#KVHUzPy0F^UnJOzC}MLG#J*2)IPNIA%TYjl;**j-jVuBLjl-F$4d(HYGAn)Z5!2 zbexJ9pFoF-f1^2PiWkvl1CB!|ol2N0*vS0%)<-Jc#BwC7gZ#D~jD96|Jvwc!Guf*x zZt{{d(MQ*cbOdIB&ql^%M74)O))X)uby^!#lS&dcFZRbAu-Qya7T8C#p-E49CL&-_ zN?@b93(tz1Fji^u7YzQ-UQM=#PsC4$OpkH`fLG^by#u?Y8{sNygkQScK`fl4H7}~5 zYIX4EOU=ZkMt1BA_`4uqV;O;eCk+n2&RyPjZdntN;L@n`B@*?cp>gXI^4!sAW23`c z{S%v^y0iu16!VFwS{wFDHg>Y+GX&B16IbzlDpRZx6)o3pW$^;)^5lu%(o17L6Trk< zeL{uQj}i3?;>D(c?fp&G*OlT!GAkPNMN<%ER3j5hUM34yW#^P~dRS{D8k4{oOom2Y zH^jUvABscr`j15^tz(rI8vxVBCQpMv%5sCIF*8Pi0SyALpPZ)&yuUdkPSSm%6nU*Y3G>g7 zAp2>%d*-fhZ^ zJRaoJ`TpCCD;M6?y3OSKZ?Cd_>05$Ng%v>SJs$(q7!xu{M0_l?Dsv@`21{5+856)3 z{#TB-@wHg2?^(<-B_zDS*$Z7Hr>n8P3{JW@E<_dvF@z%GCQZAi3qK^36@TmLJ#`MO z-|vqaKO0zA$18mTxls0n9dWFm&Nn{eh$wV+7;B5(0ON&J_)HOnw^qn$BdO7mw?gA| zu%TAx`wd62U@vI~Ei4<$G=^AcJ0sp^JM}hQ90Kyci{a2B33?l^`JV_`H90N!75~z0V!FaEw_u=M{NsYBJ?Q#$TP)f&;PSE2Y!3pXRzb_ee6aEcZY&1ejfIb zV9Q63PHH?%{OBO|&*Z_UI26F~L=QRwHQWkpcO zKNpZzdemX%qjBzah!EuS&A@6OH|b+h4I{wr(J4OC`+$`z^qxqq!3&Fr~HJs1x%lu>);9WG&z1u_2V3UD|!fiXt<`MC7*X>_>_3y@B{=R+v}KeyI)@Qm2JSRVpy&;hw>a^R z6?xjiU;-pYoQNwjp__H%DYV%Pr{s>#P-8eS9i*B6&w-mQjs`YMW%NU&S3eigXz&@U z#IXZIzGU=vkE4$hokpprpR7FyZ>KvBpY4#l{Acy|ZixOXL8?x8csvALFt6y=qsP%S zcz`U8>b-y7vAhqTZg=)sv`aKGtA1e^p~~?~DhP0xZ`a{D--SnkIHgl%7oW5+-Gx!; zB5vl882?L(|L4{zVOnDdkY0;`XE}8#zD-{3?VlC+a~gn4nh>Z093L={Mxf~Bt%xXN zT(H41aOsoh+ESwkM_=VB77LSJ#F%Kh`cSRD#f_4!uC7+sHZ(N6u5B<6ZXbjH4!(as z5)l-9Y?lZabKi~A~1Nx`+s?U3>Fw%)7Ny_c7X#T zGatAYU14x`_B5wuF71P}U(xgwBy(3Q?;*ASC<3qHD+pjTWbT_D!b&ZMVs5$(-|gvx8+4ZiZNO6Ls8*W1kAI{))jmC)gw!BRtM zA~&?5e5RT?eu|UmHO_ZqIt-pfi{R?u6?j*9+VX~zk}K*sI=-G+`UnIm`8d7-5i&q+xF`9&j7~mQ4JUV@av4jRJ zn!i6(^$>(R{(#oi0mmHkqBFE@^#HgA%#^%ioJ8M`2iskNbz#JK{_v*W_Tx*2h7E2cNo2^w!nKRDxuOgtaykbbI2q>QESM!4&XyR zNNRkOP95IWnuMByj!&1FjFv0(*}^7OgL_ZlOGFr8CB3kt+EB}g!)Pk8WArW)DYrR%%>B`3 zWp?`^z$i5-T*Qo$`fnqVzu&{S&w>Ou*eyc(@luNSr@1bL7NHS@ixM#PJX%SekkGz^ zUDPN_Ik^kEK<_?%GH{(Rg!xor--}GYH3>P734RzmyUd;xfzvmd15;MV8gwANj9n?> z3;-M1ELeQ1V;CitEce#GS;LT(_CWmkEIi7upS$>l*6i~C?lx%-*WFEfx%+?LOzx3o z0=Qc1s(dwHGpBN(cBf!%w+Z?%vps27R3fN_zSp>q7>6j%Q5`X{vZUQ-V!EkWZZQjz#Z~}z#ln|q*3is@$TEpnhT;*{To{{6R z@<0nRI(p>){eg2Ag~@rnO*+W>Qb5vr`MvLPvuI1J2paZO7*ip?WB2+;8ey{K!H`0= z)7Pubmj~Q$kU5Rk>O&ejzbvQlFNh=CY+VrzqBe8^GA+l9q-SVhlZC&FVY*_k%-k>k z%QqC|3Q`X#Pjb@In1~7UkNy1a;Lv}TzoZFXt1J|^)yuT~BtrB`X3=MLzj7a3&d#DJ zr;P2;Fu+Q9apl?PTS6Cnfcz)0lWC7KedckSzW4-2x?A@cfd<0>m(QOm@nY~&&yyMt=a4mQ+ct|re&Mi4rTBG{;1XSK?jvo zX&RyAJ@qk9;{!!2>ZwOl3+j zXAP3QKB9>prX~F+45G-GLr{zjHt!;HLUh7iBQNiB1YdG1#2Z6x!(O%#^%gr?>!vDn z6L7NYiA24q^0)H|!%2jG%;57&0)@*zr=~W3^#s7G%%(+W$@>_0DLOklvs6Uh+=(E7 zs|qi`Vw=JiICZJreBO>F#?`MuD6Nnxw{%105T>CTb}#w z)%g_O(#!H>cm^>#|z2`D!n20d=9Npg=GaAj8=iIVz z!6(_4&gZeQWCoH^o)Ew2y*m2Ru!XLVXfo3)O2?J0 z1P5V|V3f_TX1egFgX>!g*WNupg~p;xN=)^E4VQl^&w&iN zu*F!^o%MAO!&{HkJ+vl|$drmaq2;gbSB=aznhbrFC-%ZmGzW>0QlA`nJSX{uj%fe9 zdL&L+&p8qf1`A6|l4oU3$VUDDy#8`$M{&49m1z%{_SwRi4CP_zfSh+KKcTTY2S@Ah z^^RX?EmR2vx)(f69BxWoO-kshk8_>z{V4)zNm+$@IUxX*cc`C#&5G3JZ5m`TCprb{ zRsNsLgG>F(30ElUDs_ACBFr;$Lq3%ZSg`4(a%e9loiX5R;pztpX35+5)5o4D0Uv4( zpcCz3M?}AlBohXai%DeqlnHg;rnNhZT)tOd@?IMk0_miO&eck8El@pGcD3D}El^4_}OrC~QDrEq2)lW6}D z1i0$%?^X9cQ@oLQ-l)<}tI9tJ4qH5X@OezS62$D;#gBQ&x!Uza?sbV}gEZm(hRS=o zS~wNnKrDV6usc11m&^D-W<|t$)IaiD6}3$Zy*wNy7z`;&*xPQOAxjKSz(g|&6}>h8 zy*g%ov;XPO8My#`e~jKY%vt^aT@LY_D2BSn61UFX6pgnzj<#?qCsTjnQ+fLn-w9~3 ze)^eBG<;lXpLnSdoV$|Y;hF_;_VW;(KJL1EYrECw^!N3ibqK%oG&D44^s<8mEo#@( zY~LOr{J|>U7!LB~p&j+^!1o-_r(I`arQ;XjmSYmg?YG4(f?UkSSZ4iiH7+ic z5sM_qE*Zh2-u8etuTE);(vKK8?FkAP@0Afwh&^eb^QZ&PiM=zj=Xr@v(r_EY(N_m6nPQ%z|3X{n7PFOldTI!BvbN6^AozNK+XkzTFQ$_J~=R@u!$=g?p znVW)+t`8k{r-?i>TKL67CrW`oWAL1R*ST}K;`TGt(vxjCh==5^B=JOTQ9Td)mvP;P z6U6r%;#^b{&)UyL#7$dxy{R6uM-xa( zPs6bwp5*vD3?r$9(vs)D@ql2H%&#wbn>+4r8UgZH!xNtn|5xY<@)(zx9nJ{5w9t0NS#$LKX$cw|bS|CI^6O-JrQO?u z0{pk0bKcT2mmi{N)h?YwiSB2RgVd!sF!0uF_-qc83LU8ILtL5FN}Wt9RxFDreM`q3 zhVI49{nu28N0sLO-y_kajWPDSqRx~Zq)Sv=I!g4B&gDVlv}I1|5r@s!TP6MpG~ zCsH`)iz`s+uJ|{48`E23DL#o8&MoXsge=$^IJ-nul)>)cbASI3@15LiXe~7MkJu>n zZz;!#kc?vmQS>+64ldq59%4mmo0QKHHT zfF-|%e9nFKrjpxlz{{`1y26~6XxXL4)tGWc+=W}$DYcz!rrBKye0?iG2RBSZs5*G# z)-7vHZaiH0Ao)+Er>8fE4K5eeQs-)-S+p2^NZnE$>$`UGCyD=FS{#c##lff0pQe|zcUju%5zoSVD!eP7xg z?P7aBx0SrZH1dPTs4|zvcp>QMfafr~{oXw*MiEVtCZu7yNq#OB)6RNbVPRp5-|^6w z$wfQVW>w7*c`lE8+5=xrJY)4`%DF)gUFl6Mz;;g&v%LcUoll%nJ}s`DBjz#J!Ae+m zH=N~uHAC~dCRb>q<>^Ja&Y>2R)I67wkM(b?n6|W7*(DdK_pnbS3Dvmo(Y|(T>SPBIA826NhwqG2(+OZC_e#wuu4`>wtH%tEoNDwyONUr z(eLDm;@t}tEI3`7cm&Wu@+4FE&dm=WKYr9qlvv<0xAjx)ms(!=0j>%W`fdbzf zUNM{T{y~&tjm514KMI7dE-riol^_m7lQt#);6F?h`tk9vSGgB^E1;my<^#x2;c_?h zE8UiYO08^DJv<9n#A6zub8}+N8shPgPGqol^hccD8w|#m0{iZU{x>=I28Q9^1|?=-FPFcAAb8Z zEnI~ala0&0lWRDJVG+b{rH4g2M zETbz~Ct1eIEmcRIOT=G8FJ2EGj_k}$0uX-$&~W_=%i^kkK!9U}L(S<7uwK)tCL!M~ zvs#{!kz15cmaFN4mm)c7!jiZSAW6-^qW0AMc{uxuGX+IB7UoHn1BKI*j<{%@Rf}8v zfLDK4h2HC?=bQg~YMv;wp(f>x-bZZ;*lrS|!nK3mqs}LgO>`jKwZ6W5MAi1(=j@DG z>T88(ZrqJUyo|&on+_U269z&JN$7>Oh$S}N#$4m@t*ib@;JLn+hTYC?fYEJtBknRD z8-T#x`|bg~Tl&FMQl2?gGyoBmxO;9z?(CQYoja1Q=j`HHtg7W4?l@2Thnf72I5S) z3sC1QI~0fh&F{1CZOL1_>?*%H%BJ9uki7b+^#v~4ti@GNTV9-%m0!n3NOyWQl~=M9RqeNr^uac7-fDI)8J$DMWQ-aoO^t5Oo>%j0G43X$H=&K&6VTzLDZasTCitC%{DCSSN)db zz!y3(p4PuLJFYu3M77APQLa?j047F9hfVzxPr~sYUy2-)T~bVj=!%HR1qim4xK@8w zv!m&FHE2K}_tf*toNLhaZf&kTz4lS3p;Wpa-x;I*_Hi~Qs1~Peo>e71nf~XLC5qet zJ^9F}rJ~$U>f69T9FmQ>N^?(XKb<)9blFScC%(dxU(wlI$Ocf%af68#2mmLKEB2nV z=Y3}Da>Vd0s?syCLqOzg#!Il~1=eG!0fz(^Hy;Vg!E<8|%h1o!!K}Aomkuhq&!e^) zJ;ly6HJ2MjYhlz__qtdfuY_B~67w}Kxom2*In29v!%zVY_kZ$Sv&%bV^NY6Zh(KT)=YE~CS8DXd zvr+72(_0Rq@MLbGqBlQ_-E;jA?ei+;iw<79YW~CJ7&ms$QW-yAxS_-XU~G5uPoqcr zWZEG#WN|>>x<^202_o2#S`I?fteL_RU|{t+n+bh5DbE!5H+*+r;H$)CBh1`z3~&As zc&$a7XT{?uFN(dU_P}vw>=z58nO7gwl z#b_0yt2@zQ^QtszJs4^+HZruhnY_9il=znU;s@0&=X{iJE9v*#n_7{;TFi!@El;y5 zF3Mu*4M?;5<=@%5@iDk(iT3SmP$VPpcHE+nu;8AJKsrnzAfSEqw|v>q-6$CNX4z%Y z>B4iB?^*meGg&D4jgRh0ew{GKjJNdX*|s)|Guy_NQyb>6zJ;jSU$pBJ20OSXRz!Aq z8pxJ$=J`jeb6r1u{HW{{BqVQqTgCU{8q|v}1vKTGUFmO}8I+se9{eRgZ79Q{7c64b zK}@c@=YO}bQ>sjbA~oc4b8oJ+I0nwn120-wZ5ikJ1SGYX&6Q@J0)f{`{x?s@v7Yi} z@@ma2wCfh?^=kc4H}#6klqdnpJM))@jV*fY)hep-OJ^nl(*=P zh71$Ui1ERr36?u?BfZt_%jrtOT2!2ZM9X*up3hr0ng92pNvjTw4sJCKe1nvt6s2X< z<*AYAlX+}Sw+c&KB&!T11CUz$&Mnh&E6y$&I%(G08vM@XTH$tIM&K&)gC>+R`%ZHv zz9(lVGFf!yh|01GR`K$)AA>cNbY&>n>au%2SY#VVKWv&>J(ea-p*rjeRw>0FU_G51$0wBj% zQmk#}N>vJUMx+f$0TuYtZ`{18X0D%Pzwx?vysx3xZZdO>Hn5ljxHVI-Gsf68cw+n} zT(@Q!RnL7yixjykdD$kRqnRHh7>7Mwuf$)6t18KDvQuJ=1xV}m`Q zhSx8yRz-SG5{JxHzLsZC+m`$pCa6q`RIJksJb7oW1^Ac}w-3<-tSD*U&}9=rs;xvv z5%`qVf-M=nNglOHR=6ufeeSOsoS*?suF6`|EKik*9&W`C?IJz4{eVeOGsNE@wvZ6hHe!F}M$(ifx} zdlcN+f8HG5DSlPWA?)nE3;6j<9Mz&3rUKrl z+lU^$LoWN18r{j1)#tyCS^QiLh&~BCP7Y%LB(UVMf5f~3ueebQ?o4hU)9on!$ag%r zzm@E1&f_`8EVP{;JkYrp{NSh7Ygrd3Y6BG=>(gD6HdQ>DYg-xq0}Xt#33L z^JdG4zNrYiGC@B~7pwXeHz&NSCf;m-Q}7QSOR4*+OPAcE6rJ-tQTB%=?1^W7sEK!^ zNGeM*_+y?)?u=DtF~>1vC}9kWot>KX)3Os^PGo_tY_$6mxj?D*oB8c> z9BC8zv5)(~xq85z7!Phna-Xy12}1UghY4# zXV?5VuYp{fGx}?`%}7yqv<52adaj1AV%&yes04?#VqD#ZyWw~8&X@aluU2!tb)8Xi z5&Xf#*F*_Cq2B3~;$)EImwgoly9Ft2UU5gk!(+=+Ui+uUr34jh1=X^iU5+hNWBa7t z6xmF%;PKIGq#it4J9EDCq9?zvaif)g5k4lh@N!;qy^UP#*+j&jYT+~g@am?2UECgx zkXQ@qWrYF6;A-*HnpGSemVEW_ivNA=^#nJsfM2n4ms~gmDf`Q-q;7@`=P6mnab9W3 z_AJ$7V!T+68;n$)Uhs))q;jw5w)^vlIV(V^onO!EyGNYs)d`E^Gp!UXqI)#N^cT>3 zj^4;O9)Dk~auF&yP-=Ys#(!eD+^a8idn>scP=TR0 z^IjD5oZZIRv}x07$uzTxRpb)!$eZqzQ7`q8u!^AmP4_Nu_FF;GhuLLhcHEuBf63xF zR@Ky~@m~BH+J+F9LyhTV^vi!JML+iARG$)*(W}9RN=eGdk^32|6 zrrW0D5}Lcj|M{efvY*IT#GB{OIS>+33#IAG2j%1gvYe*`t*rQcmcRn>G%Wq!z11Lx zyD*L&^6i6FuFK70q5S5%QG66dZt!%G#UKSdDSZ*vpSb9fr-*;*lHc3=k)j9i3GJgk z&$dlD%r9X-_@qb7X3~k#1XR-H{)vMU-)Cm;X+$VThkInTF4e0QRIJXuhepqBY`<8tOgl3=mj$N8(7rEYpcYeqigbu? zMpIRtLO52gsCcg`!d`CwYI;F3O{PX}0CNCFAN_jqtQo-M`^d#v#18@z#eHAUr;JB* z;%9&qqdedFy4Dik>D<4!0BYG^TQ zbhfIbMcmr%NA2&pn?mK8UIQKZBBV-!*&H+e!Pus=|CL$d-UeahA(JX*DI_X4-UH>-g=?4|tz0(4V`zMq1&>rcavRBpZ6;Auit zwobD95jP^kUYf`%;~SW;*f?am0{}%sjHR?F^4_a6JJLi>_N0@x;sKVxI$0aAek{&Q$j-l zqE7_a7o6h`6J7M-8Wv}F}Ld}se z+4F8WX+rrfH=nI4FnqPXV6ndJO{0hj7hgZ-1@npVpR;$ftSnxEpR_;upqrj>EUuL_ z5#eC!f{w}U?EWhKrLs%*F0V~eQzZ2Y1E4jWFA@F=(554$56LXvl>~ijJn)QW97~Qb z^!Jz}&q0x-(k40M&>Wrj@i!k|wk7p*&9LQ7-131#%7tDq!gC(scxv;Y{d!2qdl^D^_mZNKnd=J&xBO+%L%gG!_O?w9ozVFe_*R8kEeN5XV=H5B~RR;hIm|cr+C%YS5Wzw0KphdPp z_sBcf+n9YpXinf|24A$l@FGt|t}KyY^f6hZV7rn24o%cNhZAXWOw%v?pm|GI&I1y) z@K2w>llu1;MiBSPxnEEE$sazDL*|_unwu=C4Jv+JbyB3tw9C#`ww~#gzV*f+D)Is> zz>fnZ_A_Uhr~c7T!gyV3eC{^M8$vIq`WRSI^%VWSwCld*bxwclu|jEppNp3rac}UK zrlLi39O|1Db_;|X9uqLEG0lwC@|bS7BRPf^y^8WpHg4GprsRp!t!KB*u=)-dnEQGl zhwJ}Nt09WG>5u!-Y27O@%klg0Oq(I+vU|akO`>)*D0zxd@#CTO9XoM?KesiGgLyuA zFGVj&ace_!bMuiqCG-BArT_jZyji%Fx1ZY6{NvN}6%g8#u+!<9>B-NLv}n^z*c*JF zis=p(31g?1%N5y!uf{OVdVsKhx3PZ96@&Lyf9g5;XD5wH=8p^JCu+f9ektDGKwq%O zGEsyz=9uqvjmGXoUr4Jv@aq+_TfSi_`P@4L>{JF4O>SnzEo{CDBLL-$GTAU}PRjB0XUL)9JOUxQSfAgYVSzmYx z^XB+E=}+DMqpv1udb{7`Q#m&ZVO-YZ=1i=pnfG^2`} zDGEIM)@+qL35x6yY=;l)>+7pyuXV1)z9{AM`k*EpvtCw<{UnsM8XqH3NS(_t@N*od zfz$WBaBikP$b|rpLY6R&tW6W;Js9Tcabi6g->FGpo>o`utvg$~01AvOaIEm1q5F zWz$GMH{%Y;|9uzW$-nokxw(Jnvhxe=ihp3Bx^B+u(5Gt4=WHBDJjErtk)Cc%A)91 zwSYNQUtds1(drQW^UAqYIKxLMStY&8=Kkz7K2{${D;k`SF%k@)?8|`KbNKl4RX}Vs z=Y*Sx1jIkfwk`DVjWDTQkN+c7Q5c4Y4xz`FR;FA;(b7SQr_VOHUUz1RgQ${aW-qPd zrhoN|9s8dIQBgY9eH%uIap3oIIYBM1+d+u=N_6(^e_Kccp^z)S8h>JU+9My(Q8L!ka6C+^)q}-%|E0)Dd{`EeOf?IJwr+GCN6Hbicsw( z^p`gphk0N6ea}sSYtjlR|0cabRi7(Z@R1SzheKaEIAZYvGhT{<<{T~=VRvDDVPS|3 znoIV6h{I|UYd97k*oQrb3^i@b%uUwRE~PlEY}=1%Jt3AXfW~WC=C^PinC(45K`B5d zX=2fWw;Phw5(#cYqNkwuc3bAkKvQ5*KQP`>xCVtdFThLr{IjQ3`_I5y!AD48QLY;% z9wkfJ7*ekAltVdaukaW9fheZEH8VYU;*$YAHJJkpc8M53vCy(aA+y6^4(mq`^c-?k zpush#a?dyALAGi)>(dw8q9-jt@bcZ#xWMcar=-qM4szJ3ouG@Z_39qO*YLWTf|-$H6fjV^^^{pdbvT+SB*Odeu%4=ONEsU_jN$syo9s{ z(dLQd6+ZhZ>m%Ha@kra|9T>)(S}at=+jU4KH63qLFLe^Ha%u!5YA`G_roO!WylL5F zE)frzF0nuZIAPHEK?cq}7lSpnoA@B%NckC}8~DX^ue9hhOm8zi#Ig#cxGN_s#kIFA zAHGRT^KE%PG<@~J!PQDdxaw_5;OA80ig9sqIpa+~szTc7OA*c;KD{{PcGfZCtHiaa zb}?)h?f$LQs*~&g(gG|+I)hJE(L#W1y;MB<(Mic&mkk)2tNQ?0#3@YBcKj>{le44F z480+FgMF58)(!r$3WXV$a~}AiOk@`=;-u^n1!8>zOn(fh67k&y&wpEa5d7AA&5ewV znj%L|9t?#q#a)70GxH5_RKm4`N+f%E(eC^*6KW+OV)5e}A-&h}u6-8E?b_FI=RmgV z^X*rx_|_J(Lj<0RMBIwZnYvGto(paQ3Kzwgh zTMA&4ptRC#!@#wM)@uL*dY-U*gRzHLeXHcK<($ot6L-UYsfZ{nj{^{$CsmCD#p5Gz z)kQ$fTJNPOb30|{D))a;GdvDkdoV;_L^*)kU!Zs8XQ1&(!X>3}08)6Qc45}AOUQ07 zO*%zhJ8OvV!SPuuEl$eKx7c92hu5lLaqFoNfHeCV(2I>KisOfxa z3d*oy(SRr(sCx>l@G7}qytQ4Q<@ptEk>u+y(TzNSR7__7{{0DFRPNcwChn}S_@7}5 z$FDVR_;cGrcC-e;wiA3z-5l(4BFnkR^7%|Txj&rZ)aNQ0PqFS9?rmO4pbi3y+80iI zkD>{fvrOHm*>=Nu!o#IhlW}N<{F{4O{`#U?#%%a%t#0o9_yHH3>p%rl?*8muI7cRi zvM)i&YHf>_&e1#ae0KjwbAY`OE2Z+o@u~DQY7Q-->qm3Yj+Aj>Ihivbr7g^^tkS*D zB-}h$E^F|UYbmXK#!sjH2Pg-kIG__Q5*PURPk&q;c!GI09$<|5lIA4VdT%IE!F)P# zelUHuV85b}+}X-_)*$^sjLdpW7mC|zlj1?&*5b&^e%%L(Y8$|&eXhN-DX;d$OLJZv z94qIeV#sWNtG9tnDLkB?i z0xAbe$u>&r?*Cq_nHZ~g3!LwcxwqkL)}id|X}j!ip#WH?TGxdZaS?@?uqVnAx}l>3 z9T&ymSX9IB1w;nOCeWhI+%bFDUsz6`6&z5uJ}%e@Hl+|^t_r2RQ5l=UJH*R+prJ9N2XspHrSDNPU9C}wRq&Yov4-}ri`mu_#Yw|9xO!-=EYq@F~eVNF# z6^-=j5>2ieK4sf;E>H*yb-KS8N+0;+($tdX+R13Yw)LoUlFsSfu>;e#t@S&3Gw_SE z+NRq#6pwuP4~OF288BEE#gp!|mhMP#F3iPBW}*DcQTTlmF?Xf6UeZG0LbxN%HhrEcH=$6;=Z?n>eX_)yRryd%<5ezvw7P#efdP0YwGeRc}MJ=MmG0woX&|&*- z9zMqj1=ho5HR@R-X-}mqj5dob0;NlQZt$(uYo3I^dc#K@rQ9{y!yvsyL7!bIcxyGO zShVN8#lv%@zF`q7aK6oby}{~M&CnI-DC}HAPPCQ;8=Nj|rLIZKEMe8F`*_>r&O_dt zfDw41TgUfbx@mi~cP)lAo`iNN**c2_LZ)~(rOrjv8y#uzt`PJ8!0)(v9cZ&op6_4A zW5mckT?YTVcmWKQ$E#7Kqb*o_pBSV*ojyyQZ^Bd=Av;V>m$*-CSD<@mFj0k)Y0*I~ z`6Kho-rR?fccn3!@Gs_fpp!pF+NgPGbnrdDj^@?vHN12gy?lG6qOQ<$Lz-NcA3%wJ z?T{F)TS0~2-2OxZVi(^B_1&hpjS?|$$f)omm=A<;j{VRmJL8u0V)MGAlTq)CMYgQ~ zyM10~G|OE**tz(}yfpd#{VeiKsiHug zUB$R~9%x(>*99e^_V+#TIc#d*Qo_BQBZ&JD&)PF7^)1}mB*2h`%MSuOK^!9F6^+#=wFsWG|K9%MqSQsPG&-c2< zf9x%z$s}5^_G$~hG_~zL`g`7uhfpINXmPU~K<=>*>CIX-dbvoME6FR;MNhAA@J+)h zkG3iI-ng|p{ATufYt>8z?QLxv-DZ6IM;?x)0q(<&wz&HBa_w2uC;=;xf`Wo|uGNVI zb8zMY&*UFWdxh$e_k0ncx^jOWcOv=P&M_f!Hc6ZSq|P(A*y?-yw712sR?EotD-}Gf zJ^sR#mT}DurnAAzw1ZZgUr*Nd`diSoy zv*zc0N%izT~vIP2BMeqNcg80>RtozNrLA3na1~m!y#iK>Soi%nYLAgd~gPQ$n}|a;BFNecv?hoXb#ne zn4g-blz1(!W<--s+{=JY&{^4ARtkzn13#?GFo)|{w0~M}UE20)<&S9^cg#i&rp8sK zJD>Z)xjr2Q2zP=-mPjoS*gwCRg$r`$x9UH(pt9#^VZrov_`=lt+0v1Jzv5WPNPZc{ zVIM$=iGnJq7R*nKzwxH?ZqzM^ZiGu~3TT~BX5B5sW$yIRI!v&c`h@hww{~j%x0p|b z1$UOQB%VD1o8&09e{?oK^NlauF+S_X0@W2yoFsHp2o=<23XnuCi;EjyJ=aEnhqb~l z{-J0sFk9yZ2Vt|9uy`XHs^$%G{$ls&#{WF=Y;2uE>vJg&jNx+U9~EYJQCg|86+3m= zkTtBq=U#%`f1gs++!_q>2%ha|CWa^v97X}k%jle&hRf`!+?=}=)5^e3Vo`k(j!!L$ zs}yBo){F@F7T~~j@^Uy%;rEb^OYZ*kDL1e;xV#7=tSKWH%%OTu>rXaEO{_S( zsqqTt2|$g%!W0^dEEyoI8>A^J;{ToVm1{*rfMD=BafVbxTgx@z{Fqe4GUX`$0<$|1K@9BwGaQ7?VaHim6zEW2X?amtVaD?&U z#;JChNV~nG@a;-BoYLg{eaC02HQDaI`3tpi$Fq@)nERfyJxm=|cYCRm)XpE=mB~Ye zs-^3gCNy6wTk!91!mkB`=q#(D;e*l~V%;Z5m)wvrjU|`JvLf_zXVeN;htSO3ZN=u8 zwJ%dec>_}lFdIm+D7!2{)@bIiV~_suZ<>4WvP8|Y&0G1?C-@MGSfA^FbtEezs-AE8 z3+dp}-zki_dy&MXz@qGZ|0nU9Kc~}S8Lqn+l*J$YEs8JiuD@65M?UKN6XC~%=j2x$ zQN{2zzPb~s6ZP_mABv$5-_S6)DxsO$mmU^VvUJ~b12jSn_$i*Y$~PyGqjNIKm?mr1 ziBpkaw14mR)}r&!@0zp^?w$Gfk1YKK*tA(c5 z;4;xg_u&qL$EMh;v4JwMdmkL+qbE54ZDe4uzZ)J#JaCf>^m^K$5a$`+RD6XUFR9S4 zoJ093m9Zo;0FmMnE>G|oS_!PrW7>(%Z-rJ$mY>IJrEDEP%7p)48BP8frsGN3w}pSc zq%wt~U9Sy*#AB}B+G@h9tOb{Hl)OnrrjYFa9hjmxKvW;$MYPx(RYB+-W+uKqS?@#b zUl^8g4CKUSIi5l2BH&g%I zj!WnD8ld?Q(KvF+vQzxjlSIb^)%zIv=P=!2ydkQUkKRj>){3xl4W}X|;@R0cH+^3$ zJfJXz;s;bPq%?^mzoqBnY>KN^<2ugu}AYi0o_IXZ%n>=6BZ4=xx8`xpsO(9JoMy?5|$Gwu#wx{}3HZ zkWliJV1>QxY1%#Y-e=Ir@*|KI4n7_lKQA7Y^3RVUiYT$dCP?691b*(6miYogU@AQ$ zWdduBbV+O%zHkp>`5J`-(d6C||V&ias}y=2OPmYPhdT?eC%9plG|Mz@8FcXo!r zS}bcASXU<*J~X>O2Bhz4V~P=kf>JB8bx?pZIkMm_KRNZ8HdOGcDPT*iQT0o=Gp_Dc z=bFn|3c2_y+pt+!bw&kf!Q^qC0HagI{@*|T$dkImTSs>D^uP#3D0=QS!Xvvz zyIC5WJ-(Z&PF`@@$B|JBcC^18oQ446dk3|!FNwfCxqa5 zfRe{pRe1>@ra|Z6`#b;65iwK z)c%-~;>H?~m}(4ciWujJtp@Mr_4Y zoREZ8rN4q^rY#i`XEKcbD^XW$%FnAJl^R*>Vi&G`!0el-9Hw@iZBf%`;dl*&Ujxoh zL>2o^!MOo9Y7@_>rpQDAA7kYWZhE23XbJSuoh_lzwdNBz$Iv$(BZTzX# z(5~luGv6!B4O_J30UvwO7Jz|cho+zmvzkWeR4Bp8=)IOu(3N++x?wPKu9#aKuGF!wg{{A66xpcUml z&;F+AIVgYJ0;)FnH|J^nDGPfVub0{xG2cpfu~Cb}1D$*sls zrj^n+(T=Nr=yHEvq6~S7SzDiN&DnjNIe?EO`H=MV$BJ2B?)u{Uvd6!Xi-<$QQuC$L z@w~Gm=tiNVcH86zcE5L13GFgugPK-f@&&bGeD6Cja328M?%p2ha3e%t&hN+{GJ%y7 z<6``8J+G#GcUwJv_RqU8q7Ary#MvYENoOSvrO14d(G*;}r6`QVjwwYSt@~EVS>C^P zGVoXfcQ0i{^|{~1)0U@k9Zd6MbOu`6_PE9|jP9B(=wvQDC*Zlt{a}r-wK{}U3EDiu zH!p|XOBI_^65!ewnMS7_r&PRBV1C}{9~87+W^df^q3E~h_SWDYn!5b~&@Z86R=757 z{TD0hlT9hYO_AIFrJO~z#JPA`F}fMwkNkN%;Y2i%-{|@y+BE=Xbw|;Q#MOnoNnV{! zO+v^&@EXvI4;8wWtMJhxK$2LW(#S1t6kgO-aA3Fn`1WP)w}S_pyuP6UeZ+pPQ{ZT?tlIZE&cd|@9LK0n4U}90tE^jIK7PCsF|IF#(v5g{ zM@l2)rnrg!z})`_@BC!(ozJdzPEkCJ;5!jA313OFdA29UTf^3ac51s7aODhQ;#SLv zWuURa;JVF7?r$P?Am|$y(2W6&;g%d*YEJVRzz|Gy$H7N&UJR`{ z84LguIc9zC+_}6mQJ;!ztHi0G5Qd!6;HA_mzGaHRbruV@EZ_IKZ8S<$DU z^Aq8msKLE8Z~JayAA{!{iqsou4(KE+l8hGVtxUayu<9F)f{Aykdi9w?h3BNLxjL5~ zkC~q16(e0qmLa5vqFIk@v|?q1{la2J9xnI?O@0|39PJJt{rf;p&@gr@z_n_hF*G6{ zqo|)qS(|Pr(Wc@1B-1oJ+)bCCpY#kedAm?UFm*iKnd%!xuT2MSo(S?S$L2?wdyO&9 zX`K}k5c{9?ckMg7$uN;}G0WA}727r*rg{496!n|SRWk(kVomdohxM{b%`vYQY`Q&{ zj4l8SS9Y#d{N>}>fxvne^`sfAw2>;s$ONe)Yq$6I*Tqykk@09(zejG_YBCih z*jzOh9XIw=W;={Z(I8BZg129+%%2TR75Z;csH!xqU*Tp3r=l59>*HR6&DkTwg z$9N3`<6$w9?yDK9F=G!%lj9i`bOET@jR6WBys?4%q0Gy6Kn!u+w0GQQU!Cf4b zJ+&-R{p0(TghYqvlEF+Ya9Gdfp5(rn(pu%$Hk-V$oER16B?Sf zDvauDWu$)lpMBF{Ua$$tn2XD!MD3AE$C3hhZca%0^7fTfX<`%+Tz!0#4Oo}-yaUe2 z5Po`H{!eglu+u&ssmQpLD(cK$yRJuxD*)KDCemP>YJV3%s$Qlh6$4!Yk_=Yx0$hX( zc1-77%a~OrKDInPLF4CH<|n%PevazAPaor+@It#;1|Z$6avm?^_cyVRypg31(cO@m|pWv&!@=} z+d1Z-Fwd&#BGO_{2BnxSLa}Huq2Aw#m;r_%sih&;1>}rc8!1}1N1$T9kK5-AXXluk zF#Jk4{oY3QB8d47`&gaX?X?QtzIz&f`J+M=iLn6# z_=_VEkJA8C8z_DFwZU;p?aGhvHLF=+uxwsaxtCBDqFOW2Nd$Gzw)=ac>Z(&POZhog zGY^PAQxJV3K9yQJ?9>tI+Fsmhup@K&pUBOIEA<8*r%m{!wzl3rxJqaKA`j6FlZZ_` z(eZiixOO9;fwc_2zXwLFEgp`+?wBq+(sv5 zH8&ZN8f4$6m4}lVo(vd&JUcskjq63XKTVvAK`~cTIxm2m03d!yvE+iI*6~5^Gp*kS znUto>o+?|AdM!vd+4J6ULTTW`BQeL_br~r?gAI1|Kqk7}JI%}(dz7M=fo__X$E*IK z3GBwsuqa2)viTHZpu4E66zX^udx%nOK4%q?-u0{c1ph|Nq>RFoykMsx2M;0cH77Jy#LBKSJmLNfbU&de ztVL;evn*)T`@wh?@AX!I67`PZps@3lu4e364DT0_8e3LfTpBLS*|pWf*de@jhMn}% z==$k7GEpooIn>ew9M%ojM~IOe2^b_f@14}xvXjF3#jzi`_ORh^u9~xehMd_|y|_&` z_da~`>_s5EdNrXW$hq^PWipPHCS!|4Z{6Oz^&dkHc0^wfsls+eWm3qnKF|^@OR0~_ z7cf6a1TQ}L(=2G8CGkKdEc9AEI51#mnn-pP#W74nhn4VFNcMM_$@>DJ)RBpeB`paC z0X~lq!z_F;#GrepQG8l9CvsmL$92DqQ5mqj(U}EmC~K_W^v;MlEKdx9Tvgr3HRleP zx2;Ryq0-#&VAO3EmdB{A&50Y687WabH-)9ijz6fx$rw5x11w6>%4D|viRIy;XlK}A ztB|kcvU!%B@>D#4;`tjCpK_~vfHs*kHShUJ5}1hnKN}QSW7&LrVHmJ%{%LbcBW>*H zv9TD43Z?tnPQIAa9CbDU?|3=izr?9f>roe~;O1B!`~QV+2`?PJx9rFYf54e64HILOv1 z?f>>6(q?|h?$J%`m~0S@&mfH7Dc>sn3ji-a>73J+%0&*VSN#3bICiv5*>$jt-1{+s z8kd`JuXS+AQ+-|p)!QT{7zM-IPE z=%^oQT~N&%{&CnJO!oi8u@i%t6P|7B}hPg82;6)3XM|1Zg&!ea7`Gww~0 z^9mSVvE%F5{w21sGl?=U-$O`%#x<>G8E(aO1nao1#B_mMg$7u_Q^Z*lwYsVAkdmy& zl2%LR6`C+D7ACB9^;xbej3>CrYdf-swR?5hn9r*d}k`dHt$X*JNb50Rg z`hfD&LIER>(#_n>uyiC9^IpvTg8++Yk<(@@AQ?X#@!Xg+_7_|J7u^WrK`yz#n9!$<+yIL8kazmi*Tttto6t3Q7Z=v?}O6*w0m2iP>301BQF)WjOlklR2tiW-Lo69ZQ5_4_8?gs7?()<=S3=j&UJ zWc5`l*OKGdXm(jLeOuANSsMy=0yPTC2Q#O?X%B=MoQTe2lOw}@>jR{cwByWWCo@hz zfSHpxpPgUMZX|;sTb{PJyWy&y_k~sb#beX(WS3mk0uR z>#q9HQhK=m_x3btMe7VfmX!`ZnVUBsU$A(NOJHA1%nr4Rx}`HvLt8U|CB8JKD$*iZ z^SL>1JQ>H>1`hx$9EK$U*j11x5~GxffIS_AH*-rF(8rIA z;$!eF_UmL$dE%Md6HgkoQ=(&QDYM~p }!Lqrx|+B{-jS)IijM&^wTl&kID zF|P2Z#sR)Q0@}F*L9oT;zCU?}$YbO~9qovmBrI>)nDqQ}-bur}yUXU9P@B^LEvB#a z{=2?y=SujpUVih~Eu_Lay9G2(JW_;tJMZI&S)s0c@iYew!n0QnPLwq#f!5o6HBf+C zgnKX;eDCjYLtV_%(Yt!nVn;WEN*m$ry`-cvO+YeJcWBj)MxYFyjQu$e!BPr7->a4Q zo{Ys(4{OvXem=dnbp?7$(J|_YVGJuNK<&Ai+Tlm|B^uuSoSG?Ysd4F=s9r8ebTQ22 zmH5mrUDqOE>m;;~$MCLRL~zSlOO2z4gp55LVH)5Sr~Aocz3iV4K|_Rjc{}(~+VF-+ z?EG4pJs2=eGl>fJTIEA|}SY1hP!=tl=1+$l^eD+H-VtGCz4t`OIj z1By)ma`>%ZrHTeOraaJ9FXOn3?AsU%xbJ={2y3$#(_%V}xQBStU2gp`X1P01g)0w* z@G2d}UaIQJbiGS5MGZx7Wu-VyFPq<6Y)N|8>FmepI!COv`ousJyR|<4^D2tYDbNE6 zXid|5ov?7psagMeby4g2SQ!i2p=*6|I%QxZL@kGk<5>&gPh3W_;@GVdsd1dx!YZRF zCqMn(CZKB*&+hH*mF}zv-rPyQjde13!79Wv$VwazL!Kf7(M`CurH6-k#fqt2c#Xx-4+sRVXIMzB)%lXY| z=Uur}SPapNYHkRxD_!7z6>qZOHe?Ut%)!Ineez1MI=%H2<-L-c@&-SKht*R(W}!LDANy8;HfXh|U#S@nCXA!5hq+FVgkbWJ&!9hqzQqmk9jV zCF;o-z&gBHx6Kr)v9zkbv$!jCIXw@;%k&SK!xyQCZ5{(@CP6qSw;7WDX5DP<&)SYm z67=EaBnas~kb9HQX#cn6#Qdj~J?AO^c1O!bX0INqk~$+g(+^pb8%|$}(zL3XadW+V zUH$8H??3|+W6e~*SSkGECU)n!n@+38j^qm~PA;H}DiLs7^d=>(LkXObhOlMQ+<1h- zZPenN^v#Df8<8mX4IxYIx_pQAgq4|g>r{c@-)IgZeLHpH^v_UQdPp`W?)$@~Tr%`g zbJFa4--zf&tFB6A;CuejCn`7PH?%=2!b$8oQ&HB2QmmMhkqqp(9n~XgfouC_T}#-X z@{5It=^>a|Ay}N^^nf^hvex(+5gbM4Pyw!;1Ix5p4Nr{XkXP^q3rEj&xEyV7!{ptA z56jU)JG|=ntP}UPR^DFNbLyoTq92=1sbLvN1RgjCehDfS8KEZjfaE_-L& zVD;b@EDiJNZh<)5{7zG8-<*$cdqNm-cb02Oiatf)U?{Uz?P7>ts7Trn!;-;Zd(1m?NXj1BIEqY&)(;g%bWl?C&D_HxGR`_ROr zP0`A>)+yfMqj7n>TYY+wm1n~lHTXYdR}pm{b%Wj0Jd(1^lV&(mM5F4RGuTaE6TSnW znv3Xc82QN6C?qDN_vz$W902_z6d#AzcOxc`-u@0Mo~JGHlyt*Jx0Z*m!LUjYQCHs9(CfB#GThl~{2?nVHXlmzUYe6J|C+t<>%8Z$J3q_mP7_qFJ7Y1Cb~ z+>27Fk?}y0$dSWMmY-`(sOZYWBd3*dd&xi3L0v0p#Lo#u&~yIjmFbHt2wCTIZy^TF;=LcOU7Nlh zbeX3UegEi;LhaM1>YqMucm=#{g_HRaQw+(iJYGG>heGd2^P9o7Vf%PQmH*seCm-;H zr+{L9*D#Q2U*C8qLIp0tW|gviOWjntu8MFDm>`9i_kezJ`avH4%erC_*QVvoCdqId z?1^s%5!VE{nvU;+Ka1EScv$iC8~y~XFqjJKQ$w704~)$GaSR1+WFncU(x|Dj2z0wo zyWeG3HD>@W*7_Rltjk)(sn|2TT|_Iev;4*lC!+@LE(MZ}R6Y=!Uy9i_x7%5T6>Ze? z*g|m0oke#-)ec~FL*9PusBo3i-g0Quz_ZmT4r~%7rf!2gwa#=aEp||L^2M5<>23b7 zd6o8Pe#pDFhPN$c++??i*bMzt(k7e3)O3s?UOfJB`QXnT+x43^j(q=~giBj;1oq5t zv)K3tAz3mDY2S)?JM`_FeQDu5$2=00 zAjRKD@7L^+WUO0*26owFef{FCBSSa9ffMf|UiHb}yqg<=e8lP4FWtD}(e|BhLPiJ9 zKkU0b0|ty`gm%`u{4h#vMV#s9v*ytmki{7+_y%+P`2}V z?(C3zystoEJ|ZgeHfm{s=hkVo&k6WfI3yjwNnH8jvl)f!Iw*<97z2`%7ErI$*o7im z8OD3vw2=7PP%`J;EGM~Sp3P0O5QV-=bO`JK-F$FO>)~^JWQkUD!Gi-xl3(lflr@wq zZ6#S`zIE@Hjsx_~Kb^BOovbp{xvn+IM6mjOQA z=ddMlp?lyTtFrq->Xb^*8aIB=Q-cTUnJ(Jyckc&BcpuGE)|JSA6=RTFno&2)ufYYy=g$>4iCB{3J4he8|)-i8eP}qkg z-1U17-92=MnNMp<|B`5HtX-e|^J{s(GnR0EA@>SGoorXjJ|lX?XWIdn=8Z9T@U8=h zJIUp&uIgN6U7m`cT{1fmAamXONcpnN_h1do`z1TRlf6;=&edCI|L&XFU%4>%s6XN= zuS4H~Qy3{UXcS>GqG*fcy^1K&21f)XwHUvnb${GSvB|IAQE;T2cl;(QDig=(dgd7vA6#M(tx$i6jjuIk%2Z}82V&fiuT+>s5q(q*wm zR^Ko4N1)46pQPLP2{%3kyxHG9`wD8QOY_NE@-yOv5q_hakHr5!zTPq_%C`Fhg^?je zKtYtoq7)SAMkEXnq#H#frG`dQ1q{FdmF^z8q(M-W?oO2&TDs4^!RI|^{Xd*9to5wr z12gx1T|0iU_g_@Frs~;HQsS!3U$1+hxJ%>>6S9jhBgq0-s_feB^bfxuMOJ55?c@BO zajV4N9D0Q?1boi(P=aN`;j`1dr+#;>9JX3zg22g6yA^6|7%8xKS3HuR;R+g`;J&62 zsC2BE9%E4i}U2sVaaPcV9+wZ@WEx*W2sB^UO%kI zn;NhN)6x^To@?nX=TXEgZ`Du)J|A%Q_PoS5<)v0inm=Ew4Uxrqx0g&sM^r|7toPfw zUZ-UwS*NqaZ)VL0PEhc{q(`E#mG+{d?OzrQa0lFSM<5vTL0N3prVLlX-3 z*Pqm}y{q3TF4XU&vA2N%OHh6olqtV)t2B6(<9_sat1x^bc<^T7)%e_`MI0^aY4^Vc z&hZ0FbyG6xb5|9R*U96Bzq{{sTC$67#lPl$^^$@}7GC7v`|#-vEHKy*+A}a3dbY-E z`5SH70~5q-(WYGh4g}2EFp*#*6Sywc(SKZ`?p@Rx-nzU`5MmzRLIg(y{$$;0SV%={ z1%g*@7cAai(4F2rPvc?&w64?s)|5LBg=<&ovQWVgE6qgUaTJbS8AwPjz;XRHjJF;F z1H=7$3U+@6nDC>RbfdvYnYLKfcADgpiDKp{R&J09fA@K<_owWWe6}sChci`~=7~;w%tok!)V1xir$0N zI?kE0a~$nk@#dkp#B$xKraKHtcQ!ii@RXc*%u}^%KlFyL1GcDq5|#ikf<4O7eoh15 z$mFI6tO4Z4dkSa#QvFN2qmX|X$HJ8o5LK|FQsi+Yu$_VtvV)D80loh=_Q1;2`Kt7UgAhNe#qGAJaN1C8fPVHKY# zhaV5mBf)ZZs3bq_H*2kH{Un{O8e{-w_)^Q5E2$(XX*A!^cz&ydB}`5zh5>#A`hsB+ zYONK)lKXw+9GP!sF5dar2l-hq!3vUICP+kEE`+rRN4HMEzpy=9L$?hRaix2}yzY`^ z-Gp0*2T;m7!ljl5a zq&YM0s%-qj3DcD?PHIW{tkPS5scGgAbziqqC92Xo9e6+Cgny|u&Ow3XatulHBZqUo(jHNk$s*nc3sd< z7z>$ug${{pCd?ZLi$6vNsnxn=#EE`1fzZ(dld0Wd+3tMe2c=gg>K(zEZop{N?@cRs zT_?O3(w{tgo>`xV^zigL5Ohg5H(t_Eb^)o2(@L`-k=W-RXjY!Ot>@$g?B%~|4BamTLTMcI8ns{!*w1hEB7p-=1~c$(^kX+9va8YuYL zrNB5F@5<~b&i`l=;O2+=jLy3*TPn#_2LMkDfXI6;;CxU389k>Da(9iA7rC4LXUWCq z4^l51dsJBv_)E#oK81(C9e^FPgOVMUNw=|RY4BK4SOrYCpL^E^EVCVZ<<3Vd|NEwR zJ}%S&pVkTcQ#0eB+fA0r;k7YVKdc{a2j_WqbJwME=a!yifK4F3R3Hh88PMRKOA(_Q z0-}U5$z;T{g!0nk0H{3AF|o*my^4Uh>$BW#Ei_Zo4TX<802x^IT<4|!>}|G~r@tmo zb?JSBeU6Gv_XL?dLuPEspyEl6i}y8YtY>&liqsgyk0|>p+>2qEyv>5d#abdy9=;n* zFK7)u?+1w#ZJC~mfsN)pFtBO0)%9e!H)^)DUW4t<;ZZ=I$QVzodx7X;c_+CbL!-*p zueb_nZaTLu*lCD4zt|jho$WC!VeF&32nD0xI#kNn$v+-2UxC6F4?jlAlN#RqH8fxO z^2JYjzdq6$xYWQ5-nXCGPEP1>&1$~(QX$NsrEKB;ppNO{(89hrS%oScUxZLFY*){? zFj)m8z_Mxb0BC^%INI^$jy70g}L{|@Fl3V2O{z!sQzBDezw z%-AsQOz&8jX|$c{b3lvOALAQQ!85N1y}ecOh@0izS*qWSHRDL_9?F}M&p>o105;((j!U*1apj!M~-d-#qRY*TjX)pwSCobq5!7Q;Vl6ANsu2I!Zs7Cllw zx{mL4rB<3bchoxzV9B=QHQ|1(V!`P|7~DN8lCichJP73p?J1%YuaNgo8IBhQ!}D`b zqe!mAM!%vvr@8XY+@%kcUS-}_G0zFBuogqe%w;6oBRgbPC!(B3a)I&hcC}EFFfmc( zB~6t#1I^QXh^o@hO=)UFbM_`UE7}wFxW*b$2-N)!^`PQYt9>AUzW-NVkG0l?UX`fQme0Xsqe` zGK*3B#w!}R4`pYeCtYx=>p84TRA^aF3E}D(@O(BYkqK%QaFy1fOb^eKgq=i+|ljPWzXWeP8SnJRq=f+wT4j zmMoP=zbl-MUh$*1&M```pJtLC?cT0bTha>F4%A54n^{rg3SZoU!z_q?wvxBIH5J2+ zhb>tHE%}3R-Ed#zy+U9(i}u`^7t8<&s^vX;Sm5}bVr5JnJ4kQfKF>c3gJY4!w$g)j zf4zNW4&%sk7Kq9O7c`_<`U$#XljSa=pF>`>43^ktuh@vUeFryaD(_L{!M^(rY)BoZ z3KE_?Lwmd@=NnGity57@cKk=~XcDRhUV0%wS*3BunO(+w#K;uVm$%9mEAwA-m$E;m z6dZ9$Xspx};QQBCsGV#Jpf06oBj)pIfL=-ebYRcgE4kIByqEkIfs?X5k?y$i)TUo* z9B`iU09hIY--(TD|2rl8F=z-%M+uodH(3Kq)BY0GyPiX$;U9%DSP@zX=xINMR>b)u zJRq+m_{!1!Su$07V9&#PVtk6AB05=>XC8RTmjTAHe~uImsU<} zqkux`ht~ zuJmC#gdfN{vCbcy286jx71&?k&i^tO4#iVlh$D7uBV_GYC{W!4z<=>RI=N#?V<|Ly zGiv>2EhlH~V?bV=Hpl(lQy9AChcO0@-Rdpc_+ywBVYc*ooV?R_63 zx$##cG~K(k;psQNgRJS*h)>^jc>iw`k#>0XtGy2K&HGZ~yMTAe;PJdqWu2Rae#K6k zH(Yb0pi>D`OK$#y)rJEaeq#5Rdz=kpCF=v0if1Ll?`f~493BhgM?Z<#mv1XAqOFR? zf8n&1%^5vOxU4;_>LP6{4#NOiCrwUMFbK6Rzy@0HGEF1x{svM8U{}Q&*H&E(h~Cqj z;Q<1|abc*R$@R~>%d<3rf{roCJ09fAa9HNe7<@erUmeR^2NZ5Bn}b^irt~>Xo#tWV zL)rikUWD=(jzG@*!}L1|jNVXiC{V-udTF1O%7(gj5-x(|-1AdPwt*H6-<20dn=fKr z8`#@vls~S6;$zOA*;By~HWZsfp@bn?7%a|4keJfYfVPp)Va!^$rY_6`e*(-1)c{x7 zzxU_!h*o)B9^!|dtv#$E!p}FZXnKf#e&^$5^9I5bYJ5$rWIW|#638drF0UgcwG@Ve zP@hu!dIH!QkIe(jS@3bsD?oY6Iqf=gW|XtyJgiqo&1K3oXQz`6;UV{ zhsz&?i(7p40fY& zBQ^W{4W=OVlM4$h zZ?nNzpS#-BEybyKAO1G-RDF3fG@Dh%%@&bQeiU@z)@a+swsYu{K}?9*9U;h601IK{e(w8Oxf!yKIAazwnT^2vWK5k z5I+zv-*e9`0x+^WQZI|QRhW@7x1E3(#Wp~bP)S_(&O1+}J0Piv==M{xRG<4Yw|1bw zhJI)QpKBlXUF*U+7QHqh9{(H1wc_-i#tNfgiQz+N+M2!@{IXGPmUQ@j)>I2haHOMI z)|9q*esmxR&@c%K$Lq`g98=KfwZFnFD8B%L@O<$Q-)e)?xlB+AS%T|^?2A5h>BE%} zoz0w?GsiP3?miUn9dgcPsJLkKZR@`%bpN&y=p>53$(8{fV02V*vaIMti0T*GhU<1G z;4l8J56tZT5=bg=o66GlW9lY0o#+LaXvk05f^FxEt!VsmXfP)47vB zJHc&yiVFh9XZOHnT=>RfwEIFXP+i&peLYlt$iwd zsq-UP8Y!KislPd@Q?Csg~Xz?2H~*!Bepovf%N?)6uBw&pzpO6FwsX|n~x9`z!+?_z7{NRu_z?b1Tu>D zpi}uiMbLE?Alhr~d&1@|vrx)e6GsCx6yX=rldz#Dv%8-rP7LoA88Cr~&)m;SVe5{HXcxrqyBNasPgZbw6Py^F z-WV`W6^zb1n>rO0;t2JO`m(B-(%IJ72-@Mz^!gLEWjzcQNr z!xYpje8Um5Lol^DdK_c$@1jaQBH&Aze<#m$HY~~{_fxrES z-_wV2ga8hvJl;Ef2r0J6y6f&{W-OL)7;EuEtT{i~2pI%`1&vFMb9IUx+Y-Q4Wycqd z@C{7Jx$da@D3ebld^;#HaK|?!U+gP&wCo(>A+_dQ5$c%8$gZ@nxsQ(M#-nSFXJuR~ z9tIguDubLPC!ov^K?R19{1I2z33BH=7`T$OP|?#&sh;*$-;>%0LKZ+=BsmvYqZ z6{4z3O_d4rP7nJZK(8_rR@ZQD-OwSXZqIedz@8JjWT-~#Q4qWAMjPo=F6dkwm?ux) zfbTLP(qS3$4!V6Ios_ShhgIHRDQK;jSTMu{l$)ATXTzY7OjC>&>j#!{@uQJXomcT6 z?RXNHs9N7FjU7LM)V_Y=U6|(cnel{o_=Vg|z-QQ#uFloDTLID)1RZf@mYrM9DqF~d z8D@R(d=~xgzUd3PKF>ZN*#L~lLY}MV<~Y%(5n?_TfrdT+8B0T|H&592Q9};@(%KJ{l;=pKSp5D*kr8qSUr<_ zlOy4(U+g;S2c5v+?ARyGsd~GAQHaflr*iWvclpu{cc{PhcjWkRr{~uRySD)35Kxqi zYcX#BeE-hI1zLRvmA;Mp?Mshhkff4FMoFNw&egQV`nDQnZf-tAd1+~lOwEy8H#OE4 z-Wi3qEdDQ$#FerJb?2m8J=_~>i@*GEv4p8RfX-S)CY9ICqBrXajCN_tDRIL=#%#?h znG8R^Xu9zWVm{wN+?m2kKXbLmD^N;=J-|feyLC(^KZUX#c8Th{Ls^*#*k4IM&EUVZ zP7RZ>Wj4@Z$KE!)A1*NeP!{u}`6Z{NM${PqS`N)n^>M zHX`xo8eoqn`acW)dx#js`d8{s=qSMhjC)3=oT@uqXZ?s;sA8G_p1oOCF^+_=orN-< z+KqKce6xApF)qJIX5j_?a3nX8H92D$1NA;eu9PG?!n|>3R3rEZ;u2WOc(>kTZ-u(cF_9w`t z%piD@&lX5*PoK9O`jXoyU=8#0%)i*1^jMt1S!P%CnDqJO*CIXo-seVZdURo^c0yC! zPzdCjZ*oBlGky^*&7OUy4^x-VBg!`7b1vOreNYvJ?c7RD;3I{+y6L2+dS z)4=V{=cfggR^7+b*|_|Ula5^J2{%+=c2NVh^WMy9ZaIf3xf>#1$^gyOQz``({9zW$ zX#*2IWWN{rofHFK17$72jF3Mx%E{Ic1BQ368}#Jx>hxqAE>7l(0s*Bjm+f~NsJxZy zQf7>Mw$N;s>q3|DXv?oK>Z7_g72R21ev#5zohXW#lTgLeVq^yc z*$nSz^-&iN5ls_yE+gy-52Pf^ip`UIL!#!;+5=jJ<2A+yQZViM+p#ANJ4f}=pRj7E zE2kH3buB*ynm7v+vT!A&FLrUJDd7`Ax)vXFg&u3_w5b|ISFS*S$d_mK=(4WbbD{_3 zaGe}wdx?4JRkZh-cxI5zvV+aZAJH2KK=KF@ZscXn_bXB?Kha$-DMm}soh|x80(Scy zC^*zsbITLQ;o(-=qRl^PJ}qt1Z02&&gfxRygS^l{g+-H*7y!_CIU{k4FAB)Sgow#@ z1nw)owf!5|%^``-QR4Aiz{~4>w24*kcGr+Zl{w6uG@}`jKPqp0^oS8F)c#7$!Iv#} z{(?9oUGZGk$;@>^6HYFv`R;ogcP%vA#Y2-b1G-Wf z=tyK`q%xj`510pu;8Ke}Y?&q2 zig}HxJD3Ko#q2iC3i2VtB6U@nI6vbK)cl^N$>$a@Nq8dZ znrh9%z%%hX&#|u-XsDm7C76Ga-5`Sj2~E7jb5%ShqwJJ-e;P>j>U6$v3Hw$`t193a z8$V}8-kt`NQ>i|w%t>B$1f*U+!6SCw6=`AMq!^)au~{vnbSROG_#%Ar3M`9d>O~#6 z@w07vDwFX$Nl0@^{X=6aeTZ6d#R<%gNNKEJc8h)EVTmx)Q6gdBl%m{JJ~bkZ=W~H- z8YCX(XC9bXnte9BZBJ~*E^D&6Y3h+$j{^y2C;eKUNGFi4U$AG$Q~p$C9+VCQ(>Wo< zy|2>s{xF{IBh zM|Zj2W&GJ?5GQ3id17khWkxf&vQL0TkbN=JiaDWuB(^9J6w+w1B3#3Xv1I%x4 zLM}KWmpxEj+;@=iOH3#6)-_=8etrZ-Fe5r?n|>M$vehGBMkH2`O&rQ60uxGnuhW`* zvKlGhddrgMr-KEYDeb{kOnb1jNeT>X9OirT!EG=acz5YzJz*O)mdH7P!|+2#Dk`6^ zZVVWoh&9qm=kB*@*a&6cYUm#9rK>Tm;(}xQL6ZBM-ZAJMBRB*8LozP289_L{WU4#t z?Wfw-x;?b4UnO&D7h6Y_Wvk(7cfNHAn*uJKcQ~7n6F#OO`LT5GT;Fz|OvaY(&MJ@MdL5@(g_~(ljXW?Q?32|a6ZnzbI3;G=HIRGu^ z=W+Z=><&GuS~%YLJn~i^qIL%Y&Ri6|0VKdlcAbJ$0LbgR+Wz+F+LUY=gO1Y(Y`G3H zfBmknDv3pDa`wuGeE~+tUh*cd=rmCS=Wh5%w1vjES#~yKSUvp{TNclo2$uE`By9lLMq-){v>`;%@iBE>5abbKs zyhkV+KEVkZ<*CboEmW8Bu$c}qu~ZUW99$uXoO1xLwn;@QM$_82TATm)?7lR4J%VDn z1)sbiSh1V6T~|I6iYt+5K6iyszRjO0y|a zlxer;cxHBM*Fr8VXkj4ouJBk!$Ve7SU3|Z1L79^B@_0UCr?o)fXj!0Q9P9Tt;ewwx zuW8qgS}n`W0GhL*NeiG9OEt2BmU?EdIl4EYSE9QssV`{!j`n+TBE{?nvS7os@1|sA zm(bpKdYiocyC`j z=OH_P^Q>N}{>%35&Uty-tBmxwRgCWWlmN2J$a{GefRIlKguQ?ll)0l0+&J5?R&7AW zpcy}2%MbxlqqAEvusNiTvaRr7zIaq`Negp!he9^9H#}Rb){lr2=}?B92A^=h?rXfX z?uAuLdynv1J_1u7hY?bYQn?=Xf<+7h33Q^wf9XWFTp>RS4^Im3)=gkn;@o`kyC)N?phidOH7Ba14ROrf9z{nbxp3Y7RBpI~x!2Ov8VgRmIrV^p~V=e9wYxHp}k3|}$^s9fE>J2ztKj`!ftPpi8;HpT z5mFGtT^kGI)YbNRgbvz!R$tHPtoJc&5#PCv=&=5gMt8ldfa8rYSf*yoZ4d%nrsMqQ zAo~%Q1~)OwrB_Htz3alif?1Zk+u21OZgIXVe(_SFE&}X!DQZz5S&W4OeS)!=mx7`t z4oPV)G|H6Cjdf}XL$b8`e?9`jdq$jfWj)w^Fy_`15y2q3AoS(WWZl2MTx zp%-+10#MZCOl9o=)u5lPkA4r!KqEX|$UkHeE+Lf~|H)*p>QhGn7I>BZ0wA(75bnns z=MOTi5-FQWD#?(r73doM_ylu=x;}4>J)v^B){hLUVd}m?v83gsL{u=ak?efg? zA{j5P<37vDtE6~1-tAPgIuZbFo&sWKKZ}GP;;j&svdwm@#7+|kv1d+`%VL=#rXn#x z;!wI<2UH8CS=?5W5x)~U6aU7Df6PrXc9-4+9VdtVpd|8>{D~U4bgZ86t>*K3(P*tx zaA(-v)utM!ev7pWYgi5s8$nwe2uJQ-OAC;tf;mLAwFR{87)bx+S%!6u|G zAoC`wbo{0)@VXeJ!71KguQF`5+bH#&?C|~yooriW2Edq?|1PK0oJUoLkLsw8n`BqK zR2ZFY=)^B{D9!WcpVM@-dbU0hTy3K307K0K-tgDJZ#8vs4Md)0!e)*!ZV>RF7cg?M z0x`Q|ws(c&L!V^CCg?MY!3n&b9X?(n*DwP!6(V}-kT9ZFuf6AS|1h)e(}3>WZ}xd+ zkjO~Ib5z#0vry#8v~@y7(W?&A%Dq1g>NV+agMVxuqLb77*|@G-Pna4WYY<^f#4NpV zU+x_u-vN?{F2h+dl?oJSX~;a_p(S&Gx0Qs^KiG4r+JZ0cNvTM=K)_#+f9?6)#peP4 zX1a=c?U@(%`80$53^Zc9c2oq+J1z)PD6tRjWG67?#611`GNTcCUTcdb@?v%utkolv zRg37~<=hqwF@iSH^u9X%btAv_6kznxtBBw1$`v8xnX_?5<-Wfx9R5)dZ+lVOOg+@3 zblSCL^2#zP$r^3WYN@g^)9EWAzCX8>R$60^J2LCs+t~!(ZpfqYO7YOi^{!L!|9JTM zI?;7}!m}0H#?q7E51+yv_&3!UFM_LE_JNy#>foaIqQ89&`EQ_GjeddHE>aYTAisG+)I=_}c=@@-8+ zk3vv@iXkXXb7&@p>>~+wdsa-%VZR|&!?>*2tmS*BlIRugmY25=v5O1E-Xa$(K*nj! zF4ydn(tEJC=v5o-Zv!S}?%%Sb<_&c}Z>fAtlKWT!cS2EH8dq@ILS?vnDWm;H*R&4Q zC(42*;sn3sq&DVoKW5N@YX=>W5<5lB#dhZMognZMV6#Q6vviAF%44Cx-Xldy6a@kw zV0yG+ey0^Uv9d~S%9s5L!2pL%pDL<&0MX2v@6<3wTITJYvdM7E8(GhM>yf%iyKZ|! zZ{FZvG&l|D?r5+70IRnYiZsbuK4_KDmb>DjA0Mj>W&n?ZR;IfrU89-4}@LhiCTUVwJr>$^^ba$gr- zadpkU&TNQjwWNLz+MaI5Ia{JP8(Z6jA8QJ(vG)`bcPT^-uUkZF zY6PIBNiSLU!%(s9$CA5CGHrK%fxN4M{9>x#&Ao$EtnI&6fYj_2+Jo)6NS=IbewP9( zl(fhp77XL57**Oy@|)04iO2hz`~ z!-D6B>md~j55^@V^pCtPF#t(B+$myisY|xD%o2#)_S~gdsgKrQ@*6t>`tU#4%F{(_ zYbTQC$d|dS9LuQTAIbwute&8Tzk8EyuNUY)u}}-Ckf8i^KPmTU`W4brN`T%(|06W) zN)lU-1cP%WG;8BgH3q-qnfSYmU=|qaNy`!7VRs>xO1xltNAkUFEh}Nf083kL*U!7P zB2$TG5>h53b?NNCJ{dR5oU^nTLK^|NHBJ6J%GdH}x zUtcaz=NY@Zy%tKKk*eGsk+XPz9nIh zd3quc50!Q?cJ4a}RZQJG!ntNEHbyYo;SEB{1jC9| zgH5l*@kLvvHX*Pp#I4V+?KwU*w-Iq}ZdY{*1gi?!Y{&|Y2OnziwvHoF3(qH#z}rzr zm>kb?P`R?UfC*w*{Q20R=Q{Q}d~Y&DE?QVWG-2ZMo|CkH>-Qc2OY`dC)=1$_z9+9x z-$&kedc8(5J*R-m@d1Io+r4)Y>we|Lcx+AR*mwk`e?<(n`a7~_^K;%(zla&^>ZpJg zsVZ?b=jQ<*G_^xVTLmfUtSgApPEI{fS=*1%nC`ddFQ2Ii&KXFDhd9+med6C7h_A90Kba6?Cb^H zwIGj4j<8>03ryqCH;z3J`#uo2?|DyT=7KKh{Mv*sBFY)R90^=%LLQdL20@Y2F2pK{ zKWa$opqbL6Qn-x2x084=WUG|FehajrLmu&h)P)DePE{Cg5ZvxSP9OG&PZnMFtsgN4 z5z7_9gtHeviWSPT{6!TA-W&-LKLn=h47`PXjJ?QHktj`>=RKLGZp5x-JPiX*?ymHu zYYL-%q)<)mrxY&PFLDvjk<(0zTUh@VUHEc-OgiMrT(c%tc%mzHivRCSE(|wH-kN4V zLR+76RYz8jzsi+BEyLFq1H>+dHDGr(yDGWLb2hWs_k^|=qH$s=IM_=%z$nV7gIms3D@oW>Pf;?EyhF!{2*t$pM^XZ0s!k8=%fpcV$f*b|sFj?c9T;B3SspGI|`m&)Up z23)7KYxR$0a`*2Ufd|f7=NK=bfA`G` z%FxH!j`zhtOFL9`MceADE>!yQ@Kh70UrZH1+sTb4M%7Q4#N9{Ecncb~o>_thRDBy_ z(8ErKWZZ;)wt;vA%#(pO-LbtIs~Z8q37|j~&HBy=2R=?B#aOPHj)@8o#P63^j;i6+g|;GEX?C z>_!mXyW7D&4PafKn@$LJ+V~9pI(Y%z;y>{PoZ*|waGnXS^&i4x!6+CjSZq_d^&S0! z|7jl}RP@2!iAwPOh*05{VttX{E&6~IN+Dz63c$*qC)))ADa#3ILz-hGC5;pd7hoI% zXr}|cN8+XOqG@5w?pV>&{}dN+!VXY4qd7<3E(QLP6#)17%_y~3GI7Oeuadd%`^(q} zS%`aq97R9bZkJ41=BS;6;9-Yo4q6K1laUA7g|6IN*PZQ3`@qqbkOUaD^_MXt>4YIQ z92~DA(AMjKD+H5IUnGm?!%hm?l9D$R=;t)YVRpQIb;90KEkzITXtY8a8ZV31cp6v` zCI5l$*=Wm!Kldc}_{3eXTkmMZGzc*uU*N?y9`-hH?;-NmfdEm4$7qgOtj9gP<0giV zh%3D3CL<}$)fS8P#Xh63rkeF;)1wOjA2g2?qFWDFqbn;s0AWf5yb*`cTm%Bm&Z`}` zG<_JRsX8WisDGV0;;|NcNsZ=ex ziI~rpc!S}QY6Kh@gYI)aJtK?MC5ZGKs9ciHN@&Ujpnm9Zm;5=2P%VC~dxoj_cA)Q= znYE07X^Y$0Uorn31Uw&tQ=2e%F%vSQKfya#E&jXZ=GpHfD_AAPD(!CV0Wz2X^wzQJ zOy;}%ymYcfUqaq_C`E1pS)7ElzNCdmar1A%AsiBvyR8?RJFbB-!4|1wIH?>w`dWCGVG6@)WPCu;2uF< z8NL#gbMfe7Y7?9N;To=*2Ph4~*~f2m2M1Tk4Kvf;7g$dk<)W$^&ViCniu@ueY6w8`RG;*bTb_0)_IMf9dAZM%r!{QM*HW`o z68s`IG^HPqvt_cpX#mCe94;eU;<-Z280?1BM&vlKQul1$QpgK27-o$$hOC3jhiHZ_ z{$AM{D9yrsm5}SbQ4SK29suRN{lj272hz>dPBmOu_x-vpxCqvW)}@kz*8rIfKr+$8 z+xl@n*ddto(ucsg?79`TQ>iem%e;L9W@sMXEiE#3TWnkk4duFDf`)t@Tx&Eo+_MT4 zjsklaa^1`w8I#joSFtVaFgp3xf!NBaz>tJ@rC->ZE}!b}g@o4gq2lmu*ZE8BZ>AC! z9wO}uggyY&g`Y;>rzYM<`r_S4w=S`0p!M%kpm?h=P3QMHsx-F)&#Ro2)oPNL25gdZz%X} z%i>6{XD3)aL_Z7tw)^M9y~q1PdutGnhRlxZXiEQSB9oy>R$V6W389`IsVD<#rYYLx z*UJzwFC|2D74Y19p%GlG^B7Y{2asxl73UG?GLq6kaHhoz`$s1Z>dOP_N#FvJTh0pkKJnQHS~HCE@?hi?XwjzSXX1n=| zE%fKt2VRX^qft(&`&13_-VPK3QSH!OJ^Pc28_d9!|jnF_7Ky6=zh{QcwzYf=}^ZR?J!TLat~veLGb zp=3Wk%@g5>mq3AED%%?mxU=;MD>|C|Bn(&Tvlo>I{E0iDqq|Ef#3#^maNLKo1ri5;NY;t{cv5Z z*m$h>PV+G2kg&q-W92>-iUgzgcZ?wWpH)(jiNX0#AYw2ke!Wx!)0T(yu-4WCLIc%o zZU&(``5nf~sc+-ruen@%+;pZ>EY3h>X#=qsNczI22`ilt?WF3LdDvf1&TwasfWAh` zf-KeUS{Wf?zYG5u`4Mum4x(GG2Dv&ZgZpa((|x6%PZ{{FEmCWx$iu*xaF2r5bVMVY zpL!i+`Llp|nyIk#AE-<@^=O-#6sEb~3;ci^3&BQ3hHxy<=m&o?^y~*c-*{y`H8t3r zW@Q#^$L87Cf?sY9XRoNOhb|Cx_4^>@0o*<@Vs?epi)G-E|I`P9j$sQNbt+z_Z@qzB zu5W2g16aw2Bfp3Bs!(bT8X)upOeARSj=P%iYj|qLa}4MSr`JUk0x9+yJ0ueoH_N9! zLz_@W+g;c$J_B05Vda5vW9^!4$f$nyI4s0BsHx=J)Cod{cN(~|6N#SY z(?9G2GFq%zocd)va}4-_uan<8n=N}*m2`8P$fx^QF<3{emvNbXD==E7Y^0mi2x)YE z)TlvY6KVd_+!gGZim!Q*OBdVWMPf2a>X80Gb#Q*TOtAJh;5xK?#8_jlPSyTiQYKS_ z)x~l;j$@%<_w0Z@zzcrbJw@9sUBARNDaFdUQW1hN@gJN|xL>K;`PMK-O{A z&ca1bIMTtUn&*wv>!SiGt*6$?45!>%gr`3H{Z9NB;C`S$pTTB(CGPa_fgUJIxoxpy zxn6kMt=*MlhJ1S=v~z1-k}DVPu`qK|ScJ`l)SM*6?8_e*ky7&D z!#&{%YEfvf^aVcqdEeK__4Djt2uHjzQ4SrqtPz&;Y%x@2MSx{Scy7ALE+TSF>#})) z9Y}>wKs0OZ1%soMVn&rE>1e9eTeVG+zw79Gp#kS53kR`u~WGf z3QYSQC?GWaVsqBxynR~c;c6cbPfB)#J>pQ>)u7AA%u{QP+}%}=g?)@X?YNnyUsgnR zgUdV873mb8E$EEuBD#%1i43DSUIl*po3VcZ!foaEcALsxJbf@_;Z452R{_m(r)?k- zVaW4*UeVoFU-ig*W5s_Q_WXQnX5NLXM>KG`af81uu`-5V#EH)4*S>V>^a8uCoBgF(m-Qjn#H;UZXsLU^aq;SayC_aMPy?ZOt<+RGE(u^0|iHH zrAHw5lxsh2398yW?5DO*bUH^CM=`DGtZdBW z8_AB^S+E>egVKX~I$?FA;!PwXWj2X3TO}h;@Brwf^f(K7Zj`=?oFnK(hH`?!C6@p+ zbCx!WlGhmB+ve^S!7U}pwH)kkRAtG8qJEY##Xa4~$kmFf;hRhFZ6ghsEy2a(*#q{h zSC}uV0sl{bl#Nh~B*35k1c|l%(ML@ZLRifCR>MN5tAPsfqs^rU?Hg6TRa4RK)>>z) zfA);TT1Is5#fy~AFkOr^*EO?Icz&Ro3S%e|-*_*|bN;L96RFRD?P>t=VtQIvvRDj2 zi@sp6!~4ABso^>@P4M=?kf4onxQ$B!pP`KNwtJg%Z{}}HJmK?sSyPM9c$E>C8UC2_l;o11SgP3J>Rp(RfI`c{;jhWeP-_n`Z{OyGe za@Wt35w}@A7#Zaq5rI_4DBOop!J*>8mQNEAl~FdU?+QR>om6#VLMyClr&DlUeA(oB znK1}rY7d@7|AS<_W2%AB4jGq_*`ThR(|`xz_J+@JDv!_5e~E9Dn#oi5#Q|Uc06mH% zRbOTUnI(cC6VxoH`Zma74$s@$NE99FewbMJ_1<3ZIo@Y{*gUW0)^K&goV;j0g7=ys zGZ}Fop$40GtK=lRC8L6PH%B;=U70}EVz&cALA|2)el=a5g;B71=+kf^_{NL%+3P!A z?XQ8-;`3n7f%t~sq9onzeAvT$y#0^Q(jm0^HU(8l*t6>TCSN3HlB-9=XL7o;?b-I- zV320NYuxQtBab`2GtGE!G?-3YrdBu@9MOsywxA7nXHJpa_6j~h$M=K0Hq*)|+l+Z% z`zD-P_w%o&M{2pY7kUe*3`i!lbb6~iM~eO_YQ3e*Q2V3fFo6ergM8*4b%Dp4Z#-$H7ca(WHCiq+|>A;Jt^HKRvFH< z4AoXc&h-O74a4v6cYG4!Vrn_IA85m9RKZo_*m;;l`qbn55N8}yW0hwm)~Gm>rxW+K zc9Zw@@YC86yA#?z|EaCxyeI#x9Vn@)mye&Jk$M87Nc5pVUQp@0_NTz?$8SW>tT+|R z8qFVxncNDga5Fno4_$KUmVa%OyI|L%=c2o0q_OK-V;cV85rFZw0VU7q7y|R*qai2OpBDcu=itLEUp|)=fqC@CsZdc)GGz8g~(7p!apb zJx5R2AZQaj_^h`&ozOfRHqF(Bn9N2D_PRHUL8RghFJ)OLTfg zIVeeFyZ2tL_|7DI5pZXmwYK5Ve|o zDLWci5F|-ahIttLz}&h&pN#Dw^JRxkM`j@u4uap}>naN@ax5E`09)f&=0l4)jrPb| z&hYH3WyWbi4Ob1I3Cz^a|Bg;|U&BrCCOh2CzWe!VO%wtnlo$W|A}m?Re%l8SWpiN- z?U%(Sgw{OP(=R%nwW1bqX*9OH|L1hiLv3e9oYu@x6`Qh!~MpZH=9)?c<25uOEL&3bV#91aQ!-grg z09nIq}JC7!*gGQ(#yR}8=_nYH` zLvFK~=9PZ41@P!nw&*aOPh~Wdw0Mzi$9k2)jeFdaW=9R41I}z#Vsv8WZ5NoD&P(q}#;dd8GXS4VWvxi`o!M~0p zkGKm8mQ3)#vqlAFFAFr&-gF#ot{hM5e9Bnb;lMLy2oLQ54!k+ciMAUp-d^b0Sb*yN z(Pu-ffQX3-GXXKVa4PC(rYb8@)n(X=M7~st| zF|^ca-U6IgPGJ(^5t=()l))t&K0!$yoTqMK$W8d=gpa13)3d*+YSGM;llBUZ>X{{X3n?Kq=(Yx+bxf8mV@J=vfJic9K_fBHf&NIe4 z_2lcri-y^@F*_nT+Q2i-y=03fc2?~<65X@Rpv`OohAt*ou-bo|BZ0H@BU5UBXu*5_ zUzF6uM|-P3J$qeSLuF;bga)st0q$+1J^g_oMEnc4bBs++=f(uli7gCZ2frdBot|kTz$@7MKs6rd_}S+ihk?79bN&25)wB# zE{+XjE?BzDSaV;RCogF?Mgk&B9;%6iLHpkNp{wle9~A7kbHt!5M+^_)HV7IQs_;hl zetHE8|KnMHNqf5oxqKl#93Z5?4JxRgj^e^Fa|hxPJLwyG!4?S+6$pkT+DI56(^#m3 zzi0g?5AQ)e-KgItwS9{GYj8Tsz_051(h`Z_+MrxIP88Mqi{VH@_y8R)Xk!fyi#yp~ zu>QLvX60xE6W{~)ychTXlqE<0`(Dei=urM}cnI=oDt<+cLbnbodZK=@IM z=`E#(z8%8;0F2PDao75Uj~*>CiU_0NW9ei0mzO7da*zl0=86Cg8nPC7`Tk|9&%Q%OJQt0lHnI(4t!?lePO0 zu7QWGK#GiYIUzcC74hesFVS5;nz{tP_7P=l1?w>)9^{>T26`#zzu*5qYv|$E{Xaj> zA!O{kwMRCG-{b%92YhItE<<9T{lEO+zsGa z|MNHh{U?hx8vKD22*49%-@t*)&-jwbMg#L6ku%B$85|S;UEt^=UGPCb_c;(s7D6R@ zjikj-_5Xe?7Cs+H7XuJ>pMZk72;gdMup?!{6qUFqO*;R6O#o8ll_;y7LjFcI%qR3f zR0YyhtoUm#Ot2MIJyp2|2_`Tk?qyqa|3BOIJ1Us+WI5{)s$_k}Nh;1eCc@SPdILG9gji@(6Nu zwFm4aAIi5unA|;=5G;`gdbH-%5y;0Yd2|e9hYt^Ct-!?1T))jiGf`+|Y;oEY<3T4Pi2JIOV5t4*jy zp8aB{~k0Z=?w<(pYCN2 zp?cnlrK01Yh_d`!pJp`AKgW1lATCUKGCeZUU@fl>{q0Q`FGCLs0)o>6U8}rke_Ku6 zO{gwVxtjhf>E@*QLK)!-hrRdPzEES|-#0c|yLojNpNiP%^yJnGB}@6z#&@SaA2>Ui zZfl9CA%F5;Swi<0Pt&OI$wbC6>)niX%P>44p^hE*{?YwV9#}c6)tPJBNxIz^Qk#(v zt@hIAt5kbbi|K3tcjrDrKkI$9fO+--w{J;z-A$-6SdooWaLj8J39RP2G`)D|yKm@R z#*CNmete&;OgH$a(llqwysLXAwr_h6JV-xvo{+BB}>s=SU2I)=N&7XHJXR;>alau zQ29Cn2sHy7(buR}eE4|OyJJHG(_b>)NQHf9k}M=I-lCu71Y*0qKhH^5f7oB6P5x|@ z5xB46`tyu(Iq&%b)Lbof*7sZ|p%ua$x%xkQPY?g7!qc;lTix9DuOG8s%RIvku`9 z$5T(N)rJb@jv0?4uZecEVkXz_e<|oS^Rlm?&6xjw+UW4|a&3)(yu94PGuXruMC@_Q zz~hFHG1TPW4hxrFKq^p=BRa>PW491tY?4Xz228eFqB&=5#~OVK)L@o8Kqd7HL^^7b zv;b~Socrxk#h61V007pxZ8YJ=Q11|Vm47ZjIyzcyNRjaxIs7rk`FWy>rwVYaAvfMg9etvrc5WP=YRNAkC0>M5-O68N+HLiRW~1wF2#iZY&46Ak* z&n&wlYdDr!_6a^k+rH2eU2zrZt@lTt?eZ>myXj#nF4?4|iinlnME4=8bk9hGFDzz_ z*1FA%&AJhW^*mH;-`F-ook@YDaVntB?1PaIIR4=JL3&-#XnZ^NG>9YCQqcdE1`(8? zwY!LNAT|@5FHnjS1#j#gSc0P)FRwYxIq$%pE2T5d57A{r$r72xiH6scXy8kQjQAU3 zXu#eB;i@V~AL141?kVGDMgAq;&Owxe(23$XM`~%iwzt6O<}9uaFH%x^DPs2MdWyPT zKuif$<0R0#-&KsH;?%y~hnhqFHK${<$40wMHti*n>qa*~uOFvrC zA$`UoqJ#TlF2DQ;M570k86!A4 zfn!Ua_i6!xYfIqr$$E(g-VzZfX!B0DT&K?nI6k9F?CmY>M0k|Mk8$zf43HI%oenwt z%WGr_^?_eU#2Bo14U$xc6igZ*i5ZVgDeLX3H;nm|BhHrMqs0iuPI3?u6bff)#D}84 zENIQ-HU?Epd>P7dP%HEW3NCvw&2S7m{J=-_R;4~8do-K_4MVbI$Si^YU`=cOptS4x zn06Dc_%A99E<(uA}oG?B#D7o6mBmlryO`- zLL>4#C@i!lUjTXHIFiOa&pL`|6RZGr75bK^t_3V1WdQ`V7qEXClgluIT!HtO@=a}H zh9aIq1uJvAVL_A2?@-XXn<^A~fTfrp@T&jUlra92T1MDL4VsniW25kjayQgISc=Es zv^H6g?k+1{&YNK5mlx77Ar0t-H~~#DpUAyCNCAnM0M}Mv3yOQ=M=c9tg_63kz05cP zp)rdYyAJ=3W^6PWARkrZ@1{@`%OG@BKAj6|#|*(u$4z|Mxk+OW>HCs`=Jh17eIm?l|I6&+vZylx+Pi3NkZVj?>pyBhk%F&6g~s8 z#D4i+gcz>@@JBkYWUl(r8!QT-Ld3Ka4RNVN%N0kcSfSmXNM9hr4)zxKqOp^XAhC0- znDRmMr~j3cT}*GFK95FZjeAVbfzH0&@DYj7aI}#DIa=n^A|-mm5iN-Ovt-(e7$VY= zvi6k8=pfoG{DCNyWRFCfD63)Gm=Xl)^W0UT($D3%Y8lS(>6Ez;*7#`7u5)vB1&Nj) zHlOF|M9nB;hDx7!@?^8oxLr!pSKe5KQXu6}t8@@I5zpcPNoWh3VwRi~J?Ip7bHncN zOpG$N{G+Z=!D$5kXm~8jeK4aRW zA+Tqd{9`fWxz4k(H}B3Y3mJ=F=wG|H@By@j2)7V|_QmsD`wkMVam2jAY^OQ~xla-5 zA50*M=z833=4L#_kHiAhN6fLjuMzwSbSuZK zWZa!R(W3kUsBc)1!e9LXrwWX=84;Kkg$P#Zxt5SI#E^?a&_ui0Z6%Wpa!>Ao!}kMg zeCog|Fozm85x&}oi0nRW^q%&U{;_8mv-+`-bfeDY0r|T`B$6R_ik852e0yz9eo3;NHu8rOXn4Yl-+G^xlGteY{VsRrCEbuh^@YWV8bC^BovI^awDo;X;=isZ)f>|F!80JgtFiS|c_aqRd zuY{RH_y&;j(t2dVKm&+k@d}pA7Hf!?E=hzQi{nT<=;9JkP3Ajvd|W1&K8(*+4n4yW z>cm0`fl7ewqYp$8=zp>y3FaPWE5^8T&0QK%@EuvmS1{Ysv6E$+JUIUtrnJu190CDj zzoP$iSSJGV)OY4bjMk0un$#1p^-ub~W0(~-eHjk$fH(pOh2 zN=r7lnL)qM*#C()aQ1$Cdkk#^mdW9ZKQVl>^#Wh{iajk+a=VE@9er@s_tFshy#X20 zia_9LC!16(E3XNV@CT>ysNTbkPOcGgjtyP+iN4$L1{YxeC4C=6C$#2wF?G{P7P#lOxV8;(Tc7|`Kf_1gYgXn*cKA_dMuU4 zg}D~cHIl)M?nKQ#l|&v-gPj?<5B+CdOQ9$`33BajM5rI{X|d+5tpI!TnOuXHW01fi zVZc_4@-jnak9nL$0R;$><8gd&FEEWaIIqDJoSsA?Q-|#*$Ut)A@d+^pV;ZR9ccA1t z0oWOtvgAjcOf$qIAIW7lQ`Q_dAP5IgaFQ{e+w*}=`2Ej-AR;A&2j02kc56Cc7eu!? zN;XmV_dl$|Q7gw$yh%ZdapP~#J7ZhO=(@JzsIIOi?QcYIfpjRC?c(+xmCeEzE-rz6 zZ(gE578*keWN$MtV0*&?1O=)8%yWD?jW4v`uK5gOmEt#1*#$Vvb-!eV!gIwZ!7D9I zr%X~X*Nh#}J2gD|t-^e^)0C2tRk#0U`4xF6>!l>>gyNbXNjH%x&AOWwHHsi9@%5p- zw-YLl4&_+eM8lP+OUK~|2yOe4F|Z0|7&hfYWlf~@GUkRui{VI*V=b}{r;?#|$0T+XHG^lo@xYZNg?Jw(;>ChBZ)IctRi^j{qUD0nt<^ z6r;8c3bagR{D6$zr2;Uo2v!83R__l?(u&Y20aLb_`dBf|w=e_5O!MV^>P02! zTZ$4R^dqalw~B&R%nYKEB#hIdNiTFRg66v0YCH%>ZOr%w9`78(ad@BViyH?g9(pJ& zJ-&<sN}p?F{BFxegufifNCm&~g^J)Gdam-6R)8L0Qyc=Cgh3=H zFxWhpyqdW$@bAKCn;uwml#%XpZQe}R@%lL_1DPnYcL1CJ$%bShX`L(ogM5@%ERMG2&@^E*oA?=TGgG zyI;NF=rOKn{o~PF_NwjX+GurD^Rn-@D}`mxf1Z2(`!_T+zH8Z}d^=P8=Z|C2Pf;+! zKIK%_@P&U0`3Xp_7_f*K>RIkcyYot0`PtYh99+EOR5#Xtc*=`&=k(dXTQv5uU@jFz z=@!%#y%5$&6TA~gb2wW;YAp#Xu?QNZ`oD&1n3tke3#w~&-PJpu4)U#4)~D*u=mUIX z_YgLw@}KGYnzL=}j!S0IJUl#Jp}3!HSO7f09o#xK#B;5PWaMwCbZ{}QO6}s=9;U1$ zm;L$nX8^#xX2l>=FV90ea?AIid5l-+Ff71EztwJ@(5g6AC2ZJVCvc4}cKc zq+^?k#;2g#rt-q%8PEA2!PiaR$e5J}yp&#!16+&Lm{WCh$_JY)BCV4?_znc(*#PBY z3=4#C&$7)#qTC((VsW9nKzUS2Hinv3EYb&JlNem4u{ztDyj)>!`iRk5gnT*V-X#er zF`l-RW9ibREui;$G=U0bjUQ~qfpjfsLb!sgDx%c1dKtbNyit5;i3MVItrw}QaVgyu z6qFcy&4RDa(h{9NYBp2UsQj6_TQ<%edpp_!0rfu*q74hMbsj`rS^V#*v-rVz(4Gsi zXN1is1r1ML)MUJJ)hkNn`rk&KNF!eW;+d&<%EW9<;(ErkZsSp>1pfNbF_-b&?>Vu9 zhE`BqL8EPfs*V@R@#}`sp}xlX$zG;?FIh{2G+&A8aF@?36!6VFWz(O!>YhQ2QzSP0 zoBr{B7sbG#y2@;8;K8%Jc&!l zUJA`Mk3J9yZWCozWKL>`Z|CG3pT;oHvK<(cxm)*#Gk!ZwrV8NVq!Sx@m^M(fs0zqX zW?R5MQz#mG3TP|NuLGytbc%62V%9xu=S$C=+azEH5Jvak~(K?RM(8&b-oY z&P8sTc87XAM?YDzndV_Myri(8a5H1pbhC+<_9>TpD3gHfQC)MZ*y(4>4NcEP`|B@# ztNmpbJ4HgucUS}aOQ(8y$XeW8H-Q2#alFyCMym@AHN{XB)=ii?pfz^MB}>>9Xgo(1 zIW{pqm_ZX8Y@Xa8(~RU*@r}SY{Ky-N*8CGNpZ#aWKTF^q^L;JSwy^~k>UMCjQ#|J{ z!?DeZ0QJqlxA$op{J5q;U#$I~<Lfz0+2Dzs2X*G003ddgO4Y-AI$7gHRS z*Xtqu=OhQ)*m(PL9zc`3m0loR;ks%KIWW&`2RruUd%!0p`92Y5SWQGc$0Az!(M-Dl z#vn3Ait1K?EtH%s1;%dSmN~}-N!^Dmg%{XQixR7DG7Vzd0&(s}@%W_@6TLL~yRIVz z=WaP}4_+yi4wd8TFCp_7j#fh*rgm}Cu04zyj+XQ74S8egDNBqq;EG(&qdHZ_05FW4 zjaDRlB>mM%M?dECQWs8n7NVqpZ95mjX*%&x_pt_@vKacSz`B0T8elM6w9hm>tcx0gdGg7He zj3<;{J!gWv*gKLmMcMW}<5jt2sMy2Usq+LF%SrGKRbvpr>N3cXilVi^3vF0J77@1> zP$Qx^t2>o6YXX1qewj*8v%%OhF2Wy>d3O9WPYD6O1x(miUKiA4zZXBlsT8;&eSy2@irpe1Ko}2mGJ3
V26^ z1EgqY0XLAo_@I!v!m2joiM2Qq4Vh0|EbCTeJEv!p*GzR;3*0HRtkq`t$|Rc+U`M_n@o zEz}3`)SW3?A2KJLa}ib{^}>8X#=Ef0JdZSqV66H-fp;a}DP6|2@$2pw;~P^`PcTPa zCaeG_14W|vJE&!WKkVw7xUo--S9f>Q_u4S3zigOM<@zg9OvvW5=|YBiE-|4SO>^JHEeuacjS%M~ z$9WO&cN+yE&RO9Rqwh*<;mrhG)QSYm8#Qtjv>VOA25n`{A&36AvG1H0Mnv)XJs%^U z8ojJwGIe0XQ+UETC?2^Jr)Cp_r--0(A_Nk!^xm@fY~hCSC&=TFnY`clY6rs&V1Lpw z!5Ii$D1PmE*m|9fwo_2u{U-2Bf7AM3?rgLz{71m@j4eM zb*qk0knL^2QOnE8i7sgD*%_G@H-GfeBTjKtzss|{@hSDF8#Cl_qnb@or>wb?p3a=@ zvMtSo64v|s&lls7*`ObIVCBf6BfK@aJ>Q#dCNvvi^=(bqktOT)9=kttIgmr5X7m8|G-|LNDitIKE-#% zpO;(4o%;H_16VSeJ1>n79M{TNtn^5mV^ z@dukJi2RS@^uiUS1-P%`r0X?VQF3B8j)?5$ul-^^`fHQ-oM0gdkYFSpS!>tBrXjgM zw7Q557liXD63mI`TA4&ulRLN4dTOvFW4U8Tm`8!we3^w9QC##3xHZ_5curI@)OKgo z=A<#Rk2yy}znzj??yu@h(4I{F_4M6gRja*~yPq1M{q#%z@>NWC$a{nePdlklZxzGS zve5>3`w&XE1RqOq2kfjo+?$bKq`mIzZn0V*OZ2bBNSu1iEm?@li~rE)4!6IiKWKE4 z6$@m}d(lD+j2Zt22}&SqAj?wk)^X6wl$IAwKkK!PogDn0vs1fc0Ttx$!VR$X+KpaU5;Z z%bl7fzGyCTfh#2tQ#*x-o}M&!0IZ9Kjk9Ho`w(?sJeI9G{D9>;)Kr zGnbiAcU_9wPoJo&7z#SId`Kv?O)^yQja7w_LtE*Fc|LWZ@t~){@n38Kq>Z3_>3W+ui znlIIG?&jj*z7o}9^gHYwnHI}(3vQBjM0=l$+qLnpSm|+F3fOJZ_v3xnUDY9}dCor` zku<3hbf2UbTjt1Gro9^-f6qm}`Z2`ew2~7g_uDr}0Dcf|ceylfFxOMCHk5qw@|Z4w zH)fN#Z2noSq+*EQ^GhdWnur)u>ic%mNohPS{n)%aE+T@^b+_nkH}b^@mHU{tr-U!dScUB zjMEk(w;bJ}K>ny3Pf@UVb{SneN^cHhC^CF-lHvU7tCdV>0gu!H$TM?TtCnS6tvJY0 zbO6*$Na{Ns=f#{s{!(`5`&xR~!z1|vRg|sh-HF65Y>gwmN+!**;w2}-DkH(^9o( zrNM&_omAcny%iUDukU@Ot}1Rbz6pz0J)l@n*2E{iW|_epf(Qe##b#;IhzOea$5%KF zv(}S#514=_^-M6E-YA*I0h=@x>!Jl%e$Zp2_1sz!OI(a_kk{8g!c( zpW&k*4y+Kp?e9JO$@&!D)r`&US#OV)LW?NNa0z<5wMFn8mf6h2T&cirpcreYKU7PS z!0T7(MD?%V-(V$YqVk}5@7Ms#mlt8BYMRcpW5ib&TWjv94;wV*hM;HL&%#fM%oiSz zXxq?%6i5_z!HcgO`2AfB;EIDahdeVLN4iInwk;R#$BjSa6g87eLi)&B6&FAwqWd#= z*C8O^HiX4q;ty9Omv0{70DSIW(fbyYY6gq))=}wSL~xIpN;Ze)LVe=}D8nPto#XzT zjdq?Fq5tKBya=+!?nUe7SKmfIHjdc3B3idwu@Cq_v!f6cwpo?|59k5Gl&vJVZwl$;F_>Ku2r-{6O0ijm)D>Ftb1#OJhV{Tl@BcHn=@thp~1402R|kFD961@Wd3W4_-LtjCQS2~>H_+N?YVlF@!+~!XtYnV)JYIU7)K#B9Z1-Mz$5rNzgjp7OFxU3c3_muOcZ-n&8&s?i zBGiKDhCChr;PDiKPZTG*_QV|lBGO~}oP0YXpnzL4gcmH(E4BLjWC=t}2zimnGSf7& zSl5~$Sp)|KduYrX!@)$E-;zgLZzj@`+laHRp~F%Fl_N3f`>8u;9sRt5tV)?vq|l(I zrbfs60lRVLM~vGeqJI*0r*4uw#WLSl3lQ(#sIYq2;vsDcg2I*SA5P(E8ENC_KgtZ@ zdqcJwH;upcb>9LwERkJzwwDGd$=*8AK7~BN-ewScp9Yi~e-;sBeWgR3!Twg2wyX;# znco1!Knb_8BTDeqZL0#Tu2_7J{QZ*aQ5ewWWvUt zZ50rLV~&}ldmH`Jh|Qc9m0_abPVRQ^)y@dkFnJ!LgtMfs+&OySp}CB2o)g$S9w1__ zU5#~3hpte?^FbF(V3J3|_#P^*D}_;#dVh^M`fRkT!bCdA?B?-wcVG@O^MKerZL^Vh zYaEI+ekDfRU4sEK%eO@AYIbLdav zxglxdn*Zj{-abBQFm|DSkZdNwY#xBeEIx!TGA%y)&w+k<%FF^;_|eyJ@k?>oHexrnhdAHs?S%n~4(l4pAg zwm0`zpJ6c?>b)tpU+QFUauWH55&|D%BeMwDEmb?*A;M0HUZ}fc$9#69wNLIzGN+Vd z_I6m=qa%8(?{SOAw&SJLr0H{Kq9Rgh!6D+9(}FQiC5QCQCg(CoNrbcRK)eS0NXaB# z`~=p=yt2f6lJo~|R<%9PMr#oaQnWTrU$6nROnOL-jq$$Xx&=qazt1sc3+kVo3WAjJ zy_FiF%lQpuGy^2GrVx*mf5}@^yP0Bnt`5g_Y(d*;@DfUea7qCr2IgT!oh2~?j#Pf5 zNF+2>K~l9@?x5Mpd?WznZf-XUIG1s4WLazx4>(EAjNayhkOW)z_YAQEuO4|M?BJ{( zj^{7%(m25H1=hc`{cFp+uZ~1|wLtgh3evQ-1c4zN%qB|aqQ4C!8H)s9Visb=YcptU z;~()Zu_ZFt3y2XD6vztpl>}odhHxd%daEuzaoq?C4rac5KmGMma$M*coX#D2vMKKc z1a2r^`Ub4;5Q+WX)B0s0>y%yNo+`R_CBl>QGTe=bmK34wV1^ppvE(x=S-v;AykcZc z7N0uKSI=@$62(?fv{QbQ$e*B|ImqAauJ>MMquB`cVa=AOePI^1Nc6?d6~t}Cc0rtj zWd_OnMy%wRhd!lDh#|BV3)y3^V=p8J6%w2#eNX11Q%`(o;}u1eSu)%f{m9wh z$QpWMtGqIM)l$-rYhdQJss&x^KB&~lS%)rNB11#?uo{J2QAo|n#20L?Swg|WwWinU z$x4U5Y%4M+Z?Dy_V|Ki*W0C{NlyvYvkUX`2EvwU@ns9RhG2+W;w9zy|!&4X8?hp;l z0sYVsty?bK$9f&-b4{JITE?-ra~6u|mkG$ZQQh>&W?Jbr$*ou&rbtRo6zZ>18~0Ic zG)|bgx7EE-Q(fGTmwXD}lKarP3@H@KB|bB7qZzvcxJA%xB3MQ zr*9$?axN-6l5Uaa2rl>WOS4nJPdj3C=KZBE1V@ix`Rtr@JQQhj@}ci=N3+1wJDKkA z8$sNha+#Fvf2GH>`a3SbNie&H*c+@D^^{l!49DPjTJ7y~;1Ar}b+_;BcUKWWXnW(c zb2bx1kWlyMt#>ShDjhGq(9&KbSv~LF#4$`EP@LfD$>8jaU!Jy82*{p|5s={W;?+7Ne&77|TsUIWv14x_6^ma%~S? zyD9;-)yxl9HBHfc-|z`BlJV@&8Xk(5xbLt+Dv+O58#RMje6AbyATnT)V#UkjNe3R2 zM^Ifs`#TC6^g`%X)qxk3a%pNr2q#hlp%G;KEigz=f;^po{+rFmDhHNnEh9VGvq?rQ zfGUYws8T$TDV88m6&McE$v1wIj3oqRmZS!YoBlQ#JrlsYRUCU?X4 z)QFwU$eoO_n_NIgvk;56`N9?cQ$QmN1$K7l*%Xx?;s56mcFW*q9 z)-q|rZT6THUgonN?*DYyX)>r>a@tC5*FQsmSq6w|(Hkj00Rt}D^**hpeqmV|$8=sJ z50+q4w}niqNzr43As3;FZr~`(!eL@K`K3n^DO)dZwGxoHz%JQ^d3oouT8^SHU05NZ4tK=zV@R#WG zB6AVD<3@3*aTEbBxKrc^U|kdv*8A~?V!0i|Hnn~j+^rI{VQS`n@qC$oVTOugb!Sty z_pd=<$gy#k5C7z&uXP}K9%(3EsYo;~W(jH8XqWh6o{qGoe#_65+b&ziVz*e?D8quq zY5j;(hD< zadG>k=cI9BxJ4vCs6H||58G%{5w-}JL?~Lw`8yGY$V1v26db z=fGJBBJ9%FCVy_0rI)SccBf8MI<5o-Th3QI7%YUo*; zO&6eDIotk?OER&K6ruifKu9B)GY3(-FNB!=j_Z zNtdKh{rrFC?zB5lCJEKkEdjse+f+Ij3@*BJ_SlbI@zqw{!3l;YPP_ih1$c=mK=Guq zrE!I%dV(rxg2Lm|&y^$z9M)H=_g3Y+TWunYmm0cR^uWvU zA33`0($YVK0tERAdKN`O-UH3+rm()D8?^?thBcwN0W4E1uA5AOr#rivrw_%elQ<{c zG;fG(Jgrzd6=C^U+znUDfOgzC*}u{80ZwW!Lg;<-QYz{}MM(GesK2kBrK$031k3I+ zq+<&qmcF_v8SsxMGD4L)w28gKN4H_#=q`INW2xE{sLUFLu+bKaz`b;5e6!06R^H4$ z@rmyN|A|DyOM7g?W2u87SK};K6GJnOe1pn|*JLs02gSK}@(7Wr1;El%3+20o4!7ol z{TlxL*@sl4-|Um;^1|3v&47tCT&*KTbMSStw)Bw{Is{RS-_`EPqPDtqCCGFlw#;Hl zk4Uf;gnjvvk{p;=EE{tvPY$x`r1m>EpVwAi=Nf)LI2?4J8 zuSLo%Ar{y5*aCtppeExDDnvzX(e)sr&DjcNj4S^7d!?KX+~CmuGfkvA{tY~z+;p5t zlQmGadkwbdbOa<#M%$WGo4S!;%*7GkEk++L>w0jS`~nG|32=hA?jT6Ra>L7C{>6H8 z!O#Ly3Y}Q+|LfcQ(q}feXTk=mCh>#RhN`WwH(lp)MGv4E+3kTrMbNsMQ>CXYs+{bc!#-iq6E2Z-&sCr&CLV!v}e!CdWCPeas`1 zNY$bfX!fmL=kokm!`HHEDejQo5v}sm|9bL1;BTJiruK~60|`zga5T@F!Vsm?M++N4 z$UOoM|H@;le;e=G)xAPu`JZ3yyIMs>?08TET1O&YbEly?f3}49Xh7Z8FVsZz4h*ow zm)OP^2xCdSUbCmLizk%Tt#f$jic3h~yiZO*Lh4R`=GnhGm;`$*0Y)o8+{?Aa63x;}mnPfXBcU@NR;g>~_cPeK{8AGCJ8y#IQmAk3~n;3r0-EDk?YewHq&L zPOz2Hsr&!igMlT2>dedRT?Tj5L?zHJ6pc5f5lPVu+siC**kiLYSI|-+^^6EpW`8O>(aFUu7L4-5m zRay}OdJxjvFepW8SlbI_+d@FPMu=4T92&^a;z`ZejCNe>=4x4c1S*+u43VgS8D6saJU;}jOKzJ$=s0<;^$_zjLqw<)o8A6uAw+*tV$erGMGA(=zj1z%C@G=`%yk8aA|WyH{m4&6R8?uDi9XyN(mp5(d@Kuua?-?o8{EY+L1V-WhROw}t2d=G_n*jr83hefL)qV$WcAV1E3I@DA=e z(yjI8^cvZroR|`H>V@WB4RF(T;;ckW<5$yN)oyS-J+?$K(edqs%d%6_I6>sxS2#`% zBO`JBA{Eg}3kdAbEt(S2Umvx8b?_hh1|E5`Rv=z#+aF|ida|?jM>bRBNZ~ks^Xrpm z4tlFu#yMW$*_E+k;u}%}-Y*^=0H7^eEV}=^d#QCFFFM#pd1l(1H9n zE@92&p|c`d)zEX3jX zw+WdW8!Z?MZq<6DiBX3;7vb=MTQzbh*AOK0HA^nAd}#Y zY@q?Z+6tGO5W16&&{o1G#{!M&$hGZ;(e2*vLnuI?kj_rIl8-L6&VDd%V#`QewHyt2 zR@*-RHC@q!c09VTdbxAokIqn3mUCm}{^c9~w|OTZK?vA*%TF~I5H`X>iAyK5+wa5< zcOsKBv{_Zr>_Qq>z!!KL;QbB9hlijD@B$f^6>=VAa6ijY=;CctP( zSY5@aGX__z)UKqn*1;RDu?zvz)bMscaTLJK*AgXvMKowHke$5#%#Y@nxcdmmmEwM) zzEP=yHu{^j?P3$B+GGPoC~Gh&Vq+L9_5}OABTTU3)lRD`uDr!c5t;cMT_vdg3U?v* z-J-!IkJWlQR{>{;s5?!r*Y3Wv-nGW&86=P5yR?IqLM%S_es5CMTy_p+3d02%Htip0 zUlQpybv+(kUt`=*;v*&ol-Ky_>jflomf5f#Vaz4hts5rn?@MH9!qK&&xh(ZEVwEG( zz6gB{%>DI-e-|qcD$6NZ_(iwpfys+Up8Tk&UCgDV8%q;*{Wb>s6S z_zAkjJE5Y&1^zcGAPa`9Cp-@uZ6iS}Zu$83O$aV0u4eq{0crd2bc%A9^l&o6DcOm z{6F_2Kf#+~Tz>KKqt^0&dph9_=m_F_Ajn$inh>mvPUJ1{OGE~^+(R92mK}QPJ5#}8`?ZoCG3;oK z=eaaHICH{;M)+|owds%adtLpn{oKHchInZUDn`P8hHM;@&=ay8i_4}XId6hpQCDHg zj?UDc5TgBO+H^ZfqT0(RsQSOYEB!aINDKEoIraZBW2R)r{vW;AksY0)yc6DXieWRU z#t@3JHFB3#8bPF$1tjsMgkgydR+O|6?T@#In%N6F(3tZEm;|Zi-v$a!_`tPiR)Y^m z`eW=90n%W92`5c>#$t$Ft+ z0&7UM{~=uh>vVpDS>m6z!%)RVjzoNfJVlD6@m~pp2?f}Fx$3q3@QD1-4%xNnLa|~; zdG>PC<4;%5{P4slC>*9&Km0p+jPgZ9wm>jwJf>ta3F1Y)ae^dt#D&qt)KmLkZAE9Q;Vgwx>*)yCGJ4jqJ*%Pk{6kMZIVtz0r961CEM|Bd(Lu! zf_u3G$=Y_S>in*@TYl_8eP85WDIs~d{&~~%yhnSNbIGSEkEcy=zA^K`=$$=Rj@9%Y ze5Mdpov0hh6S6)(z3bnihtAFb_8;p3*23&DaOLoKWmGtrN#K!pIcG`BSQpGvLG$`I zf`((hRGl2q;=UYyQhi%S>Y9_W3%_2tY4PDidV_^tPv!STvNMJN*&fndao0R31IbIj z(EXpTdnc`x;I9eGvUxe;P3Z|=DSIUa)Kf8Oewsd06;cN#i?IP$( z&Yq$CgPQ*9Cn}nMVi(*CJ)5SdCw^`PDrQ8ZE#*e>y)awBSUU}Soe;(4xcN=1%2vby z^3?`7DSG~w5r@A>-QjSAA_*k^qgJSE+sWX zsmczoCl6q;l@BZUT^+1F7esJ5tHu4KLQg1Ky)NGXp+@xyudKW0IY%$EBZ!%A$d-f#lq4v3%q~Gjzhb)52Vbi#cCY6s zxt!a%pr2&BXzyB0PHV%l2zzV!s}Rjr_U=1y^j>swM~zA~kd;IFCCf|Z3Xo2qK@??5 z!=jTKfMhO|eG}SyM7y-3A2Y_&{uU&MWq%HC8h9RtETG|)Gm6}XE>cRlrHgMCl9ZYB z0~n$4(Xd;|gU=cpMI9P<*oB63-`odQ zL|j76HkJ4O4rmVE{Z_=eBWo2ywWes*JT|woQ+n=``{-K{6*c7)$`kD^E{@Xa0Ze+CP z=A=Ghq|y~qF4<|*&tZQ8JkrXy0A{iV=jEjtm$v=pz%PGU{Qk*Z(l|0$IdaI3^y$Yg zy{WTUKXgyn?)<>1-3nVy6F#w=s^=={tKlxe<&s*`o|QipRGn5GlMc^}cRUbNckg6> zLs{0KOoL1vIp88tC+1VPLU!D@w%@NSphJ!ivJaaJTg?5b5Uy|m!ZCU-V%;fac}`!_q5kIVL%fRa;T5pt*D zCcSg)^c@LbX`hv+lY9Mi*eo(u100FnYH4r2wp!+W*}~e;-bStRkvj`z{|facn9QH; zBTprRQf7r)-uZ!*Wnh;-P>N#y4O!0+3$r^ejy2(_g4TPp*M=J;9Iy@h)b!`w@8q_- zKTC71`rA=NI;1I5c~bb4xx~HisNxz&MMu=ce{!x)>fR9=UMe5X<^$+?b8u3KMWx)c z9oy?H-M@b@R^nOnH9sVPL#Nz%&%1p{z*A;r__t;+YYi%1Dw`G3kzi!Mu41ir3G#fS z3rPdj&)OdW7)vpH*REU%2ig06ebKb0Z8MHOygRF?{o%g;^*ZL&J6~Gf`H7Vz z+<(y_Td%fqcY3#aQk{27_YEDE0psE|Ad(pJIsR8!1N|UPaaq-nfZyPNXr{P0?>hf0 zFn_Sks7=^5(td%gf8zG!kB2YT^*d@jh+ZghYZre~uV47AkgxrY;Az#mrs~#Pet(rJ z)=83T+3z=>8-q@vuE`<)SzUAb-RRh#3cFH1oOadkI)9)vEF48czkr9@<&*A8oUgh1 zxW`T~wr8*u%jF?!Z1<5%6ByN$Booo3`rL{_=IMce`EN z4j*0k)gcE(DgAe`0|*Ze_inlbp2(VRg~GURdsMfP*B&_iiS-4=ADlwaCLeNbC+nv{ zcL~;$yxtKqR3Q?3$^KdNx+XQ@!`}UsJ9QC@?hxMRTKeg1aS;Epesh13qsH&f>i-xy z`3=3o4O8E)bm~JP$TD5hYJZz_c=Sb+*G;P{j%uekOJ582ul&?=V<5!B>yF`V%lyXc zMu~}$a+TR((YSpV2)4$L+FOK=EtoLxeY*&AT(Z&kn>7Y4;rV zudMO;i(o5Rd1C&mx&}dFGhQatPlT&vf|20eJ}bpqo1o36Sh}||04A*CqgJ4I8a__h za%)l^AxbaI_c@y0*_{)tp}GBT;jz6}vj+T{QdU6}vZEv8Xh`L;Eg!`|p^8tmEdG4b z_2&CXw73&rHOT%_-_~MFPzpE>b%k(3Ka0;x z<*Kp4dz1wqB1AjpxvIX>+m;u+iVQ*t_$s1H2S@!=Z zTPSgTms7i8+tk*wT-vdc;EtR1GkBMN>1LtCVsv@t`;K_}kGGkV0Y^g-V(_xIK$+)l zf$N^PU5m^jo#lbyWY@B)XuB4cMRcl;7Y2h?Q)5FKk-s5Bt7toHYw_C^Xyd1D?zBb$ z9c8~SXePCn&;MW}m;d{i{s|B}D!+XNN}mPN&4=#N>dv+K#>W!CG|VDRT!G>2A?4q* z_6dl7b`x0HUbxLIV_V|xGE?!&ILKE*q`PCD`p z{oT*oe%st%+5@Ih8)?%jm%MaZGUU`ox$vOpLm-G&v$NHurGV#U(G!)o9-Zx+mZ3V% zA4wo7^PJ#>*e5@BbJrOS%!{^0Z>Co7evPJZ=f^7_)}x4Fq|y-9laXNkOwuOhyK(GY z^wMVyx|U@*+pMogZ8SdYS1vg|C)u%Ep$=PXZt&@Ti3I#3`q^plzMn$9ZdG9Ts<1y0 zc=GeIc5V97i1aGVMW^Pcpw7UF%J1vKmX=HUorZ|R^Sn7o1 z)rE?lw{UuOgTo)xQl7OPK6m&xbfHu~U@wm=TQ>nDq^SwSs2BY_x50J-qE%qZ6*Vib zztF%Lb?JuAl2lvkO`?{<7QtRObS`gE>6+aiW%EJcI8Wo#u0M(gsqW)>;kd12`4De}Jw;KeR{NX24%^*Np4|)odAVip z^Ea#sG5WFPk(m{*sh`l1DXb};KX{_|K-+;_Pd)cQVGXyQjmxE2`kSzfIF|yni}-7y z4C{`(TV&>XN81n}<_-4;U30B#wAK_63|++ffDR@83-``dtXr0N)gi!0CssI4V?{sF zb-(j*hK>EvwtbeC?@~V`M7nYs)Fk&1o|7oN&SaAc9Ahfj7El=6#R< zJDye52iVi=WWvAc(*FX!RtBL4df`Rvzv|f&j0r>I1u?$HzK?q+%;$fAGU^4w4Elfd c(FRq&N=Hh}GFSM)hW~9-)83r0(d7L90T|SF_5c6? literal 0 HcmV?d00001 diff --git a/doc/design/ops/images/rnn_2level_data.dot b/doc/design/ops/images/rnn_2level_data.dot new file mode 100644 index 0000000000..1d85ae2617 --- /dev/null +++ b/doc/design/ops/images/rnn_2level_data.dot @@ -0,0 +1,75 @@ +digraph G { + chapter [label="chapter"] + + subgraph cluster0 { + label = "paragraph 0" + + top_rnn0[label="top rnn step 0" shape=box] + + p0 [label="paragraph 0"] + p1 [label="paragraph 1"] + } + + subgraph cluster1{ + label = "paragraph 1" + + top_rnn1[label="top rnn step 1" shape=box] + + p2 [label="paragraph 0"] + p3 [label="paragraph 1"] + } + + subgraph cluster_p0 { + label = "sentence 0" + + low_rnn0 [label="low rnn step 0" shape=box] + s00 [label="sentence 0"] + s01 [label="sentence 1"] + + low_rnn0 -> s00 + low_rnn0 -> s01 + } + + subgraph cluster_p1 { + label = "sentence 1" + low_rnn1 [label="low rnn step 1" shape=box] + s10 [label="sentence 0"] + s11 [label="sentence 1"] + low_rnn1 -> s10 + low_rnn1 -> s11 + } + + subgraph cluster_p2 { + label = "sentence 1" + low_rnn2 [label="low rnn step 0" shape=box] + s20 [label="sentence 0"] + s21 [label="sentence 1"] + low_rnn2 -> s20 + low_rnn2 -> s21 + } + + subgraph cluster_p3 { + label = "sentence 1" + low_rnn3 [label="low rnn step 1" shape=box] + s30 [label="sentence 0"] + s31 [label="sentence 1"] + low_rnn3 -> s30 + low_rnn3 -> s31 + } + + + chapter -> top_rnn0 + chapter -> top_rnn1 + + top_rnn0 -> p0 + top_rnn0 -> p1 + top_rnn1 -> p2 + top_rnn1 -> p3 + + + p0 -> low_rnn0 + p1 -> low_rnn1 + p2 -> low_rnn2 + p3 -> low_rnn3 + +} diff --git a/doc/design/ops/images/rnn_2level_data.png b/doc/design/ops/images/rnn_2level_data.png new file mode 100644 index 0000000000000000000000000000000000000000..4be81b2430717a6a506342a09fc26899568574c6 GIT binary patch literal 68929 zcmdqJWk8kP)-?ZiWsZXU17pEda2HY7emIx zFA~p)i`;#amW1W!^E|Bv58 zL1o1u^jV`}(Y*G*e+hY`5JI$npXFOZ+F|C(s#8L3(w^{kZK}PwHJ0m7-$(7XJwE3} z$)8`Jx89TrrJWSiEHDbaW2q$-$r4X1pTNJ!EQ0-??@M!$8in*K?v?YWkI{}}%4WH> zI9lMmq{rj3oV4b&y==&1Klw6_&slD9tn$jQFnZaT9`v8LjT)#23rz^@n-hhqjap+k zzq{H~|9e>BdGw$8IOg*xJY=VjiN$KOw`n<;XXu};^-21()u_2uxzoahQA!~%kH+xp zVb~#$nj>V_(?r-88jZJ%?}yR9G8L9}uNbTxy+AKx>h`sC^F_JaM&`_|^vcQk;gw(Y ze>&cY(buGJkqvZ*^E#Se*D1-ncwOo43sP1VVkVW4ky85rR^Il1kA_}BEsWBJ`@!h> z$)>!cvsoGw(5!U(bVDOgIO3)@y+vR4o4Nkn=J&yZ=6UI)9AYH2c;Auos1Ojqh{`$?H-PKyX~ZX02{KTUKuwTeO}TVwA> z+|b-lj=PBVZ_`vz(QJH&?82{{yy8Kh-m|QT&5d^&B7{0<=o5Zf0vV^TxF}5Y zWGJups_Qp4yt(!VzD=5h27E%RhZQCNt8rt;sWyt8U%^5zYm>!;QWV~ac74?5B68^w zzbbjU;3#N|u;TXZ&nr(BiTVk8QT?z0?VwiS`M~w>E8c{M@BI0|-B%O`BSu1ezb)RJWY)-l@eCnrt`2FST!Pa=Qvb+Fs0ZC`ZcBT7P%f8LfH5UCU zk2Li-%<2DHF++@LRMI|wlanEwZZZP3axXK-073`n%L7t5D^&*fG} zq)3H58xBfe#4^n-e#@*b{fdk;#cg9YIf3>+iOm~BL+DT+m-6c06et{sHB#nypX|=#t&J_av@%q{~xDg)_c!m>*E=N|(1LDGGextS9>KsYKyD zBstjMo^GSI!F0zv<9we|`>6{ag^K(hxmooI+_s`xMW%)%CX5P6J^}5*PNxwmY=E7I zV{Y(?H1%Yid^NBhkCv%Qzh$C3gLy7wakRXih5_F*q$ZxvIpx8(7j_FnoXD=v5N{tJdx_xP-*Jadp$P)+O~vD0%o4B3-~4V2+tXfQBse`S6GW&>N9%K8<~A2j z_mbM&*RbxH6l6wE5|8e#Y3ZzX{xz8%jJ4&gU*BJ?*}Tqg&Vhtzd}(%j<=4Z=^Ai7# z2I>uA6c(Q=!O17D__(8`%f(;NT`7`{y|YL$QCoR|R&t_tB)QGJF_bPDmwH=S8VX!b zuD(7bp5OA{l5d=DfsZ_f21alCNZjek`6()VMwtvNORus_U9Q@&4wF51k&A4%ERUV7DDx}kLUNn*3PN&ZEGB&u5t zhEbuS>B%YwzV^xSiec3klzKxW?tpJ^F+Nu5-aJ28mJ@~fTp9u)t<+BF>YG)T!KvX;ciyNMxS_xK)vRQ)%s-q=$XEG`)7XF z)hJ6yO&nW8_HtpH=2?pGpLrZ?^iP&LWmUthp&J&&sqic%|~23`gHFd);sYs{Tj zhRbtzz0Ff#L)iaGOcBMu{4dN1coX-VT6){x)W9rS`_OIYa%M-Q9Vj81(&t}N2V(fl5{J7y~LBQQQwN!l+Fv=fUqxZPX{6mgT zCUa<68f<>lyTqJf8u#ov3C`^_T=oIxC*}t2@kK5x53VaFKQU^F>YUH1k~Z%&Tv45JCt0hJ z5<|W2+p-D#nhqZ(0Ugrbwi*%T&!FPF6+-!Wp3Gb7~>{z z{l{GnnE*EHYd^C)Dabzc=nkmQBK5sA@4Y5Bd-lqW>DN@Ew_er3rZy3`MQ8F;9*I)B zKQh*L1OhO7)pB&MJ1>sB0lcfv5p~D*=UpO$?gVq*bq?#Ximoz83nG{9vBzatb<02M zWS2ie)--$;e7IG8#F-VfIoo@G{ZpdXt`x-N&+OMZp_!;6ty955hanP_u6F?;GQjh* z?<|a_W#zyb(S*$U$j>}yt#WXx++`*Cy1#|qZVY~H??ItaQjwno34vucXVy((~SMhMG9WkLUWYoO-d= z5UNyL)8GW2P@jYgmpDvAW>ouNzu8r~m3Q3usNz}W!dMar8D{`~(@zXSzROU_P)fQO zYxge{^nkfe5v$!k1%2KZJDy(hI@~dd7V@r&KHEywe5pz%0H5N5?z~9YCga|Z0IJ_W zNq;!6WlfbuJ!w;nysk{oo*So6{QAqkoNr5ALvCa1({}xo$5>%}52j)TIveSG=4_Mu zQYC5eQ`ee7&H>{spF-ze!?g3QN5<8Zkz49HI&XL#XB(j4wf@Aly|vECGS^;uO89&O z(WB0=&=7w<1(FK5fM5<|4>+ts3}*v%pB3YUCB z58Nke?_ZvzstH^;yj2R5HpB3-Q zop6L}04t=0uZXH-_u`atV!7!1@Jv)#w_HAfVm0y@05ND_kKe)r-_KA=MYyz=meSG9 zh`JvkP(7R3+xf3y;F5%Yvgp6yu{J)LY;^KH2qlozhLzBLn%cav!dY~+5CjM4Gy?L& zeK_iWb~ok@6}g32zX5uanUHM-yiHeV(w;{8F0uIXx0B@STLN24%C#6rd~!VYB`5Oh zDcNh^TUQ^b!Fjlbfpd{fOzD#|>5P{zE-6dYH|7@?uhW!Ge|d&?B?VAoD7A>+p0hFT zY2x+qA@unr`%lK$uNI!``}hc?h1L1OcIiUn)|9)?FWj~sgvdDYBu$z+25jVxBgu35F0h-{fEy2VilLVybm)x{l zo}Gim_o(UOWg)K$av?9B3k-6PNz(Jqgor3AGWnhyV61Hoz!id+Qx2!Wpn=9^J;s51 z4zPqog3C{AO@Nmz@qFq9ChaVv<<5~U(d_pjFQi(JR~g^(D|cSPRKZ}?uet$%CA?qX zd$dPc(}km;e04l)HsFoHTvk&s8g&rMR-mFcnLj%Ri6bg zKD$6I=DADM)>S)cUxSa9mLj>VM%d~&ypIl?J14?7FWry6oyxz_qrAG_Er0(h4snXh z%1`6iYL!LEumw&FT1e?MJ#Yy8h|kq)2j^&f#G#F^-(j-lvg=g5v(c9)*j**I4|is> za%sh$oy#d-zI2+&g{g61jTI-BFPv^%S`BcRREc29v@Lb`YLC7Ko;ODI`@rF_J*n?~ zKnIJ@?t8Pc-$$pyWrY^h9OG04-J2ZVhx=(;ahpkNYJ~SmgSg$+KalggDBzQGQ}a2^ zvsDfhF$}}Ok|9)VP8RuMlHmG%q+FR~VW+>!)4kMTCN!EyAVvy^FU3xmsoWp4V4WqaJ1CE{&zHx+r}8q zEbOl{UohPuIm82Tz86X_Yd)!eKa}=)seA~vujW(LfUjd^j?E)EdX-Fo6&%M7cc!R# zY=1t%-X9b*R8EmJ^z#B0;(Vpqgsgi>Dv-{58(sFBi=)Hqe%%wJT;m?spi;k3)FXd# z8dgtUprDp0n3>IV!Wz$Bf}q z3EtrSs(i+bOt^awq{tu&)50ZA({x;#=~nzEI{Ac=a7BM7r)3sUCerHTH}kqd1jKo3+_pn(&yAvh<+LijPd!}RoY=leg9CEl*j zbjQ}KW~w}wjks!`jfo+2u&%r_{XUo$poMXR735OUv_qAPr%_^~J~6KOWK_WK;54Q9 zQlIZo6?DAx2)Jd-h%ul@Qa|fOuAEI5C^}nrM~jLLV0}_x{gdldq)rYRWV9my&3pZR z5~oLx28}mB3lV7IO#5^7Nh#z&IW_3y8I3@aKDH}vMVTW4G(t+d{WVl>9_6wVLi*PV;(=@pksAZiG? z>c79g){f|anx7v>Yx+8ll-MTlq4N9-UsOg|dz34$XiiXs@H)B|9A($lMg~~}Nq>>G z$yRpeC-+!MH($RNhGhL{sx5xCi8)s?OYI|~=@|F+Pyep1I$bs4JS<=u5fLerC$HEf z)&NgQwf*(ID@o)GK#kDLlo@q2QdQ?AAyJ&864XJkBZFLAh#v-SjH>d|Smj(SnhnXn zOe7C$w<9H{aPH)ldr+DV{>7Pzh%7e1Ih zOXWcg64sse(Ka87e{cbX6w1p4p5)3Y4#}j>L99j_ zv%QBJ98Ji9+g3?JLQ5d!L9Eqy)lEdAJUrODU*)kQLr`*oPWtN|?blm=K8HIytz6>` zfCia?e7{9Bh2Bh60XzDfTuMV)B6LD5G*o$AeSaT6Ch7@Nv~{1gAmb#$cZ^B3Txd4$ z23C76qoV6zc5!GuE&Nd5Tc7J zBgpjwl5f;N3bT4nVB7UHqJudaT!g=atipar(=M9;1O72>cbaS% zQa?d`Fq@pea_4cBCJ*PGZxg#@R|H;*qxc#S1}RFafu>#rY=xlWb*$ z4R0lZbCJ=2M7;xp6y&eF%dIzgj)pTpc1q*Y0fFE*=*e#p^bJVrAU7!->ogk#zJo+} z>5J{9lalfERZTRz38YziddamOW%%$T+0(8L^{1= zHh^W;26!6&^Onp|g>32CT~7mO2o2G7Gz|>@G`+oT(RU*n$}9iw^!v`XfHV8*Nup12 z3LZ6+%nW_X(xg)j2(udg-0F=h(|DSmP`Mz&OMSnxoaUKgw6pl+Y(yK7Dz#$nSTaGQ z2O1xX-q^fXmUr)}QaMuITYGT>i%e#-QahC9Au7 zN_jM}p;03744;At0hwV8@=cklS$6~I(m?u9+p)7Tz?vraMj8tsn0&cWqzIU7XRTf6 z%=_i=3#W@|gYW{s+Ea+LOoh5|pi<^wHZxjYY`VnYsRyFi(RFINmFL3y+bgLL>t7qc zdFHXR+E~~w$ZR?M`7Hp-?ce)x=%;VE`~(G+Tb?oWEV4U3$`i1INUECY&5E?wcAy-o za8;gcPhvyb_a8u0 z2d!8%eB>7zqBm*os7;|f9x2J+dCBcGA64Hj_6%z~R`_vFt`@1TNSSuARVef#q6Iz6 zs#Swmk)kJpRKV%jC>#XiR}q^ICnjbj- zYs{gO1^w*T=o;uRrHTiUC{?;SUe~KAxokC%qs#Z4lvOY8?Q^2N+!_S3lBMVt7(JgK ztJF^LSe4po_dXQM8}p8R8R{xboz}0u*8^-W6qt7N#$5k`?8tExZ7cYo#1Z845X0sW zGNF~38#;7DAh#IJkvZgtO}-b2&aFPw0#IUZJt^353=esW`vIKGE=`?$79 zqoW^QAh954w_@e{mD?t5JaR0f!4DH?C0N2O!$u zr4tSLf>|)VJNL^?AA8xd-BiqR_@V@z?@UrA&UcJmih*95)nT;F*zO?hYd83N1$0m9?7cjq*2Qsz&YP! zh*3uleJ^LAti6V^E%Q(KL8q;YVQ)6vV6@lp<+hJY7~lOfY`8&a`;vmM@3#rYFuB@9 zonngQdF85)+Qs*xkD!`Svop|IR~m!Ouz$6Yd9!^c2Cd zLbaTN^nvb7mqCGsN>7cdruQO9xC4PMF=-XKG$5QiYOJa8WS2I_soBhA3+}@LwG-woc?}<8;Y>B$X&wS*P`` z!xlQ}VZmM^d2R{ZhlhLCd*QDEFGR)Ki6x5ttd6?>Ap|zlzc;0@4)F*kI>17#;Ce0*aRFlsDmJ;{MFlXI4+g=LY;L&RDYRws$ zfL!wex02|q@7?hE26wb8=UV9Fz3+GXulARjF##Sa?e{La!8?CH08_JaCvMoWe zOKm1^0|eg&pzAF(c$;inK3V55oQ`8c+#xqtPMGqTb9~JJn85vK>i2P~>myXkcMFWJ ziR#mwdeU9mnSGV|!G<7FJe90E>Zh>Y=2rfqsmJ!CIjFe=Li-BYS3QiqxIJ4p2AbY7 z2XWH&-Y`wdo$;_HdkGLZMv$|yZM@2P^e}0E!Bcl>K{&See!HW6R6SXEk-wRt-u4lL zJpalQSKTQ~JK4TG?KKs39$JhvpB$A8`#sIN>_?z&CI_)}B=`;I$ZKTx?f!5L1!WA!nzMTM}dx=zn9s`d*&yjWt5)7pU-8Hpa$VhKL0$N&m&EVlhv^grg z!`YwL+6DKrXU=C<3ON2Cu+T6tP<_&cStLAEWcDp)Pl@D)26Z2y=~~@4AbO5u5k5Jj zPmdB&`qjs6)G6eE?pUtSZpOWe=&#fwzOpt!i`jmP3i*>62 zV_#gTvCAyd8U@1$-eJ>OTfEXLOxs^doIp;f8$dH5HNum2BowX@LeEyEY|UdR=ioi7 z`0WL;>_m%JbkAeJ2?aW34vpvPyGwr*6^B%%iLt=4n{Vd3dk^kRJ5dOx+FzNm;7yV5 z2EFoYjL`@`C68T-26JePa6G<%fZgxF#_o#kqoaYHL;?3c$3mVZI5cNITaPdDFGAia z0J7aV=Hb-k@rfm;PU>T?W^0E=JnoI;l-hF80h9s%&CW+B??DqsxoU5b4_ip*PBQ-h z$rjp>f4*L&+bVPlgD|}vx2bB2ORPzymxWLdIv>Iro@Bo-bk^~;4Bq}6!IhL6LICau zb6FP6v*TnjH031ai_H&@`56H&`sC30a5CQ-QUl_i8tb|S^JPeKvAz5s4MN{Tvj?A< z@JAln1$^d#N&Xbaxjv`@F=Fasy>Asi`b29IcaddIX_PyCd=Cq3?ARbd<=L|W0t#=o za14(V@FC-;%DRR{fcx>8ON`Y>WWhf4D}c(2ZBK*f6<_xA%+{4Yx;j_ujYAuaTF&~Y zTMxl4~bCz97d2Y4u5f1fW*-#b(0|T|^m)sHqOh$| zgX0m!KFfAf$p*W~dVK8MQO}sk&=p3<*&aX@nR`qb0;AamPwmS$Nea;{+NRzJbQq?C zT*1X0J1e+*E2okR5gPG3U0)ZeYk@r1Qej_4?Z3I+p=Zl`zAC4IO!WnmcYM8EKr#L$ zHetm30;)+UkZM;CIo5a?E%)siKSJ6gQc&{(G_lnKaJQyA#T;IXIgd4Ku~Q^xCi<&& zd_7{8#y%rTUDmH2zt@E`-vFSnh5YH6v-s!M2FVHE;=d8K^Td1eEG>re{KW3;`FpoT zYTiTDnNpOyjR@6WaHg9Q-q7_=#4i4_su2x%pd~O8H#keB;j}D!hWe)VXO22KN5ixI zr9BRgD$ZzCihq{)Gd6_P6PgnQ%ID~V1^NnzJk~Cy_uuYH%5O9IP!|jWF3av=H#IA^ zukeMA_I&7T2K5fBA&w<>;}*8My@k>{XEIpcsi@`TzUvw4LF92PeAG!F+}BPiBv{lF z5@IBWycrvi?S#%Q+d08GrYbIil3zrw%xtsltA)o1WpKO@GxK8%% zUlTqg8dV;p(L&8+G&iBem7!4=iCu-^5TxW{F{ROJD&0;nWWOUb zTxmaq_!4>o&Ftj*g@vRRB~*(X_UM z+R#5?p=^F}8^jOX{W~d`$NNGS(xqpzc{9h)TKZ?>MC@exTFc-@!4h^=hRkw0Y)jy$ z*&Gh@9!c|_4~EZXG_&?Fy}xA`O5#rI#3H^hjjZ=gp!T*9T2M6QDxVASV1-k+H5qpv z;ixr*P(=CXq^$l@bM%)r&<%KhBWrV#oYOq~^0Q*kJtxf)n|GJp_8YDR-*qKtr6H6- z0|hn|nnO`nw-KI>C@>&%AJs4kP9OE3VK+E?oG2l_m|%lNdgD|u|I*;B41&lmkR?U`BS+y#(qS=fuftE!&CSm6JPli zZe0_Q7R?wo#x2zGu9(;T(MbUr@ zky5ZsmA!b+Qe=a0#ygfV(-o?ql!k#HCm;kzRh-%h4Qkt4aC5VkSWSpl#(R_O zbX9&aVmp+WL02T2O;DOn?=JNNUx?ENXi|JxXK@@=4oFVmb(GfgSowx^#vx~K^|bTi zrwp32&1dFMf9)o$>NUyrxh4AUg-Ss zftOS#`R;R>3GFk(Ia2DBeZ7Iub-UrZ`rFp?dHrb+euW_PE3wTWI}Z41oNS#^r7_nj z3I<`IVDW3^j~YY8!O}n@FLc0x%9Tz?Ko}T}_-+s^J(>Lk#rFjVZ1n{+ka7+!k|#ggvYJJuQgg+{#N z!}~jH;!}>t7HQY@P6;~jcuhudm?o2EdIRzDLIt=dmndjCY1H%X2UN;P@KJr6B`2TfL0#aCgR|5ANK)?a zB9s7Swy&MIdb|QH7)}dAbl&?b0X?8VO-9?KNT6>njkAKUoTK(LFt8W+TpLD1&`F;k zE)GVCV^C~tUP9-PJOGv<3D~ZcZ6S(8!M)Oqm8bGg!<#yz_W~0f)L?mJke5cR8ZiR( zuEG=WoCP*NL<_O-P`L?wLNSg%`UzUXK@k5J<-FsxO??r4{eGI)^SIlOq#0GXQsxE< zP2{7wto0}2Jho^}&Xj_mOkd0YnrP2^DAsrTn(-w-`ZnEE$t-cy}0WyAl++oI|t z(*UV=rvACf{_9tn z3OQ8D7D}Xl_a&K-+jF<*cYC7#YjPeM(5@oJz4l~h`*Y<9S#a19zwi-hlz{tZq^Kvj zFY=Un9hAW<7mlqNPg0;zggka)dWCd-0+2?75md}W#VBs3r@7~f(zT&eacpS^(+x7H zaiZm3t#sRX@qWl&3*GO2jrvjIfvzPeCW6e#he=6^7LasS26=eS|E+Yl5dQ`d=j&r@ zQQ)TF!tUo6{j9J7Fy(?K9cL$KyA1KCc3;d8T zr*XGekL=g)PnBN-&&1E}Bro{SU2FKoHC;L$qLWV|l)U8sko&Jz@2i15&2N-jwVQcE zr$m9$b6pVx=7h^QJ75Bg_TU1qX>%lNBs4OmfN7^9Z9%HNl={~cVdLIMy1Wk4)F55I z1xnWl&VCllp`tfNjiH0(qt4KZ{~wYo*pXr#7KT(0_r{N!Kwe@(JRHzXy6-@F^*}WJ z(Mxa|R;xnuTw*;aRghIZC$0sUETWAe0xH$OnrML6EaCOa_GtF>jIanUCVxPHQDeSZ z{fJElFjU;ebE$)Luspx+PoT-kFsSvT-`=dJ@@@!Xt(xAw5DW!Y*7vMunr@Yc=K2vR z>E*u>*K4Agn{|5Beq!IlYkM-?{UbXSyl(5$XN2}Ra+e!_*hICD0V@f_TwVLq%mA5; zed=*XlJ8ChB_Ii-rf?}(c@zBLS$0_=ebSp+v;+d{AyCzaJusE#xNM31h0@4ln{ghQEbpGq$^ z{^>Po@4FpTH;z{-&*e_kCUTGU1YhWUiRa$u zL92}~?8Dit1#Jgkgf0nsl)Q&|fwoq)T>Sd4&xu;s0?%b9+=lFA$DN~7`jh|rZs_B- zLbna`!khFP{ks|hs&TGE1knF5S^bQ5W;5;VSeAPJgKv~2iF|z1?grp2Se$PX#44ei z$e5d}^4x1V!poA#(VluWeN@=X-e$OgZ||}FBfxMq@7XvXNCoo)rS|EAlwn8RZU#n) zW{X~WrE7zvX%l@!b7#*5a0aB*;bijY7kGWO5&TRdYNJuP7uP2g% zWTBQ6aoGYX$S1Tpkm(jTc3K*{2{tdhu!k`3BD17BcNVKqfc_|bqozTyd*qYUZi>kO;1*Z67E8yc@&<|!C9KtN?oIQVy zS4JfuM4Un3!y)f@IOecJPA1F%a2NKKW^fGF-krils>mhml0)x49y-pa1U_>nG4%07 zECuUH5GQd5n#SW25s_BTQ)^9QtZ@8BB}lFE-Lj&8tndn@;=k|piH_I7d$1dw<~j(n z6q{!JmDhi*Ny5~)9cNfVw1W$fR477i;H}v09xr4HomYB=-&&k|L6KFInq4)an->e8 zS>OA7Kkjiq>vynJaN>%a8hixKb=zfmg6&a0p2K@DslIO%*@iiWo30&UXM2U9*3jd| zJB)H3Kwo?EIx4q1G37Nvd({4;_RW!yzU39R%d_Mryy}mrz0%CYi_&9QBHr6SoZXKo z(|FCf-mNL2?jHp|F%BT0y5qV#JXKbh49jqgBFG?NC;NWOIYy8PM0wRogJRU2Cb|o9 z?nahl6+A7^*MkyRNC%0MK+2j->?gegIVVCWC=i+7}jnl`7k!tRVdISB!O^x zEB#Ei->vlRzol=;VP=wupj`LnE%;SYsq24GHt~vu@V$Ve<5LrNIx~S{-V(2z`gL~M zH7#(#YS`P}zN>fZ@36rCuwwaMH^}{-p?}-4BrHcTGb?^}S@OWngaJ=)MsPPq;EA+; zx&4$})DP{lb$vmGRx5qv8k5oZgoTYA4Y z7ial~PS?``rXMCBQ79S9DzNmSPfoht#pKNm#V76Md0R9Uhp$*jay{hOyPjg2$WXS? z&oed#nowA}E(4y%taKn5t%C_xtf$rDdEp5!-@rp5i{%Vxf+wSL_m|SBXKNTfQ<*)o z;1l0EDxxGEG>IusRA2Pi9Q;$bw6P}UO`Rtsk*S_*CD^Sa)L+C^@M+@Z{CM@a2YvZk zblT2bQ=d~;Z{8;fhi%Hb&AA^-?TJFxcV?!@j>=`1pJ(i=V-} zW889Sjm{F>SI8(&z8VduFFhWXCiCkTn0?vAa!avXvikj$hP=;9@UwbYlgVUW+%O9< z17QRf;ZE--3w8FyIF=9tYd{1^>r3O~=Q6Z_6VAK>=~ifFq_j{Y>r%|aDKJAofxRu3 zc8#&0pWQGaQHlUx;SF5%On z1bwr8#$37~5~P)#di5ved0&b;VPj6Uhh1h*+3Y$lnh0$N_(YYjEDVZ@QeQ$Z3W&k% zo`1_bnh~bwCl|+k6`KDpQKOD~h#eXw8wN=YN$@%-028qSCau&ISMawkGdBl1{^j=% zR{0|JATM75!2THlb2zzL-vPgH#ve%r*qa%2aen)J$jdL|zFjI8vlw&aPq{Te*zNJI z?_F)atd`ILc@OIXEjs^ID5zPRW6%SPjGoKZ$bVo(d^7W?9efxZLD|zsCc13jIlpZl z6^{$6rGx{1d7zRJW?8OW&tcNVu-!R@u_(-ON)<{y3F>eFT}RoqTY+k>_)El4<$tU# zZ`$wK-%Or}P2k%nz;kjP*^%jfw+k+lVuBxkH7ehv^9J}ONoLYah*JIJbq#n8TG&`9 zM2TNq@0p`|VJR2yuT9-^ByVxRIrD)1WCQ(Ffsf{WzOjMOTt5{9kTH|(&&(*~B15(? zetIE=^Hqt~JH4Yo)qxkn6F5(HkIdr5a$ssE4b`ztKK@#_V}5LnrKa?2M2GVc)WT|_Ex*g~=`g;xh zKYC`I3`BlL5mT_lhAi~AOe59c-~U_o1CGs_6iWA(P#qVC|NH@}BDc1m`+xt!zhC{| zBkceAE#z5mg3b^zaVi)7*9XGyvw|SAhQ_4%ZUF#iXR?emO+r9tN!VqGN*46^J($@=^$9nCHOCPDo% zheJSQwi>J>m;#7zcETQb-xWDmglXEh1W}(!_(M_9h(m4?u@O!n^_uZ*6~ig5Bx}6!|fGK5jUBE z{)Pd}YJkoOc-Z+@!7mq>(?jHAxMz^eJ?h->2o-<$d_Ix-CclF-K;(UdUvxnD3G zhz#?H-t{p*es~vcYOnq6E|5RFv$ROVJL%E0CEWfv<<-d849N9M=4;Xw1Xy$eBz7! zO4NJs}h;EFc_IGJ zQ`nW6cG6r|ND|)Gb^V)^eflxlX7z!eFpo_m`Jd?-|?gD$|##+u#9ttO8_(EN;! z*e_AgD9>v^`^+@h9~m5UQ0>+&cWQ)&$;6wA#*-5qg@gF6Z^NApgV1q?^DhoW`3AT; z3&{DLZlr12k^g-lE7*Jc*cX3&-pLoZJ+||Zt{;#ZdSprp6awj^!vi`{(-bMbTO5Bl zIJ1Rw6+u6U0Hv-8=9_5Y;snHuS9rTv9{0;%gRZDFR4qwPuD6Kao}bm`*LS>`{@g^6 z2*=l%kVZ{Q)GaY&PIV{j&9%Q@h4R7&nCsrj{{VML(4(Yx)3(HLHj9;pom_!o2od%4 zq;3gwpo5MN_gh@yb@%`+A9ZkYu)vJd%X|3qFaDOf5$tK43^JeLO7~(!bp$Di0jTgB z=&F3zeaFp?e59}fGTz%{oT>zW@o~mj4?4ba0S>cLm4_>Pz9UdsTHBvr#{_Pb{*{;} zFpj;lFgYboiq8wK5+p&mZO&hZue|YPu;JM9OMMafrVp}|k()87pP_*z@J?3!3=J-1 z$q1N4ZAMNE48q@Pc~kc{4wtEXVfqB=Pv3QS3MS)}UY%^UK(jwo7Cq@{e?XM2Pk$JZ z%Qq&(Py_0&BD&NL&GAJR(1&XI+6&X?%ZAsvk;DlBGDa$i%)oqe2UcOM^%SQ5C8106 z2pmddLOdqN1rtjP0!n1KNClbvUw&BwHdgU7mXMahVI(fd>*`F$JO63Yizn^8TBZ;| zPd6kLWb2mqWrqL*vPC?f(D?F`0Pk(sjVuUA|t4l`Zgb&XN@ zAn15}QN^+kfCvKW<0c5<4Ky;-Zc1zT5}%8#Mu)PkhO#x;J6S1D;FqW_HN?&8H&Xqm zr)29Z%JKq_(oDG>XH`CQD|jQv-S;eD2X_=gRwP8;QQQ3diruNd4yFY&q@UD!x4R5Y z2jnBMce_2uT^1ByQ*30^1-XvmSqh70!Pj*%Xdkuzfh^J46cGQn_b36!XS#@!6FEMM zL?w=M5$7)7{+ex7X7@)LaX`1ka>;`>>pWiduX~w?2E*un^Uw{>j@dojTe|<_%hN~j zKSSrA$6yZyEdm~U4BG!b!htx*@Edad1*Ds|key>$3EKs>bOOy4EKhP6eFzNE&Wu-A z&ySQU+fB9IgbG50*Fqua!EJom?&AM-fP{Y|{`(MQRPUqKrxZjYUfzTeS83!r1ff9n zzo9NnE$s^;Tp)`c2UvxX@ld2|01?Lk^KgNy+ zWc(eujR*#O=s}GXKYIek_-Pdijb({^x+y1l6EiV?BFTZ^kKCQ(v)~I8+AIp%ukDZx z>LiTfb4Y(u+hgL3517MP&8WBtYR&YEXPyiFeRR}&O zF5J!IiBPQD$l2*jQbE3e4RObgDblb6c_9LL47h3Bk*TOqHbc?=QJ0q|U{H^+0j!>g z2!~7rLN2%y#4%lw1=(TqG;`D>Lgv`9pHo152#`5|1ck%pBoZ%3ZZhq37idw<4FML@&|2{&A(hi{xM^$ACD{yJ)Afi8%^B$Sy`JKYkN`&R2MXgui(glg2P|F;E&L418+!6T6 z!S?s&3M>}Xh+5p7%@hbQLK-wg!H1S`V`=6LGCKur3WdNP!Np8u&aLo;?)C-b#;3k) zO(NsM9J@#ueqbARK^Wk6n9re0AsiGo@gW$LavcLc$Tkcakp-@U!YX{lkU(@i>_x$= z0_*nB&jvc;MHRcVS#Y};VdvYEnLA%(24ZO33Qj7QI~DfMy6x_##(r86EEL7!SigphX(rLE-ic$If0a#t<5DoE=C@ zbbw0Rc0?Ccd7z(Uq5Kof6VJVJI3Evm5Lgg;h&M&DCYGk{Zw^}sx1NcJGER?#g=e+TFYhY z(K5Qt1u%ytbM2Mp*!4papNiC^B7M`1un{ZVa8~`X^`2go*n`U?hwQTY-D4GHtc}iS zkiOy|`Q0z#avZPnOa#(L0toNryc)O>AVt_0jUJ^xOPv)U zb~;es{yLMZe9p(%77;Q#JzkLHQqZLnsLbjuGjAG#w@!ujv$K&G=r9{Sm5VPJf63PQEgWO60w+Xe&G*kd? z>_mynagct&=Zj2e?K}b)lbu%iGv`ShQUh)Dh21x23C_Nu^K4x_xH2gCcq_+8PaP7V z@#I{0HQu@e4_*tvzt(aF`oA0@80m^LGDIV*l}}m%cfzgD4=S(sFTy=VO(3!vt^RI= zD;p*UOyuzMNdjk^GdC~7;eM3eE`pQ>pImfgavizUZruT%QtXAhFQCB}ETOjyYEUl2 z1R9dC)8yjGwlLnKA3vZKa;;d<^LV5f4&a(9EI?sWH0ChtqV2?vM4DdoE8`Weg>Zw< z^`?g|oyBNFYr?~f$zPYd162l|p`abIBngqj1vWpztbR#6U1qY~)A62$YuJJ`F)G1B zfy^}nS$GkhK$96d^P-qH72~we0URSSAwUY zn{a$r96b7O=oKnKi=ogwQxHGV!S@w1QUkP>ad!*z?-#LxZuqxXd?qlEM|dF$s&mEG zn0wlHWOQU7Ox0~ei9+6bKn`hP?3z?}20FE$ZiDWL%q!SaQEn&JfRn>xJW_Sxdh%Kw zGC<%fOw?hRMpk31YbruD_xZ!KbWAM)|NM$F?hZKIMli2^+iTwi+Ka*a5PLsuxZ`?z zs|66II=S0TwF4J!{PB2o_IPuuh#>m-EZTtMTxHBN>!$B^uQ^nDzhzyq@@%BsSsuBV zWG!G7%uy}>Xqm$G<;*xJ2iq)r>4BJ&h|CRxoO_++QA8=sl zCkW7XC9DB3$QXP-q|@@}0Ly6vGOZ!31HHjksuuVWPWM_3Rg?VXWD=W$f=plTgMvEI z%ZjlpOprozYY+>tABPk6MgR@oWC)rBHI#?jU}Wl~I&%27j$EtbkBRqXeW0&e3}Fe- z62|Sm&thXPB?U;!w}u#u0VS3ajnKgk#(=)Hv7a^c*@zCl4RV_0-Bai4gk9xZ2(wIswSQV);Up&A}8As|ORSg{8A?hVUi zigyr*;rfv2FLA$3PC@q02Z$c-GG@v)thqNDSSLbZXYO6{D7>HLeTe`(%P^+?*!rg+ z3dTI^2Y^Ixk((L}@EC6+=Ob4bEEsVD6YICG+hE)aro0JvQ2|g; z8#|>DtLaEE9)fIej>5SIry2vLl}*PZ_B23vI$OiZJ~#E;EAE~cs~dP`jCcfBlloMa zu7qEMqzQ^Y(P^h$1_BH-wRiB@%0tU#YD!eok;a*Iq$*NO*C&yXiar%pXtQsu0FQ8s z-X0U;asCUGi--{9{;wjjF{$B#5Q~>dA|LZwOn~B0`J}A)J1(?ry?9llX=GLN3aiFs z^d8$_z!h4Nf_r-iB1k#05MVm?dGqjk2=muSMyZ72cMq0DDa*E@f4xn*xuL{-WMee| z#(?2nw(7~)J>}|~d%=OziQ0LT#CR-l^eo8jHJ@BE^nI07;HFlp@5u)lBy6{uic z`-xI}luA6AFZ6FG+h)0adnEv+ljKlS%oat zNUNnjzN~}0Q=X(^YySd?L&KBWlr0Y*eplo0Uz93eMK$4h_^ZNl;6p_^y4r!zjF?Km z#O8%sj@bKDZ9vVGv#JrKG7ANFqw#kE!F||}_T=wpHoqR-G6*U3=D)YUw_wCP%>POi zi0$(MVhe81{FVB~klP20xiYx=H8nXZkNHV)`LuaJKHEN>VoVzvs;Kc_lPoKzk-18P z#>QAfU=t&~_BaEsiW{4Wf_EWv@(IXDREb;?RN?y`gD%S%#6`tvSq!!T#{r4>f_{!5 z4PmS-zk02q10Sb$+M$RzVtLh6xRiU*zdWRRq2M^u_Z+=^)ii~4i--vt)b6pQ`b&ka zbW{~@NXG!gacKDzM0srEagnEe3nQqSoJKVR++6xqGfA~Ki(75?gPw28Z^-sO-V*B# zt^FQgyeeJ)e6<~lz;xT>QF`NWZ;0VQ@VqQ*Ky%x_i+;J~S)e4j;bUT+!plj(fa5OL zWW$K9uc>)~hIeZ&bBmg<-AFaO=#Gg|`4jnV>z!jyk^XD1j&*1#;2N5AXlS@;4v`AC zApp-}t0X#*kMP9~>l-r;7|SQREhw?XTd4zH6H#%2xJhq=qWSc%UP9E{rpQ3jr6T(I zG%~7>^X#wu#~_dQ00P|#-f>8|-c&xU%nQMT>+1qEWGu63PNF9TU8=t3UIh<-GAJ12 z!dqB8267_-Y?fKoY^s}#U0^?}8}7prH@KZo`o1PQoV4S%ktS7_tCz)OEUf>zRm}j4 zjFw#}*ysmPi-&O24vSbU5Pdb0M~Ba%4(Vgl*qgZ$ zaLcvVFywxGkI7rvg3Wo?O^R?*_S;Maq*EK+ZJ+@+?A0ORlsC9catm&qpXGFmx=PSr4(r3ccG4QM!+n#T7Zpop%>O%ts#3DEXSv5ry6uSJ-SDr z4&<#qhzJ(RAQtD)@|$jd+Po6P0*;fs#;%38VJ^8Y|w)`waIa_&5z zpTESoudHcI#Qcm6I$IGkQ$ddm6}Hyxca@Yyr!$Z&6QB#XWr|P;sANDQdMYZFy~OdJI@!2 zU!q@-r~o+5I>B;ph`%`3F@Sjl5Vf2CoqI>Gj6K7}^iGOi*`22J*^}VWMw;8= z6geSL)2DIJRds^=+OYRxwDICX>1fqjQJO8J-ekslRd2?l6|M z=x#w{r#!W{pzqlescMhxgim*#4ythwx<$zw7M_Xd0uMczCHGXS{x=d#g5*V_P-=K) zugkr1ZxMtA+A#?V#a~I??tf0^{07~Q)dJmGRGVzbL$SI3< z$Hu4E_inK4*i^eDVm>bJUkGq@`|=L>@QjID&Ey~HL;v_oMvr6ao1<)!qF;y&nZp~M zz~0)3SafG?1s&&s4K!&_&SBdrY>Djx)rO&I08u^TItWj)QuOSCHZLP6L`Ju;pEF7M zHF+K{kPS#Uw4tloUT&TDhZv+n;E!?U8RUAeV*OnnK~;Uw|4BC$*ei$zG0fhmeURtp zu@?ez?0h9DX_IoLY2Wr%glNw?7ycAY@T2gz3SBIDrb%*6rK}t`cBl1{cUgJsVCLX4 z+%V?-qwhah0ogn{lP9RPpm?Lf^hJ+Gu~IxjnnP-mbn7`453wub>SJ1CI%9fc24hA6 zT&~unZcwnN#sQ`a)Prl(i1P3E^p%0T-c@ebA$=@Jc~IQsbkOA+R|eGs+7|)?8~V$) zJ{*8i?!38i5O~X{0V{4kdwfm}fAd&<^PcCxc?+?!a@xv@z7CX)T@Yo@Ja)|yu&Cg1 z*%MTd;0x&%o#+YQuZGK)3zEv*MP-*aL8Dsv>srT=GDELSe{H|aKbjVTEJL@&+*Kd)HyDVL2N^pmCJVG(GlTZ?0Fh0D(R3YhUNUy~q!M1@F6K-{ zMW{Z9+US>2Xf?o|vb+2G!nnBC^E9({-70jRK(>R}?d{|^Ym)-l-kP&>V zP^oRbtu*D>MprLaQ`HF97}j{!Jgg}_lq&^LIr(d@#uB&`dTcKp+1%)2c&!S^;b-h1 zK=xe-#yuVcn*7I57(5`g0yh8cKfWdh_=1LtaTeV0g$;wZ2pUj0K$jqFGHy&<{A>fr{wJ((a*DO3i zIe%FCJei*q)xh))d=w%qF^izB%y?P>c?s8~Cz+f&ILXlw@FqfCikTQ6FS^Ig%l3xD zx1toIYetR+h`FA~h37$x>Uk8)nhOL!T7IsEonJuw2o4vmYQeEERn;-GF^e%P^NO#v zL))P$h0sgsLiF;d2W1T}OL{oLYIY;!42$zxg(~#yMInEWDk046KD$b@<(c!0SWZqb zO~JwRLYm!%@jt47RSGbDHH%+Y#j`_8x~m~*LW(XB$V&p5?2!`dp?E@bs8MCdt4euw^P95D7A+OI-K7JisGv}ZfoZP zbOsX^-F2k?1f@*x`LknA{nQEl&+Qw?1Mt6gZ#F5hG3J-k7K>k>CIb?;2O(6yFMXS) z=Rl3lj&AL-^>Rw><;KJUVJ$nbXlmX)n8)$XaH`<2F5p=0cR+7|JZ#HErTUXWPHBpC0qZ3R%}0 zutCurBE;vsjti}AY9<3QrN;yut=Vg>mVcGnkxW^Vn~2{(R)|L@fiTH*xAb+uUafE=Fc3GEy-M50Zjo`ZHrw(TX|A|f#;=goI_uDP2xng535ugKE z2e?ch3K1u;2|q>}&+VDWbP6Or1YxYJ2x0~YUoxpA!K|x_F~ku+|_;C>b~meQK;vZp4q;E2=zE1#`LAc6oA|- z&ki@BT}uT6DGRHAlG4>pr-o zS5$c^mE5(iuwS&#O#s4Xg-yvddG#zcBF{-WR>SKu}%jZiT`Rz+J+YOj&2fw-4=tBTGP}=pd)d zA&p)y=Cdr;L-8aSpkTuc%G9mT8ZL8F&_wyneoBNKjBYqcU#7```c=*SR_li(B~=O} zXS@>B)N-qhe|%Yeg5hpk2Yc8@&${o$%U?N-z^sqnRq-N(=4O{$yjB+@0_FP^k?VAb zx3{*H6lV+x1)CBL&|+my^P27@uAkK3^{Rq~unVZZYBNh<<=?r{Be@^`F9cGrjcR~+ zxe%n}5&$wb34I9`8QF)=H=wTp{Y{DVkHVs8VosGRsE!eB}VDxIsOo}GO0aFFzwv~A3}kq;jaT=VTEU= z)g~9AB&!x_Iq=rU^uD4ul5I5&lhkK6rzuncmPa2W@#K}rD@I#CwI}91 zWUnA0Xx=?HXv?w$R3`F<&h?X{K$sO>`g_|lS?rTP(2=D8&06nouhJBjR?`a2Y<8Mz zO$iriI)eUZ@+_nv-R|T~vjE+}o09^EI=kLh&2Z{09KcMBb~iw#ic$-$^!9ttUTLT< zLVMx@NAISJTzOdz=7vXy6%%Cq$;7WrPxor|n)O~YrA-bJT?Ctpp=Q5aKLm9)mih3y zRKXs;Ch*qZ4AhvuBgxoLrYPytDy9MIN}28}4T~a+FH6e$mraE+--fi@1Y`2hwy9Qn z9s#02uIOg{1#CJWOpl36&&x9A+Mvn3gxhBsT%ZJltTe%ewex-hVLT9S* zpj(HB5CLCGV2ivf|LgZZ_BUI8TXC(bh|TRwr>3SUh)kBh$yg)D_}vfn6R7$mh;gDP zenuYw(HjQFd7QuWZX-h(Hky{gTH!u)&DV$0L&!PQ?z);BQ^k#z!!6Z2$%%J`m&C|! zH2&7g>4EEHlXHec^(LpE?9Ui(p~CwW;GsHA<9r@GbdQRvKfHn{pT)_YA&6@)fg)87 zZSkZ3QK~5ajHLP_kS~$0=T#GYm0wexOfF7U{tK??X?f2GRWRUwye%?9)~m(wNlY;* zr`g-cLEvGr6aOg*rJBH@E&zZxA?AU!NAOWeLm-l&rOZ&Ogm^aOg8?`dA`eV4x9T3$ zsIEn|o{V_Mr@BK?iyhBM(AhKUVl=jvx+ghifFUF+wXq%;1gm=rgi>fmcF=L+w*@E5 z?^i!D8CpNg^l41}xc_K%$VB+lu6VOuPSsTdIxlYG(a?+BA*6I1xDbr}yg-}f^_-@yyLwH42ZtUkV{jPOsb=;h+Wvus5^T9%x5JZg%^-0GVvEEPLhr# z1;7mYR8sICgDt=y{6)ok@PhXJ{-52q-9@xL;ZbUeT_DB3oTMWkotjK4r$RUTq z9Eh;mH|LQo!WZV{xvW7OeK<$HD^=3`1vB06DJDqMM0XlrH$}GwKll21pMT*U%zx>= zZ6%yESUa_J=bg8vUO(}GmO+-TX(i4nVFfOFrk$LX(>2wW$$Z!HPHVXv7Y9XLWiKtY z@%s^3kgC-UXZdY$hr8I?X%P@QslKMyRY{2Pnn;wg?5f2V-`_Oy&i2mcvu zVCTBmV=yhfb4Xu$Q{pgwBvWzf++=x_)_Ps>!*rRXB?XQv&33!NxBJU>f^Vzezomn3 zLYunxeQ%n%)}w=$M6cDHVoO6p6`)@jf8;w!EJMWVv?xe4%iT&dE?h3)kwP5 zppJ^I{8RjpFeVy7I)X$y4qdsM^L!@Wdu!x+td-1Ju(&lSSq&fh{c#qZ1f2CIDYNeDzc4p8K+xX8HB|fIgj!g~bDEN;B$sIc;syIc-L%0U_`TF5P zbx`r*;e*R4t;Gq~iN~~}JBRPK-6ftE^HDY1U3{1F;<0K2`9VqXwZet1Cib>hMZ4l{?60yd$h!~=yxT_+ zlSkZMQ+Gp$B=&dMi%$fObTiJg{pfMHFVN`ICitq!pylt;TU%QMDfCL?Tb8Xac$imO zc=ik#+7yCKndQ8lUf!S;{|>~{B5#*g#Z;kEq+YMCHLjSsS{$f3UO;#lo%am`)~Ql# z*Tnhf$?U=>TW2Uvc2INx9mxZWXY6Xdfg+pjJB8i}KHPq1RXF;f&R8Q8su!;PNx474 zK_EW-tC0SoiLgyGS?(Ia{;m|t9Abv2k3;pLu$k+B;83fBHzLwk%HRD1_MH)$XCCY< zMtpdYG00MLf2Upxeq97%B#KX;(Mo&mgq z(IraY%Zt4oNJV?K+~{y0agf!>F!!rvR**fTbeYv7BU_qR=uFe)G2J!DycX-8+#wS5 zVkFadX$(rqg@e%=V(0 ztgPexDvW(V>oG<&x*j<`qFvm#2!*9E%|JbQ&Q4fN>+Sh_eSFKDi5rPV)$18DF17U^ zs@1C6U+WNm!J<3_iv{O7^yA}n%nui5Th z)emO6HA>=h{!#Y1I(Qm%w!f>S(BNYk{Smb)8INImYTlrd2*=TZr*z&HdQZEO>lTaV^61t=eQ^`keUwgG#unS?agI@bop8 zHswS5SFpG0W2Aley3}x6fyYHlzz8@v^wLPG0&)nR7gd+}$}cQ>4R3JI=wrlRxy)Q_ zQ>NGq3lI$Oh4c|0U$zU>tO|!41HzjrvL?BN(|$|nxBDw^zp#KH?<>d`gT4^_I*%U8 z;A(L)j~DVHE~NI8SFxPC;USRaC-C;WaNYP-ficf-r|*tCwC=kB1XitW!@EH>E@>KI z+O1g%z~!R9!b<&=@7wbehB4pF5W@R~b30DPWz-RHDc>9rHVk2k*h>TN*J}E)M0J=Y zpT0*%;cVS^LdDsd_tH?%J5YqMPLk{*sohgMaXP3z^0mXRZTD)KpBsvB`**nivyV4+ z9Tedf8%O6^w*5z4_Sy)W97J7m<9kDDWs~zrB)0QY1FeY?gc7FiHN1!na?^GGXpNpz za~+O|V+_QqpDMzgkd528AcHQNE^ERf_khUOFyEL2jMpfKp=^Ihgb zGZcC<#ZS5{BcC>YNOnp8ct&&-JsS0ciO;Q{y4tXNQNnSxY-w5lHqvnw`%KvTkOD3Sg|JL%X$C5zYkd++OGTwY?-~* z#?3>xjPh;E(>j9V?idQ9X7mKLdq5rbW~s8<34j7Q#c;m2lojoy0d_yaz6%Qc#8YP` z5gB`WS=gger3*aB;m)UL+EW4IW%J4XaN*Q(W}1Ypsn&g?KPO8ATjxKlzlGTeug^bH z5J^4XCJ@T=Z+0 z$`E4sX1+LUqw=Dk$>q#kTsPtLWmorO!P=vkwBR-A#4bkGq0*L=;u2dHO0u(hKHo}5 zoSJnwc!-hviRuPZV9U{BpYph$aJu>0s@JMYvt78Z=HJiT?!H=aYA9PS)l*)!FNJ5n zZlejW4TWQqA=WZL%%m())c0FmX410<4^@oK7C?4ya5)@TOtI+67iz}W{T_6wsFYZp$nFue}{Qi^*uf;Zc{-xp*(#fHNs60*Am`)^Fv?9Mys_)Z_LXQjP6^YlD zYIu1sMBjg9+hjF%HbQ&}I3L?N61EHcATkD;#NdGs&LH&*Cuv6?;f;mx=M+#MSWsM} z4>CGEnNF+AaUYK6nfRS&u~G;#UM74b+*6;$usFpBT>?3@qej`8@2!E@ zuL2>s+Yi5jR;n5OYRXyvLAeDsPa;pwBnN1!%phB^`cOsax1kmj6#61h{O?=UeZP}c z1L6%Qo<&(Ip)nT7jwotev#KyBw+3B9%j@-S-(?3P<+sT z!(QB)@b+tNU!OHdxhO|U>l`ccg7W*NW`O~Avhm_PIPtvCM@*B1qF2dP zhe;J|5S0d^9s^XV!1H}O1*)wK#BMV?-P5L(PRA$l?>%Q?YjW-X(%g<~(8_tbBFUS4 zyk{TMu>>j>z2SshYylX?Cizbo!6iOKW6k|$Y=+SF%E+6cgyMC%vgE&y$T`J4j#{bn z`kk;vIq3;&!r%5Kl`)0k&^o=oFuN};WBd9&{K8-bAcj`#Yu!+^gjy)7-oRu|33=|; zib6p32eVcfJ8On)1sQF;aDjXC&vY9}lDc?^2YDAx+>Vvkx^ikc-e6X6X9D|}3X;pg zG4g8pnVN-P$LYJs%*e2(>vd!pJlhkiFu`ZJ{x84zAwkzdR@FL23d>S7sZo}E#Mfc* zZhz=3c?~#)+h=F<=H1}WWqVGpIvgy-#|Z_rKkw145fS@+2W@GgkX^HlXqo~*5!GrN zkS_L~kCHx_uD0m?ho(r z;of7sHnL?kid9i%oF*U!yFeOIGiQ>1H{!#I_=k|Y3DXh?nSm1A`Ix+T6Z7YigQpj; zw)PjE$m87J8mITks`k ziA3wRyV+&_$#UJ7M$LBgWS!gYKPfX%n4KJ=V_v~GiG@Nwkk@;L*^~W-Eko{cyOm@P z)PpmS8IT0(<@qy6$E%L@iwGBoun>C%DYpYGG9Cn`%=9a{77i$d#gh@(#6c|SZdPf{I6k1MK?Uw&>EysJs^lOygO4?F!4Ah){O z!k3;W+~FePB=@)rZ$QOm+$z%b)06zeo%Aosn+z!@0+h7vQ`Eim}A~) z_1?EPzRO>^w)C`AefSRKHz01E>NUW=ilc0^I3n6&onHc_oWekAAFD!sEKfL^_a=GorLb8l5C7AU z#$~07;yii8A=JvyZRHTMlI|Fv2#a4<|{9wH#21KD~A zWJd-WPm}GILt+}k5)`?-747S$q<66hmwh9oSV>*y81dN!hNR=@YEkl^MvA}Be_sh3 z;~eEqb>^~#I?9w!Hi`&7(w8rQ3Zj;KC)j>)91F9n)=dggmda8f$+i1%RICrVL8p$y4wcx%U_7j)E@H|wNF~GL9 z;u&wabTETopbp%_*gMR)A>bIid5PcJVnbDjLseXUtG**cMG=HWqU@>e7>3p9oKmMg zP+KE4EFgqdKg9NuJA%#OStKiVP<5DC_*}W`+Of2OFD-r45KcJC6=8+Fkx!L*hn$ls z^~x9tX(=#j)YTf*n&e3uH5%3WHta7?7hG8xs}too&ZL|UI7&umKZYfSUMHX4 zG%(7zK*s@XcPRe2mDY|sBy$W0z8gE+{0i35OmtC-#cfL%eZ93!`p+p0BaC}4GyvHQPV=F$E8+=6m;_ZRl+k4QueWQ7#4?)Z#QyHORc zY|%lwKRPH&b$q4Jo8{WEX%MKSBEnwX7Z#NN`T!inb#yM^RFQK3W^kiX+gKU=n_NSF-TfEVsW;hCVIC?iLBIr|0KAf)*_DIoC6D$;bv*NGV=B! zc;S?>kWy6phdx6_kN_%T?2i8mUXl!5N#TEg19as2$k-jk&4K9Z;R5CHP|QVE8Fz^e z)-lC5Lk6CzP}L{OLDN79BtxvTlvkX>zup1zy-}kq!eFepBjUz`fm~vz>!y`pDRel< zU_HGF1RCV!uoexzFB)M0(O$PBL^fW$MNzE#Z=XaJbXWF@4a^KV5B@6)00QCfL7)-$ zhbdj;J?k99HNMbEb_GmamRk&lZ`+NizlA8&V03yMk=?SYSeQ~mwXxvm?i;Cn5_FBza%ewhSOTi0LSX!X_Q zc#0M~j3gaLA~&F8PKM*woF53eI2q}QY_vD>STfSFAs>VR2ZyyT^t3fok^H%ST$Y^@ zr|G;-61&_t$duBBT{QnPP+SsOV=N8v)4tf?hd6X)3LZooh43j6JvW%*LTVY3X1Ivy zyqJLq@v)2)Q{YK3L?g2U!I78*V1`)MgZ9e*eq7;S{L)q7w~nz#|E>d`lqN005_ZHI z9ehWPm#l$*@{7Sko|+InpFslBgM!#{IzL;o>%?*!EX^g7>TJYtoBM(aLLt2-P|l@WO08IYNI$ULrH@Pi>f8fMWEh`BohfJSU4 z&ADl5H$LXpuV^?fmKF}geJ1!!z&IxWPwsnl*yozV)ZAP@b)(UrhI3c7qVY_h#c?1< z)q0rKxjQ59ESn34ZhfPE&lbGn0L~8M!2<~tCLy$iTsFrps^}^&_7cC_i-=uqQV$Xj zk}?mHUr)2qTaHT|Bc#KmL4`hgplfc~_fG_{}L=O;#u>MfR9?&_S$$hA{OeCNKu zOhD`O5Q2nTbyJz)`NNZ;ug-V0{m7%L_j2;fhW{Xr8@ut=&|ZPW8d$`CyByZe;$Z)n zP)r~p>Ict%e6@0F*2U*QbKrBmNz9xFl1q?H0`By#7+P+$Z6VYD_)_sDXo3!z#!&1P_wYX zkfw7v)KNw6>0t}JMvi5(t8vC~IB*c%T}EPGDVfAL{`a8_U>k_o(}3Stp!sYoO7RE& z0D&3`>Nii>(kbZHSaHw@d_*FSA*>9syZDLx{Ur9U>kYn_2yO@nAZCWVY$R9CU)QbP zPtgcDvW*Z@CHcfuA`u2n&Z-<}aieL|>;U%Kl%sgD_g`TNCu4h-CuNQC`=7 zZ9)C-7xi5_czO0}NgVhRKLYIjJwumqdI1_`+NZ>6{KjmI(=nFt^^o&bfTe1ggLg+Z z1eP-)G>yDM^pB~h^L1OqogiRYS?Ig?O;~~AqiInj9#N_8I6qX zL!v3+ugQUDrj&Y^sI}~b2%!D`e&-yUMZ~TeFYyvt^i|ET4N^OXyx&Rcw zY*l|%J*>fqdT5Yk?O@?{_l4&soXA<$2-hqk1zlx~;nfkuf&a>wFgS%2C~zATZca32 zAh-HlvwUYJs{>&JvYY%06hF?9&?s^sX&eJa3Wm_g=Ux76UG2gN(vjcinfp^ZAtuE9 zIHQP=m;q=uf;rYk-OKO2Oh>vkSSg=`xffQB*K|2H(Zgv)x#Ft{O=yvN0?IQOijp^a zd_J2?MZ3Ibz^$PxC}bbpsfvOFmUT=`j~p*KOLcCXBc0rzlaf`|1e_8t{u%@$<3o@X z;G>_;@w-7VHd~z;rXo_Fk}&{7-p{+NLM=4ph#SUAcD1PB8_Y39q7RFNB+Fqz1o-gH zLhYd$jiTiCHIyG(5cpNJYq3_{-!m8!jP#O63V_Cck4oC@#Nry68$@qrIiPmDNp4$s z2wr~41MA)2onlaH?{vb-q4!80u(ORy10TdmBLQCbH;@?O3{7{~=F%@4MDZX8B8O?H z?EL@2A^&(PUctzodkOBFWRR+;@#ChD+)`+K^}%aVV|eVDYnI7+ewZa? zfV8yxuJ?V}1Kxvp_OhsSAK2H_aShb)*-v@qt!2L?tZ`B?1Wc; z`$)CAS;$lcTPpf4ViQq|1Py}$j0do+M3Byqaimw@vIjf}H|1yMCA_D>bTb#`P9Y=i zk;zx34yR(C-{F`BQn7CHp3r>r^ke-(kY;Y3iQlheW&BMB&-Hb3i&S3t*h%1l>P*yT zdvWX+qg~)Fj=zZ?oQ(Febb>4kT(bE}Pmv-gJhx5n~;8Y+Pkm<%x(>y@-e_r%%n>i<%DXnp%Uw<-yUW)xu^WD-}XvORIJmglKeM&^}$X z|C-)d@j{xStS9UFCo#<)pyficTX1<$vh_85v4(WK#_}t6ey>cD-fM({DNF z5jgJ*r$vVTY}I(ZVf;F7QX*0vll2N%9m}q9qvA?J!#=D$^2Xyj8>Lz(%Q$B6MlDaj zMh_D%9xQXBI)6Q>M_+zucF2&YhAX13xJ7Dh3-*d${Sy3-hvz1Gqux5WOrq;UqRmL9 zTH)DxRx{$&bHnl1C&@3#P(5VmqpXEgq=~7i3B(^<27bs^Pc5-<_9kn@B+J;~G!{Rz z<&jdqB_c;w{M)cZ)!f%??abcb)*(j~G8aaW|NbtpjSv2SuY4N^5Ai2mLFCNdFn(_1pYj`SjtTf$jA@@G>K4aQziUU09o zBS!-!uN?0~mBgyYQJ&WHE>=7&NjxLz=mY-swsT_thyEQX86PB7KYiHA^}Qob(D109 zm$)1o`hq;Y7_Y$kMZ{ovN9p{3ELeOeeq3AD^Zarq^5ybdBESYP! z0n*+|qwPo4f-Vp7sQ|Apgh+Cb`Rb4E+Ou)ziZc_^k3NJ}!oqwAfND}AZF+QczY>l_ z8`gQ{H*|B*`(TOF|KJXP0ne@rC43RUtZZFD>s2f_45Ad;$7mv0k~ozH(s(7zDxaM$ z71f_s{wPX<8OHRIX6Hm1Z#gg>bn-tLWN~L2254P3?oh^8ON^l`)sQzbYM$ekaRm72 zd6Gp)lPZQtt%>v16}NFjWnMqpP*YD@iMJNwX7$$OR1Tt=B0*)KL80itnu|k^pXac# zV=z|1zOX4~*T{A%dKgTp9x{9$bMo=+Cnm0cnBXCJOOC&P!sB;a)lU%B);J2e3>Lc? zMD_D?FS|HaI(_fBQqw-3fDb{h{igbYd#!Oe$^3UY@>CM;yu{uqg$t4*`rj-Btn5-> z?GJb3gZ@H2*%N)ce5ulvlRA^dwFq7`aZa>e(C-Io;N+5lIL&lx)+N>(uGl}jBO%NpHO@tlO6R;IJ(ey_F5LL?KAH2MZGOCyJm zubuAzfa1}4)k4u$z8keSAgJXG4ET(-eVJYA?DVElA3k^Lt0PjU-n5Kh%9f-Q8O2<# zGy7To?nkY8f4~X+_zruL2+{bP-h?Q7Mr2l7}DCx^x0+Wt9Bm(mafX-HM9T z!5VPlHBvFXC*E<;*Hk!!|s&IvF~@S`q&PGPyv zW0^9w%r5*JM303eQ2e7N5P$Xit~Yy-HToh=4T ztvKb&=)R#Fj|3F>_LvljK^^mmK{2S6ByR8O%SVnBkIw16JwK_CT5SipX8I#mz4L?# zw>6fJA=Ue6F4E;dP+g5GECp?Uzb>GWWd;LTZtsQ_?9TEl%AXa^Q4R7hCBT*A@@1{J zogj=#+^cjsgnNuM7t?2+-*Rs@Oc*~`q#SJZYs>(Olh~pb%-6I?{Zp&P8~65q7w|`kBh43xck#7Gwu)4 z$evE(JPp<1S8W@8Ii+ebHjR*%OQcpnc4=K;QUv_<@FH8XwRBW{m}O?P=wHeS&NaYF z-J_2|n&S|g@{dmbOD5`)O-`*Y&mtet&ogB*u!Txn@8Z$jmWM0^U*a#@P%{*%g_GvK$Ljw@r`*|ae0g!x$?`DMl|RU` z@h~(;Kx(YF!~TA{QhN1Q?FSK~G^smJRRk{-L@4dzo8K501YP4$Tk(4U3#N;?>D!Wn zujsjGQz=mOtGky>bxdYFU9eA_*)MXjJ8U3+=FoM4b2Oduci5_9*^JCS<~^*>z|puW zc1p*^U+wAXlAr?ys3Pz@eZlyWKl&{-(H_D#CHDCJ)jogwMw@S=7pwl{^5Y%y*XKXc zMZ_d>Wxp@vwAhLIWx)p-i7Djf@(PidTlF|Mu5Iz|TRAga`&S)0@An@ifw|AmY$r9S zrj0I=i%=-4uN6YS!mTOMul1(jo{|2yAa(I+acGA>(ThO|ByL#XD3L4yXJH(2i+lZb z8Yi0`BL7JiB2aDpDFF`w6pd@ z0zy_U)C$q*$C)E-_J99`8j~ZpYc9l!E%*6BGO@CNbSz*NSPFQNtNHEqJPEw$Wbtdu zn_q2NhcDFw@LdR&O(aipUtr~byc7}UJZCSI>&u^!(ZbYz%5`?g_?~@ZqHeBB-EqCdb7U! zQ`Go`jv0C5^_h4W*7|aJeso`oDY4_pA=Aqor)kH7zzm!)}; z5EgTuuL8VbtojDxOrd7u07!0f1cGcjk)B{lwb zzx%3ym3zlgg#DqeIn(wU+VE(BlVWf;~(Z%y4 zA^=^@s&pQEa{LOqxjFLq?BX~OA3PTEZo$zHx*d54<|s%hq}+$#J2FqYf55}F<2ZQI z^})id9&CZ{AkEfbMXU)Q(QAosp#iLo|SWoMJKqE7}V()jew&;ADYyF-0Y&rfbQk70Fr% z(Z$570;07+=3&F?7H3aabXry;4A?Jr*p>(8b3(5lVIZH7Ye|A$NWVSQ9_G6vN&7J* zWbE8deY&dj_z0<;ZkgPC05LAXKqVtR_+oen0w@sSlJ8DLV~OVwc(Z^WuU!whF&}{< zc-4Ud2@BOdTC)O>iwI83vGKrrx`BLGcMmIlc@n~jE=_4rk$Sy?h_6aW8hryajgQSc zc6jAC7#omlTiJW6kSOTFrmJE9`44O57p4lV&UnWL!fI15>?`(Y%YAXdKG60My>Ur{C5DFzn z6-ezuc*AmhB_K;4ib{KG_nGOR52=jW2_=d);In_iev*>vhAxK;3O-&Vl5Durv~3&2 zSv?g-vW!1Q|Kldz$6)01g`BR60N}L(%6L|kEG21K%aQ*sz#uab^hV_Gr>OrqTL7k_ z@H-b?OrGir&E}cCNRV{G-zUQ3)qc`k2To2Bxp&I(2x0qPn~41l86{I*js)RdPSDBY z%lP(=Wb?$(Ap~OGWN|vu#dG3d!*oN9d)tG2VvWrD=0CM>ynL6#**Gl}Jft>vMx2qs zDTjZ6k0~yWA$}(e4;cSH5swOqxCEjQuQTC}2!U~*7H_%&T?sO?>zKM6dK9N0Cs58u zT#hsWCDI*%Nvd&Z`@dAgqQNfnjj*%4A=2!9`thf*hnY|o-jEUJe)tE`ApjDZc@eRv zAb$~_T1fc};gO7kl4azlYJCjkquGMi`)u8~w?0QS2>73!t|y{eNeERTkGS*s=cCH1 zWHw$gC=~IPqK_CjxkrLQL$w(M$+QAAiff@u-`|?^8RupvHu88601Kr9>q)O0HpG{K z2QFp?vSD<*Ury+h0ihxP6b_jbU{1zPWf*yjd|n^p;qRW-;c!GFxyWFy`#5|kdCyTc zNj$R&fUm7-Sx)8yU;DX3Ld~*9yb95{LsniE1rNm08cS%|qnI4#@{fwJ zfh6q6KB>i_MVW2VwXCGQD)%o`qPLjaBV0hh{*}Swjx5Snvp3&W&nyF3E znGpB;AepMeV%wD3=C!Sy^vUukn!{pip(;bC8JSwNqt8QCZrl9`b6@4$yisfS)Fa@F zF(0tDLADYZH{(_M4-0<>cXFvx)jK6R$@37dqP*(Y2;nphveBPdFn(#OkV0F1k$yEb zR1V?Laev7NVJv0~`#6*rI?TkwPAjnGy85rT!6UZ94HVw~LWCR~QvlkpVR78?c@l4; zU&dO(1r?)C#J>D5w&byKXjRm-r1Jx0H~t@gumD)w5FNXVoaFzH=)aAv3ux}hC93v*Yt3??%Ars;;kqKxQSfmq8PkI1mH`DdR(tjNPfE8FuZ9*#EU z&q}2P*y6pbNKEBFbWoD`?E91GomUM7ET{~^4tHpnf-)C8WslB|J<>#Eg9u=cl%>dI z(8Cz~N#_q7J8_ady-CRsL)Qs_wlET+80l5^JZIzDPcWq=fF`gLv@3kRV$p8Tp#!ZDZbB z2S0Kf?2Y=7XK6!n?(~389#p^Hfb&259Y68rq;2p#XqyI>fxt-t8<^|EXJ8&i@?Q~~ zdNLWCA6@~UK2V4~V_Fzv6LPre-6J>0!`tMcKMk^Ged|Od#sDsYG=xlA&TXVc6bf6e ziSANYf>=ICGF_yEj7~(XjD$ZTaf{G{A}rGVY~9?=W5miOi6wGZzMS=L{jELSKp0u# zgMD>pNL2qBIT8u)^D&8^=H7t{4k$D28}&a(4^alv$#7dD20b8!MR3-q?@cH6=WCE5 z274zcn{iX)ziEgS(*pT?zHP&>ug7NjzEFk~La`$!*=h&d`4+AHlhjkz^83~QZ6NQ| z1&ZmIl`*$=yi_z`lS-oSeO}klnX7uB;B%0eq!O|li*I?12}#K);w3gjc$86*agZBK zBf?`iH5Y`9)Va|zFYPtTAUx9>qLsUVs@d$9%kD6ja*0O_ix7KoVi(G->B?@ABq+hB ze9MTku0#FV-z|oCv(q$@Armq%6%@kOQ-CMD_^s@UAkaZVBgdHgViu`{6M8XSzi*(|+ z7pR#FL5$=6i>30*04D1qj0vOWQs5XoMIPN1NoCNp3o@#gr%s%LcP`}b;v0FdS3xEjnMoGTc z3WSeN_)SqEq9ep5gZSa+L9Yc`Na+R1UFhE_NK5>*Zhc{f@M?hbg%PkOlQDngdc1?& z7PK?ZJn11SLpP09Uo~;!viX&w?mdu%=_1kfunv0?LQkUakma@j7iTP5E-~N!#~{-k zX*_h4a5|0;owi)h=stf8PrlSr?sS(_YXbyssnQFYg+e7xBU3u}lrfWd&!f~98KTkD;%f5A(eO6O$R z!4~jJ`5buN5gW=kgxCsEf#@80D*juja}=W&^u>jEvwOAEgsra$;GRrOLd?4|H6KL9 z)DCitn6{Avc_ZX}V$xI{LoxFftmQ&AHiIaY2*@sjzvQ}rTGm%kx9g8sVv?y`1&N== zGaeXY_--7hnsa_?dY%5{5(j`E~(Dd~1k@V}(NI$Nl?Ev{Aw-Ho{yzy@)2c!qb;yxIyxVtyU zP7mtD+)&f#+vC43S01|<>Iyo^C6$hn1qR%Uix7eCyfg))L%{hA?j&T7&4RD-Cz%-F zi_NVqifHwCYRUqgE<{p>>?)e1=ZIj1vCWp|K-uSAP;pa?2j3gf@9Be8SX zY&`OyS%v-|{?&|*+Oy2KaKgY2ny45#F^tel(|kv|qb7q2B+{{SR0%3((H2;m3DY;b z#0Uc9DEJbZQJp+!fuvei9f=&3We7-FtK=0l7-NTb8ha*u$j;!0w41h{eoPY83vpG9m#GTwJ)8_ zYF0_BC&&PXF>EQ3koB`0hi`Ge!;;MC`xp|eLUo4;eVxPk8?+c=*{~VxTg^1nX`b9< z_cC@MQlz)RI<%^|$|%JWAw2K~W1_`0($s$^_KU3M^rwgw=PvQ*8W$(PFdQaX^V%gF z2>;=vh$jJO6lpZ;S$LMu8*}fr?G~Snw7Do&;RkFAFgk!$5u_F@GCj~5S8t3}5KGXZ zif~guP2cIwj1Q$VWY#OoDS5Wn$IE3er;UM8b00&jrb(8|tl?d^1G-up=|H?u4dp$n z8hi#arUdgOH%WD1y?s8Q`pO~}ZZ`XYJ8nJ2`*`IgK4)C%HPo1?1E1Q714D4wZ)%&3 z5{$^3(;Vnl91&?Kb3JklNm1A}@fmaj+`I_OCgriO^AWk~nS>C}z(|_JBNg{d5iRx2 zHUlNMfsnQXP2YFz41W#{Uc^vt-*K3yvCp1C5Pj$r{^O4&O?w#-5%Y|l#67qHd)Mg6 z2)n-rPKs2F-e905u3FNT6w`F2B2q?81|NsKGSL@XC$Go9)c(0d+Ua(wY|OG@0mC_H zC5WWrt*?Fk6K4JgZ(Axx+^!Yry*ZhuU2@;9%W%8r&Xm+S^&MgdI0+0Ezire^6}o%cl~3B8oIa@&bzKqd8-`gQbu; zl2-&XGWe_rZLV#hZ2*Gx$_7b0eY>6_*4;xt3h*s4upG8##lgzN3lFmeMq@VF7^^nL zC?0$)Yf`*Gg623wR#V?IgGOC)=aUSh6U|vvJvvtKDnSYOH{Xu6xcl9uvx92M#PZUD z*ZJQiOdryFH#AC(hDJzG-Eqe^@~|%*%vmrV?DL^L7)4u*p8T5M`O*f9T3mA{Bp)xm zX0DRJn+?5QD*jm*-fK*Bs%qEz@3UZIo@t9OPh){KS3nR_#=x1d)c2gF$#thXaY?eT z_!X$htZr5Gz3i7SL0mIJGtMR#6C3%nBEH!Qj;6ICxmRP!SOE^qqk^=X5?7%sQn`wb zeDsG_pLyThBd1{3@d;W>+4F+!>_CS5Z6S_KE7SCxq610{lQ~wGJzUr!kJEXp^mM}> zgTd$Y;l+XF`?;u#3Wq4HcN$PYKBX{!`JG++<8AyD<q{jI;PX1r{aG$nC_`HGCRggL*Z#_m2dne+4~Mc!fgIi~>iYioOZ|7u^sZGhYVg7+D(_n%nxh+!LnoixX6N*LujFZfjHH;aK7tm z!2oYxkp!Lq%n-Hi60@$yV;{^gU+$0vrvXZ_;1qzijfw@0OP!PFNv6n9&iESGTmfac zcyDL&hGbU!%T1-SdsWV$h}(10#VbzF`^u1vm#QU-!)330btZyl;rVz)A;%8~zuehM zp&nM^pZzxjL)&YgFcG_@EAIRx;GGw%a7MgFE-BcrUB>GW!-2#6i*Z<>JbF4HGB6{=mQrup$e?~WzLtf{t5T(DTE*>_<>qcGJUs-M8MaC9@dP#SlG9!q_I z%o2E}AEiECTK{H}$RM=H#H1bk4*OJ7VudxPgky>FWo5c}I%rS@xELRx;nLd!tQ9hN z%KsN#&7A82Q#7A(qsGhe;{deL%c(^DY&gUWU$9FpDn&U=t_6v_ajx$O*&>uTBeo?# zSF30CY}mzk75U5GgXjHFAH&@gDf?x$=s(j-RQT8g1skDe*AUX+s)8Qdn}r!zo42Im zd)b1h(sr=ErExxUlWQRP;H>xg8=<~U-Wb?FV!w;rPb4(@;udT9;EbY&6#)TGvHVX; z^jeU5!@uP}Cl%k7y?}pfmgwwze_`+=7RJvwy;Hb0gk!t?5Jf+cW~%ZU zX;G^DoR#!6*Xgu=J47}OD^9`;YoJ5$e?yzcalS?I?-~fKSVSVmXr8 zfW(IB#2gwuHhzB)p-G*}@Vk$A<5m&S9ODFT@W zKx8wtp1%5;!de+$Cl_&5&@pft1S*)>j7)p*!!KXRqJ9xr-auvh#^NK7G)peL0)a!Cv~zfKEeN0DY3-qRku zhGn_6b$#Ily;@9S9&GOq5}^`*v10SyLI0KX{TKx`{EIz);gvFoEV;T`c1(LOl7`D& zA?Q#y)9i5Z&FNev2uc&YHKSh_v6ypm-%SxY4uW=Vv4U;VlH97H-wgPFuA|Fou^Uno zBs0nVXi>e(F|<0$X#1MHUvlQ?Z!G9lrz@@YKcTf@?aL0Lw?PFI#hkaRPEq&hvD`}w z-j>TO#A%j)w!1}>S2F5iUPA9MLV);7ynkory!f7Qze z;!tNcc-UB|%TlNWpAr~v&k?t^%8iRI1=U_B4Fj(pR`t)n`&86Mh(c*D^V$;9Z7_e> z>bfdQaYDm&Q$vE=h``k+R@th0K_bY>l=NHey(Ni|w%VeY>?j4joZ4%PfVTSf4HOOx zZK>S7mih^e=UQrbO1L5#HFwOjcl~M574;>4|N{{NX7^P9!<(gtS zz{sTHY6wTfw9#Dtv(ERQf>ABq&m04KbkV5QS;gxnn0&(?kVneG+;mXMJr(O6^F{kN z95=9TFvNgK%RgMr`Y!<`NA%6ezUemCP7T~xwutUXufJcUeF{XgN_^Kt_GH?#gfN}2 zkjR)+M`ANZ?a_DO!Q9~r=cS&%$!Mbs;3Km6TK9&Deo+IFGH38iq57~C>Q!x8vwOdvXlHzFS+>2 z0uRi*$M76ZXUgbIy{qLx?vp=23FK{BXf)UIJHY9;blbz+SP`Z_?`Igc55NW)!;|go z(FK<@GP5Zicr587UpcLA3QR@u1t=cUv|XRQ`K;u(q%t^4~<%7+0ChLTWrvJTD2$Z4Pp zl9C=JD_C$}#_FXOjFu}SPXMSHLcEv^!&;nzn6^Mt7cADUV8@2q4oR|MclDS%VsU+* z=wuBKA>N=?U0M-KDV&D+$PAL+0O&u?@4d>g4I5=9U=mkbIg1mXW1P~wd;U?qhZ{jV z{>0_-AuWR`eCT92;UvV)r!xU}x_@4C!ndY+U|mLS1*5qq`Va1$y8(DEI^hzds?GW*96rX>)owB18aMAVyw#~|&49zDw3ZjiQkcyH80M2*MW|+Kb zA_FV{DSuKJ{bGl^6CtYrpBJg^^~72iAm%~78b6{fTidCz8RE$SVJnh|b$-0YJG9Wl zM>zSBh_52em%4?q-j;iEDuKX6ll0GiP8G2;duIkIU-r-O8}|=qj~dQ;IGhuZFB5F{ zUTfj2CkiNbOYN#B3fsh~r%2rA&Atf|g0*K1FzDQYQP~e3<4#h1_aGF|0@xwdsn`Fx zsLUKf)rW~AL0};8U=A-mSd4)siHU`bypiBKL@abHQjU#>QyBXKKy(B^tKfdsNrLYl zhqj23KvBVMVuT&TnxJN?8B}?BQ4fLT5Ds1>cm3hNVK~6NdFKZGAlB3W;g(nO?AOmy zqpi|4kW)L_o+t2=I|!n4-LGdY0Fzn+mWBwACP5i+poHci4WU9ffdWI+BE|y|hN1f* z{A~coZra!E$3aeDFSdAlN)Qa6+aSWu9H<^UuwgAlqFCS>_$g3EXmi-*IGtI@6T3v` zm|$bt-2|Egc36m0hQ2)=vM=|ZotayJZfyrCz0j}K4pxb4V|I6yHdI$8q7dC1BIq17 zCq-3?$8y}2|E!h9gQV-h1b(7R-+U6*p}=jlpSi$hzgs}~CstRpO`9iqq6|0-eNbE> zS{TG^yPM~q0X{42U;58vWF(ps~~ph~Ke8NS0OOz5(R!m6kA&s>rHc*R{rWDh2t zaX_KX1~_aZ__~`iSG6+~=LB`|<$(yvy9cmqIo%$Tyi9Dm4Lm}{L%X&xq7VWgR69`fI|-$KZ2AnXGG7maG$sVV+!u zw3=f!!%GVRVyVbm~AV(y|J9Kq7)ed9Jy$S>iHS73~6>I3GC$>O; zHz=}>C{cJuwyYHerdXrV(A6Jgv=Oii6lBaV^n$L?kB#xUe4cULWQU^Zq$(2&R&^Pc zFePhe8=t7)u2AO)-wQL4OCnH=$k6ww4!3C(dL`(FN(-le$8Ft^rtKPhkT8Ua<>0>Z zk21uD=Jy-fjS%wEJ=heTo{}U7m34sir}@G9h*8yo-|-VFRi;EN=y@@`|8W( zjHhfVg(ayTi?p`5tB4*62tLwgf2SiM;mM-#iEz--7c&Bs)OH|CTo0tSa;C| z$m2ZZC97GF+NlzHjX?I|Uf(FJIu!TF6B!lMZ>%B>%x+?O=L#^vT@$~u=mxV$vd(=P z6()K_4og(JJSPAt1qwD*LiUqJ03&5i=vnqV6>HyDBdXB8=N^uAe%*J0V9V_ocq}$! z@+0lo{_gJbfzH7;-z$}-H{K%~%@4d&s6$`kOII_35sKiCOVsy1Akf*g3C?JTLvjPM z$`a*7tWntUG|sT(2ip*=Dq$_d=LmEo0iU)-@!j7b)#z6qi$P7a4|aOiRUj{`aWMrx z(x`j3w5G@=?2`00@6GSvAcPk+018VpoVZL|540iZ^(=@+O}O9`81gDiukcKq5V{&c zi93j4Q>3xOw8_Of#i85Gf^MS-I$=D153ojMN)n!4FC}OpOqerGlvwoC`|w7b2@~J_ zJzU4*JaNXQS3u5o0!q$us2lvQG{qO@WPgM4gr_}qe{($}#4@S=0YnJ{7Q*INXXLOG z<)-le+rR-6s-sL=VD-yoN7KrNxUfOZOOVI%VShmpJ>yr{f&~(OSvIp4CSM%QO3IEd z3pn2|lkXuUkqIX=W;Yl86DS0teTNVRSSj7Eqc?wj@S*H5`VmAJmRd5nw~K>Cj+bH( zokQ(cJE}ouKR{P?5MP@bu72K_`vEusI_`=au$yOpn_tj zL48Y)63T6JdOC-#t?=syeAiLP>-m5tG`)3qQVZQ#rt=IO-@h%vbv&_CA!+xv#-8N8 ziS00%uV5UDh9TxvbyB`koG&k*Bfm@ZJi!l?Gg z@{~jIs%b0SH-{o`--y)tC!&PtA>)7elnI@eG~$bs<$rqtzzQQMou8AEuXzau@j9QB zh>(nOBn6vZk5@b0*FHxMEhHY{cKWtL)l)i+Pf-RwRe$N)5MwP^V_#7wlE-RDqhn7d z=CT}JV*UFL@D@LnN>#rqr!U>kZHP21V67gDv5- zEI=!MXOmwOmzo+Q9~%np(6TnDku@@Bg-&=vfg9?bSNKEVL6sxqtvunmIZso82~gW1 z(T*nam08tQQZ{j23NetV^!<9|s_ku6*Du zwKeSfU{xq%WZ;9kQIxuqmj6W~1G%OVUv}g8J48)`yr|#7QM*O$0{`oiqVu% ztv%gQJ1gCN%`$vDAPE~j?a;0ADZKTmI^*f*XvShE`)f<(vywM>!z-rXyhSL?wv0`` z6vKg{677bu+*8mA?XSL}3lz4-)yEri*XvX^jf9Y!EzW z^7Z=-`tfU^HSZN6JG;9&tR@adC>yl7u+1S#>XuZrd}zy6#Qk6;wgKbqDX>T1$@-kf zPW5~fU54Of1VJIV`Rf9tR#V6$aw0$(kvre%Q{bkT?w zhbiy`E-EHgAO?(BRB+I_F1G-)pDQD$o-`HhSaA3x(2Jd`KTGszE7!f55$hRdx4yKQ zU8p$Q;)ew1C!3+-98h*3heYDFKf5PC8-@@QL9l6m2VZnM^uJj^JeJ9x%{?0Mb#4wV zEQ&$_4#VC{Rqy1=rTXNxW;*C-1-y=3{=$4d6Z9j$cul}<#jk`^uhJNcaCVa}>`ur9Lr2n=Rt9~Vf=713J4=A~auKSVg zicWdz@{VLhjc$_2o;t)1OWCq(=NSlZM4LiSX28081v%pw@1 zD907W=bbcTCs@FMk;!YqzD2&l+VnV0&keo)x)`i-!2>LJY$!1L@Wx&GHJqU094!ow z;gD%YI75<&e*t@*RS$}P>syZRfN$-&A5zI)eZ!3BjX3Ax=Y&rJ(cmc!MFmfvOl=R4 zp|SuU=&N+Lw4Eb*Y-OSn&n$ynHKXX`g+~}j%e*(6%oC z@uT~>h5HNgElxjXh2U1iUmJRx%#{eNQ8XD22&UB_#Ycikdcs zWh-U1f(e}Rqb58;N=ViI=jnKsxYtTn{5(Px6WIg1yNy000Yld46f8dCDj>fir&TJ8i@S)*)j2tLV=S{p1%PEskKVZV~fuF{FY7Q@6S|cpC`(@ zL$8CZVSG|O$h8LhA~^_C;fBb|z8keiI&INn$Q^Y@bGiqh;lz_xU3s-$xHInD`Qp3O(R4s4dn=_1 zN&)K&dVF>JPln4Kclj+9FT%yS7K^|b=Mr2=mjBnN{!@79^RVZ~6n9Ig`Mf^`bhgyb zboBY1$Dh)1jCS+{uD_sg2i3qlusiwz~HMKkCW7(beHQ}k$nrG037f_EzCgB}O4pFe8_BaKo0n$T7K;-4 zpV)!&pp7+JG%=K)+lRu}@F>Z?Rv*cM^+4(hsM0ZPxr2}JO+tskbc?=) zn#YJ^k){ke7=iWDT$pOnl`Az-_Uxcin;O_0+sB-W1}f%|~?Ef1?EqI{@U>PIvDt#X`LHx$p4+RWDNN zNi=?aFNkcp%O_vjL;@jB3Zu967Nc_qcX=xez`qlHz|F3;?77MbyBOlDwQ|82c zqJ63HpoBC8sd&uIZ$Q(I2X4}mn2gW4Cn)=Fu^upzIy@1-(VPLf+`r~n#ESfbjexvv zrWO4#+BX7*#G@t-qHY+yI(`R@2qYbk29A;;Kg);tgKIEE@ce@)X0Cz1o4`0H0OSwJ zQ4bYqs)Lk)NTcdcrBLhncaK`xIeBb=)R+GVQF6Q@4@7_R>Ih;;N<{Fb2}g4r(y-(y zD!^=@w1LJmR>%}A5#a6uCrDicGb7RBqH2Ks;~$0N9y0yt$xooZ^MFY*2`7rr5r~DN zfC4nPB%?dy2G5iF%HuDV=`BW9@+?;KP1OTNfV?cSmol|f0g1@bsYv~Z6(N6k0zs;; z)aK!+u&Ps%Zwd|9S`gng9beJ_gOIm)+sX9!D0h)KZIdT~*Vhm};fIo=z7xopalluk zyRq?_B{CW~b&o(p@x5-3ZU!AE2{8(Q!q}SL)2j87{Vj~t>u4H%h_mwJR~y3OP^*`6 z5)Tu>)abq3+@;A7S{mvB;!I|BE8G>#&2hG7ych^B=xXy(kUo@V-1 zVk{Dt@Y9CstS_M`oQ*nN3meY>UmkEa=?sDs>ng1JCWWr0Avr^Ac~LIm6v&u|9#S&0 zw8S%#=lg6q2w*;(XN0~+`_qH4ZjS{TB;U%BW%w(ni_8AZ`S7+?QW1VE~8&Bx&N<;6)cA+X?#w>P!%lX#Pyr$ zaWF2KPN>hZewlf)#ekm~gM#mI|LN~>ipbmGUeDCqDzTCo(`jnge;cjrTS4_KxM0VE z6DjuQ8*`&AJe-pU$S^hoU(RgEy}d!wilodoO@I) zCqPmz9aPP{_5_8YDl26#AHj$t3(^PL74lTIYv}Grm)Ly+2DK3rFLzNkMT{JFL@8nP z_#J;i;|`POLJ%7jTOr{JGuqq<1Mrs@Q=U2WVZXV23(F;+Te|GJS!M!jmWg8qzbh z*$wio7E&jMpL+JC$A1;3;F5^igd{qiB@l1%r zEBO{16X?yXtc7M|9=M}&aWN4nWmm+vz7_FUhKVHk@S{t_G(wLZn=Tt<(HcK4IIjVU zYyGby$tEm=a=Sd?0Dt?2h3xv74$z)GBx@IWTcVfVtWGSfWOojdzJmjdOJ{ z?`HE&TL*n@W4&N&QR}M=A)vs@$0|4#8ZY9*D}I<~kd@MfqQ9YFjgLsv>qJc^(K`GT zus!D!`OTER=t?PJd}sH){yF2AKKClh$d9=Oi;ul|oW}X-0MTM+hD?P?WJKkG@Q@LO z<9gEM_^w?dFvr0M_Q!lJFg2+vY+B+eo+YDXa4^^mknkGt%T)9k32LFHS&)G){LwpS zAM;P3VlvpmFCRWYL&u3v8x3`sBEafROQ>bj@kT@;P;cDLd8zR4^R3UiaStS`$_(=!W^lAHg#h4;H_7~DxNw{)dih)_iH24F2C;oF<6q%7!M z9dG7Gs3}<851PoYxf@!jTtOce;5X=+zRHtcJ3aFj#x2vb7+Vm|kTm1(@p9OsLM3#VF>G zjv!phQs=FN=<4rg@_Mnpq&GaE`uB6_98}NU;5Pw}rft&JZEqIQFESaB;!K?gRxho(OT_#ld=m%7g#~oA~-P z1kNrhBV@V{6l;Fn{n~1^xK|*SsGyWd*m*eFU9c6UYPOe#}_||Rx64)g9kO~E?Y^HfCQx<1i@P7muq=|x2qNyr0&7BY0 zjn7se#>@_MbuppnW2VK-{a$Lw+Ty&((z${`k)77Zml3cx(&IfyGBITmK=c(vr`MSa zVDS4P-*e$+?Oc0zMBwwK_g0Uxlb!D9BvL z^sc?qucNhFJDmq*fY=6^zLvrv6=oBnwf~Yt3?bxy_ z)X&WMX9T;&8hBS3lp5-Z*5_3MbZ+6PyJ8jfYc+%gViJp?#yyr&EW)_U5>~fdu3c2i znXq2_^orJ29&fF|@u@f#Hgm5smOP`c47wL4{A@YK4>?}~lI?HlBs=W2tt8!4=f2zm9z)5XO7L}a@drgSH+xAf=(MO{XI;rza1 z(+QjBoN{7gV~6}(I8CwG~4=7OL8hWH%eI>SW&DDtMK8w@%BVh8WCZqmyx zE+`Hi`^gddJZb6GV6dq7Hn%mniFWx=WC33qX18i&klX z+-lc{^(?@_sYCG-r+4RNdFpIR4E|HqS7CB#)BXfJG(#c{pM^*2l68V8-s+F4?q$$g z(NiR#7yVMA|J}~0v1_JYo4q6Lgwo*fxJ)fZ z=$KF2GNkCqbZ`}i>qEJ}^tXCN!(ITS=`VL4^b3i&O?oU9a=*oG(>NAG<7W`V1hta+ z#bC7Q>7`H1TOfR)aZ^($_m?q`LU=esoXL!ZI48Pk$M<5jqxmG7B(Q|!AL^L~IlgT2 z`CRnTaHh-QUSm~GKVGjDDzXc((e&;@7Tv*_2j&(<#FYzihIZ;x2}O$OGp(=7;V~#A zRo}z+Byu#BbeHT93I#ump}JU8TwbvoHVwy}QNpOt95-*L6Q74)wx{voDI0#`{nmC= zJ?(U!{VWXYv%^gndcP}f!=}$4%0Iq=c%(sMO5}D@$I}gc`qKp4-D2@>>u-5%-s@!& z*Hs@rym;63_Gmsx;w0WgEPns37|zmHSEVO)GscariVlJ9m3vCZ&8MENH%F70cSVc0)M zo*eDx`n`T`5Kq&Xogrm%&3Qccxsz7#?5E^IP6megvUuK5~hCYrbsIE7`5E ziuGLg!IH?NyP5r1(J_@vBnxp#Zf($1<7F3~S5F&_3LY^OGrAByQfQ{PtEd;fU7?wM znOz`qD&3hOhh$iw`({Z1iqHFYIUa*!YB0{jw7-Q{0^hIR#`-X?H7ZjQ=1+99A&tHF>Zk#9UeciZ-nrY$nT2ZSlW)Lgl_O6C)SL$J3FiS5EwQO0wLJm3MT#v+Y z{CY`fEskNFw8*bI^(Kq2<66eaMBk=yM;EzP>Z{UoB~^_HLpT^KLgZwV{b=@n19I8t zVUfBY!T7t5YjAHn^~3DRD-YamlF!DoCFvBcxcNetF>!G(2SrYW$m}uy>AaM3E7|^j z1YWzFNr%4*J9P*{N?LE*VygtJ5Kd0I*xf-;#ZUQVQT%9QOFGIM%sI{Ysrze%&lW!P z$(b@31=1&TU)si#b5ieV5~p)n1&rme+mPTDt$a1IA6EFf?;#=oVj15WJ*j z*v-vSl5Y04E$$LAD+@1b$a!!d)x4!jMhTT(TDZ6tWh-mJj|2oT^HG{(5e{Oc35~ zYJL(w+v9nbdC|y(BLfo6y1K+$varKsy=a|z{t5;@o)^jIG+wT8rzS#1hG%Ml4DT63 z70%SOX|66hYkqkctU_34x%?gw(qkJgrr|7<5PNLWhL08A3=MNqbqXR?^vd46^2a?y z_WSJMM!Nr=G@tb!C_lGYj+YYzi%4+q(s(F-LQ|f5U}(FLOeblgbxyhDtGu1ov&lYzun69&$=(!?f#pWq$K;fkZoOWfQgGS)fW zXkOtOFaO5tn(Q$Dq>SrzxyJ)Dxs$gBgi4BzgJa%Yh~=^=q$`A-wXa^eSSi_^Wm;x< zsXXy*^?KV=6b~E8Y|Hj(D*+)@LHFwYA-C`0rCQG&{3qx$^r=6|w;jHgH7>ODn(djF zw>v`Lp1bMBwe^(oh`&iDt7PiKJ?U-Bi=$%IGmaQ4uS1`+As^wo(*@qM^&CEes9VIV z#s_si;_B>dL72on(kb_pvRJpsN-oWo_hOXyu7MZl!Q^Q{NNSnOU!Ayr~VkMI;eWlLrI9jjS z))M)GTT!4udX=v$ru>y=MO4qfqL`DYVwl*@sD0tjlNW7jJe7@Og#}}*Hn#k;e9Ko@ zNR?^#^&VFpq&ALp;Vq`W2!Cc#aSjs6HD#Xk7XbE^UQDINPN?gjid7g*E4zIobj$5H zrLjrVk*El`o3ojFg?UvaauRB(CL`qp7if4Q3#STJcahK93 ziakzMjNj?KYGw6Od*?RFRNlEKQSs|qoECk3&D~Nv+#743BMc79KHT*jw52j3 zkyvJ0V_+N9h_X`1?ap4IYA-_oJD`(2%A@Cp{LcPs(stdURuhIz-Afrf=xvfT=?W(P z;32H%M?1{f8HVN>A%SZ*aLeJ7rjBUNr-9!h@O8*rh+=#v;?JWue>QQgcTj`6+)gOo zm1TUL%J8{q-SO}I(ImSnt2;w&rr7e2u<7|QZt`JD@?oj*rL^BV)%(8pqdltd0H40O z#ujVaQheY?d)k7N?wRgvi;Ft^5$6GC)>&Mn_BjPCF3;dc8#jE2W6IlhXIq?}y?D>~ ztw4*Vbl@u~Q+6n`8=gvWf|Yh)HI2Fq-RI3Dt?rNS%i{#RC&;+v-OBN;I?9e1ueAC3DaG6++6d3ceC=1S2H~y2H6L zd33Yah&gct$^s<6cG?wM{(zzLtL)4qCF>VswxW_YgIsH$#lz`JQ!jk5P~_@xH;!ll z;=@|aOy18w-y556(L0IltG|LRyuHM++$1~3=F_aEKan!qvRc4ErJHn`@FY!V)V^Vd z&iU~vZ9w1hVM$xsC$%)HfFpjQ{Gu1-ZBh4!vo$5BktpCFP?iiUQt3BVxkj%q$zJ^B z6c+fFs}p(5V9%4Fv+QPCNWfL#>pGS`_6giLs<`u%BdYe7+Aux--QK&0JGt7t7}fY6 ziFM<1`Uh-R-5)VG$u+wPA_y=XOTpT*m+0VHG41{}xy3&5DT%is-;wR4j`p5bMqwc!y zos&F5p4lFOFA?YJ^rzxg1CIQ!8Rs0n&VB#LJ_(YST@G|~S-&7nwB+Wz9~`{AZ2jdX^1C5r#N8b@)@1)gooO9uOsW0XFFNd#*r zjjP6k5l%*@8rx=<4{~&gE5I}V=T!$3y7w*Ha)Q^!l#f_qW@Eo^{@_T0iQYI*>41(K zj3CSHI_4j95%>IzZ++9*s(BT;<*FH-Q!+g5a;xaOL;APVs*KG?{_93=mz{EpPP%_$ zv<)h@jDO_zYhY1tnyw-$hcp)7yy3N2`&W`%r^6FEUy~TkHgxacisEQ&TN2*c7}pi` z8BDynZI-eeN%bP-KR3ZIPUX{_KF}@gx@-BUl3aE$XnlEmunYONy2YjumA<6ysL)BX zXzj052nYH=E>?B^n>=6)XY#81w4b-*8Blq~fHheY=koP6F}Y?{=-@dm)={rR=B9u{ror@`yU%3bp_`zeSRJ#zL=mH5j>ZNCBRG{ zcER#Z{*cTho98(i@mnB6c{~lVdgB+qF};}K4&GKHpfCR|N!BAa7hY}Xnx}xL{Xoq7 zp`k5ETHH8aGyb?ow_S9v3pQcStN=+a(b*o;R+kgc->3Snc1=xzOUwTt`>iwYsPK+ZQrI_Z@7$j%QNVc7M^}RPXwSu9o|k|Lf_F zG2L!V3(#Kasl4&G_^VME<2U)D8uzcWEq6C4@H+p{x#b1IOt9y9VH1@Fpc{gK92YyX zyf=8O5E`e9inu3`b~3&cuKUb57|yO7gx=AvEH)bhWaYXcmOcE$Xd>Fn$3bXiz5cv@7TKOME#pO z{M9dkFgD587Nulzvw-#yfeR0GlE>e7bB=8@V40I#-l5 zAh{MW8(VB(JTLe=I#l~0FzWlmKvdfK9=mhf_4owjj6tlkN+VWL=t?d703(RL+9-Xf z*hj6b@b6O7p=m%IpI^KjOuSZxk>t053U*;mhOc`O=vbl%z{R*ff}Qx;pAalt_+c0Q zC=Pep#dvYlGFsHB^0+-t;B#&g-fRn-_SICSiLzX7Z-&&lhu>ezhLL_ff2nILz-k{L z+Z6SCP5n-I=T&zA&vmYQjxjscJ@)>)tb=I(oGRsfs*$GkSYy~I>DMe3pWy&@>YvJ; zm5J%HdA%m5FM~`v-x1$=8f)O70G)8ly#@|2jHE z8cP!>hshc@eF`3>F}4EpR__>F7JOoBICR|nj+%JAO&|RnbWKoFEu!eDx-)_={wC0b zIuYq$Btpi9^Da*6E^MYw7e&Vuh{d0-8(onlmfDhA3nnrg!u!JVl9Amp2FGe&dQUMM zqkElie{AY}aA@W8R*Rd7Lcw)@<|Y}kd_zfAHiNantYRRpS?5()wX6ZCRVn-{R-pO;P4=={G8Y#b+S%W1}po937;4jnBxGq*DG<1+$%p zbL76Ux9W$##4<5AjIksAL6@Ly3H<-iHW4XY|DGi>ze0&#Hex~l10Y+H&>Y$92|7$S zU|y~VA@Pcp?6YYXr%QA5((45c>%c&*~qyjsi#+CgdFzV>&b#XFv zXche+O(D(YMp!ZZ%gz07oVW~DX8(C8O>PboNwhz}ip$U?zXx6U6q-)+37782PaA9t zornIjj- zcl79A0d3mKy-ep;8UlF4_~}l3-JZ?rKw?5i=HK7>-sX_|WKqYmG&Xr4vRivtW6#wV za`k1ak;^Z|ML*oDe8@JR)=tS&MV#Y^(`0{%ALvbU$qyDGmuAOCE3dGb?p)-%;QC8Q zkJ?qBf+-;OE^vmYjOm3pnOa~r&;=HS$s(3_%0iq z?>ea8lAq`X3FAjs*c$%ch$6givUiiudNg9UYVPuJ~w z7tYf3;r=h-)vkDG&eyZ<_~;IHx2ZnlLpwIEuPhR(3`b4hF@^^469O%VQ|xJzLe-$77`JAbCSZ%l%z( z{RZV~52Irv*+~EU3mKqCpQ%U&-*|ZCb#-=`yPNnAm)yQ>`qHnS~Oso#Q24>9aK%DCQx;rh(*9-dfV!~!%cx04Gu zL}44??&ufa(p=~%v(>-k^R6zI$veD1=j&g7LnUO7L;h;>U7_t4EV|lt`+4?%ORLz} z9xwPvn25>pyof9$^$);b3N_^N9*^yS_~sUh1q4sAx0IlW7_U3+s4Ee>f#)J79-s1a zg-W#aW?sp=y6%Vf9WS70OR=NB7kZs`N!hxl99fN?2Z@a|YTllY2 zSXhs9UOR;yYnoO|#GNk!28`5>?{NFR2r_{5-i<)?j5`p4CCDWLH=l0r2CU36A$!jTZa;QDbhl07)X5f)ij@4iG zp;|M#g1n<}3DS-HH0F3WM`o<;(%O7mMuVkZ1^i-tX zXeV~V5|wZJgSQ9Awxc?lXb8-)kI(-2*o%xjrD$+rn>BgEtlNoa?{hMCwhfDPw`Hrz zi}~K=L!@5@^dp2cPxgGCU6#L_;*U>Drr@UOVN1kLl6(6FiKB!fy?f2qn2b)&i?0f= z1vBruo-)gP<`>NS`-$=Fi=(}8gkkgq-+qF_dST1)mF(f`{8%o%5!wVkT_3}ER(&Y6 z@Rol!Q5!RyADcbM7Q0LNBD_|hDPd(;zs4!kieZFK)Vej>FgCI-$ILj^gX7|}qRr2^ zg%zXlE3bcx7jg>BTeDt*ANncx>O@W!degnut4b!@`E{K;_#RhYnHI^xdLmAh4 z!*?8UHmw`tf`kGH7o6W-JWb_B-su4jKHF&U$BMDp9+M8XkAwG;!d*`6I(A&FZ|uz7 zeq=r81KP=0#%)W-snRbRRJ!(TaSHG@93D;=h3-V`$TrgU(OmytZMpkgpDcvz&(paU zsoW1$gPGhmjp!|-dY%3GlHh={?X4kLHa!WjP8Y3Z^2`vfvo6Nju@ZZ-gZw)@yS;?E zDG%-z>3G}q`(&aKyt9tFG)t=NVwfG7b4sfoxT(9WCz{r;8E^@i#b_G-evSGc54-jd zPF5V{O{4~#Q<|kYEbFSs$!yk`6WrtuHP*eJi}}C$y7G7^*SAjzStcS`M%Jt$vPBHC zXLlqdN=$Z|v1iM^lgYj$Te9acmPEFaJ+d5Y_FeX&!FxYW=ktEv_xJ1cm!9XDdG2lQ z`?|j0?{!`D8N5=j4Lzg`#Mi?PmtA?q98yZ0_47W3(WW_C_A^{IBKN&Va}DF$-yScx zG{g|R1h-BoKYL3;bwQHFZiJ^cHmdG(!b>$0X=Qhiq2@^VXOMtJRQIc&Ben*|CHf1! z1+{DR_d;0;2SWR1$Sfy<1<0+58I*gvrs#t~@kLkGoY+c<@Au0r@Ip*(Wo-VbD^oon zf!mZmR)7Zae5;VHu+-(pOd)UOexX)6-tiR`3Uci*U zMH4GO=vTA*4CRs-R?Uf$t92OvU>tuX55;NR2Ae(4v{S-o%Ja_B@@rOB2#3dOW)8FA zcVPIIbmm=_(s0y)AtCJP)9r6pTAi%l*6DF0=+q#G?=w!^=yYcW)XjN;=pKu!6Od}jCm z@&Jh$0_p{;1dyJqSyjTd7^;7E1jB$Ls2OgG&-UvknJ>#Ysy6^jC{o&RCj`dtNUQLM z-9fqLVP{GGp7G2eJ-O~rj%*w~%!Df)+Vx|e)d4E0t#xUTaJ#NBc--`pY0-}!KcnP zov$P2pkUCq(&3+Rl)_bz+rN{7%%oC%rK%zd#C-m}lW(;L-pEUa~!eiHQ$9+hhLBj%S&u zvdk=4#*WtX{$RV{a&8*7H1Lv`8E=N4zAWoGAAF;pp`N{(nX?NNXfvl*?hL&nwS?iw zmVwB5-wRr2niT7s9>Tvoeyz1RSkJRg?>!EdOeZmuow*OLnI%Qr_ppB+(0aQo!@}V` z0uMIk_|!j5iq3Yd(a)5=($^%iBY~mp&Wq-AR^Oy4^Dp?BefHM3KXWYRCqA$nBYfX zzE##8DO13m-Nq*sPil^qbK!(j5htTJ3DrX|rmO&&uo!Q`?xjxRt(JDSuAsFV`yN19>McB^qfsnJ|ZZRYdoR-*Gt@D~$_z;9-5-4tE`t2(speuFdBhSZ-w z{VzOg%tU2ttpX?IsDAa`@&wPleY!rW>CJ_=;Xl15e`ds7J{Sw7U4e&NY;AK0r8`Sl z9amFx?8V-Om3Qkd&RK?VE=+Zd=^kl+9kZ+GTJ@UP&RFKiq~frSrL!KS?q$R3GDAzZuN|VO_Xg-Rr3qbP; ztV6dT1z%Zzs7LoQ6i} zz@Hw)N4E^$z{(XLbs}Z;>$-K@FfL`gv&GSVS`jSUH4@tSJn`>6;|#4dM@4oJFCp;x zi(r5``*G(`jTA0^%du8d*(iUDy9aZA`V)TdO49tuyLb3SV&Z#P-K1QZ;QC>}0`b+}f1Fmv2CtbO-(!??(MC!>iq@Us*seL@<@cT^K^acjxN;Al9MTkr_&6&* z09OkFX_DKV(^?p{5P<-;MXu+gWsS91y}`9cBd@19^2RwvDsuUxklr|V zj0F5_-9Jvloj8VwO{1kGH1gkuG7*lA60PKAz+UFQMF(?TpFYP(G7Pg*wF3iQ&V2u? zGPl#0{xH}fHNSqRH(U3ubdC&go~FuDCKufhzh9_+Io!@@1P!(j;T2^Jv9qnT9t)dik*+EXzkL6vY{!xO9blVl7dK)!j=XI(QCIeI`jdxq zvU?sl=jT3!Ee-58Q2dSQGI`MY(j}tfx~Xr2LHr60;-kmJlWacl$c~l@aBjDVF5}ID zS@hTXSr3?88qWpP&!^ZLOVp!cZ%GZIhy+1SS;WO6!H{8Ach!hKWKQw*1FFWm<~Q`1 zADj|lOudxEqnc}!`RKIAq&ihDAFij6CgPmg+2c2SjeaqBekHgYtim^AEDun$qAfAm zuZCif9lr=#|0G-mi|37S47iCAZG~m znFbxl{+tmEjfYX*Tiuv$1#E>Km#C#s%YTz&I2EGyBpgjF*#QKy+odl*(;JDBHIzT zxNXI$NRW6erwyf$k`qNCX+wJ$Lr_J?_EqX6g2gm6Be2ac)0 zS~wa=y;U$eukzS-j8NtED%Anl2;0i)xJ^_;Ou1_EdWDF_QqIf+-IthyWZpq)!vRyn z%sg<^dfmBHETgQJEdWRD%+amo2a}=16)x>w}^>- zWnihl1PcpjTno@F-*P+4)EQtqyj9~Qdnr^xb(ry&GH2-Ua8SMlSMxH*PS zFE9LknI%0e4luOC;b&ofW6`-jwXBZq5CXSd)8pM;ovvX~)-S^>KLw)1_Bb$O(Sh?I z8^>n0@_c54*h`U|uFsbq%X2Afvt=vzQZ*8rlP9?@yRPqwpE2p1^DDB8SRcH2-C@&u zJ3bSi(=n}HMM$b+g<-Z-HPXwoC*HJ3T{Mh$9m19K82%y+B2GyIfPoK-`CNNp2w}zh zrEXDRV8@4&+0O&kVa?+XTo2MWx+R<<)}Ruw7z9z(0E{Ez`oL}iH3TD2P=MwC6KB z0E$=J(N7ok+7t0`d$|I1yb!X0j^e|GKQ^!RBXi{WBKrU)u_Xu|C4d42MKGwmuiC=0WN{` zYGXhSLrj-o&FqVW@rT1cwS|NuvD8L!3?uCU3RtRFz(FYZIZR+De+huPapKZw18Rtk zr~`$;Mo-!2*6?DJ=92IymsEmCb+|B;mWoC-=Y04h>kr?w#C)?bc8PR>&|CEp%QBvm z)5f2&cl?08tBoxdoT&XJUkQII2$2s*D=u3|<{BErpuf$o^B~~ThPiL3yB1QJKRQGQ zP8_YfjRM(J&&3l^K1Ott?#PjzkfIBOVpa^n?@mtVC^m|W5VlaPa$e`UHRU@ z`&jNESL#=Q9xhlJICz`R3Iq~W0Zb7|Oy1TFK=EUO(}4A2_mSfCUI>N zt3Z%ApKi!<@L6RQAXXzf%ip*IR%Z!S;2!Lbv$|O~iQGGsO!Kt*3v#0MhgL{Wci~jN z1;}Lq{EBK_r%1;z@W{v3L2jMBhU1k6z3jdt4t9chb8QA4IzSlr4*@ws!)(egIfAB= z4n8^j-tN9cd*-)02**fpmIzzVXaEEHRE|)P6o$~JE<8TwI;ikmT8PrcOA2vmW&%C| z=t$CC8iStK0sf(QH0Pt+prMiFrJq74TS8F8`h`T7B0_(F{(k}GN+v#3)V5r_GYqMP zj{yo?%9iW;np0>t?0p{_cj>HAmMqbmiUHH*>*;FCqAqpav5#0Sq^m9Gyg@?uRp(F4 zNJBGk7sRw9pi6wzcc26p;_5PnegwlJYw#joGlKVu zBN!s%_OdT0y;II#2BBY7Kt|2dEFcx_*C>D;dvrl&M`uzkw|t}Kxipr5fTBZ9QT`8c zp)!D_<8!XCwK36esXn+(!c5Vq@DNS(CUrn}na?F5PG#VGp0ZyqBvDv@^QPJ!WZI?} zs0}(<1G3)5A#QX;?O)`A)r}wuydzr~F_%elyNpPG(1*wZXfMh#i-7(IY?l8}&wP)j zdvvKgeLvVAcP5r(6i$EgbfPQ=w@)=dbX$gdJjd4}?vJ7Dw~=X>ZxdI2is*X6%E7q; zdO0CEbp}WE8S?ZHJ6RV-K&07#Hg`^d?|`mKyYof4aLL+qt5sm1O*6BfN)pf709xeF z%%fFg{%oyR=Vk5(XX-X&QQ9QOIk$?qN*In`6 z(f!%w=+pmU#dKp!>Qn7wVBppr^~Lx+uF0bcA%MhE{%n@#^US5cIng58mNfcIO=tdA zWe7B}&eh^>oGbpm?;3^(xiivvY12YX(k<+lu!J$E$T9n^?@g%`)0v(=l&CSq^ryZb zp4r6qR=)^(1G3{?x4dUV-#0?*@01#o_nfKW7G z9!vmgsKw#_l}(FVv!)ARc+sHpFVpG-X;-a#FYy@fF`PiXyFhRo89f|jb`jBbd&2V9 z&gPDu-k|kuv-sw69ND8*4@6e|Kh{JLcOPbYuK9J)T_rXLNq*Wi5nJi+Af0dZxIrfv zH#_lhwEo8ZrjLQm)*zywTULtKx6ZzWl733#DQdk7m~XZj-g`zB1A2EnkYB;TZ7@4d zWnYK;Ls>ngr+fL+5*+%>XY#6&H38VEQ7S&>bbA}9?|s}k<7dA>^z%lnV*c|7{xPZZ z$VLK^tDcpB*LP`>Pd?p0G-H(W`~Do6RER+K zH7B|68u_x>j>E#TwngQ^&bDjcuBsPrf-EmHr@HnYvtDWCwdoxgOx5ll_*0r6b6XA= zd%x`crz!f!yqm}dT*WgriJv*Ra>TErq{}8vc)G`D6WM>L%O$gE(EurTM6H>fX!r@3 z2SFKv4(WV)d1_gZG@z`(B`b(-i-T`W%VS=^(Iet}sJAB1s5c$1X2amvizi>R`Y9b7F<=9LI1ro5W&P&grp-K|oCtg99?!GZ?D@Y^2W#0k9 zmF*<0abPJ-$SdTZ#|Ow}>1#v*O0BQh=$qs-2XEqSkn25iiMHDqP3AoJ^zQh03KUg(h3GpMMFb-`=O^4ZcEL zpDJ!|JTn(^G<>F)7Ih@NWc z_-PyhLt8RA{PI;u*jatxq<6Tr{+2tACe!#$@&l2v`93JN~3p8TRF40kCNrM;x4@{_2$DIQ(P1(QY<~`Vu$e0hsHb{w-%pS zA#$SnDUoWVEh;KflINH1lPtixv9i%sWFDy+D#}-1>whe`v=e?p|Hl^uQCjUH?MmJ` zQ!*8Fc%PMgOq1{;T@DMHx6q_$O+q&x*Hh+0#}XnLvrz#hmTUA{HpS)iu{QfWK6aN+Vm*Ojxil4AUFS;c>sqXMC=`P$XR`d; zZW;eP{$4K>vCyiBR0gY4lF`RN#x%&=xQy@NA>#!Yv21_7b|yDD`g#RNkW)r)a*IE8 znkCx3*)h#>M9UgSc|ek{R&}GpQQ3 z=m3;hq|Iz7>e?c;J|7?+VJDfonjoY?1@0ip%e72%;=F9>LZ(8lPvgwGEiKI$9+!SA zyVsGZ_naL`raU1De2>2n6{$)twGS+e!^%Ni&mF=+3%SX6(Wb{V@1!oTGOpN6U+Z3G z<_JCL39U14mVE8Y*d_i3Q|qP$3{#Qh)r(;WZm;tanFxT7CGjiG{7hIjQ>S^WCbJI049`H_KY@A?h+}W0OWL?b68oA zHRSK?w}E9BsieMd5afre;N!fcEhmSkk6rxaeC!j-%NyiJMRi6&j+B&;jg=_y|NKV| zK^=RxBy*0d;P(%CKKFw=$?$?4WB>2N|NBk7+C;N{E65r(oBw#jKM(n5u(Fce*Ctbc zME0M}`1Q~Z4T$bLG;(-h{NLJo`p_$G($9oL@y}l*{r3(*4;?AW!K!@{Qgi6e{O+N} z^JmDT`4#|yY5{DGn>M1J51?1-@SYjY1BywNPaa?gNCEjPqz`?@d7yqTvY)YucPama zGoC8xFVZpJlOr{PH@?Y4i4Nc`uh@w6ekKOAFt!sWeyw4~RCk|;I=QSs@RQay`}nZ* zq*hRvwiKEMb55c%Z9vZIJ%o>OnA|0ecK{b(odXwzT`Bq=jXDTCzO4p^=z1%)>XI2O zz(#hZsVqWD2x`OcX0h=J=@ZK8z90Slzw0mfBq)V*bL#2$KBeDI5qK%%nWv3W!I>Gq zA33TsHDCr3$zue6KaZsTeN?e$9YpavfBv@3epH{8z%<==N9+D}vgJr4gTdO%{+^%z z`{fxZCkPrn*OVRcFP%3R1q+mk5+Nh-FXQ+W{1yZ+UooXU^ZRegW@9B8dxBfC{+CgV zjR1|V`LX!=Uph}u31&mNSzDVNS}6a0ZvF3rMt20MT>h8NgQ>@}l&i_;bsC5;5P*Mb MN?M8~3g*xM2Mw4c%m4rY literal 0 HcmV?d00001 diff --git a/doc/design/ops/rnn.md b/doc/design/ops/rnn.md new file mode 100644 index 0000000000..a78eea7d45 --- /dev/null +++ b/doc/design/ops/rnn.md @@ -0,0 +1,153 @@ +# RNNOp design + +This document is about an RNN operator which requires that instances in a mini-batch have the same length. We will have a more flexible RNN operator. + +## RNN Algorithm Implementation + +

+ +

+ +The above diagram shows an RNN unrolled into a full network. + +There are several important concepts: + +- *step-net*: the sub-graph to run at each step, +- *memory*, $h_t$, the state of the current step, +- *ex-memory*, $h_{t-1}$, the state of the previous step, +- *initial memory value*, the ex-memory of the first step. + +### Step-scope + +There could be local variables defined in step-nets. PaddlePaddle runtime realizes these variables in *step-scopes* -- scopes created for each step. + +

+
+Figure 2 the RNN's data flow +

+ +Please be aware that all steps run the same step-net. Each step + +1. creates the step-scope, +2. realizes local variables, including step-outputs, in the step-scope, and +3. runs the step-net, which could use these variables. + +The RNN operator will compose its output from step outputs in step scopes. + +### Memory and Ex-memory + +Let's give more details about memory and ex-memory via a simply example: + +$$ +h_t = U h_{t-1} + W x_t +$$, + +where $h_t$ and $h_{t-1}$ are the memory and ex-memory of step $t$'s respectively. + +In the implementation, we can make an ex-memory variable either "refers to" the memory variable of the previous step, +or copy the value of the previous memory value to the current ex-memory variable. + +### Usage in Python + +For more information on Block, please refer to the [design doc](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/block.md). + +We can define an RNN's step-net using Block: + +```python +import paddle as pd + +X = some_op() # x is some operator's output, and is a LoDTensor +a = some_op() + +# declare parameters +W = pd.Variable(shape=[20, 30]) +U = pd.Variable(shape=[20, 30]) + +rnn = pd.create_rnn_op(output_num=1) +with rnn.stepnet(): + x = rnn.add_input(X) + # declare a memory (rnn's step) + h = rnn.add_memory(init=a) + # h.pre_state() means previous memory of rnn + new_state = pd.add_two( pd.matmul(W, x) + pd.matmul(U, h.pre_state())) + # update current memory + h.update(new_state) + # indicate that h variables in all step scopes should be merged + rnn.add_outputs(h) + +out = rnn() +``` + +Python API functions in above example: + +- `rnn.add_input` indicates the parameter is a variable that will be segmented into step-inputs. +- `rnn.add_memory` creates a variable used as the memory. +- `rnn.add_outputs` mark the variables that will be concatenated across steps into the RNN output. + +### Nested RNN and LoDTensor + +An RNN whose step-net includes other RNN operators is known as an *nested RNN*. + +For example, we could have a 2-level RNN, where the top level corresponds to paragraphs, and the lower level corresponds to sentences. + +The following figure illustrates the feeding of text into the lower level, one sentence each step, and the feeding of step outputs to the top level. The final top level output is about the whole text. + +

+ +

+ +```python +import paddle as pd + +W = pd.Variable(shape=[20, 30]) +U = pd.Variable(shape=[20, 30]) + +W0 = pd.Variable(shape=[20, 30]) +U0 = pd.Variable(shape=[20, 30]) + +# a is output of some op +a = some_op() + +# chapter_data is a set of 128-dim word vectors +# the first level of LoD is sentence +# the second level of LoD is chapter +chapter_data = pd.Variable(shape=[None, 128], type=pd.lod_tensor, level=2) + +def lower_level_rnn(paragraph): + ''' + x: the input + ''' + rnn = pd.create_rnn_op(output_num=1) + with rnn.stepnet(): + sentence = rnn.add_input(paragraph, level=0) + h = rnn.add_memory(shape=[20, 30]) + h.update( + pd.matmul(W, sentence) + pd.matmul(U, h.pre_state())) + # get the last state as sentence's info + rnn.add_outputs(h) + return rnn + +top_level_rnn = pd.create_rnn_op(output_num=1) +with top_level_rnn.stepnet(): + paragraph_data = rnn.add_input(chapter_data, level=1) + low_rnn = lower_level_rnn(paragraph_data) + paragraph_out = low_rnn() + + h = rnn.add_memory(init=a) + h.update( + pd.matmul(W0, paragraph_data) + pd.matmul(U0, h.pre_state())) + top_level_rnn.add_outputs(h) + +# just output the last step +chapter_out = top_level_rnn(output_all_steps=False) +``` + +in above example, the construction of the `top_level_rnn` calls `lower_level_rnn`. The input is a LoD Tensor. The top level RNN segments input text data into paragraphs, and the lower level RNN segments each paragraph into sentences. + +By default, the `RNNOp` will concatenate the outputs from all the time steps, +if the `output_all_steps` set to False, it will only output the final time step. + + +

+ +

From 2d6233646a753487d4ecf8a570683803c01c1d10 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=AD=A6=E6=AF=85?= Date: Thu, 14 Sep 2017 09:42:12 +0800 Subject: [PATCH 079/115] Accuracy op (#3907) * init add * add topk op * someupdate * fix style check * add test py file * update top k cuda kernel * follow comments * remove debug print * accuracy_op * fix casting error * fix casting error * fix casting error * fix rename bug... * make it smaller * update cast --- paddle/operators/accuracy_op.cc | 66 ++++++++++++++++ paddle/operators/accuracy_op.cu | 69 +++++++++++++++++ paddle/operators/accuracy_op.h | 77 +++++++++++++++++++ paddle/pybind/pybind.cc | 1 + .../v2/framework/tests/test_accuracy_op.py | 25 ++++++ 5 files changed, 238 insertions(+) create mode 100644 paddle/operators/accuracy_op.cc create mode 100644 paddle/operators/accuracy_op.cu create mode 100644 paddle/operators/accuracy_op.h create mode 100644 python/paddle/v2/framework/tests/test_accuracy_op.py diff --git a/paddle/operators/accuracy_op.cc b/paddle/operators/accuracy_op.cc new file mode 100644 index 0000000000..9ca04d4028 --- /dev/null +++ b/paddle/operators/accuracy_op.cc @@ -0,0 +1,66 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/accuracy_op.h" + +namespace paddle { +namespace operators { + +class AccuracyOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(const framework::InferShapeContext &ctx) const override { + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Inference"), + "Input of Inference must be initialized."); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Label"), + "Input of Inference must be initialized."); + auto *inference = ctx.Input("Inference"); + auto *label = ctx.Input("Label"); + + PADDLE_ENFORCE_EQ(label->dims().size(), 1, "label must be a vector"); + PADDLE_ENFORCE_EQ(inference->dims()[0], label->dims()[0], + "inference size must be the same as label size"); + + ctx.Output("Accuracy")->Resize({1}); + } +}; + +class AccuracyOpMaker : public framework::OpProtoAndCheckerMaker { + public: + AccuracyOpMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + // TODO(typhoonzero): support both inference value and indices. + AddInput("Inference", "topk(indices) the network output"); + AddInput("Label", "Label of the training data"); + // TODO(typhoonzero): AddInput("Weight", ... + AddOutput("Accuracy", "The accuracy of current batch"); + + AddComment( + R"DOC(Accuracy. It will print accuracy rate for classification. +The accuracy is: +.. math:: +accuracy = \\frac{NumOfCorrectPredicts}{NumOfAllSamples})DOC"); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP_WITHOUT_GRADIENT(accuracy, ops::AccuracyOp, ops::AccuracyOpMaker); +REGISTER_OP_CPU_KERNEL(accuracy, + ops::AccuracyKernel); diff --git a/paddle/operators/accuracy_op.cu b/paddle/operators/accuracy_op.cu new file mode 100644 index 0000000000..4e6d1ef965 --- /dev/null +++ b/paddle/operators/accuracy_op.cu @@ -0,0 +1,69 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/accuracy_op.h" + +namespace paddle { +namespace operators { + +__global__ void AccuracySingleKernel(const int N, const int D, const int top_k, + const int* Xdata, const int* labelData, + float* accuracy) { + int correct = 0; + for (int row = 0; row < N; row++) { + const int label = labelData[row]; + for (int col = 0; col < D; col++) { + const int pred = Xdata[row * D + col]; + if (pred == label) { + ++correct; + break; + } + } + } + *accuracy = static_cast(correct) / static_cast(N); +} + +template +class AccuracyOpCUDAKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), + "It must use GPUPlace."); + auto* inference = ctx.Input("Inference"); + auto* label = ctx.Input("Label"); + auto* accuracy = ctx.Output("Accuracy"); + // FIXME(typhoonzero): only support indices currently + // if add support for output values, how to detect the data type? + const int* inference_data = inference->data(); + const int* label_data = label->data(); + float* accuracy_data = accuracy->mutable_data(ctx.GetPlace()); + + size_t num_samples = inference->dims()[0]; + size_t infer_width = inference->dims()[1]; + cudaMemset((void**)&accuracy_data, 0, sizeof(float)); + + if (num_samples == 0) { + return; + } + + AccuracySingleKernel<<<1, 1>>>(num_samples, infer_width, 1, inference_data, + label_data, accuracy_data); + } +}; + +} // namespace operators +} // namespace paddle + +REGISTER_OP_GPU_KERNEL(accuracy, + paddle::operators::AccuracyOpCUDAKernel); diff --git a/paddle/operators/accuracy_op.h b/paddle/operators/accuracy_op.h new file mode 100644 index 0000000000..fe704efe1c --- /dev/null +++ b/paddle/operators/accuracy_op.h @@ -0,0 +1,77 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once +#include +#include "paddle/framework/eigen.h" +#include "paddle/framework/op_registry.h" + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; + +template +using EigenMatrix = framework::EigenMatrix; + +template +using EigenVector = framework::EigenVector; + +template +using EigenScalar = framework::EigenScalar; + +template +class AccuracyKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto* inference = ctx.Input("Inference"); + auto* label = ctx.Input("Label"); + auto* accuracy = ctx.Output("Accuracy"); + + float* accuracy_data = accuracy->mutable_data(ctx.GetPlace()); + + const T* inference_data = inference->data(); + const T* label_data = label->data(); + + size_t num_samples = inference->dims()[0]; + size_t class_dim = inference->dims()[1]; + *accuracy_data = 0.0f; + + if (num_samples == 0) { + return; + } + + int num_correct = 0; + // assume inference is already the topk of the output + for (size_t i = 0; i < num_samples; ++i) { + PADDLE_ENFORCE_GE(label_data[i], 0, "label must >= 0"); + for (size_t j = 0; j < class_dim; ++j) { + if (inference_data[i * class_dim + j] == label_data[i]) { + ++num_correct; + break; + } + } + } + + // FIXME(typhoonzero): we don't accumulate the accuracy for now. + *accuracy_data = + static_cast(num_correct) / static_cast(num_samples); + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc index fe1e50927a..b0979767f8 100644 --- a/paddle/pybind/pybind.cc +++ b/paddle/pybind/pybind.cc @@ -52,6 +52,7 @@ USE_OP(cos_sim); USE_CPU_ONLY_OP(gather); USE_OP(pad); USE_CPU_ONLY_OP(scatter); +USE_OP(accuracy); USE_CPU_ONLY_OP(concat); USE_OP(top_k); USE_OP(squared_l2_distance); diff --git a/python/paddle/v2/framework/tests/test_accuracy_op.py b/python/paddle/v2/framework/tests/test_accuracy_op.py new file mode 100644 index 0000000000..43d60eb90d --- /dev/null +++ b/python/paddle/v2/framework/tests/test_accuracy_op.py @@ -0,0 +1,25 @@ +import unittest +import numpy as np +from op_test import OpTest + + +class TestAccuracyOp(OpTest): + def setUp(self): + self.op_type = "accuracy" + infer = np.random.randint(0, 2, (32, 1)).astype("int") + label = np.random.randint(0, 2, (32, )).astype("int") + self.inputs = {'Inference': infer, "Label": label} + num_correct = 0 + for rowid in xrange(32): + for ele in infer[rowid]: + if ele == label[rowid]: + num_correct += 1 + break + self.outputs = {'Accuracy': [num_correct / 32.0]} + + def test_check_output(self): + self.check_output() + + +if __name__ == '__main__': + unittest.main() From e0ca4d7a29533a8ee7a4dc7af4c9623187539707 Mon Sep 17 00:00:00 2001 From: caoying03 Date: Thu, 14 Sep 2017 09:45:10 +0800 Subject: [PATCH 080/115] fix shape of output tensor of cross_entropy_op. --- paddle/operators/cross_entropy_op.cc | 2 +- .../v2/framework/tests/test_cross_entropy_op.py | 16 +++++++++------- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/paddle/operators/cross_entropy_op.cc b/paddle/operators/cross_entropy_op.cc index ab1e1c101a..337ec41e50 100644 --- a/paddle/operators/cross_entropy_op.cc +++ b/paddle/operators/cross_entropy_op.cc @@ -29,7 +29,7 @@ class OnehotCrossEntropyOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_EQ(X->dims().size(), 2, "X's dimension must be 2."); PADDLE_ENFORCE_EQ(label->dims().size(), 1, "label's dimension must be 1."); PADDLE_ENFORCE_EQ(X->dims()[0], label->dims()[0]); - ctx.Output("Y")->Resize({X->dims()[0]}); + ctx.Output("Y")->Resize({X->dims()[0], 1}); } }; diff --git a/python/paddle/v2/framework/tests/test_cross_entropy_op.py b/python/paddle/v2/framework/tests/test_cross_entropy_op.py index c2fc102a8b..253e7b8a24 100644 --- a/python/paddle/v2/framework/tests/test_cross_entropy_op.py +++ b/python/paddle/v2/framework/tests/test_cross_entropy_op.py @@ -8,20 +8,22 @@ class TestCrossEntropy(OpTest): self.op_type = "onehot_cross_entropy" batch_size = 30 class_num = 10 + X = numpy.random.uniform(0.1, 1.0, [batch_size, class_num]).astype("float32") - label = (class_num / 2) * numpy.ones(batch_size).astype("int32") - self.inputs = {'X': X, 'label': label} - Y = [] - for i in range(0, batch_size): - Y.append(-numpy.log(X[i][label[i]])) - self.outputs = {'Y': numpy.array(Y).astype("float32")} + labels = numpy.random.randint(0, class_num, batch_size, dtype="int32") + + cross_entropy = numpy.asmatrix( + [[-numpy.log(X[i][labels[i]])] for i in range(X.shape[0])], + dtype="float32") + self.inputs = {"X": X, "label": labels} + self.outputs = {"Y": cross_entropy} def test_check_output(self): self.check_output() def test_check_grad(self): - self.check_grad(['X'], 'Y') + self.check_grad(["X"], "Y") if __name__ == "__main__": From 0b21b854ecded15161d5281f1d44eb0867bfac92 Mon Sep 17 00:00:00 2001 From: Liu Yiqun Date: Thu, 14 Sep 2017 03:07:48 +0000 Subject: [PATCH 081/115] Make the weights of FCOp a fixed 2-D matrix and refine some comments in FCOp. --- paddle/operators/fc_op.cc | 70 ++++++++++++------- .../paddle/v2/framework/tests/test_fc_op.py | 14 ++-- 2 files changed, 49 insertions(+), 35 deletions(-) diff --git a/paddle/operators/fc_op.cc b/paddle/operators/fc_op.cc index 3e6cd8f76a..be0fca3c71 100644 --- a/paddle/operators/fc_op.cc +++ b/paddle/operators/fc_op.cc @@ -41,21 +41,16 @@ class FCOp : public NetOp { "The size of inputs X(%d) should be no less than 1.", n); auto x_num_col_dims = Attr>("xNumColDims"); - auto w_num_col_dims = Attr>("wNumColDims"); PADDLE_ENFORCE_EQ(x_num_col_dims.size(), n, "The size of attribute xNumColDims(%d) should be the " "same as that of inputs X(%d).", x_num_col_dims.size(), n); - PADDLE_ENFORCE_EQ(w_num_col_dims.size(), n, - "The size of attribute wNumColDims(%d) should be the " - "same as that of inputs X(%d).", - w_num_col_dims.size(), n) // mul_out[i] = X[i] * W[i] for (size_t i = 0; i < n; i++) { framework::AttributeMap mul_attr; mul_attr["x_num_col_dims"] = static_cast(x_num_col_dims[i]); - mul_attr["y_num_col_dims"] = static_cast(w_num_col_dims[i]); + mul_attr["y_num_col_dims"] = static_cast(1); AppendOp( framework::OpRegistry::CreateOp("mul", {{"X", {x[i]}}, {"Y", {w[i]}}}, {{"Out", {mul_out[i]}}}, mul_attr)); @@ -95,30 +90,54 @@ class FCOpMaker : public framework::OpProtoAndCheckerMaker { public: FCOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "The inputs of FC operator, a ordered vector of 2-D matrix.") + AddInput("X", + "(A vector of Tensors) each input Tensor can be of arbitrary " + "dimension, and will be reshaped to a 2-D matrix of size " + "(minibatch, number_of_input_features) according to attribute " + "xNumColDims.") .AsDuplicable(); - AddInput("W", "The weights of FC operator, a ordered vector of 2-D matrix.") + AddInput("W", + "(A vector of Tensors) the weights of FC operator, a " + "vector of 2-D matrix of size " + "(number_of_input_features, number_of_neurons).") .AsDuplicable(); - AddInput("B", "The 1-D bias vector of FC operator"); + AddInput("B", + "(Tensor) the bias of FC operator, a 1-D vector of size " + "number_of_neurons."); - AddOutput("Y", "The activated output matrix of FC operator"); + AddOutput("Y", + "(Tensor) the activated output matrix of FC operator, a 2-D " + "matrix of size (minibatch, number_of_neurons)."); AddOutput("MulOut", - "The intermediate outputs of FC operator, " - "saving the product of X[i] * W[i]") + "(A vector of Tensors) the intermediate outputs of FC operator, " + "each Tensor saving the product of X_i * W_i.") .AsIntermediate() .AsDuplicable(); - AddOutput("SumOut", - "The intermediate output of FC operator, " - "saving the sum of products, sum(X[i] * W[i])") + AddOutput( + "SumOut", + "(Tensor) the intermediate output of FC operator, " + "saving the sum of the products of X and W, that is sum{X_i * W_i}.") .AsIntermediate(); AddOutput("AddOut", - "The non-actived output of FC operator, saving X * W + b") + "(Tensor) the non-actived output of FC operator, " + "saving sum{X_i * W_i} + B.") .AsIntermediate(); - AddAttr("activation", "The activation type of FC operator.") + AddAttr( + "activation", + "(string, default identity) the activation type of FC operator.") .SetDefault("identity") .InEnum({"identity", "sigmoid", "softmax"}); - AddAttr>("xNumColDims", ""); - AddAttr>("wNumColDims", ""); + AddAttr>( + "xNumColDims", + "(std::vector) The inputs Tensors of FC operator can be of " + "more than 2 dimensions. In that case, each input Tensor `X_i` will be " + "reshaped to a 2-D matrix. The matrix's first dimension " + "(the length of column) will be the product of `X_i`'s last " + "`xNumColDims_i` dimensions, that is " + "`X_i.dims[0] x ... x X_i.dims[xNumColDims_i - 1]`. " + "The matrix's second dimension (the length of row) will be the product " + "of `X_i`'s first `rank - xNumColDims_i` dimensions, that is " + "`X_i.dims[xNumColDims_i] x ... x X_i.dims[rank - 1]`)"); AddComment(R"DOC( Fully Connected Operator, known as Fully Connected Layer or Inner Product Layer @@ -129,15 +148,14 @@ learned weights with a matrix multiplication followed by a bias offset (optionally). Equation: - Y = Act(sum_n{X_i * W_i} + b) + Y = Act(sum_n{X_i * W_i} + B) -where X_i is a 2D matrix of size (M x K), usually M is the minibatch size and -K is the number of features. W_i is also a 2D matrix of size (K x N), -where N means the number of neurons in the fully connected layer. -b is a 1D vector of size N. Thus, the output Y is a 2D matrix of size (M x N). +where X_i is Tensor that will be reshaped to a 2-D matrix of size (M x K), +usually M is the minibatch size and K is the number of input features. +W_i is a 2-D matrix of size (K x N), where N means the number of neurons +in the fully connected layer. B is a 1-D vector of size N. +Thus, the output Y is a 2-D matrix of size (M x N). Activation type can be set to `identity` (default), `sigmoid` or `softmax`. - - The config api is `paddle.v2.layer.fc`. )DOC"); } }; diff --git a/python/paddle/v2/framework/tests/test_fc_op.py b/python/paddle/v2/framework/tests/test_fc_op.py index 39906c8b33..f646fad337 100644 --- a/python/paddle/v2/framework/tests/test_fc_op.py +++ b/python/paddle/v2/framework/tests/test_fc_op.py @@ -22,7 +22,7 @@ class TestFCOp1(OpTest): "AddOut": add_out, "Y": identity_out } - self.attrs = {"xNumColDims": [1], "wNumColDims": [1]} + self.attrs = {"xNumColDims": [1]} def test_check_output(self): self.check_output() @@ -34,13 +34,13 @@ class TestFCOp1(OpTest): class TestFCOp2(OpTest): def setUp(self): x0 = np.random.random((16, 4, 8)).astype("float32") - x1 = np.random.random((16, 32)).astype("float32") + x1 = np.random.random((4, 4, 32)).astype("float32") w0 = np.random.random((32, 10)).astype("float32") - w1 = np.random.random((4, 8, 10)).astype("float32") + w1 = np.random.random((32, 10)).astype("float32") b = np.random.random(10).astype("float32") mul_out0 = np.dot(x0.reshape(16, 4 * 8), w0) - mul_out1 = np.dot(x1, w1.reshape(4 * 8, 10)) + mul_out1 = np.dot(x1.reshape(4 * 4, 32), w1) sum_out = mul_out0 + mul_out1 add_out = np.add(sum_out, b) sigmoid_out = 1 / (1 + np.exp(-add_out)) @@ -51,11 +51,7 @@ class TestFCOp2(OpTest): "W": [("W0", w0), ("W1", w1)], "B": b } - self.attrs = { - "xNumColDims": [1, 1], - "wNumColDims": [1, 2], - "activation": "sigmoid" - } + self.attrs = {"xNumColDims": [1, 2], "activation": "sigmoid"} self.outputs = { "MulOut": [("MulOut0", mul_out0), ("MulOut1", mul_out1)], "SumOut": sum_out, From 0597f4de990501c5d73e2201cf9bd321909b222a Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Thu, 14 Sep 2017 11:42:01 +0800 Subject: [PATCH 082/115] auto generate USE_OP() in pybind.cc --- .gitignore | 1 + cmake/cpplint.cmake | 4 +-- paddle/operators/CMakeLists.txt | 31 ++++++++++++++++++- paddle/operators/concat_op.cu | 19 ------------ ...tropy_op.cc => onehot_cross_entropy_op.cc} | 2 +- ...tropy_op.cu => onehot_cross_entropy_op.cu} | 0 ...entropy_op.h => onehot_cross_entropy_op.h} | 0 paddle/pybind/pybind.cc | 26 +--------------- 8 files changed, 35 insertions(+), 48 deletions(-) delete mode 100644 paddle/operators/concat_op.cu rename paddle/operators/{cross_entropy_op.cc => onehot_cross_entropy_op.cc} (97%) rename paddle/operators/{cross_entropy_op.cu => onehot_cross_entropy_op.cu} (100%) rename paddle/operators/{cross_entropy_op.h => onehot_cross_entropy_op.h} (100%) diff --git a/.gitignore b/.gitignore index 9622ab78e0..4f21fefda9 100644 --- a/.gitignore +++ b/.gitignore @@ -22,6 +22,7 @@ cmake-build-* # generated while compiling python/paddle/v2/framework/core.so +paddle/pybind/pybind.h CMakeFiles cmake_install.cmake paddle/.timestamp diff --git a/cmake/cpplint.cmake b/cmake/cpplint.cmake index 8d5d533126..4823dc3e91 100644 --- a/cmake/cpplint.cmake +++ b/cmake/cpplint.cmake @@ -26,9 +26,9 @@ set(IGNORE_PATTERN .*ImportanceSampler.* .*cblas\\.h.* .*\\.pb\\.txt - .*LtrDataProvider.* .*MultiDataProvider.* - .*pb.*) + .*pb.* + .*pybind.h) # add_style_check_target # diff --git a/paddle/operators/CMakeLists.txt b/paddle/operators/CMakeLists.txt index f9ea25ab04..dd33323ca9 100644 --- a/paddle/operators/CMakeLists.txt +++ b/paddle/operators/CMakeLists.txt @@ -1,5 +1,7 @@ file(GLOB GENERAL_OPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "*_op.cc") string(REPLACE ".cc" "" GENERAL_OPS "${GENERAL_OPS}") +set(pybind_file ${PADDLE_SOURCE_DIR}/paddle/pybind/pybind.h) +file(WRITE ${pybind_file} "// Generated by the paddle/operator/CMakeLists.txt. DO NOT EDIT!\n\n") function(op_library TARGET) # op_library is a function to create op library. The interface is same as # cc_library. But it handle split GPU/CPU code and link some common library @@ -11,6 +13,7 @@ function(op_library TARGET) set(options "") set(oneValueArgs "") set(multiValueArgs SRCS DEPS) + set(pybind_flag 0) cmake_parse_arguments(op_library "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) @@ -46,6 +49,32 @@ function(op_library TARGET) cc_library(${TARGET} SRCS ${cc_srcs} DEPS ${op_library_DEPS} ${op_common_deps}) endif() + + # net_op doesn't need pybind + if ("${TARGET}" STREQUAL "net_op") + set(pybind_file 1) + endif() + + # pybind USE_NO_KERNEL_OP + file(READ ${TARGET}.cc TARGET_CONTENT) + string(REGEX MATCH "OperatorWithKernel" regex_result "${TARGET_CONTENT}") + string(REPLACE "_op" "" TARGET "${TARGET}") + if (${pybind_flag} EQUAL 0 AND regex_result STREQUAL "") + file(APPEND ${pybind_file} "USE_NO_KERNEL_OP(${TARGET});\n") + set(pybind_flag 1) + endif() + + # pybind USE_CPU_ONLY_OP + list(LENGTH cu_srcs cu_srcs_len) + if (${pybind_flag} EQUAL 0 AND ${cu_srcs_len} EQUAL 0) + file(APPEND ${pybind_file} "USE_CPU_ONLY_OP(${TARGET});\n") + set(pybind_flag 1) + endif() + + # pybind USE_OP + if (${pybind_flag} EQUAL 0) + file(APPEND ${pybind_file} "USE_OP(${TARGET});\n") + endif() endfunction() add_subdirectory(math) @@ -60,7 +89,7 @@ op_library(identity_op DEPS scale_op) op_library(minus_op DEPS scale_op) op_library(mul_op DEPS math_function) op_library(recurrent_op SRCS recurrent_op.cc rnn/recurrent_op_utils.cc - DEPS framework_proto tensor operator net_op) + DEPS framework_proto tensor net_op) op_library(scale_op DEPS net_op) list(REMOVE_ITEM GENERAL_OPS ${DEPS_OPS}) diff --git a/paddle/operators/concat_op.cu b/paddle/operators/concat_op.cu deleted file mode 100644 index 38fee7473d..0000000000 --- a/paddle/operators/concat_op.cu +++ /dev/null @@ -1,19 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#define EIGEN_USE_GPU -#include "paddle/operators/concat_op.h" - -namespace ops = paddle::operators; -// TODO(Yancey1989) Add GPU kernel diff --git a/paddle/operators/cross_entropy_op.cc b/paddle/operators/onehot_cross_entropy_op.cc similarity index 97% rename from paddle/operators/cross_entropy_op.cc rename to paddle/operators/onehot_cross_entropy_op.cc index ab1e1c101a..d2362f7dd5 100644 --- a/paddle/operators/cross_entropy_op.cc +++ b/paddle/operators/onehot_cross_entropy_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/cross_entropy_op.h" +#include "paddle/operators/onehot_cross_entropy_op.h" namespace paddle { namespace operators { diff --git a/paddle/operators/cross_entropy_op.cu b/paddle/operators/onehot_cross_entropy_op.cu similarity index 100% rename from paddle/operators/cross_entropy_op.cu rename to paddle/operators/onehot_cross_entropy_op.cu diff --git a/paddle/operators/cross_entropy_op.h b/paddle/operators/onehot_cross_entropy_op.h similarity index 100% rename from paddle/operators/cross_entropy_op.h rename to paddle/operators/onehot_cross_entropy_op.h diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc index 16a2368aae..599a25f04c 100644 --- a/paddle/pybind/pybind.cc +++ b/paddle/pybind/pybind.cc @@ -23,6 +23,7 @@ limitations under the License. */ #include "paddle/operators/recurrent_op.h" #include "paddle/platform/enforce.h" #include "paddle/platform/place.h" +#include "paddle/pybind/pybind.h" #include "paddle/pybind/tensor_py.h" #include "paddle/string/to_string.h" #include "pybind11/numpy.h" @@ -31,31 +32,6 @@ limitations under the License. */ namespace py = pybind11; -USE_OP(add); -USE_OP(onehot_cross_entropy); -USE_OP(sgd); -USE_OP(mul); -USE_OP(mean); -USE_OP(sigmoid); -USE_OP(softmax); -USE_OP(rowwise_add); -USE_OP(fill_zeros_like); -USE_NO_KERNEL_OP(recurrent); -USE_OP(gaussian_random); -USE_OP(uniform_random); -USE_OP(lookup_table); -USE_OP(scale); -USE_NO_KERNEL_OP(identity); -USE_OP(minus); -USE_OP(cos_sim); -USE_CPU_ONLY_OP(gather); -USE_CPU_ONLY_OP(scatter); -USE_CPU_ONLY_OP(concat); -USE_OP(top_k); -USE_OP(squared_l2_distance); -USE_OP(sum); -USE_OP(reshape); - namespace paddle { namespace framework { From 680da5c9d0b996aeda2099a523e2250798b3ef0b Mon Sep 17 00:00:00 2001 From: wanghaoshuang Date: Thu, 14 Sep 2017 12:34:29 +0800 Subject: [PATCH 083/115] Fix unitest of pad_op --- python/paddle/v2/framework/tests/test_pad_op.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/paddle/v2/framework/tests/test_pad_op.py b/python/paddle/v2/framework/tests/test_pad_op.py index 456b765e33..0319e2aec0 100644 --- a/python/paddle/v2/framework/tests/test_pad_op.py +++ b/python/paddle/v2/framework/tests/test_pad_op.py @@ -22,7 +22,7 @@ class TestPadOp(OpTest): self.check_output() def test_check_grad_normal(self): - self.check_grad(['X'], 'Out') + self.check_grad(['X'], 'Out', max_relative_error=0.5) def initTestCase(self): self.shape = (16, 16) From 481a8370ac0915cc97267b2e6c64c6a98dbf622c Mon Sep 17 00:00:00 2001 From: wanghaoshuang Date: Thu, 14 Sep 2017 13:20:46 +0800 Subject: [PATCH 084/115] update unitest --- python/paddle/v2/framework/tests/test_pad_op.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/paddle/v2/framework/tests/test_pad_op.py b/python/paddle/v2/framework/tests/test_pad_op.py index 0319e2aec0..9052e63b56 100644 --- a/python/paddle/v2/framework/tests/test_pad_op.py +++ b/python/paddle/v2/framework/tests/test_pad_op.py @@ -22,7 +22,7 @@ class TestPadOp(OpTest): self.check_output() def test_check_grad_normal(self): - self.check_grad(['X'], 'Out', max_relative_error=0.5) + self.check_grad(['X'], 'Out', max_relative_error=0.006) def initTestCase(self): self.shape = (16, 16) From fe2ab2ee7fc9c0e139edb3274e8e4e6d7889a6ab Mon Sep 17 00:00:00 2001 From: Liu Yiqun Date: Thu, 14 Sep 2017 05:49:58 +0000 Subject: [PATCH 085/115] Set the default value of xNumColDims and rename the output to "Out" in FCOp. --- paddle/operators/fc_op.cc | 28 +++++++++++++------ .../paddle/v2/framework/tests/test_fc_op.py | 9 +++--- 2 files changed, 23 insertions(+), 14 deletions(-) diff --git a/paddle/operators/fc_op.cc b/paddle/operators/fc_op.cc index be0fca3c71..14a7fa8467 100644 --- a/paddle/operators/fc_op.cc +++ b/paddle/operators/fc_op.cc @@ -41,10 +41,19 @@ class FCOp : public NetOp { "The size of inputs X(%d) should be no less than 1.", n); auto x_num_col_dims = Attr>("xNumColDims"); - PADDLE_ENFORCE_EQ(x_num_col_dims.size(), n, - "The size of attribute xNumColDims(%d) should be the " - "same as that of inputs X(%d).", - x_num_col_dims.size(), n); + + // Set all values or set no values (use the default value) + if (!x_num_col_dims.empty()) { + PADDLE_ENFORCE_EQ(x_num_col_dims.size(), n, + "The size of attribute xNumColDims(%d) should be the " + "same as that of inputs X(%d).", + x_num_col_dims.size(), n); + } else { + x_num_col_dims.resize(n); + for (size_t i = 0; i < n; i++) { + x_num_col_dims[i] = 1; + } + } // mul_out[i] = X[i] * W[i] for (size_t i = 0; i < n; i++) { @@ -81,7 +90,7 @@ class FCOp : public NetOp { auto activation = Attr("activation"); AppendOp(framework::OpRegistry::CreateOp( - activation, {{"X", {Output(add_out)}}}, {{"Y", {Output("Y")}}}, {})); + activation, {{"X", {Output(add_out)}}}, {{"Y", {Output("Out")}}}, {})); CompleteAddOp(false); } }; @@ -105,7 +114,7 @@ class FCOpMaker : public framework::OpProtoAndCheckerMaker { "(Tensor) the bias of FC operator, a 1-D vector of size " "number_of_neurons."); - AddOutput("Y", + AddOutput("Out", "(Tensor) the activated output matrix of FC operator, a 2-D " "matrix of size (minibatch, number_of_neurons)."); AddOutput("MulOut", @@ -137,7 +146,8 @@ class FCOpMaker : public framework::OpProtoAndCheckerMaker { "`X_i.dims[0] x ... x X_i.dims[xNumColDims_i - 1]`. " "The matrix's second dimension (the length of row) will be the product " "of `X_i`'s first `rank - xNumColDims_i` dimensions, that is " - "`X_i.dims[xNumColDims_i] x ... x X_i.dims[rank - 1]`)"); + "`X_i.dims[xNumColDims_i] x ... x X_i.dims[rank - 1]`)") + .SetDefault(std::vector{}); AddComment(R"DOC( Fully Connected Operator, known as Fully Connected Layer or Inner Product Layer @@ -148,13 +158,13 @@ learned weights with a matrix multiplication followed by a bias offset (optionally). Equation: - Y = Act(sum_n{X_i * W_i} + B) + Out = Act(sum_n{X_i * W_i} + B) where X_i is Tensor that will be reshaped to a 2-D matrix of size (M x K), usually M is the minibatch size and K is the number of input features. W_i is a 2-D matrix of size (K x N), where N means the number of neurons in the fully connected layer. B is a 1-D vector of size N. -Thus, the output Y is a 2-D matrix of size (M x N). +Thus, the output Out is a 2-D matrix of size (M x N). Activation type can be set to `identity` (default), `sigmoid` or `softmax`. )DOC"); } diff --git a/python/paddle/v2/framework/tests/test_fc_op.py b/python/paddle/v2/framework/tests/test_fc_op.py index f646fad337..ed8d869a40 100644 --- a/python/paddle/v2/framework/tests/test_fc_op.py +++ b/python/paddle/v2/framework/tests/test_fc_op.py @@ -20,15 +20,14 @@ class TestFCOp1(OpTest): "MulOut": [("MulOut0", mul_out0)], "SumOut": sum_out, "AddOut": add_out, - "Y": identity_out + "Out": identity_out } - self.attrs = {"xNumColDims": [1]} def test_check_output(self): self.check_output() def test_check_grad(self): - self.check_grad(["X0", "W0", "B"], "Y", max_relative_error=0.01) + self.check_grad(["X0", "W0", "B"], "Out", max_relative_error=0.01) class TestFCOp2(OpTest): @@ -56,7 +55,7 @@ class TestFCOp2(OpTest): "MulOut": [("MulOut0", mul_out0), ("MulOut1", mul_out1)], "SumOut": sum_out, "AddOut": add_out, - "Y": sigmoid_out + "Out": sigmoid_out } def test_check_output(self): @@ -64,7 +63,7 @@ class TestFCOp2(OpTest): def test_check_grad(self): self.check_grad( - ["X0", "X1", "W0", "W1", "B"], "Y", max_relative_error=0.01) + ["X0", "X1", "W0", "W1", "B"], "Out", max_relative_error=0.01) if __name__ == '__main__': From 7953ad623c07d336f7869c5699ab3c410c762d87 Mon Sep 17 00:00:00 2001 From: Yancey1989 Date: Thu, 14 Sep 2017 14:05:10 +0800 Subject: [PATCH 086/115] delete unused dpends --- paddle/operators/CMakeLists.txt | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/paddle/operators/CMakeLists.txt b/paddle/operators/CMakeLists.txt index f9ea25ab04..3f714fac27 100644 --- a/paddle/operators/CMakeLists.txt +++ b/paddle/operators/CMakeLists.txt @@ -51,17 +51,10 @@ endfunction() add_subdirectory(math) set(DEPS_OPS - identity_op - minus_op - mul_op - recurrent_op - scale_op) -op_library(identity_op DEPS scale_op) -op_library(minus_op DEPS scale_op) -op_library(mul_op DEPS math_function) + recurrent_op) + op_library(recurrent_op SRCS recurrent_op.cc rnn/recurrent_op_utils.cc DEPS framework_proto tensor operator net_op) -op_library(scale_op DEPS net_op) list(REMOVE_ITEM GENERAL_OPS ${DEPS_OPS}) foreach(src ${GENERAL_OPS}) From 6c59f948d0bde4df8056b5bb42e0aa772041a65c Mon Sep 17 00:00:00 2001 From: Yancey1989 Date: Thu, 14 Sep 2017 14:21:31 +0800 Subject: [PATCH 087/115] update --- paddle/operators/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/operators/CMakeLists.txt b/paddle/operators/CMakeLists.txt index 3f714fac27..23179ab807 100644 --- a/paddle/operators/CMakeLists.txt +++ b/paddle/operators/CMakeLists.txt @@ -7,7 +7,7 @@ function(op_library TARGET) set(OP_LIBRARY ${TARGET} ${OP_LIBRARY} PARENT_SCOPE) set(cc_srcs) set(cu_srcs) - set(op_common_deps operator op_registry) + set(op_common_deps operator op_registry math_function) set(options "") set(oneValueArgs "") set(multiValueArgs SRCS DEPS) From 989e8358b3803c5c15ae4ec0a3fa93fd7b915302 Mon Sep 17 00:00:00 2001 From: Liu Yiqun Date: Thu, 14 Sep 2017 06:50:08 +0000 Subject: [PATCH 088/115] Reuse the output of mul when there is only one input in FCOp. --- paddle/operators/fc_op.cc | 23 +++++++++++-------- python/paddle/v2/framework/tests/op_test.py | 14 ++++++----- .../paddle/v2/framework/tests/test_fc_op.py | 16 ++++--------- 3 files changed, 25 insertions(+), 28 deletions(-) diff --git a/paddle/operators/fc_op.cc b/paddle/operators/fc_op.cc index 14a7fa8467..5549a836c9 100644 --- a/paddle/operators/fc_op.cc +++ b/paddle/operators/fc_op.cc @@ -66,22 +66,25 @@ class FCOp : public NetOp { } // sum_out = X[0] * W[0] + ... + X[n-1] * W[n-1] + auto sum_out = mul_out[0]; if (n > 1) { - AppendOp(framework::OpRegistry::CreateOp( - "sum", {{"X", {mul_out}}}, {{"Out", {Output("SumOut")}}}, {})); + sum_out = Output("SumOut"); + AppendOp(framework::OpRegistry::CreateOp("sum", {{"X", {mul_out}}}, + {{"Out", {sum_out}}}, {})); } else { - AppendOp(framework::OpRegistry::CreateOp( - "identity", {{"X", {mul_out[0]}}}, {{"Y", {Output("SumOut")}}}, {})); + if (Output("SumOut") != framework::kEmptyVarName) { + this->Rename(Output("SumOut"), framework::kEmptyVarName); + } } // add_out = sum_out + b auto b = Input("B"); - std::string add_out = "SumOut"; + auto add_out = sum_out; if (b != framework::kEmptyVarName) { - add_out = "AddOut"; + add_out = Output("AddOut"); AppendOp(framework::OpRegistry::CreateOp( - "rowwise_add", {{"X", {Output("SumOut")}}, {"b", {Input("B")}}}, - {{"Out", {Output(add_out)}}}, {})); + "rowwise_add", {{"X", {sum_out}}, {"b", {Input("B")}}}, + {{"Out", {add_out}}}, {})); } else { if (Output("AddOut") != framework::kEmptyVarName) { this->Rename(Output("AddOut"), framework::kEmptyVarName); @@ -89,8 +92,8 @@ class FCOp : public NetOp { } auto activation = Attr("activation"); - AppendOp(framework::OpRegistry::CreateOp( - activation, {{"X", {Output(add_out)}}}, {{"Y", {Output("Out")}}}, {})); + AppendOp(framework::OpRegistry::CreateOp(activation, {{"X", {add_out}}}, + {{"Y", {Output("Out")}}}, {})); CompleteAddOp(false); } }; diff --git a/python/paddle/v2/framework/tests/op_test.py b/python/paddle/v2/framework/tests/op_test.py index c6e4c59881..41690961b5 100644 --- a/python/paddle/v2/framework/tests/op_test.py +++ b/python/paddle/v2/framework/tests/op_test.py @@ -193,12 +193,14 @@ class OpTest(unittest.TestCase): actual, expect, atol=1e-05), "output name: " + out_name + " has diff") else: - actual = np.array(self.scope.find_var(out_name).get_tensor()) - expect = self.outputs[out_name] - self.assertTrue( - np.allclose( - actual, expect, atol=1e-05), - "output name: " + out_name + " has diff") + var = self.scope.find_var(out_name) + if var is not None: + actual = np.array(var.get_tensor()) + expect = self.outputs[out_name] + self.assertTrue( + np.allclose( + actual, expect, atol=1e-05), + "output name: " + out_name + " has diff") def check_output(self): places = [core.CPUPlace()] diff --git a/python/paddle/v2/framework/tests/test_fc_op.py b/python/paddle/v2/framework/tests/test_fc_op.py index ed8d869a40..9f56fe5049 100644 --- a/python/paddle/v2/framework/tests/test_fc_op.py +++ b/python/paddle/v2/framework/tests/test_fc_op.py @@ -7,27 +7,19 @@ class TestFCOp1(OpTest): def setUp(self): x0 = np.random.random((16, 32)).astype("float32") w0 = np.random.random((32, 10)).astype("float32") - b = np.random.random(10).astype("float32") mul_out0 = np.dot(x0, w0) - sum_out = mul_out0 - add_out = sum_out + b - identity_out = add_out + identity_out = mul_out0 self.op_type = "fc" - self.inputs = {"X": [("X0", x0)], "W": [("W0", w0)], "B": b} - self.outputs = { - "MulOut": [("MulOut0", mul_out0)], - "SumOut": sum_out, - "AddOut": add_out, - "Out": identity_out - } + self.inputs = {"X": [("X0", x0)], "W": [("W0", w0)]} + self.outputs = {"MulOut": [("MulOut0", mul_out0)], "Out": identity_out} def test_check_output(self): self.check_output() def test_check_grad(self): - self.check_grad(["X0", "W0", "B"], "Out", max_relative_error=0.01) + self.check_grad(["X0", "W0"], "Out", max_relative_error=0.01) class TestFCOp2(OpTest): From f657e21f57b7934decc9bdb6268662cbecf8f387 Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Thu, 14 Sep 2017 14:46:46 +0800 Subject: [PATCH 089/115] update new_op_cn.md, fix type error --- doc/howto/dev/new_op_cn.md | 30 ++++++++++++------------------ paddle/operators/CMakeLists.txt | 2 +- 2 files changed, 13 insertions(+), 19 deletions(-) diff --git a/doc/howto/dev/new_op_cn.md b/doc/howto/dev/new_op_cn.md index e3892849ab..afefa5baa0 100644 --- a/doc/howto/dev/new_op_cn.md +++ b/doc/howto/dev/new_op_cn.md @@ -241,24 +241,12 @@ MulOp(const std::string &type, const framework::VariableNameMap &inputs, - 绑定Python - 在 [`paddle/pybind/pybind.cc -`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/pybind/pybind.cc) 使用`USE_OP`告知编译器需要链接的Op,具体解释参考[代码注释](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/op_registry.h#L81)。 - - ``` - USE_OP(mul); - ``` - 如果只实现了CPU版本,则使用`USE_CPU_ONLY_OP`: - - ``` - USE_CPU_ONLY_OP(gather); - ``` - - 如果OP不带Kernel,则使用`USE_NO_KENREL_OP`: - - ``` - USE_NO_KENREL_OP(recurrent); - ``` - + 编译器 [paddle/operators/CMakeLists.txt](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/CMakeLists.txt) 会自动为Op绑定Python,生成[`paddle/pybind/pybind.h](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/pybind/pybind.h),绑定规则如下: + + - net_op 不绑定Python + - 如果OP不带Kernel,则使用`USE_NO_KENREL_OP` + - 如果只实现了CPU版本,则使用`USE_CPU_ONLY_OP` + - `USE_NO_KENREL_OP`优先级高于`USE_CPU_ONLY_OP` - 生成库 @@ -367,3 +355,9 @@ make test ARGS="-R test_mul_op -V" ```bash ctest -R test_mul_op ``` + +## 注意事项 + +- 为每个Op创建单独的`*_op.h`(如有)、`*_op.cc`和`*_op.cu`(如有)。不允许一个文件中包含多个Op,这将会导致编译出错。 +- 注册Op时的类型名,需要和该Op的名字一样。即不允许在`A_op.cc`里面,注册`REGISTER_OP(B, ...)`等,这将会导致单元测试出错。 +- 如果Op没有实现GPU Kernel,请不要创建空的`*_op.cu`,这将会导致单元测试出错。 diff --git a/paddle/operators/CMakeLists.txt b/paddle/operators/CMakeLists.txt index dd33323ca9..972034515f 100644 --- a/paddle/operators/CMakeLists.txt +++ b/paddle/operators/CMakeLists.txt @@ -52,7 +52,7 @@ function(op_library TARGET) # net_op doesn't need pybind if ("${TARGET}" STREQUAL "net_op") - set(pybind_file 1) + set(pybind_flag 1) endif() # pybind USE_NO_KERNEL_OP From 74f460fd0f5e540635e5566aed5167106cda771f Mon Sep 17 00:00:00 2001 From: dangqingqing Date: Thu, 14 Sep 2017 16:07:01 +0800 Subject: [PATCH 090/115] Fix specialization of template member functions in the non-template class in GCC 5.0. --- paddle/framework/operator.cc | 7 ++++--- paddle/framework/operator.h | 20 +++++++++++++++++--- paddle/operators/sequence_avg_pool_op.cc | 2 +- paddle/operators/sequence_avg_pool_op.h | 4 ++-- 4 files changed, 24 insertions(+), 9 deletions(-) diff --git a/paddle/framework/operator.cc b/paddle/framework/operator.cc index 27e7784940..c57537be4b 100644 --- a/paddle/framework/operator.cc +++ b/paddle/framework/operator.cc @@ -209,8 +209,7 @@ const std::vector InferShapeContext::MultiInput( template <> Tensor* ExecutionContext::Output(const std::string& name) const { auto* var = OutputVar(name); - if (var == nullptr) return nullptr; - return GetTensorFromVar(var); + return var == nullptr ? nullptr : const_cast(GetTensorFromVar(var)); } template <> @@ -222,7 +221,9 @@ std::vector ExecutionContext::MultiOutput( std::transform(names.begin(), names.end(), std::back_inserter(res), [&](const std::string& sub_name) { auto var = scope().FindVar(sub_name); - return var == nullptr ? nullptr : GetTensorFromVar(var); + return var == nullptr + ? nullptr + : const_cast(GetTensorFromVar(var)); }); return res; } diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h index bbf9930f0a..adae7bfc3d 100644 --- a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -327,13 +327,13 @@ class InferShapeContext { return res; } - Tensor* GetTensorFromVar(const Variable* var) const { + const Tensor* GetTensorFromVar(const Variable* var) const { if (var->IsType()) { - return const_cast(&var->Get()); + return &var->Get(); } PADDLE_ENFORCE(var->IsType(), "The Input(%s) must be LoDTensor or Tensor."); - return const_cast(&var->Get()); + return &var->Get(); } private: @@ -341,6 +341,13 @@ class InferShapeContext { const Scope& scope_; }; +template <> +const Tensor* InferShapeContext::Input(const std::string& name) const; + +template <> +const std::vector InferShapeContext::MultiInput( + const std::string& name) const; + template struct EigenDeviceConverter; @@ -397,6 +404,13 @@ class ExecutionContext : public InferShapeContext { const platform::DeviceContext* device_context_; }; +template <> +Tensor* ExecutionContext::Output(const std::string& name) const; + +template <> +std::vector ExecutionContext::MultiOutput( + const std::string& name) const; + class OpKernel { public: /** diff --git a/paddle/operators/sequence_avg_pool_op.cc b/paddle/operators/sequence_avg_pool_op.cc index 59a361761a..c15a5833de 100644 --- a/paddle/operators/sequence_avg_pool_op.cc +++ b/paddle/operators/sequence_avg_pool_op.cc @@ -66,7 +66,7 @@ class SequenceAvgPoolGradOp : public framework::OperatorWithKernel { auto x_dims = ctx.Input("X")->dims(); PADDLE_ENFORCE_EQ(og_dims.size(), x_dims.size(), "The rank of output grad must equal to Input(X)."); - for (size_t i = 1; i < og_dims.size(); ++i) { + for (int64_t i = 1; i < og_dims.size(); ++i) { PADDLE_ENFORCE_EQ(og_dims[i], x_dims[i], "The dimension mismatch."); } auto* x_grad = diff --git a/paddle/operators/sequence_avg_pool_op.h b/paddle/operators/sequence_avg_pool_op.h index ba68b5e4b9..6e343b87e2 100644 --- a/paddle/operators/sequence_avg_pool_op.h +++ b/paddle/operators/sequence_avg_pool_op.h @@ -38,7 +38,7 @@ class SequenceAvgPoolKernel : public framework::OpKernel { out->mutable_data(context.GetPlace()); auto place = context.GetEigenDevice(); - for (int i = 0; i < lod[0].size() - 1; ++i) { + for (int i = 0; i < static_cast(lod[0].size()) - 1; ++i) { Tensor in_t = in->Slice(static_cast(lod[0][i]), static_cast(lod[0][i + 1])); Tensor out_t = out->Slice(i, i + 1); @@ -64,7 +64,7 @@ class SequenceAvgPoolGradKernel : public framework::OpKernel { in_g->mutable_data(context.GetPlace()); auto place = context.GetEigenDevice(); - for (int i = 0; i < lod[0].size() - 1; ++i) { + for (int i = 0; i < static_cast(lod[0].size()) - 1; ++i) { auto in_g_t = in_g->Slice(static_cast(lod[0][i]), static_cast(lod[0][i + 1])); auto out_g_t = out_g->Slice(i, i + 1); From 59d661b9a93f214fd0dc10e3d032a7d9e6442e00 Mon Sep 17 00:00:00 2001 From: liaogang Date: Thu, 14 Sep 2017 17:11:48 +0800 Subject: [PATCH 091/115] Fix enforce test failed Note: If no symbol with a suitable value is found, both this field and dli_saddr shall be set to NULL. --- paddle/platform/enforce.h | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/paddle/platform/enforce.h b/paddle/platform/enforce.h index 64fcbd93b6..df5f71ed76 100644 --- a/paddle/platform/enforce.h +++ b/paddle/platform/enforce.h @@ -25,6 +25,10 @@ limitations under the License. */ #include "paddle/string/printf.h" #include "paddle/string/to_string.h" +#ifdef __GNUC__ +#include // for __cxa_demangle +#endif + #ifndef PADDLE_ONLY_CPU #include "paddle/platform/dynload/cublas.h" @@ -42,6 +46,19 @@ limitations under the License. */ namespace paddle { namespace platform { +namespace { +#ifdef __GNUC__ +inline std::string demangle(std::string name) { + int status = -4; // some arbitrary value to eliminate the compiler warning + std::unique_ptr res{ + abi::__cxa_demangle(name.c_str(), NULL, NULL, &status), std::free}; + return (status == 0) ? res.get() : name; +} +#else +inline std::string demangle(std::string name) { return name; } +#endif +} + struct EnforceNotMet : public std::exception { std::exception_ptr exp_; std::string err_str_; @@ -61,8 +78,8 @@ struct EnforceNotMet : public std::exception { Dl_info info; for (int i = 0; i < size; ++i) { - if (dladdr(call_stack[i], &info)) { - auto demangled = info.dli_sname; + if (dladdr(call_stack[i], &info) && info.dli_sname) { + auto demangled = demangle(info.dli_sname); auto addr_offset = static_cast(call_stack[i]) - static_cast(info.dli_saddr); sout << string::Sprintf("%-3d %*0p %s + %zd\n", i, From 65bc5fcd0693c8bb0846b4b818a3bf449e5de76b Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Thu, 14 Sep 2017 17:17:58 +0800 Subject: [PATCH 092/115] update new_op_cn.md --- doc/howto/dev/new_op_cn.md | 31 +++++++------------------------ 1 file changed, 7 insertions(+), 24 deletions(-) diff --git a/doc/howto/dev/new_op_cn.md b/doc/howto/dev/new_op_cn.md index afefa5baa0..c6570b89ae 100644 --- a/doc/howto/dev/new_op_cn.md +++ b/doc/howto/dev/new_op_cn.md @@ -34,7 +34,7 @@ Kernel实现 | CPU、GPU共享Kernel实现在`.h`文件中,否则,CPU 注册Op | Op注册实现在`.cc`文件;Kernel注册CPU实现在`.cc`文件中,GPU实现在`.cu`文件中 -实现新的op都添加至目录[paddle/operators](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/operators)下,文件命名以`*_op.h`(如有) 、 `*_op.cc` 、`*_op.cu`(如有)结尾。 +实现新的op都添加至目录[paddle/operators](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/operators)下,文件命名以`*_op.h`(如有) 、 `*_op.cc` 、`*_op.cu`(如有)结尾。**系统会根据文件名自动构建op和其对应的Python扩展。** 下面以矩阵乘操作,即[MulOp](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/mul_op.cc)为例来介绍如何写带Kernel的Operator。 @@ -224,33 +224,15 @@ MulOp(const std::string &type, const framework::VariableNameMap &inputs, ### 5. 编译 -- 简单**无特殊依赖**的OP无需修改CMakeList.txt文件。[paddle/operators/CMakeLists.txt](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/CMakeLists.txt) 会自动将 `paddle/operators` 目录下新增的 `*_op.cc` 文件加入编译。 -- 较为复杂、**有额外依赖** 的operator仍需要修改[paddle/operators/CMakeLists.txt](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/CMakeLists.txt)。如,`mul_op` 依赖 `math_function`,需要在`CMakeLists.txt`中添加如下内容: +运行下面命令可以进行编译: - ``` - op_library(mul_op SRCS mul_op.cc mul_op.cu DEPS math_function) + - ``` - -- 运行下面命令可以进行编译: - - ``` - make mul_op - ``` +``` +make mul_op +``` ## 绑定Python -- 绑定Python - - 编译器 [paddle/operators/CMakeLists.txt](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/operators/CMakeLists.txt) 会自动为Op绑定Python,生成[`paddle/pybind/pybind.h](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/pybind/pybind.h),绑定规则如下: - - - net_op 不绑定Python - - 如果OP不带Kernel,则使用`USE_NO_KENREL_OP` - - 如果只实现了CPU版本,则使用`USE_CPU_ONLY_OP` - - `USE_NO_KENREL_OP`优先级高于`USE_CPU_ONLY_OP` - - - 生成库 - - `paddle/operators` 目录下新增的 `*_op.cc` 文件会被自动添加链接到生成的lib库中。 +系统会对新增的op自动绑定Python,并链接到生成的lib库中。 ## 实现单元测试 @@ -361,3 +343,4 @@ ctest -R test_mul_op - 为每个Op创建单独的`*_op.h`(如有)、`*_op.cc`和`*_op.cu`(如有)。不允许一个文件中包含多个Op,这将会导致编译出错。 - 注册Op时的类型名,需要和该Op的名字一样。即不允许在`A_op.cc`里面,注册`REGISTER_OP(B, ...)`等,这将会导致单元测试出错。 - 如果Op没有实现GPU Kernel,请不要创建空的`*_op.cu`,这将会导致单元测试出错。 +- 如果多个Op依赖一些共用的函数,可以创建非`*_op.*`格式的文件来存放,如`gather.h`文件。 From d757c1cd02736def384618c1281919c37f1c5628 Mon Sep 17 00:00:00 2001 From: dangqingqing Date: Thu, 14 Sep 2017 17:24:46 +0800 Subject: [PATCH 093/115] Fix accuracy_op. --- paddle/operators/accuracy_op.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/operators/accuracy_op.cc b/paddle/operators/accuracy_op.cc index 9ca04d4028..4a6c6381b0 100644 --- a/paddle/operators/accuracy_op.cc +++ b/paddle/operators/accuracy_op.cc @@ -34,7 +34,7 @@ class AccuracyOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_EQ(inference->dims()[0], label->dims()[0], "inference size must be the same as label size"); - ctx.Output("Accuracy")->Resize({1}); + ctx.Output("Accuracy")->Resize({1}); } }; From 39d79e64196049b6879612305bed604faac8a2dd Mon Sep 17 00:00:00 2001 From: zchen0211 Date: Thu, 14 Sep 2017 14:20:33 -0700 Subject: [PATCH 094/115] modified codes --- paddle/framework/tensor.h | 11 +------ paddle/framework/tensor_test.cc | 8 ++--- paddle/operators/CMakeLists.txt | 3 +- paddle/operators/cond_op.cc | 55 ++++++++++++++++++++------------- paddle/operators/cond_op.h | 17 ++++++++-- 5 files changed, 53 insertions(+), 41 deletions(-) diff --git a/paddle/framework/tensor.h b/paddle/framework/tensor.h index 20f019892b..4b5a2ae523 100644 --- a/paddle/framework/tensor.h +++ b/paddle/framework/tensor.h @@ -78,9 +78,6 @@ class Tensor { template inline T* mutable_data(DDim dims, platform::Place place); - /*! Size of a single element in data() */ - inline size_t element_size() { return holder_->element_size(); } - /*! Return the dimensions of the memory block. */ inline const DDim& dims() const; @@ -132,7 +129,6 @@ class Tensor { virtual ~Placeholder() {} virtual void* ptr() const = 0; virtual size_t size() const = 0; - virtual size_t element_size() const = 0; virtual std::type_index type() const = 0; virtual platform::Place place() const = 0; }; @@ -143,8 +139,7 @@ class Tensor { : ptr_(static_cast(memory::Alloc(place, size)), memory::PODDeleter(place)), place_(place), - size_(size), - element_size_(sizeof(T)) { + size_(size) { PADDLE_ENFORCE_NOT_NULL(ptr_, "Insufficient %s memory to allocation.", (is_cpu_place(place_) ? "CPU" : "GPU")); } @@ -153,7 +148,6 @@ class Tensor { virtual platform::Place place() const { return place_; } virtual void* ptr() const { return static_cast(ptr_.get()); } virtual std::type_index type() const { return std::type_index(typeid(T)); } - virtual size_t element_size() const { return element_size_; } /*! the pointer of memory block. */ std::unique_ptr> ptr_; @@ -163,9 +157,6 @@ class Tensor { /*! the size of memory block. */ size_t size_; - - /*! the size of a single element */ - size_t element_size_; }; /*! holds the memory block if allocated. */ diff --git a/paddle/framework/tensor_test.cc b/paddle/framework/tensor_test.cc index 8491536e6f..e2ec738de3 100644 --- a/paddle/framework/tensor_test.cc +++ b/paddle/framework/tensor_test.cc @@ -36,7 +36,7 @@ TEST(Tensor, DataAssert) { } catch (paddle::platform::EnforceNotMet err) { caught = true; std::string msg = - "holder_ should not be null\nTenosr holds no memory. Call " + "holder_ should not be null\nTensor holds no memory. Call " "Tensor::mutable_data first."; const char* what = err.what(); for (size_t i = 0; i < msg.length(); ++i) { @@ -59,8 +59,6 @@ TEST(Tensor, MutableData) { // initialization p1 = src_tensor.mutable_data(make_ddim({1, 2, 3}), CPUPlace()); EXPECT_NE(p1, nullptr); - // check tensor type - EXPECT_EQ(src_tensor.element_size(), sizeof(float)); // set src_tensor a new dim with large size // momery is supposed to be re-allocated p2 = src_tensor.mutable_data(make_ddim({3, 4}), CPUPlace()); @@ -114,7 +112,7 @@ TEST(Tensor, ShareDataWith) { } catch (paddle::platform::EnforceNotMet err) { caught = true; std::string msg = - "holder_ should not be null\nTenosr holds no memory. Call " + "holder_ should not be null\nTensor holds no memory. Call " "Tensor::mutable_data first."; const char* what = err.what(); for (size_t i = 0; i < msg.length(); ++i) { @@ -276,4 +274,4 @@ TEST(Tensor, ReshapeToMatrix) { Tensor res = ReshapeToMatrix(src, 2); ASSERT_EQ(res.dims()[0], 2 * 3); ASSERT_EQ(res.dims()[1], 4 * 9); -} \ No newline at end of file +} diff --git a/paddle/operators/CMakeLists.txt b/paddle/operators/CMakeLists.txt index 4e83eea4ac..e3e934bccc 100644 --- a/paddle/operators/CMakeLists.txt +++ b/paddle/operators/CMakeLists.txt @@ -80,8 +80,7 @@ endfunction() add_subdirectory(math) set(DEPS_OPS - recurrent_op) -set(DEPS_OPS + recurrent_op cond_op) op_library(recurrent_op SRCS recurrent_op.cc rnn/recurrent_op_utils.cc DEPS framework_proto tensor net_op) diff --git a/paddle/operators/cond_op.cc b/paddle/operators/cond_op.cc index a3e4a2506f..b2e1ca395d 100644 --- a/paddle/operators/cond_op.cc +++ b/paddle/operators/cond_op.cc @@ -28,6 +28,7 @@ namespace operators { using Scope = framework::Scope; using Variable = framework::Variable; using Tensor = framework::Tensor; +using LoDTensor = framework::LoDTensor; using DDim = framework::DDim; void CondOp::CreateScope(const Scope& scope) const { @@ -41,8 +42,9 @@ void CondOp::CreateScope(const Scope& scope) const { void CondOp::CreateIndexTensor(const Scope& scope) const { auto index_tensors_var = scope.FindVar("IndexTensors"); PADDLE_ENFORCE(index_tensors_var != nullptr, ""); - auto& index_tensors = *index_tensors_var->GetMutable>(); - index_tensors.push_back(Tensor()); + auto& index_tensors = + *index_tensors_var->GetMutable>(); + index_tensors.push_back(LoDTensor()); } void CondOp::InferShape(const Scope& scope) const { @@ -65,8 +67,8 @@ void CondOp::InferShape(const Scope& scope) const { for (auto& input : Inputs("Xs")) { // Create a new tensor in sub-scope for input-type tensor Variable* v = sub_scopes[i]->NewVar(input); - Tensor* sub_input = v->GetMutable(); - sub_input->Resize(scope.FindVar(input)->GetMutable()->dims()); + LoDTensor* sub_input = v->GetMutable(); + sub_input->Resize(scope.FindVar(input)->GetMutable()->dims()); } for (auto& output : (*sub_net_op_[i]).Outputs()) { @@ -80,33 +82,40 @@ void CondOp::InferShape(const Scope& scope) const { } for (auto& output : Outputs("Outs")) { - Tensor* tensor_t_out = sub_scopes[0]->FindVar(output)->GetMutable(); - PADDLE_ENFORCE_NOT_NULL(tensor_t_out, "True output should be NULL"); - Tensor* tensor_f_out = sub_scopes[1]->FindVar(output)->GetMutable(); - PADDLE_ENFORCE_NOT_NULL(tensor_f_out, "True output should be NULL"); + LoDTensor* tensor_t_out = + sub_scopes[0]->FindVar(output)->GetMutable(); + PADDLE_ENFORCE_NOT_NULL(tensor_t_out, "True output should not be NULL"); + LoDTensor* tensor_f_out = + sub_scopes[1]->FindVar(output)->GetMutable(); + PADDLE_ENFORCE_NOT_NULL(tensor_f_out, "False output should not be NULL"); auto* tensor_out_var = scope.FindVar(output); PADDLE_ENFORCE_NOT_NULL(tensor_out_var, "Output not found"); - Tensor* tensor_out = tensor_out_var->GetMutable(); - PADDLE_ENFORCE_NOT_NULL(tensor_t_out, "True output should be NULL"); + LoDTensor* tensor_out = tensor_out_var->GetMutable(); + PADDLE_ENFORCE_NOT_NULL(tensor_t_out, + "True output tensor should not be NULL"); + // check output size should be same PADDLE_ENFORCE_EQ(tensor_t_out->dims(), tensor_f_out->dims(), "Outputs not of the same shape"); tensor_out->Resize(tensor_t_out->dims()); - tensor_out->mutable_data(tensor_out->dims(), platform::CPUPlace()); + // tensor_out->mutable_data(tensor_out->dims(), + // platform::CPUPlace()); + tensor_out->mutable_data(platform::CPUPlace()); } } void CondOp::Run(const Scope& scope, const platform::DeviceContext& dev_ctx) const { - auto sub_scopes = scope.FindVar("SubScopes")->Get>(); - auto index_tensors = - scope.FindVar("IndexTensors")->Get>(); + auto* sub_scopes_var = scope.FindVar("SubScopes"); + auto sub_scopes = sub_scopes_var->Get>(); + auto* index_tensors_var = scope.FindVar("IndexTensors"); + auto index_tensors = index_tensors_var->Get>(); std::string cond_name = Input("Cond"); Variable* cond_var = scope.FindVar(cond_name); PADDLE_ENFORCE_NOT_NULL(cond_var); - const Tensor* cond = cond_var->GetMutable(); + const LoDTensor* cond = cond_var->GetMutable(); // Step 1: get the true/false index at runtime // index_[0]: vector, contains all index for cond[i] == true @@ -139,11 +148,11 @@ void CondOp::Run(const Scope& scope, // find Tensor Variable* v = scope.FindVar(input); PADDLE_ENFORCE_NOT_NULL(v); - Tensor* tensor_parent = v->GetMutable(); + LoDTensor* tensor_parent = v->GetMutable(); v = sub_scopes[i]->FindVar(input); PADDLE_ENFORCE_NOT_NULL(v); - Tensor* tensor_child = v->GetMutable(); + LoDTensor* tensor_child = v->GetMutable(); // Resize child DDim dim = tensor_child->dims(); @@ -157,7 +166,9 @@ void CondOp::Run(const Scope& scope, } // Step 3: run - for (int i = 0; i < 2; ++i) sub_net_op_[i]->Run(*sub_scopes[i], dev_ctx); + for (int i = 0; i < 2; ++i) { + sub_net_op_[i]->Run(*sub_scopes[i], dev_ctx); + } // Step 4: merge output results for (int i = 0; i < 2; ++i) { @@ -166,11 +177,11 @@ void CondOp::Run(const Scope& scope, // find Tensor Variable* v = scope.FindVar(output); PADDLE_ENFORCE_NOT_NULL(v); - Tensor* tensor_parent = v->GetMutable(); + LoDTensor* tensor_parent = v->GetMutable(); v = sub_scopes[i]->FindVar(output); PADDLE_ENFORCE_NOT_NULL(v); - Tensor* tensor_child = v->GetMutable(); + LoDTensor* tensor_child = v->GetMutable(); ScatterUpdate(dev_ctx.GetPlace(), tensor_child, &index_tensors[i], tensor_parent); @@ -192,7 +203,9 @@ class CondOpProtoAndCheckerMaker : public framework::OpProtoAndCheckerMaker { AddComment(R"DOC( Sample dependent Cond Operator: -The equation is: Out[i] = subnet_t[i], if Cond[i] == true +Given Cond[i] as a 1/0 vector to indicate true/false +The equation is: +Out[i] = subnet_t[i], if Cond[i] == true Out[i] = subnet_t[i], if Cond[i] == false )DOC"); } diff --git a/paddle/operators/cond_op.h b/paddle/operators/cond_op.h index 27a6e9e3c3..001096d31a 100644 --- a/paddle/operators/cond_op.h +++ b/paddle/operators/cond_op.h @@ -24,6 +24,17 @@ limitations under the License. */ namespace paddle { namespace operators { +/* + * @brief CondOp is a dynamic if-else Operator + * + * It has a input tensor named cond indicating which netop each instance will + * run. + * + * if cond == 1, it will run true_net, which is a NetOp. + * + * if cond == 0, it will run false_net, which is another NetOp. + */ + class CondOp : public framework::OperatorBase { public: CondOp(const std::string& type, const framework::VariableNameMap& inputs, @@ -45,18 +56,18 @@ class CondOp : public framework::OperatorBase { void CreateIndexTensor(const framework::Scope& scope) const; - /** + /* * InferShape must be called before Run. */ void InferShape(const framework::Scope& scope) const override; // Set True Block - void set_truenet(std::unique_ptr net) { + void set_truenet(std::unique_ptr&& net) { sub_net_op_[0] = std::move(net); } // Set False Block - void set_falsenet(std::unique_ptr net) { + void set_falsenet(std::unique_ptr&& net) { sub_net_op_[1] = std::move(net); } From c557402855dca954aa65b20827d27f258d9106b2 Mon Sep 17 00:00:00 2001 From: zchen0211 Date: Thu, 14 Sep 2017 14:51:45 -0700 Subject: [PATCH 095/115] cond_op modify --- paddle/operators/cond_op.h | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/paddle/operators/cond_op.h b/paddle/operators/cond_op.h index 001096d31a..7eeec84996 100644 --- a/paddle/operators/cond_op.h +++ b/paddle/operators/cond_op.h @@ -61,12 +61,16 @@ class CondOp : public framework::OperatorBase { */ void InferShape(const framework::Scope& scope) const override; - // Set True Block + /* + * Set True Block + */ void set_truenet(std::unique_ptr&& net) { sub_net_op_[0] = std::move(net); } - // Set False Block + /* + * Set False Block + */ void set_falsenet(std::unique_ptr&& net) { sub_net_op_[1] = std::move(net); } From 98c35729cee39adf86df565ebfd2aa7bf167d960 Mon Sep 17 00:00:00 2001 From: zchen0211 Date: Thu, 14 Sep 2017 15:33:24 -0700 Subject: [PATCH 096/115] remove empty line --- paddle/operators/cond_op.h | 1 - 1 file changed, 1 deletion(-) diff --git a/paddle/operators/cond_op.h b/paddle/operators/cond_op.h index 7eeec84996..b09e32331e 100644 --- a/paddle/operators/cond_op.h +++ b/paddle/operators/cond_op.h @@ -34,7 +34,6 @@ namespace operators { * * if cond == 0, it will run false_net, which is another NetOp. */ - class CondOp : public framework::OperatorBase { public: CondOp(const std::string& type, const framework::VariableNameMap& inputs, From 0620b00eb6a9a3ed9311c6b3e1acc5b6a0cc289e Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Thu, 14 Sep 2017 15:46:25 -0700 Subject: [PATCH 097/115] Fix Clang compile error --- paddle/operators/cos_sim_op.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/paddle/operators/cos_sim_op.h b/paddle/operators/cos_sim_op.h index 318b63f370..bcf6f758ca 100644 --- a/paddle/operators/cos_sim_op.h +++ b/paddle/operators/cos_sim_op.h @@ -56,7 +56,7 @@ class CosSimKernel : public framework::OpKernel { x_norm.device(place) = x.square().sum(row_along).sqrt(); y_norm.device(place) = y.square().sum(row_along).sqrt(); if (rows_x == rows_y) { - auto xy = (x * y).sum(Eigen::array({1})); + auto xy = (x * y).sum(Eigen::array({{1}})); z.device(place) = xy / x_norm / y_norm; } else { Eigen::DSizes bcast(rows_x, 1); @@ -134,7 +134,7 @@ class CosSimGradKernel : public framework::OpKernel { out_grad_y->mutable_data(context.GetPlace()); auto dy = EigenMatrix::Reshape(*out_grad_y, 1); auto grad = x / norm_prod_bcast - z_bcast * y_bcast / y_snorm_bcast; - dy.device(place) = (dz_bcast * grad).sum(Eigen::array({0})); + dy.device(place) = (dz_bcast * grad).sum(Eigen::array({{0}})); } } } From 773cfe62903e36726c0138c5e139b48b535f54ed Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Fri, 15 Sep 2017 12:05:13 +0800 Subject: [PATCH 098/115] Delete unused `USE_OP` in combination op, and unused include in elementwise_mul_op.h --- paddle/operators/elementwise_mul_op.h | 2 -- paddle/operators/minus_op.cc | 2 -- 2 files changed, 4 deletions(-) diff --git a/paddle/operators/elementwise_mul_op.h b/paddle/operators/elementwise_mul_op.h index e9ed679179..6d58da580b 100644 --- a/paddle/operators/elementwise_mul_op.h +++ b/paddle/operators/elementwise_mul_op.h @@ -13,10 +13,8 @@ limitations under the License. */ #pragma once -#include #include "paddle/framework/eigen.h" #include "paddle/framework/op_registry.h" -#include "paddle/operators/math/math_function.h" namespace paddle { namespace operators { diff --git a/paddle/operators/minus_op.cc b/paddle/operators/minus_op.cc index 8a583f24ed..1eac8f133b 100644 --- a/paddle/operators/minus_op.cc +++ b/paddle/operators/minus_op.cc @@ -77,8 +77,6 @@ class MinusGradOp : public NetOp { } // namespace operators } // namespace paddle -USE_OP(scale); -USE_NO_KERNEL_OP(identity); namespace ops = paddle::operators; REGISTER_OP(minus, ops::MinusOp, ops::MinusOpMaker, minus_grad, ops::MinusGradOp); From b0d9b68a5ffe08615414d412f464bc64c9f18497 Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Thu, 14 Sep 2017 20:34:21 +0800 Subject: [PATCH 099/115] unify functions of mkldnn_fc and refine comments --- paddle/gserver/layers/MKLDNNConvLayer.cpp | 9 +- paddle/gserver/layers/MKLDNNFcLayer.cpp | 277 ++++++++++++++-------- paddle/gserver/layers/MKLDNNFcLayer.h | 59 +++++ paddle/math/MKLDNNMatrix.h | 11 +- 4 files changed, 251 insertions(+), 105 deletions(-) diff --git a/paddle/gserver/layers/MKLDNNConvLayer.cpp b/paddle/gserver/layers/MKLDNNConvLayer.cpp index f8c06c5f86..9088744bee 100644 --- a/paddle/gserver/layers/MKLDNNConvLayer.cpp +++ b/paddle/gserver/layers/MKLDNNConvLayer.cpp @@ -285,10 +285,9 @@ void MKLDNNConvLayer::resetWgtBiasValue( wgt = MKLDNNMatrix::create(weight_->getW(), pd->weights_primitive_desc()); VLOG(MKLDNN_FMTS) << "Weight value format: " << wgt->getFormat(); - bias = nullptr; - if (biases_ && biases_->getW()) { - bias = MKLDNNMatrix::create(biases_->getW(), pd->bias_primitive_desc()); - } + bias = (biases_ && biases_->getW()) + ? MKLDNNMatrix::create(biases_->getW(), pd->bias_primitive_desc()) + : nullptr; } void MKLDNNConvLayer::resetOutValue( @@ -356,6 +355,7 @@ void MKLDNNConvLayer::resetBwdWgtPD( void MKLDNNConvLayer::resetBwdDataPD( std::shared_ptr& pd) { + pd = nullptr; if (inputLayers_[0]->getOutput().grad == nullptr) { return; } @@ -476,6 +476,7 @@ void MKLDNNConvLayer::resetWgtBiasGrad( << "primitive desc of weight grad and value should be equal"; VLOG(MKLDNN_FMTS) << "weight grad format: " << wgt->getFormat(); + bias = nullptr; if (biasVal_ == nullptr) { return; } diff --git a/paddle/gserver/layers/MKLDNNFcLayer.cpp b/paddle/gserver/layers/MKLDNNFcLayer.cpp index f70343251a..f60e221a6e 100644 --- a/paddle/gserver/layers/MKLDNNFcLayer.cpp +++ b/paddle/gserver/layers/MKLDNNFcLayer.cpp @@ -17,9 +17,6 @@ limitations under the License. */ using namespace mkldnn; // NOLINT typedef memory::format format; -typedef inner_product_forward fc_fwd; -typedef inner_product_backward_weights fc_bwdWgt; -typedef inner_product_backward_data fc_bwdData; namespace paddle { @@ -93,35 +90,88 @@ void MKLDNNFcLayer::reshape( printSizeInfo(); } -void MKLDNNFcLayer::resetFwd(std::vector& pipeline, +void MKLDNNFcLayer::resetFwd(std::vector& pipeline, MKLDNNMatrixPtr& in, MKLDNNMatrixPtr& wgt, MKLDNNMatrixPtr& bias, MKLDNNMatrixPtr& out) { - pipeline.clear(); - bool hasBias = biases_ && biases_->getW(); - const MatrixPtr& wgtVal = weight_->getW(); - const MatrixPtr& biasVal = hasBias ? biases_->getW() : nullptr; - const MatrixPtr& outVal = output_.value; + resetFwdBuffers(in, wgt, bias, out); + + resetFwdPD(fwdPD_, in, wgt, bias, out); + + resetFwdPipeline(pipeline, fwdPD_, in, wgt, bias, out); + + printValueFormatFlow(); +} + +void MKLDNNFcLayer::resetBwd(std::vector& pipeline, + MKLDNNMatrixPtr& in, + MKLDNNMatrixPtr& wgt, + MKLDNNMatrixPtr& bias, + MKLDNNMatrixPtr& out) { + std::shared_ptr bwdWgtPD; + std::shared_ptr bwdDataPD; + + resetBwdBuffers(in, wgt, bias, out); + + resetBwdWgtPD(bwdWgtPD, wgt, bias, out); + + resetBwdDataPD(bwdDataPD, in, out); + + resetBwdPipeline(pipeline, bwdWgtPD, bwdDataPD, in, wgt, bias, out); + + printGradFormatFlow(); +} + +void MKLDNNFcLayer::updateInputData() { + inVal_->setData(getInputValue(0, CPU_DEVICE)->getData()); +} +void MKLDNNFcLayer::updateWeights(const UpdateCallback& callback) { + weight_->getParameterPtr()->incUpdate(callback); + if (biases_ && biases_->getWGrad()) { + biases_->getParameterPtr()->incUpdate(callback); + } +} + +void MKLDNNFcLayer::resetFwdBuffers(MKLDNNMatrixPtr& in, + MKLDNNMatrixPtr& wgt, + MKLDNNMatrixPtr& bias, + MKLDNNMatrixPtr& out) { + resetInValue(in); + + resetWgtBiasValue(wgt, bias); + + resetOutValue(out); +} + +void MKLDNNFcLayer::resetInValue(MKLDNNMatrixPtr& in) { if (inputIsOnlyMKLDNN()) { - const MatrixPtr& inVal = getInputValue(0); - in = std::dynamic_pointer_cast(inVal); + const MatrixPtr& dnnIn = getInputValue(0); + in = std::dynamic_pointer_cast(dnnIn); CHECK(in) << "Input should be MKLDNNMatrix"; } else { CHECK_EQ(getPrev(0)->getDeviceId(), CPU_DEVICE) << "Only support CPU yet"; - const MatrixPtr& inVal = getInputValue(0, CPU_DEVICE); + const MatrixPtr& cpuIn = getInputValue(0, CPU_DEVICE); in = MKLDNNMatrix::create( - inVal, memory::dims{bs_, ic_, ih_, iw_}, format::nchw, engine_); + cpuIn, {bs_, ic_, ih_, iw_}, format::nchw, engine_); } in->downSpatial(); +} + +void MKLDNNFcLayer::resetWgtBiasValue(MKLDNNMatrixPtr& wgt, + MKLDNNMatrixPtr& bias) { wgt = MKLDNNMatrix::create( - wgtVal, memory::dims{oc_, ic_, ih_, iw_}, format::oihw, engine_); + weight_->getW(), {oc_, ic_, ih_, iw_}, format::oihw, engine_); wgt->downSpatial(); - bias = hasBias ? MKLDNNMatrix::create(biasVal, {oc_}, format::x, engine_) - : nullptr; - out = MKLDNNMatrix::create(outVal, {bs_, oc_}, format::nc, engine_); + bias = (biases_ && biases_->getW()) + ? MKLDNNMatrix::create(biases_->getW(), {oc_}, format::x, engine_) + : nullptr; +} + +void MKLDNNFcLayer::resetOutValue(MKLDNNMatrixPtr& out) { + out = MKLDNNMatrix::create(output_.value, {bs_, oc_}, format::nc, engine_); // change original output value to mkldnn output value output_.value = std::dynamic_pointer_cast(out); if (!outputIsOnlyMKLDNN()) { @@ -129,46 +179,59 @@ void MKLDNNFcLayer::resetFwd(std::vector& pipeline, // just share point getOutput(CPU_DEVICE).value->setData(output_.value->getData()); } +} - // create forward handle +void MKLDNNFcLayer::resetFwdPD(std::shared_ptr& pd, + MKLDNNMatrixPtr in, + MKLDNNMatrixPtr wgt, + MKLDNNMatrixPtr bias, + MKLDNNMatrixPtr out) { + CHECK(in); + CHECK(wgt); + CHECK(out); prop_kind pk = prop_kind::forward; - fc_fwd::desc fwdDesc = hasBias ? fc_fwd::desc(pk, - in->getMemoryDesc(), - wgt->getMemoryDesc(), - bias->getMemoryDesc(), - out->getMemoryDesc()) - : fc_fwd::desc(pk, - in->getMemoryDesc(), - wgt->getMemoryDesc(), - out->getMemoryDesc()); - fc_fwd::primitive_desc fwdPD = fc_fwd::primitive_desc(fwdDesc, engine_); - if (hasBias) { - fwd_.reset(new fc_fwd(fwdPD, *in, *wgt, *bias, *out)); + fc_fwd::desc fwdDesc = bias != nullptr ? fc_fwd::desc(pk, + in->getMemoryDesc(), + wgt->getMemoryDesc(), + bias->getMemoryDesc(), + out->getMemoryDesc()) + : fc_fwd::desc(pk, + in->getMemoryDesc(), + wgt->getMemoryDesc(), + out->getMemoryDesc()); + pd.reset(new fc_fwd::primitive_desc(fwdDesc, engine_)); +} + +void MKLDNNFcLayer::resetFwdPipeline( + std::vector& pipeline, + std::shared_ptr& pd, + MKLDNNMatrixPtr& in, + MKLDNNMatrixPtr& wgt, + MKLDNNMatrixPtr& bias, + MKLDNNMatrixPtr& out) { + pipeline.clear(); + + if (bias) { + fwd_.reset(new fc_fwd(*pd, *in, *wgt, *bias, *out)); } else { - fwd_.reset(new fc_fwd(fwdPD, *in, *wgt, *out)); + fwd_.reset(new fc_fwd(*pd, *in, *wgt, *out)); } - printValueFormatFlow(); pipeline.push_back(*fwd_); } -void MKLDNNFcLayer::resetBwd(std::vector& pipeline, - MKLDNNMatrixPtr& in, - MKLDNNMatrixPtr& wgt, - MKLDNNMatrixPtr& bias, - MKLDNNMatrixPtr& out) { - pipeline.clear(); - if (!needResetBwd_) { - return; - } - needResetBwd_ = false; - bool hasBias = biases_ && biases_->getWGrad(); +void MKLDNNFcLayer::resetBwdBuffers(MKLDNNMatrixPtr& in, + MKLDNNMatrixPtr& wgt, + MKLDNNMatrixPtr& bias, + MKLDNNMatrixPtr& out) { + resetOutGrad(out); + + resetWgtBiasGrad(wgt, bias); - /// backward weight - CHECK(inVal_) << "Should have input value"; - const MatrixPtr& wgtGrad = weight_->getWGrad(); - const MatrixPtr& biasGrad = hasBias ? biases_->getWGrad() : nullptr; + resetInGrad(in); +} +void MKLDNNFcLayer::resetOutGrad(MKLDNNMatrixPtr& out) { // TODO(TJ): merge outgrad int device = outputIsOnlyMKLDNN() ? MKLDNN_DEVICE : CPU_DEVICE; // for MKLDNN device: @@ -178,66 +241,88 @@ void MKLDNNFcLayer::resetBwd(std::vector& pipeline, // for CPU device: // fc do not need to convert from cpu device since output is always nc format // only need create from cpu device - const MatrixPtr& outGrad = getOutput(device).grad; - out = MKLDNNMatrix::create(outGrad, outVal_->getPrimitiveDesc()); - wgt = MKLDNNMatrix::create(wgtGrad, wgtVal_->getPrimitiveDesc()); - bias = hasBias ? MKLDNNMatrix::create(biasGrad, biasVal_->getPrimitiveDesc()) - : nullptr; - - // create memory primitive desc - fc_fwd::desc fwdDesc = fc_fwd::desc(prop_kind::forward, - inVal_->getMemoryDesc(), - wgt->getMemoryDesc(), - out->getMemoryDesc()); - fc_fwd::primitive_desc fwdPD = fc_fwd::primitive_desc(fwdDesc, engine_); - fc_bwdWgt::desc bwdWgtDesc = hasBias - ? fc_bwdWgt::desc(inVal_->getMemoryDesc(), - wgt->getMemoryDesc(), - bias->getMemoryDesc(), - out->getMemoryDesc()) - : fc_bwdWgt::desc(inVal_->getMemoryDesc(), - wgt->getMemoryDesc(), - out->getMemoryDesc()); - fc_bwdWgt::primitive_desc bwdWgtPD = - fc_bwdWgt::primitive_desc(bwdWgtDesc, engine_, fwdPD); - - if (hasBias) { - bwdWgt_.reset(new fc_bwdWgt(bwdWgtPD, *inVal_, *out, *wgt, *bias)); - } else { - bwdWgt_.reset(new fc_bwdWgt(bwdWgtPD, *inVal_, *out, *wgt)); + CHECK(outVal_); + out = + MKLDNNMatrix::create(getOutput(device).grad, outVal_->getPrimitiveDesc()); +} + +void MKLDNNFcLayer::resetWgtBiasGrad(MKLDNNMatrixPtr& wgt, + MKLDNNMatrixPtr& bias) { + CHECK(wgtVal_); + wgt = MKLDNNMatrix::create(weight_->getWGrad(), wgtVal_->getPrimitiveDesc()); + + bias = nullptr; + if (biasVal_ == nullptr) { + return; } - pipeline.push_back(*bwdWgt_); + bias = + MKLDNNMatrix::create(biases_->getWGrad(), biasVal_->getPrimitiveDesc()); +} - /// backward data +void MKLDNNFcLayer::resetInGrad(MKLDNNMatrixPtr& in) { + in = nullptr; const MatrixPtr& inGrad = inputLayers_[0]->getOutput().grad; if (inGrad == nullptr) { return; } - if (getInput(0, MKLDNN_DEVICE).getAllCount() > 1) { - // TODO(TJ): use outputMaps_ ways to get the inGrad_ when merge outgrad done - } else { - in = MKLDNNMatrix::create(inGrad, inVal_->getPrimitiveDesc()); - } - - fc_bwdData::desc bwdDataDesc = fc_bwdData::desc( - inVal_->getMemoryDesc(), wgt->getMemoryDesc(), out->getMemoryDesc()); - fc_bwdData::primitive_desc bwdDataPD = - fc_bwdData::primitive_desc(bwdDataDesc, engine_, fwdPD); + // TODO(TJ): use outputMaps_ ways to get the inGrad_ when merge outgrad done + CHECK(inVal_); + in = MKLDNNMatrix::create(inGrad, inVal_->getPrimitiveDesc()); +} - CHECK(wgtVal_) << "Should have weight memory"; - bwdData_.reset(new fc_bwdData(bwdDataPD, *out, *wgtVal_, *in)); - printGradFormatFlow(); - pipeline.push_back(*bwdData_); +void MKLDNNFcLayer::resetBwdWgtPD( + std::shared_ptr& pd, + MKLDNNMatrixPtr& wgt, + MKLDNNMatrixPtr& bias, + MKLDNNMatrixPtr& out) { + CHECK(inVal_); + fc_bwdWgt::desc bwdWgtDesc = bias ? fc_bwdWgt::desc(inVal_->getMemoryDesc(), + wgt->getMemoryDesc(), + bias->getMemoryDesc(), + out->getMemoryDesc()) + : fc_bwdWgt::desc(inVal_->getMemoryDesc(), + wgt->getMemoryDesc(), + out->getMemoryDesc()); + pd.reset(new fc_bwdWgt::primitive_desc(bwdWgtDesc, engine_, *fwdPD_)); } -void MKLDNNFcLayer::updateInputData() { - inVal_->setData(getInputValue(0, CPU_DEVICE)->getData()); +void MKLDNNFcLayer::resetBwdDataPD( + std::shared_ptr& pd, + MKLDNNMatrixPtr& in, + MKLDNNMatrixPtr& out) { + pd = nullptr; + if (in == nullptr) { + return; + } + CHECK(wgtVal_); + fc_bwdData::desc bwdDataDesc = fc_bwdData::desc( + in->getMemoryDesc(), wgtVal_->getMemoryDesc(), out->getMemoryDesc()); + pd.reset(new fc_bwdData::primitive_desc(bwdDataDesc, engine_, *fwdPD_)); } -void MKLDNNFcLayer::updateWeights(const UpdateCallback& callback) { - weight_->getParameterPtr()->incUpdate(callback); - if (biases_ && biases_->getWGrad()) { - biases_->getParameterPtr()->incUpdate(callback); +void MKLDNNFcLayer::resetBwdPipeline( + std::vector& pipeline, + std::shared_ptr& bwdWgtPD, + std::shared_ptr& bwdDataPD, + MKLDNNMatrixPtr& in, + MKLDNNMatrixPtr& wgt, + MKLDNNMatrixPtr& bias, + MKLDNNMatrixPtr& out) { + pipeline.clear(); + CHECK(inVal_); + if (bias) { + bwdWgt_.reset(new fc_bwdWgt(*bwdWgtPD, *inVal_, *out, *wgt, *bias)); + } else { + bwdWgt_.reset(new fc_bwdWgt(*bwdWgtPD, *inVal_, *out, *wgt)); + } + pipeline.push_back(*bwdWgt_); + + if (bwdDataPD == nullptr) { + return; } + CHECK(wgtVal_) << "Should have weight memory"; + bwdData_.reset(new fc_bwdData(*bwdDataPD, *out, *wgtVal_, *in)); + pipeline.push_back(*bwdData_); } + } // namespace paddle diff --git a/paddle/gserver/layers/MKLDNNFcLayer.h b/paddle/gserver/layers/MKLDNNFcLayer.h index 3119f86349..c76878aafa 100644 --- a/paddle/gserver/layers/MKLDNNFcLayer.h +++ b/paddle/gserver/layers/MKLDNNFcLayer.h @@ -18,6 +18,9 @@ limitations under the License. */ #include "mkldnn.hpp" namespace paddle { +typedef mkldnn::inner_product_forward fc_fwd; +typedef mkldnn::inner_product_backward_weights fc_bwdWgt; +typedef mkldnn::inner_product_backward_data fc_bwdData; /** * @brief A subclass of MKLDNNLayer fc layer. @@ -32,6 +35,9 @@ protected: // if has already init the weight bool hasInitedWgt_; + // save forward primitive_desc, which can be used backward + std::shared_ptr fwdPD_; + // fc weight and bias std::unique_ptr weight_; std::unique_ptr biases_; @@ -67,6 +73,59 @@ public: void convertWeightsFromPaddle() override; void convertWeightsToPaddle() override; + +protected: + /** + * Forward functions: reset buffers(input, output, weight and bias), + * reset primitive descriptor, + * reset pipeline. + */ + void resetFwdBuffers(MKLDNNMatrixPtr& in, + MKLDNNMatrixPtr& wgt, + MKLDNNMatrixPtr& bias, + MKLDNNMatrixPtr& out); + void resetInValue(MKLDNNMatrixPtr& in); + void resetWgtBiasValue(MKLDNNMatrixPtr& wgt, MKLDNNMatrixPtr& bias); + void resetOutValue(MKLDNNMatrixPtr& out); + void resetFwdPD(std::shared_ptr& pd, + MKLDNNMatrixPtr in, + MKLDNNMatrixPtr wgt, + MKLDNNMatrixPtr bias, + MKLDNNMatrixPtr out); + void resetFwdPipeline(std::vector& pipeline, + std::shared_ptr& pd, + MKLDNNMatrixPtr& in, + MKLDNNMatrixPtr& wgt, + MKLDNNMatrixPtr& bias, + MKLDNNMatrixPtr& out); + + /** + * Backward functions: reset buffers(input, output, weight and bias), + * reset primitive descriptor for backward weight, + * reset primitive descriptor for backward data, + * reset pipeline. + */ + void resetBwdBuffers(MKLDNNMatrixPtr& in, + MKLDNNMatrixPtr& wgt, + MKLDNNMatrixPtr& bias, + MKLDNNMatrixPtr& out); + void resetOutGrad(MKLDNNMatrixPtr& out); + void resetWgtBiasGrad(MKLDNNMatrixPtr& wgt, MKLDNNMatrixPtr& bias); + void resetInGrad(MKLDNNMatrixPtr& in); + void resetBwdWgtPD(std::shared_ptr& pd, + MKLDNNMatrixPtr& wgt, + MKLDNNMatrixPtr& bias, + MKLDNNMatrixPtr& out); + void resetBwdDataPD(std::shared_ptr& pd, + MKLDNNMatrixPtr& in, + MKLDNNMatrixPtr& out); + void resetBwdPipeline(std::vector& pipeline, + std::shared_ptr& bwdWgtPD, + std::shared_ptr& bwdDataPD, + MKLDNNMatrixPtr& in, + MKLDNNMatrixPtr& wgt, + MKLDNNMatrixPtr& bias, + MKLDNNMatrixPtr& out); }; } // namespace paddle diff --git a/paddle/math/MKLDNNMatrix.h b/paddle/math/MKLDNNMatrix.h index 0aa130b4a0..c843115eb9 100644 --- a/paddle/math/MKLDNNMatrix.h +++ b/paddle/math/MKLDNNMatrix.h @@ -66,11 +66,12 @@ public: /** * Create reorder primitive. * Create a mkldnn::reorder handle for converting src MKLDNNMatrix to dst. - * checkData: for whether to check the data handle of src and dst is the same. - * if true, means check it and do not want support inplace reorder; - * otherwise do not check data which means the created reorder - * maybe inplace buffer and do not guarantee the logical is correct - * since not all format or conversion support inplace. + * checkData: whether to check the data handle of src and dst. + * if true, it will check the data and do not allow them equal; + * otherwise, it will not check them, then the reorder created + * may have inplace buffer. + * Do not set false, if you can not guarantee the inplace logical + * would work with your reorder. */ static std::shared_ptr createReorder( const MKLDNNMatrixPtr& src, From 2a110af3097ed04cf492fbe565602f8ccb71c109 Mon Sep 17 00:00:00 2001 From: hedaoyuan Date: Fri, 15 Sep 2017 16:11:13 +0800 Subject: [PATCH 100/115] Use Matrix::addBias. --- paddle/gserver/layers/ExpandConvLayer.cpp | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/paddle/gserver/layers/ExpandConvLayer.cpp b/paddle/gserver/layers/ExpandConvLayer.cpp index 20de475fc3..7c1190a414 100644 --- a/paddle/gserver/layers/ExpandConvLayer.cpp +++ b/paddle/gserver/layers/ExpandConvLayer.cpp @@ -155,11 +155,12 @@ void ExpandConvLayer::forward(PassType passType) { /* add the bias-vector */ if (biases_.get()) { - if (sharedBiases_) { - addSharedBias(); - } else { - addUnsharedBias(); - } + MatrixPtr bias = Matrix::create(biases_->getW()->getData(), + 1, + biases_->getW()->getElementCnt(), + false, + useGpu_); + output_.value->addBias(*bias, 1.0, sharedBiases_); } /* activation */ @@ -171,7 +172,13 @@ void ExpandConvLayer::backward(const UpdateCallback &callback) { MatrixPtr outGrad = getOutputGrad(); if (biases_ && biases_->getWGrad()) { - bpropBiases(outGrad); + // bpropBiases(outGrad); + MatrixPtr bias = Matrix::create(biases_->getWGrad()->getData(), + 1, + biases_->getWGrad()->getElementCnt(), + false, + useGpu_); + bias->collectBias(*getOutputGrad(), 1, sharedBiases_); /* Increasing the number of gradient */ biases_->getParameterPtr()->incUpdate(callback); } From eef1ccbf08605f6fb784a472540ba9c1cc959a67 Mon Sep 17 00:00:00 2001 From: Liu Yiqun Date: Fri, 15 Sep 2017 08:18:28 +0000 Subject: [PATCH 101/115] Add the check of inputs and outputs in all operators. --- paddle/operators/accuracy_op.cc | 11 +++++++--- paddle/operators/add_op.cc | 7 +++++++ paddle/operators/concat_op.cc | 3 +++ paddle/operators/cond_op.cc | 21 ++++++++++++++----- paddle/operators/cos_sim_op.cc | 12 +++++++++-- paddle/operators/elementwise_mul_op.cc | 10 +++++++-- paddle/operators/fill_zeros_like_op.cc | 7 +++++++ paddle/operators/gather_op.cc | 7 +++++++ paddle/operators/gaussian_random_op.cc | 8 +++++-- paddle/operators/identity_op.cc | 5 +++++ paddle/operators/lookup_table_op.cc | 15 +++++++++---- paddle/operators/mean_op.cc | 4 +++- paddle/operators/minus_op.cc | 7 +++++++ paddle/operators/mul_op.cc | 7 +++++++ paddle/operators/onehot_cross_entropy_op.cc | 10 +++++++++ paddle/operators/pad_op.cc | 5 +++++ paddle/operators/reshape_op.cc | 6 +++++- paddle/operators/rowwise_add_op.cc | 7 +++++++ paddle/operators/scale_op.cc | 5 +++++ paddle/operators/scatter_op.cc | 9 ++++++++ paddle/operators/sequence_avg_pool_op.cc | 9 +++++--- paddle/operators/sgd_op.cc | 7 +++++++ paddle/operators/sigmoid_op.cc | 5 +++++ paddle/operators/softmax_op.cc | 5 +++++ paddle/operators/squared_l2_distance_op.cc | 18 ++++++++++------ paddle/operators/sum_op.cc | 5 +++++ paddle/operators/top_k_op.cc | 7 ++++++- paddle/operators/uniform_random_op.cc | 4 ++++ .../{test_add_two_op.py => test_add_op.py} | 0 .../tests/test_gaussian_random_op.py | 2 +- .../v2/framework/tests/test_identity_op.py | 20 ++++++++++++++++++ ...ookup_table.py => test_lookup_table_op.py} | 0 .../v2/framework/tests/test_minus_op.py | 2 +- ..._op.py => test_onehot_cross_entropy_op.py} | 2 +- ...le_and_identity_op.py => test_scale_op.py} | 15 +------------ .../paddle/v2/framework/tests/test_sgd_op.py | 2 +- .../v2/framework/tests/test_sigmoid_op.py | 2 +- .../v2/framework/tests/test_top_k_op.py | 6 ++++++ .../framework/tests/test_uniform_random_op.py | 2 +- 39 files changed, 229 insertions(+), 50 deletions(-) rename python/paddle/v2/framework/tests/{test_add_two_op.py => test_add_op.py} (100%) create mode 100644 python/paddle/v2/framework/tests/test_identity_op.py rename python/paddle/v2/framework/tests/{test_lookup_table.py => test_lookup_table_op.py} (100%) rename python/paddle/v2/framework/tests/{test_cross_entropy_op.py => test_onehot_cross_entropy_op.py} (95%) rename python/paddle/v2/framework/tests/{test_scale_and_identity_op.py => test_scale_op.py} (56%) diff --git a/paddle/operators/accuracy_op.cc b/paddle/operators/accuracy_op.cc index 4a6c6381b0..0c813748b2 100644 --- a/paddle/operators/accuracy_op.cc +++ b/paddle/operators/accuracy_op.cc @@ -23,10 +23,15 @@ class AccuracyOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { - PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Inference"), - "Input of Inference must be initialized."); + PADDLE_ENFORCE_NOT_NULL( + ctx.InputVar("Inference"), + "Input(Inference) of AccuracyOp should not be null."); PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Label"), - "Input of Inference must be initialized."); + "Input(Label) of AccuracyOp should not be null."); + PADDLE_ENFORCE_NOT_NULL( + ctx.OutputVar("Accuracy"), + "Output(Accuracy) of AccuracyOp should not be null."); + auto *inference = ctx.Input("Inference"); auto *label = ctx.Input("Label"); diff --git a/paddle/operators/add_op.cc b/paddle/operators/add_op.cc index b43c09d4f0..e83c1efeaf 100644 --- a/paddle/operators/add_op.cc +++ b/paddle/operators/add_op.cc @@ -23,6 +23,13 @@ class AddOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), + "Input(X) of AddOp should not be null."); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"), + "Input(Y) of AddOp should not be null."); + PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"), + "Output(Out) of AddOp should not be null."); + PADDLE_ENFORCE_EQ(ctx.Input("X")->dims(), ctx.Input("Y")->dims(), "Two input of Add Op's dimension must be same."); diff --git a/paddle/operators/concat_op.cc b/paddle/operators/concat_op.cc index 72fd179354..223bb0ffe6 100644 --- a/paddle/operators/concat_op.cc +++ b/paddle/operators/concat_op.cc @@ -25,6 +25,9 @@ class ConcatOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { + PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"), + "Output(Out) of ConcatOp should not be null."); + auto ins = ctx.MultiInput("X"); auto *out = ctx.Output("Out"); size_t axis = static_cast(ctx.Attr("axis")); diff --git a/paddle/operators/cond_op.cc b/paddle/operators/cond_op.cc index b2e1ca395d..8262a7a5c8 100644 --- a/paddle/operators/cond_op.cc +++ b/paddle/operators/cond_op.cc @@ -33,7 +33,8 @@ using DDim = framework::DDim; void CondOp::CreateScope(const Scope& scope) const { auto sub_scopes_var = scope.FindVar("SubScopes"); - PADDLE_ENFORCE(sub_scopes_var != nullptr, ""); + PADDLE_ENFORCE_NOT_NULL(sub_scopes_var, + "Output(SubScopes) of CondOp should not be null."); auto sub_scopes = sub_scopes_var->GetMutable>(); auto& sub_scope = scope.NewScope(); sub_scopes->push_back(&sub_scope); @@ -41,7 +42,8 @@ void CondOp::CreateScope(const Scope& scope) const { void CondOp::CreateIndexTensor(const Scope& scope) const { auto index_tensors_var = scope.FindVar("IndexTensors"); - PADDLE_ENFORCE(index_tensors_var != nullptr, ""); + PADDLE_ENFORCE_NOT_NULL(index_tensors_var, + "Output(IndexTensors) of CondOp should not be null."); auto& index_tensors = *index_tensors_var->GetMutable>(); index_tensors.push_back(LoDTensor()); @@ -49,7 +51,8 @@ void CondOp::CreateIndexTensor(const Scope& scope) const { void CondOp::InferShape(const Scope& scope) const { auto sub_scopes_var = scope.FindVar("SubScopes"); - PADDLE_ENFORCE_NOT_NULL(sub_scopes_var); + PADDLE_ENFORCE_NOT_NULL(sub_scopes_var, + "Output(SubScopes) of CondOp should not be null."); auto& sub_scopes = *sub_scopes_var->GetMutable>(); for (int i = 0; i < 2; ++i) { @@ -63,7 +66,8 @@ void CondOp::InferShape(const Scope& scope) const { // branch CreateIndexTensor(scope); - PADDLE_ENFORCE(!Inputs("Xs").empty(), "Inputs can't be empty"); + PADDLE_ENFORCE(!Inputs("Xs").empty(), + "Inputs(Xs) of CondOp can't be empty."); for (auto& input : Inputs("Xs")) { // Create a new tensor in sub-scope for input-type tensor Variable* v = sub_scopes[i]->NewVar(input); @@ -108,13 +112,18 @@ void CondOp::InferShape(const Scope& scope) const { void CondOp::Run(const Scope& scope, const platform::DeviceContext& dev_ctx) const { auto* sub_scopes_var = scope.FindVar("SubScopes"); + PADDLE_ENFORCE_NOT_NULL(sub_scopes_var, + "Output(SubScopes) of CondOp should not be null."); auto sub_scopes = sub_scopes_var->Get>(); auto* index_tensors_var = scope.FindVar("IndexTensors"); + PADDLE_ENFORCE_NOT_NULL(index_tensors_var, + "Output(IndexTensors) of CondOp should not be null."); auto index_tensors = index_tensors_var->Get>(); std::string cond_name = Input("Cond"); Variable* cond_var = scope.FindVar(cond_name); - PADDLE_ENFORCE_NOT_NULL(cond_var); + PADDLE_ENFORCE_NOT_NULL(cond_var, + "Input(Cond) of CondOp should not be null."); const LoDTensor* cond = cond_var->GetMutable(); // Step 1: get the true/false index at runtime @@ -171,6 +180,8 @@ void CondOp::Run(const Scope& scope, } // Step 4: merge output results + PADDLE_ENFORCE(!Outputs("Outs").empty(), + "Outputs(Outs) of CondOp can't be empty."); for (int i = 0; i < 2; ++i) { // i= 0/i for True and False branches respectively for (auto& output : Outputs("Outs")) { diff --git a/paddle/operators/cos_sim_op.cc b/paddle/operators/cos_sim_op.cc index 253b17d8a1..72c4464936 100644 --- a/paddle/operators/cos_sim_op.cc +++ b/paddle/operators/cos_sim_op.cc @@ -26,8 +26,16 @@ class CosSimOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { // notnull check - PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) must not be null."); - PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"), "Input(Y) must not be null."); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), + "Input(X) of CosSimOp should not be null."); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"), + "Input(Y) of CosSimOp should not be null."); + PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"), + "Output(Out) of CosSimOp should not be null."); + PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("XNorm"), + "Output(XNorm) of CosSimOp should not be null."); + PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("YNorm"), + "Output(YNorm) of CosSimOp should not be null."); // shape check auto x_dims = ctx.Input("X")->dims(); diff --git a/paddle/operators/elementwise_mul_op.cc b/paddle/operators/elementwise_mul_op.cc index e37c582adb..ee6e975b44 100644 --- a/paddle/operators/elementwise_mul_op.cc +++ b/paddle/operators/elementwise_mul_op.cc @@ -25,8 +25,14 @@ class ElementWiseMulOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { - PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) should not be null"); - PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"), "Input(Y) should not be null"); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), + "Input(X) of ElementWiseMulOp should not be null."); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"), + "Input(Y) of ElementWiseMulOp should not be null."); + PADDLE_ENFORCE_NOT_NULL( + ctx.OutputVar("Out"), + "Output(Out) of ElementWiseMulOp should not be null."); + auto x_dim = ctx.Input("X")->dims(); auto y_dim = ctx.Input("Y")->dims(); PADDLE_ENFORCE_GE(x_dim.size(), y_dim.size(), diff --git a/paddle/operators/fill_zeros_like_op.cc b/paddle/operators/fill_zeros_like_op.cc index 0c9734892a..ba7857cc65 100644 --- a/paddle/operators/fill_zeros_like_op.cc +++ b/paddle/operators/fill_zeros_like_op.cc @@ -23,6 +23,13 @@ class FillZerosLikeOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { + PADDLE_ENFORCE_NOT_NULL( + ctx.InputVar("Src"), + "Input(Src) of FillZerosLikeOp should not be null."); + PADDLE_ENFORCE_NOT_NULL( + ctx.OutputVar("Dst"), + "Output(Dst) of FillZerosLikeOp should not be null."); + ctx.Output("Dst")->Resize( ctx.Input("Src")->dims()); } diff --git a/paddle/operators/gather_op.cc b/paddle/operators/gather_op.cc index 8883d6d5fe..d445b61c16 100644 --- a/paddle/operators/gather_op.cc +++ b/paddle/operators/gather_op.cc @@ -24,6 +24,13 @@ class GatherOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), + "Input(X) of GatherOp should not be null."); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Index"), + "Input(Index) of GatherOp should not be null."); + PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"), + "Output(Out) of GatherOp should not be null."); + int batch_size = ctx.Input("Index")->dims()[0]; PADDLE_ENFORCE_GE(batch_size, 0, "Batch size must be >0"); framework::DDim output_dims(ctx.Input("X")->dims()); diff --git a/paddle/operators/gaussian_random_op.cc b/paddle/operators/gaussian_random_op.cc index 25b0776a37..c0e161bbc0 100644 --- a/paddle/operators/gaussian_random_op.cc +++ b/paddle/operators/gaussian_random_op.cc @@ -43,8 +43,12 @@ class GaussianRandomOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(const framework::InferShapeContext& context) const override { - auto* tensor = context.Output("Out"); + void InferShape(const framework::InferShapeContext& ctx) const override { + PADDLE_ENFORCE_NOT_NULL( + ctx.OutputVar("Out"), + "Output(Out) of GaussianRandomOp should not be null."); + + auto* tensor = ctx.Output("Out"); auto dims = Attr>("dims"); std::vector temp; temp.reserve(dims.size()); diff --git a/paddle/operators/identity_op.cc b/paddle/operators/identity_op.cc index 7d9d4fa519..b67ca5f6f8 100644 --- a/paddle/operators/identity_op.cc +++ b/paddle/operators/identity_op.cc @@ -42,6 +42,11 @@ class IdentityOp : public NetOp { const framework::VariableNameMap &outputs, const framework::AttributeMap &attrs) : NetOp(type, inputs, outputs, attrs) { + PADDLE_ENFORCE_NE(Input("X"), framework::kEmptyVarName, + "Input(X) of IdentityOp should not be null."); + PADDLE_ENFORCE_NE(Output("Out"), framework::kEmptyVarName, + "Output(Out) of IdentityOp should not be null."); + AppendOp(framework::OpRegistry::CreateOp( "scale", {{"X", {Input("X")}}}, {{"Out", {Output("Out")}}}, {{"scale", static_cast(1)}})); diff --git a/paddle/operators/lookup_table_op.cc b/paddle/operators/lookup_table_op.cc index b3d15f1ec9..07f6dfabca 100644 --- a/paddle/operators/lookup_table_op.cc +++ b/paddle/operators/lookup_table_op.cc @@ -22,10 +22,17 @@ class LookupTableOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(const framework::InferShapeContext &context) const override { - auto table_t = context.Input("W"); - auto ids_t = context.Input("Ids"); - auto output_t = context.Output("Out"); + void InferShape(const framework::InferShapeContext &ctx) const override { + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("W"), + "Input(W) of LookupTableOp should not be null."); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Ids"), + "Input(Ids) of LookupTableOp should not be null."); + PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"), + "Output(Out) of LookupTableOp should not be null."); + + auto table_t = ctx.Input("W"); + auto ids_t = ctx.Input("Ids"); + auto output_t = ctx.Output("Out"); output_t->Resize({ids_t->dims()[0], table_t->dims()[1]}); } diff --git a/paddle/operators/mean_op.cc b/paddle/operators/mean_op.cc index 3e523d31b6..7d7eeb59a2 100644 --- a/paddle/operators/mean_op.cc +++ b/paddle/operators/mean_op.cc @@ -24,7 +24,9 @@ class MeanOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), - "Input of MeanOp must be initialized."); + "Input(X) of MeanOp should not be null."); + PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"), + "Output(Out) of MeanOp should not be null."); ctx.Output("Out")->Resize({1}); } }; diff --git a/paddle/operators/minus_op.cc b/paddle/operators/minus_op.cc index 8a583f24ed..61fe49ce32 100644 --- a/paddle/operators/minus_op.cc +++ b/paddle/operators/minus_op.cc @@ -27,6 +27,13 @@ class MinusOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), + "Input(X) of MinusOp should not be null."); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"), + "Input(Y) of MinusOp should not be null."); + PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"), + "Output(Out) of MinusOp should not be null."); + auto *left_tensor = ctx.Input("X"); auto *right_tensor = ctx.Input("Y"); diff --git a/paddle/operators/mul_op.cc b/paddle/operators/mul_op.cc index 015e13de9a..b6d320b415 100644 --- a/paddle/operators/mul_op.cc +++ b/paddle/operators/mul_op.cc @@ -26,6 +26,13 @@ class MulOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), + "Input(X) of MulOp should not be null."); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"), + "Input(Y) of MulOp should not be null."); + PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"), + "Output(Out) of MulOp should not be null."); + auto x_dims = ctx.Input("X")->dims(); auto y_dims = ctx.Input("Y")->dims(); int x_num_col_dims = Attr("x_num_col_dims"); diff --git a/paddle/operators/onehot_cross_entropy_op.cc b/paddle/operators/onehot_cross_entropy_op.cc index a9baada1cd..f38be3549f 100644 --- a/paddle/operators/onehot_cross_entropy_op.cc +++ b/paddle/operators/onehot_cross_entropy_op.cc @@ -23,6 +23,16 @@ class OnehotCrossEntropyOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { + PADDLE_ENFORCE_NOT_NULL( + ctx.InputVar("X"), + "Input(X) of OnehotCrossEntropyOp should not be null."); + PADDLE_ENFORCE_NOT_NULL( + ctx.InputVar("label"), + "Input(label) of OnehotCrossEntropyOp should not be null."); + PADDLE_ENFORCE_NOT_NULL( + ctx.OutputVar("Y"), + "Output(Y) of OnehotCrossEntropyOp should not be null."); + auto *X = ctx.Input("X"); auto *label = ctx.Input("label"); diff --git a/paddle/operators/pad_op.cc b/paddle/operators/pad_op.cc index 6cf7bd6f35..a0b1c6b631 100644 --- a/paddle/operators/pad_op.cc +++ b/paddle/operators/pad_op.cc @@ -25,6 +25,11 @@ class PadOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), + "Input(X) of PadOp should not be null."); + PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"), + "Output(Out) of PadOp should not be null."); + auto x_dim = ctx.Input("X")->dims(); auto paddings = Attr>("paddings"); PADDLE_ENFORCE_EQ(x_dim.size() * 2, int64_t(paddings.size()), diff --git a/paddle/operators/reshape_op.cc b/paddle/operators/reshape_op.cc index d281702092..0d05e34414 100644 --- a/paddle/operators/reshape_op.cc +++ b/paddle/operators/reshape_op.cc @@ -28,7 +28,11 @@ class ReshapeOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { // input check - PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) shouldn't be null"); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), + "Input(X) of ReshapeOp should not be null."); + PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"), + "Output(Out) of ReshapeOp should not be null."); + auto shape = ctx.Attr>("shape"); PADDLE_ENFORCE(shape.size() > 0, "Attr(shape) shouldn't be empty."); for (auto dim : shape) { diff --git a/paddle/operators/rowwise_add_op.cc b/paddle/operators/rowwise_add_op.cc index c6101685a3..2a3fd3be94 100644 --- a/paddle/operators/rowwise_add_op.cc +++ b/paddle/operators/rowwise_add_op.cc @@ -25,6 +25,13 @@ class RowwiseAddOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), + "Input(X) of RowwiseAddOp should not be null."); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("b"), + "Input(b) of RowwiseAddOp should not be null."); + PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"), + "Output(Out) of RowwiseAddOp should not be null."); + auto x_dims = ctx.Input("X")->dims(); auto b_dims = ctx.Input("b")->dims(); PADDLE_ENFORCE_GT( diff --git a/paddle/operators/scale_op.cc b/paddle/operators/scale_op.cc index 35e6b70ba9..d1f42e8662 100644 --- a/paddle/operators/scale_op.cc +++ b/paddle/operators/scale_op.cc @@ -27,6 +27,11 @@ class ScaleOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), + "Input(X) of ScaleOp should not be null."); + PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"), + "Output(Out) of ScaleOp should not be null."); + auto *in = ctx.Input("X"); auto *out = ctx.Output("Out"); out->Resize(in->dims()); diff --git a/paddle/operators/scatter_op.cc b/paddle/operators/scatter_op.cc index 0f7510983e..8820262732 100644 --- a/paddle/operators/scatter_op.cc +++ b/paddle/operators/scatter_op.cc @@ -24,6 +24,15 @@ class ScatterOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Ref"), + "Input(Ref) of ScatterOp should not be null."); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Index"), + "Input(Index) of ScatterOp should not be null."); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Updates"), + "Input(Updates) of ScatterOp should not be null."); + PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"), + "Output(Out) of ScatterOp should not be null."); + PADDLE_ENFORCE_EQ(ctx.Input("Index")->dims().size(), 1, "Update Index should be 1-D."); PADDLE_ENFORCE_EQ(ctx.Input("Ref")->dims().size(), diff --git a/paddle/operators/sequence_avg_pool_op.cc b/paddle/operators/sequence_avg_pool_op.cc index c15a5833de..eb3e37655b 100644 --- a/paddle/operators/sequence_avg_pool_op.cc +++ b/paddle/operators/sequence_avg_pool_op.cc @@ -23,9 +23,12 @@ class SequenceAvgPoolOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext& ctx) const override { - PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), - "Input of SequenceAvgPoolOp" - "must be initialized."); + PADDLE_ENFORCE_NOT_NULL( + ctx.InputVar("X"), "Input(X) of SequenceAvgPoolOp should not be null."); + PADDLE_ENFORCE_NOT_NULL( + ctx.OutputVar("Out"), + "Output(Out) of SequenceAvgPoolOp should not be null."); + auto* x = ctx.Input("X"); auto dims = x->dims(); auto lod = x->lod(); diff --git a/paddle/operators/sgd_op.cc b/paddle/operators/sgd_op.cc index 7997bf6907..1232e64c7f 100644 --- a/paddle/operators/sgd_op.cc +++ b/paddle/operators/sgd_op.cc @@ -23,6 +23,13 @@ class SGDOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("param"), + "Input(param) of SGDOp should not be null."); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("grad"), + "Input(grad) of SGDOp should not be null."); + PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("param_out"), + "Output(param_out) of SGDOp should not be null."); + PADDLE_ENFORCE_EQ(ctx.Input("param")->dims(), ctx.Input("grad")->dims(), "Two input of SGD Op's dimension must be same."); diff --git a/paddle/operators/sigmoid_op.cc b/paddle/operators/sigmoid_op.cc index de6a1ba773..992b19965e 100644 --- a/paddle/operators/sigmoid_op.cc +++ b/paddle/operators/sigmoid_op.cc @@ -23,6 +23,11 @@ class SigmoidOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), + "Input(X) of SigmoidOp should not be null."); + PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Y"), + "Output(Y) of SigmoidOp should not be null."); + ctx.Output("Y")->Resize( ctx.Input("X")->dims()); } diff --git a/paddle/operators/softmax_op.cc b/paddle/operators/softmax_op.cc index 239d3d141e..c67eb028c8 100644 --- a/paddle/operators/softmax_op.cc +++ b/paddle/operators/softmax_op.cc @@ -23,6 +23,11 @@ class SoftmaxOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), + "Input(X) of SoftmaxOp should not be null."); + PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Y"), + "Output(Y) of SoftmaxOp should not be null."); + PADDLE_ENFORCE(ctx.Input("X")->dims().size() == 2UL, "The input of softmax op must be a matrix."); ctx.Output("Y")->Resize( diff --git a/paddle/operators/squared_l2_distance_op.cc b/paddle/operators/squared_l2_distance_op.cc index ebe5bd352e..39f4305877 100644 --- a/paddle/operators/squared_l2_distance_op.cc +++ b/paddle/operators/squared_l2_distance_op.cc @@ -23,12 +23,18 @@ class SquaredL2DistanceOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext& ctx) const override { - PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), - "Input of SquaredL2DistanceOp " - "must be initialized."); - PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"), - "Target of SquaredL2DistanceOp " - "must be initialized."); + PADDLE_ENFORCE_NOT_NULL( + ctx.InputVar("X"), + "Input(X) of SquaredL2DistanceOp should not be null."); + PADDLE_ENFORCE_NOT_NULL( + ctx.InputVar("Y"), + "Input(Y) of SquaredL2DistanceOp should not be null."); + PADDLE_ENFORCE_NOT_NULL( + ctx.OutputVar("sub_result"), + "Output(sub_result) of SquaredL2DistanceOp should not be null."); + PADDLE_ENFORCE_NOT_NULL( + ctx.OutputVar("Out"), + "Output(Out) of SquaredL2DistanceOp should not be null."); auto* x = ctx.Input("X"); auto x_dims = x->dims(); diff --git a/paddle/operators/sum_op.cc b/paddle/operators/sum_op.cc index 7170e7256c..41e05c27f9 100644 --- a/paddle/operators/sum_op.cc +++ b/paddle/operators/sum_op.cc @@ -22,6 +22,11 @@ class SumOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { + PADDLE_ENFORCE(!ctx.MultiInputVar("X").empty(), + "Input(X) of SumOp should not be null."); + PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"), + "Output(Out) of SumOp should not be null."); + auto ins = ctx.MultiInput("X"); auto *out = ctx.Output("Out"); int N = ins.size(); diff --git a/paddle/operators/top_k_op.cc b/paddle/operators/top_k_op.cc index ff0e77a344..169b815fef 100644 --- a/paddle/operators/top_k_op.cc +++ b/paddle/operators/top_k_op.cc @@ -24,7 +24,12 @@ class TopkOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), - "Input of TopkOP must be initialized."); + "Input(X) of TopkOp should not be null."); + PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"), + "Output(Out) of TopkOp should not be null."); + PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Indices"), + "Output(Indices) of TopkOp should not be null."); + auto *input = ctx.Input("X"); const int k = static_cast(ctx.Attr("k")); diff --git a/paddle/operators/uniform_random_op.cc b/paddle/operators/uniform_random_op.cc index ed79736936..184bcbc29c 100644 --- a/paddle/operators/uniform_random_op.cc +++ b/paddle/operators/uniform_random_op.cc @@ -48,6 +48,10 @@ class UniformRandomOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext& ctx) const override { + PADDLE_ENFORCE_NOT_NULL( + ctx.OutputVar("Out"), + "Output(Out) of UniformRandomOp should not be null."); + PADDLE_ENFORCE(Attr("min") < Attr("max"), "uniform_random's min must less then max"); auto* tensor = ctx.Output("Out"); diff --git a/python/paddle/v2/framework/tests/test_add_two_op.py b/python/paddle/v2/framework/tests/test_add_op.py similarity index 100% rename from python/paddle/v2/framework/tests/test_add_two_op.py rename to python/paddle/v2/framework/tests/test_add_op.py diff --git a/python/paddle/v2/framework/tests/test_gaussian_random_op.py b/python/paddle/v2/framework/tests/test_gaussian_random_op.py index 1f9e4db783..1888ee28f9 100644 --- a/python/paddle/v2/framework/tests/test_gaussian_random_op.py +++ b/python/paddle/v2/framework/tests/test_gaussian_random_op.py @@ -4,7 +4,7 @@ from paddle.v2.framework.op import Operator import numpy -class GaussianRandomTest(unittest.TestCase): +class TestGaussianRandomOp(unittest.TestCase): def test_cpu(self): self.gaussian_random_test(place=core.CPUPlace()) diff --git a/python/paddle/v2/framework/tests/test_identity_op.py b/python/paddle/v2/framework/tests/test_identity_op.py new file mode 100644 index 0000000000..2e95e7c786 --- /dev/null +++ b/python/paddle/v2/framework/tests/test_identity_op.py @@ -0,0 +1,20 @@ +import unittest +import numpy as np +from op_test import OpTest + + +class TestIdentityOp(OpTest): + def setUp(self): + self.op_type = "identity" + self.inputs = {'X': np.random.random((10, 10)).astype("float32")} + self.outputs = {'Out': self.inputs['X']} + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['X'], 'Out') + + +if __name__ == "__main__": + unittest.main() diff --git a/python/paddle/v2/framework/tests/test_lookup_table.py b/python/paddle/v2/framework/tests/test_lookup_table_op.py similarity index 100% rename from python/paddle/v2/framework/tests/test_lookup_table.py rename to python/paddle/v2/framework/tests/test_lookup_table_op.py diff --git a/python/paddle/v2/framework/tests/test_minus_op.py b/python/paddle/v2/framework/tests/test_minus_op.py index dea797a1fe..c56d7cb548 100644 --- a/python/paddle/v2/framework/tests/test_minus_op.py +++ b/python/paddle/v2/framework/tests/test_minus_op.py @@ -3,7 +3,7 @@ import numpy as np from op_test import OpTest -class MinusOpTest(OpTest): +class TestMinusOp(OpTest): def setUp(self): self.op_type = "minus" self.inputs = { diff --git a/python/paddle/v2/framework/tests/test_cross_entropy_op.py b/python/paddle/v2/framework/tests/test_onehot_cross_entropy_op.py similarity index 95% rename from python/paddle/v2/framework/tests/test_cross_entropy_op.py rename to python/paddle/v2/framework/tests/test_onehot_cross_entropy_op.py index 253e7b8a24..fd3cbdb803 100644 --- a/python/paddle/v2/framework/tests/test_cross_entropy_op.py +++ b/python/paddle/v2/framework/tests/test_onehot_cross_entropy_op.py @@ -3,7 +3,7 @@ import numpy from op_test import OpTest -class TestCrossEntropy(OpTest): +class TestOnehotCrossEntropyOp(OpTest): def setUp(self): self.op_type = "onehot_cross_entropy" batch_size = 30 diff --git a/python/paddle/v2/framework/tests/test_scale_and_identity_op.py b/python/paddle/v2/framework/tests/test_scale_op.py similarity index 56% rename from python/paddle/v2/framework/tests/test_scale_and_identity_op.py rename to python/paddle/v2/framework/tests/test_scale_op.py index 05d76d4282..2ea1e18547 100644 --- a/python/paddle/v2/framework/tests/test_scale_and_identity_op.py +++ b/python/paddle/v2/framework/tests/test_scale_op.py @@ -3,20 +3,7 @@ import numpy as np from op_test import OpTest -class IdentityTest(OpTest): - def setUp(self): - self.op_type = "identity" - self.inputs = {'X': np.random.random((10, 10)).astype("float32")} - self.outputs = {'Out': self.inputs['X']} - - def test_check_output(self): - self.check_output() - - def test_check_grad(self): - self.check_grad(['X'], 'Out') - - -class ScaleTest(OpTest): +class TestScaleOp(OpTest): def setUp(self): self.op_type = "scale" self.inputs = {'X': np.random.random((10, 10)).astype("float32")} diff --git a/python/paddle/v2/framework/tests/test_sgd_op.py b/python/paddle/v2/framework/tests/test_sgd_op.py index 557cf15ace..64e54d1500 100644 --- a/python/paddle/v2/framework/tests/test_sgd_op.py +++ b/python/paddle/v2/framework/tests/test_sgd_op.py @@ -3,7 +3,7 @@ import numpy as np from op_test import OpTest -class TestSGD(OpTest): +class TestSGDOp(OpTest): def setUp(self): self.op_type = "sgd" w = np.random.random((102, 105)).astype("float32") diff --git a/python/paddle/v2/framework/tests/test_sigmoid_op.py b/python/paddle/v2/framework/tests/test_sigmoid_op.py index 2316e49eff..d65d887db4 100644 --- a/python/paddle/v2/framework/tests/test_sigmoid_op.py +++ b/python/paddle/v2/framework/tests/test_sigmoid_op.py @@ -3,7 +3,7 @@ import numpy as np from op_test import OpTest -class TestSigmoid(OpTest): +class TestSigmoidOp(OpTest): def setUp(self): self.op_type = "sigmoid" self.inputs = { diff --git a/python/paddle/v2/framework/tests/test_top_k_op.py b/python/paddle/v2/framework/tests/test_top_k_op.py index cab799256d..694f37d612 100644 --- a/python/paddle/v2/framework/tests/test_top_k_op.py +++ b/python/paddle/v2/framework/tests/test_top_k_op.py @@ -21,6 +21,9 @@ class TestTopkOp(OpTest): self.outputs = {'Out': output, 'Indices': indices} + def test_check_output(self): + self.check_output() + class TestTopkOp3d(OpTest): def setUp(self): @@ -42,6 +45,9 @@ class TestTopkOp3d(OpTest): self.outputs = {'Out': output, 'Indices': indices} + def test_check_output(self): + self.check_output() + if __name__ == "__main__": unittest.main() diff --git a/python/paddle/v2/framework/tests/test_uniform_random_op.py b/python/paddle/v2/framework/tests/test_uniform_random_op.py index 76a5e36e56..9e8898fb59 100644 --- a/python/paddle/v2/framework/tests/test_uniform_random_op.py +++ b/python/paddle/v2/framework/tests/test_uniform_random_op.py @@ -4,7 +4,7 @@ import paddle.v2.framework.core as core import numpy -class UniformRandomTest(unittest.TestCase): +class TestUniformRandomOp(unittest.TestCase): def test_uniform_random_cpu(self): self.uniform_random_test(place=core.CPUPlace()) From b3a50d53c90ebf3174867abf66ff24c3dd82724b Mon Sep 17 00:00:00 2001 From: hedaoyuan Date: Fri, 15 Sep 2017 17:03:14 +0800 Subject: [PATCH 102/115] Remove useless code. --- paddle/gserver/layers/ExpandConvBaseLayer.cpp | 63 ------------------- paddle/gserver/layers/ExpandConvBaseLayer.h | 13 ---- 2 files changed, 76 deletions(-) diff --git a/paddle/gserver/layers/ExpandConvBaseLayer.cpp b/paddle/gserver/layers/ExpandConvBaseLayer.cpp index 2b7bef0a75..5cf7de41f9 100644 --- a/paddle/gserver/layers/ExpandConvBaseLayer.cpp +++ b/paddle/gserver/layers/ExpandConvBaseLayer.cpp @@ -58,67 +58,4 @@ size_t ExpandConvBaseLayer::getOutputSize() { return layerSize; } -void ExpandConvBaseLayer::addSharedBias() { - size_t mapW = getOutputSize() / numFilters_; - size_t mapH = getOutputValue()->getElementCnt() / mapW; - MatrixPtr out = - Matrix::create(getOutputValue()->getData(), mapH, mapW, false, useGpu_); - - Matrix::resizeOrCreate(transOutValue_, mapW, mapH, false, useGpu_); - - out->transpose(transOutValue_, false); // false means no memory allocation - transOutValue_->reshape(transOutValue_->getElementCnt() / numFilters_, - numFilters_); - - MatrixPtr bias = Matrix::create(biases_->getW()->getData(), - 1, - biases_->getW()->getElementCnt(), - false, - useGpu_); - transOutValue_->addBias(*bias, 1.0f); - - transOutValue_->reshape(mapW, mapH); - transOutValue_->transpose(out, false); // false means no memory allocation - - out->clear(); - bias->clear(); -} - -void ExpandConvBaseLayer::addUnsharedBias() { - MatrixPtr outValue = getOutputValue(); - MatrixPtr bias = Matrix::create(biases_->getW()->getData(), - 1, - biases_->getW()->getElementCnt(), - false, - useGpu_); - outValue->addBias(*bias, 1.0f); -} - -void ExpandConvBaseLayer::bpropSharedBias(MatrixPtr biases, MatrixPtr v) { - size_t mapW = getOutputSize() / numFilters_; - size_t mapH = v->getElementCnt() / mapW; - MatrixPtr vTmp = Matrix::create(v->getData(), mapH, mapW, false, useGpu_); - - Matrix::resizeOrCreate(transOutValue_, mapW, mapH, false, useGpu_); - - vTmp->transpose(transOutValue_, false); // false means no memory allocation - transOutValue_->reshape(transOutValue_->getElementCnt() / numFilters_, - numFilters_); - biases->collectBias(*transOutValue_, 1.0f); -} - -void ExpandConvBaseLayer::bpropBiases(MatrixPtr v) { - MatrixPtr biases = Matrix::create(biases_->getWGrad()->getData(), - 1, - biases_->getWGrad()->getElementCnt(), - false, - useGpu_); - if (sharedBiases_) { - bpropSharedBias(biases, v); - } else { - biases->collectBias(*v, 1.0f); - } - biases->clear(); -} - } // namespace paddle diff --git a/paddle/gserver/layers/ExpandConvBaseLayer.h b/paddle/gserver/layers/ExpandConvBaseLayer.h index 01c699d234..74b3296119 100644 --- a/paddle/gserver/layers/ExpandConvBaseLayer.h +++ b/paddle/gserver/layers/ExpandConvBaseLayer.h @@ -39,19 +39,6 @@ public: const ParameterMap& parameterMap) override; size_t getOutputSize(); - - /** - * Add shared bias. - */ - void addSharedBias(); - - /** - * Add unshared bias. - */ - void addUnsharedBias(); - - void bpropSharedBias(MatrixPtr biases, MatrixPtr v); - void bpropBiases(MatrixPtr v); }; } // namespace paddle From 3616ef77390b250e543cc7ba177860905f1ec975 Mon Sep 17 00:00:00 2001 From: hedaoyuan Date: Fri, 15 Sep 2017 17:52:26 +0800 Subject: [PATCH 103/115] Remove ExpandConvBaseLayer. --- paddle/gserver/layers/ExpandConvLayer.cpp | 36 ++++++++++++++++++++++- paddle/gserver/layers/ExpandConvLayer.h | 7 +++-- 2 files changed, 39 insertions(+), 4 deletions(-) diff --git a/paddle/gserver/layers/ExpandConvLayer.cpp b/paddle/gserver/layers/ExpandConvLayer.cpp index 7c1190a414..abc36aaef7 100644 --- a/paddle/gserver/layers/ExpandConvLayer.cpp +++ b/paddle/gserver/layers/ExpandConvLayer.cpp @@ -36,7 +36,35 @@ inline bool isDepthwiseConv(int channels, int groups) { bool ExpandConvLayer::init(const LayerMap &layerMap, const ParameterMap ¶meterMap) { /* Initialize the basic convolutional parent class */ - ExpandConvBaseLayer::init(layerMap, parameterMap); + ConvBaseLayer::init(layerMap, parameterMap); + + int index = 0; + for (auto &inputConfig : config_.inputs()) { + const ConvConfig &conf = inputConfig.conv_conf(); + /* Consistent caffe mode for multiple input */ + caffeMode_ = conf.caffe_mode(); + + // create a new weight + size_t height, width; + height = filterPixels_[index] * filterChannels_[index]; + width = (!isDeconv_) ? numFilters_ : channels_[index]; + CHECK_EQ(parameters_[index]->getSize(), width * height); + Weight *w = new Weight(height, width, parameters_[index]); + weights_.emplace_back(w); + index++; + } + if (biasParameter_.get()) { + if (sharedBiases_) { + CHECK_EQ((size_t)numFilters_, biasParameter_->getSize()); + biases_ = + std::unique_ptr(new Weight(numFilters_, 1, biasParameter_)); + } else { + biases_ = + std::unique_ptr(new Weight(getSize(), 1, biasParameter_)); + } + } + + getOutputSize(); size_t numInputs = config_.inputs_size(); inputShape_.resize(numInputs); @@ -108,6 +136,12 @@ bool ExpandConvLayer::init(const LayerMap &layerMap, return true; } +size_t ExpandConvLayer::getOutputSize() { + CHECK_NE(inputLayers_.size(), 0UL); + size_t layerSize = ConvBaseLayer::calOutputSize(); + return layerSize; +} + // i is the index of input layers #define BACKWARD_INPUT(i, inputs, outputs) \ backward_[2 * i]->calc(inputs, outputs) diff --git a/paddle/gserver/layers/ExpandConvLayer.h b/paddle/gserver/layers/ExpandConvLayer.h index a1f943d152..698c37fb3f 100644 --- a/paddle/gserver/layers/ExpandConvLayer.h +++ b/paddle/gserver/layers/ExpandConvLayer.h @@ -28,10 +28,9 @@ namespace paddle { * The config file api is img_conv_layer. */ -class ExpandConvLayer : public ExpandConvBaseLayer { +class ExpandConvLayer : public ConvBaseLayer { public: - explicit ExpandConvLayer(const LayerConfig& config) - : ExpandConvBaseLayer(config) {} + explicit ExpandConvLayer(const LayerConfig& config) : ConvBaseLayer(config) {} ~ExpandConvLayer() {} @@ -41,6 +40,8 @@ public: void forward(PassType passType) override; void backward(const UpdateCallback& callback) override; + size_t getOutputSize(); + protected: std::vector inputShape_; std::vector filterShape_; From 9e74b898776065fcb2504b70b1f5fc3ea137fc8c Mon Sep 17 00:00:00 2001 From: peterzhang2029 Date: Fri, 15 Sep 2017 17:55:40 +0800 Subject: [PATCH 104/115] update notation in networks.py roughly --- .../paddle/trainer_config_helpers/networks.py | 338 +++++++++--------- 1 file changed, 171 insertions(+), 167 deletions(-) diff --git a/python/paddle/trainer_config_helpers/networks.py b/python/paddle/trainer_config_helpers/networks.py index 2f604ee45a..04bb9ce1d3 100644 --- a/python/paddle/trainer_config_helpers/networks.py +++ b/python/paddle/trainer_config_helpers/networks.py @@ -11,10 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -""" -""" -# from activations import * from activations import LinearActivation, ReluActivation, SoftmaxActivation, \ IdentityActivation, TanhActivation, SequenceSoftmaxActivation from attrs import ExtraAttr @@ -55,13 +52,13 @@ def sequence_conv_pool(input, context_attr=None, pool_attr=None): """ - Text convolution pooling layers helper. + Text convolution pooling group. Text input => Context Projection => FC Layer => Pooling => Output. - :param name: name of output layer(pooling layer name) + :param name: group name. :type name: basestring - :param input: name of input layer + :param input: input layer. :type input: LayerOutput :param context_len: context projection length. See context_projection's document. @@ -70,34 +67,34 @@ def sequence_conv_pool(input, :type hidden_size: int :param context_start: context start position. See context_projection's context_start. - :type context_start: int or None + :type context_start: int|None :param pool_type: pooling layer type. See pooling_layer's document. - :type pool_type: BasePoolingType. + :type pool_type: BasePoolingType :param context_proj_layer_name: context projection layer name. None if user don't care. :type context_proj_layer_name: basestring - :param context_proj_param_attr: context projection parameter attribute. - None if user don't care. - :type context_proj_param_attr: ParameterAttribute or None. + :param context_proj_param_attr: padding parameter attribute of context projection layer. + If false, it means padding always be zero. + :type context_proj_param_attr: ParameterAttribute|None :param fc_layer_name: fc layer name. None if user don't care. :type fc_layer_name: basestring :param fc_param_attr: fc layer parameter attribute. None if user don't care. - :type fc_param_attr: ParameterAttribute or None + :type fc_param_attr: ParameterAttribute|None :param fc_bias_attr: fc bias parameter attribute. False if no bias, None if user don't care. - :type fc_bias_attr: ParameterAttribute or None - :param fc_act: fc layer activation type. None means tanh + :type fc_bias_attr: ParameterAttribute|False|None + :param fc_act: fc layer activation type. None means tanh. :type fc_act: BaseActivation - :param pool_bias_attr: pooling layer bias attr. None if don't care. - False if no bias. - :type pool_bias_attr: ParameterAttribute or None. + :param pool_bias_attr: pooling layer bias attr. False if no bias. + None if user don't care. + :type pool_bias_attr: ParameterAttribute|False|None :param fc_attr: fc layer extra attribute. :type fc_attr: ExtraLayerAttribute :param context_attr: context projection layer extra attribute. :type context_attr: ExtraLayerAttribute :param pool_attr: pooling layer extra attribute. :type pool_attr: ExtraLayerAttribute - :return: output layer name. + :return: layer's output. :rtype: LayerOutput """ # Set Default Value to param @@ -163,45 +160,45 @@ def simple_img_conv_pool(input, """ Simple image convolution and pooling group. - Input => conv => pooling + Img input => Conv => Pooling => Output. - :param name: group name + :param name: group name. :type name: basestring - :param input: input layer name. + :param input: input layer. :type input: LayerOutput - :param filter_size: see img_conv_layer for details + :param filter_size: see img_conv_layer for details. :type filter_size: int - :param num_filters: see img_conv_layer for details + :param num_filters: see img_conv_layer for details. :type num_filters: int - :param pool_size: see img_pool_layer for details + :param pool_size: see img_pool_layer for details. :type pool_size: int - :param pool_type: see img_pool_layer for details + :param pool_type: see img_pool_layer for details. :type pool_type: BasePoolingType - :param act: see img_conv_layer for details + :param act: see img_conv_layer for details. :type act: BaseActivation - :param groups: see img_conv_layer for details + :param groups: see img_conv_layer for details. :type groups: int - :param conv_stride: see img_conv_layer for details + :param conv_stride: see img_conv_layer for details. :type conv_stride: int - :param conv_padding: see img_conv_layer for details + :param conv_padding: see img_conv_layer for details. :type conv_padding: int - :param bias_attr: see img_conv_layer for details + :param bias_attr: see img_conv_layer for details. :type bias_attr: ParameterAttribute - :param num_channel: see img_conv_layer for details + :param num_channel: see img_conv_layer for details. :type num_channel: int - :param param_attr: see img_conv_layer for details + :param param_attr: see img_conv_layer for details. :type param_attr: ParameterAttribute - :param shared_bias: see img_conv_layer for details + :param shared_bias: see img_conv_layer for details. :type shared_bias: bool - :param conv_layer_attr: see img_conv_layer for details + :param conv_layer_attr: see img_conv_layer for details. :type conv_layer_attr: ExtraLayerAttribute - :param pool_stride: see img_pool_layer for details + :param pool_stride: see img_pool_layer for details. :type pool_stride: int - :param pool_padding: see img_pool_layer for details + :param pool_padding: see img_pool_layer for details. :type pool_padding: int - :param pool_layer_attr: see img_pool_layer for details + :param pool_layer_attr: see img_pool_layer for details. :type pool_layer_attr: ExtraLayerAttribute - :return: Layer's output + :return: layer's output :rtype: LayerOutput """ _conv_ = img_conv_layer( @@ -252,48 +249,52 @@ def img_conv_bn_pool(input, pool_layer_attr=None): """ Convolution, batch normalization, pooling group. + + Img input => Conv => BN => Pooling => Output. - :param name: group name + :param name: group name. :type name: basestring - :param input: layer's input - :type input: LayerOutput - :param filter_size: see img_conv_layer's document + :param input: input layer. + :type input: LayerOutput + :param filter_size: see img_conv_layer for details. :type filter_size: int - :param num_filters: see img_conv_layer's document + :param num_filters: see img_conv_layer for details. :type num_filters: int - :param pool_size: see img_pool_layer's document. + :param pool_size: see img_pool_layer for details. :type pool_size: int - :param pool_type: see img_pool_layer's document. + :param pool_type: see img_pool_layer for details. :type pool_type: BasePoolingType - :param act: see batch_norm_layer's document. + :param act: see batch_norm_layer for details. :type act: BaseActivation - :param groups: see img_conv_layer's document + :param groups: see img_conv_layer for details. :type groups: int - :param conv_stride: see img_conv_layer's document. + :param conv_stride: see img_conv_layer for details. :type conv_stride: int - :param conv_padding: see img_conv_layer's document. + :param conv_padding: see img_conv_layer for details. :type conv_padding: int - :param conv_bias_attr: see img_conv_layer's document. + :param conv_bias_attr: see img_conv_layer for details. :type conv_bias_attr: ParameterAttribute - :param num_channel: see img_conv_layer's document. + :param num_channel: see img_conv_layer for details. :type num_channel: int - :param conv_param_attr: see img_conv_layer's document. + :param conv_param_attr: see img_conv_layer for details. :type conv_param_attr: ParameterAttribute - :param shared_bias: see img_conv_layer's document. + :param shared_bias: see img_conv_layer for details. :type shared_bias: bool - :param conv_layer_attr: see img_conv_layer's document. + :param conv_layer_attr: see img_conv_layer for details. :type conv_layer_attr: ExtraLayerOutput - :param bn_param_attr: see batch_norm_layer's document. - :type bn_param_attr: ParameterAttribute. - :param bn_bias_attr: see batch_norm_layer's document. - :param bn_layer_attr: ParameterAttribute. - :param pool_stride: see img_pool_layer's document. + :param bn_param_attr: see batch_norm_layer for details. + :type bn_param_attr: ParameterAttribute + :param bn_bias_attr: see batch_norm_layer for details. + :type bn_bias_attr: ParameterAttribute + :param bn_layer_attr: see batch_norm_layer for details. + :type bn_layer_attr: ExtraLayerAttribute + :param pool_stride: see img_pool_layer for details. :type pool_stride: int - :param pool_padding: see img_pool_layer's document. + :param pool_padding: see img_pool_layer for details. :type pool_padding: int - :param pool_layer_attr: see img_pool_layer's document. + :param pool_layer_attr: see img_pool_layer for details. :type pool_layer_attr: ExtraLayerAttribute - :return: Layer groups output + :return: layer's output :rtype: LayerOutput """ __conv__ = img_conv_layer( @@ -348,10 +349,10 @@ def img_conv_group(input, :param conv_batchnorm_drop_rate: if conv_with_batchnorm[i] is true, conv_batchnorm_drop_rate[i] represents the drop rate of each batch norm. :type conv_batchnorm_drop_rate: list - :param input: layer's input. + :param input: input layer. :type input: LayerOutput - :param conv_num_filter: output channels num. - :type conv_num_filter: int + :param conv_num_filter: list of output channels num. + :type conv_num_filter: list|tuple :param pool_size: pooling filter size. :type pool_size: int :param num_channels: input channels num. @@ -362,18 +363,18 @@ def img_conv_group(input, :type conv_filter_size: int :param conv_act: activation funciton after convolution. :type conv_act: BaseActivation - :param conv_with_batchnorm: conv_with_batchnorm[i] represents - if there is a batch normalization after each convolution. + :param conv_with_batchnorm: if conv_with_batchnorm[i] is true, + there is a batch normalization operation after each convolution. :type conv_with_batchnorm: list :param pool_stride: pooling stride size. :type pool_stride: int :param pool_type: pooling type. :type pool_type: BasePoolingType - :param param_attr: Convolution param attribute. - None means default attribute. + :param param_attr: param attribute of convolution layer, + None means default attribute. :type param_attr: ParameterAttribute - :return: Layer's output - :type: LayerOutput + :return: layer's output + :rtype: LayerOutput """ tmp = input @@ -466,12 +467,14 @@ def vgg_16_network(input_image, num_channels, num_classes=1000): """ Same model from https://gist.github.com/ksimonyan/211839e770f7b538e2d8 - :param num_classes: - :param input_image: + :param num_classes: number of class. + :type num_classes: int + :param input_image: input layer. :type input_image: LayerOutput - :param num_channels: + :param num_channels: input channels num. :type num_channels: int - :return: + :return: layer's output + :rtype: LayerOutput """ tmp = img_conv_group( @@ -560,8 +563,8 @@ def simple_lstm(input, """ Simple LSTM Cell. - It just combine a mixed layer with fully_matrix_projection and a lstmemory - layer. The simple lstm cell was implemented as follow equations. + It just combines a mixed layer with fully_matrix_projection and a lstmemory + layer. The simple lstm cell was implemented with follow equations. .. math:: @@ -575,37 +578,37 @@ def simple_lstm(input, h_t & = o_t tanh(c_t) - Please refer **Generating Sequences With Recurrent Neural Networks** if you - want to know what lstm is. Link_ is here. + Please refer to **Generating Sequences With Recurrent Neural Networks** for more + details about lstm. Link_ is here. .. _Link: http://arxiv.org/abs/1308.0850 :param name: lstm layer name. :type name: basestring - :param input: input layer name. + :param input: layer's input. :type input: LayerOutput :param size: lstm layer size. :type size: int - :param reverse: whether to process the input data in a reverse order + :param reverse: process the input in a reverse order or not. :type reverse: bool - :param mat_param_attr: mixed layer's matrix projection parameter attribute. + :param mat_param_attr: parameter attribute of matrix projection in mixed layer. :type mat_param_attr: ParameterAttribute :param bias_param_attr: bias parameter attribute. False means no bias, None means default bias. :type bias_param_attr: ParameterAttribute|False - :param inner_param_attr: lstm cell parameter attribute. + :param inner_param_attr: parameter attribute of lstm cell. :type inner_param_attr: ParameterAttribute - :param act: lstm final activiation type + :param act: last activiation type of lstm. :type act: BaseActivation - :param gate_act: lstm gate activiation type + :param gate_act: gate activiation type of lstm. :type gate_act: BaseActivation - :param state_act: lstm state activiation type. + :param state_act: state activiation type of lstm. :type state_act: BaseActivation - :param mixed_layer_attr: mixed layer's extra attribute. + :param mixed_layer_attr: extra attribute of mixed layer. :type mixed_layer_attr: ExtraLayerAttribute - :param lstm_cell_attr: lstm layer's extra attribute. + :param lstm_cell_attr: extra attribute of lstm. :type lstm_cell_attr: ExtraLayerAttribute - :return: lstm layer name. + :return: layer's output. :rtype: LayerOutput """ fc_name = 'lstm_transform_%s' % name @@ -643,9 +646,9 @@ def lstmemory_unit(input, lstm_bias_attr=None, lstm_layer_attr=None): """ - Define calculations that a LSTM unit performs during a single time step. - This function itself is not a recurrent layer, so it can not be - directly used to process sequence inputs. This function is always used in + lstmemory_unit defines the caculation process of a LSTM unit during a + single time step. This function is not a recurrent layer, so it can not be + directly used to process sequence input. This function is always used in recurrent_group (see layers.py for more details) to implement attention mechanism. @@ -676,7 +679,7 @@ def lstmemory_unit(input, state_act=TanhActivation()) - :param input: input layer name. + :param input: input layer. :type input: LayerOutput :param out_memory: output of previous time step :type out_memory: LayerOutput | None @@ -684,15 +687,15 @@ def lstmemory_unit(input, :type name: basestring :param size: lstmemory unit size. :type size: int - :param param_attr: Parameter config, None if use default. + :param param_attr: parameter attribute, None means default attribute. :type param_attr: ParameterAttribute - :param act: lstm final activiation type + :param act: last activiation type of lstm. :type act: BaseActivation - :param gate_act: lstm gate activiation type + :param gate_act: gate activiation type of lstm. :type gate_act: BaseActivation - :param state_act: lstm state activiation type. + :param state_act: state activiation type of lstm. :type state_act: BaseActivation - :param input_proj_bias_attr: bias attribute for input-to-hidden projection. + :param input_proj_bias_attr: bias attribute for input to hidden projection. False means no bias, None means default bias. :type input_proj_bias_attr: ParameterAttribute|False|None :param input_proj_layer_attr: extra layer attribute for input to hidden @@ -700,8 +703,8 @@ def lstmemory_unit(input, :type input_proj_layer_attr: ExtraLayerAttribute :param lstm_bias_attr: bias parameter attribute of lstm layer. False means no bias, None means default bias. - :type lstm_bias_attr: ParameterAttribute|False - :param lstm_layer_attr: lstm layer's extra attribute. + :type lstm_bias_attr: ParameterAttribute|False|None + :param lstm_layer_attr: extra attribute of lstm layer. :type lstm_layer_attr: ExtraLayerAttribute :return: lstmemory unit name. :rtype: LayerOutput @@ -758,9 +761,9 @@ def lstmemory_group(input, lstm_group is a recurrent_group version of Long Short Term Memory. It does exactly the same calculation as the lstmemory layer (see lstmemory in layers.py for the maths) does. A promising benefit is that LSTM memory - cell states, or hidden states in every time step are accessible to the + cell states(or hidden states) in every time step are accessible to the user. This is especially useful in attention model. If you do not need to - access the internal states of the lstm, but merely use its outputs, + access the internal states of the lstm and merely use its outputs, it is recommended to use the lstmemory, which is relatively faster than lstmemory_group. @@ -781,28 +784,28 @@ def lstmemory_group(input, gate_act=SigmoidActivation(), state_act=TanhActivation()) - :param input: input layer name. + :param input: input layer. :type input: LayerOutput :param size: lstmemory group size. :type size: int - :param name: name of the lstmemory group. + :param name: name of lstmemory group. :type name: basestring - :param out_memory: output of previous time step + :param out_memory: output of previous time step. :type out_memory: LayerOutput | None - :param reverse: is lstm reversed + :param reverse: process the input in a reverse order or not. :type reverse: bool - :param param_attr: Parameter config, None if use default. + :param param_attr: parameter attribute, None means default attribute. :type param_attr: ParameterAttribute - :param act: lstm final activiation type + :param act: last activiation type of lstm. :type act: BaseActivation - :param gate_act: lstm gate activiation type + :param gate_act: gate activiation type of lstm. :type gate_act: BaseActivation - :param state_act: lstm state activiation type. + :param state_act: state activiation type of lstm. :type state_act: BaseActivation :param lstm_bias_attr: bias parameter attribute of lstm layer. False means no bias, None means default bias. - :type lstm_bias_attr: ParameterAttribute|False - :param input_proj_bias_attr: bias attribute for input-to-hidden projection. + :type lstm_bias_attr: ParameterAttribute|False|None + :param input_proj_bias_attr: bias attribute for input to hidden projection. False means no bias, None means default bias. :type input_proj_bias_attr: ParameterAttribute|False|None :param input_proj_layer_attr: extra layer attribute for input to hidden @@ -848,15 +851,15 @@ def gru_unit(input, gru_layer_attr=None, naive=False): """ - Define calculations that a gated recurrent unit performs in a single time - step. This function itself is not a recurrent layer, so it can not be - directly used to process sequence inputs. This function is always used in + gru_unit defines the calculation process of a gated recurrent unit during a single + time step. This function is not a recurrent layer, so it can not be + directly used to process sequence input. This function is always used in the recurrent_group (see layers.py for more details) to implement attention mechanism. Please see grumemory in layers.py for the details about the maths. - :param input: input layer name. + :param input: input layer. :type input: LayerOutput :param memory_boot: the initialization state of the LSTM cell. :type memory_boot: LayerOutput | None @@ -864,12 +867,12 @@ def gru_unit(input, :type name: basestring :param size: hidden size of the gru. :type size: int - :param act: type of the activation + :param act: activation type of gru :type act: BaseActivation - :param gate_act: type of the gate activation + :param gate_act: gate activation type or gru :type gate_act: BaseActivation - :param gru_layer_attr: Extra parameter attribute of the gru layer. - :type gru_layer_attr: ParameterAttribute|False + :param gru_layer_attr: Extra attribute of the gru layer. + :type gru_layer_attr: ExtraLayerAttribute :return: the gru output layer. :rtype: LayerOutput """ @@ -915,7 +918,7 @@ def gru_group(input, does exactly the same calculation as the grumemory layer does. A promising benefit is that gru hidden states are accessible to the user. This is especially useful in attention model. If you do not need to access - any internal state, but merely use the outputs of a GRU, it is recommended + any internal state and merely use the outputs of a GRU, it is recommended to use the grumemory, which is relatively faster. Please see grumemory in layers.py for more detail about the maths. @@ -924,12 +927,12 @@ def gru_group(input, .. code-block:: python - gru = gur_group(input=[layer1], + gru = gru_group(input=[layer1], size=256, act=TanhActivation(), gate_act=SigmoidActivation()) - :param input: input layer name. + :param input: input layer. :type input: LayerOutput :param memory_boot: the initialization state of the LSTM cell. :type memory_boot: LayerOutput | None @@ -937,16 +940,17 @@ def gru_group(input, :type name: basestring :param size: hidden size of the gru. :type size: int - :param reverse: whether to process the input data in a reverse order + :param reverse: process the input in a reverse order or not. :type reverse: bool - :param act: type of the activiation + :param act: activiation type of gru :type act: BaseActivation - :param gate_act: type of the gate activiation + :param gate_act: gate activiation type of gru :type gate_act: BaseActivation - :param gru_bias_attr: bias. False means no bias, None means default bias. - :type gru_bias_attr: ParameterAttribute|False - :param gru_layer_attr: Extra parameter attribute of the gru layer. - :type gru_layer_attr: ParameterAttribute|False + :param gru_bias_attr: bias parameter attribute of gru layer, + False means no bias, None means default bias. + :type gru_bias_attr: ParameterAttribute|False|None + :param gru_layer_attr: Extra attribute of the gru layer. + :type gru_layer_attr: ExtraLayerAttribute :return: the gru group. :rtype: LayerOutput """ @@ -986,11 +990,11 @@ def simple_gru(input, gru_layer_attr=None, naive=False): """ - You maybe see gru_step_layer, grumemory in layers.py, gru_unit, gru_group, + You may see gru_step_layer, grumemory in layers.py, gru_unit, gru_group, simple_gru in network.py. The reason why there are so many interfaces is that we have two ways to implement recurrent neural network. One way is to use one complete layer to implement rnn (including simple rnn, gru and lstm) - with multiple time steps, such as recurrent_layer, lstmemory, grumemory. But, + with multiple time steps, such as recurrent_layer, lstmemory, grumemory. But the multiplication operation :math:`W x_t` is not computed in these layers. See details in their interfaces in layers.py. The other implementation is to use an recurrent group which can ensemble a @@ -1018,22 +1022,23 @@ def simple_gru(input, gru = simple_gru(input=[layer1], size=256) - :param input: input layer name. + :param input: input layer. :type input: LayerOutput :param name: name of the gru group. :type name: basestring :param size: hidden size of the gru. :type size: int - :param reverse: whether to process the input data in a reverse order + :param reverse: process the input in a reverse order or not. :type reverse: bool - :param act: type of the activiation + :param act: activiation type of gru :type act: BaseActivation - :param gate_act: type of the gate activiation + :param gate_act: gate activiation type of gru :type gate_act: BaseActivation - :param gru_bias_attr: bias. False means no bias, None means default bias. - :type gru_bias_attr: ParameterAttribute|False - :param gru_layer_attr: Extra parameter attribute of the gru layer. - :type gru_layer_attr: ParameterAttribute|False + :param gru_bias_attr: bias parameter attribute of gru layer, + False means no bias, None means default bias. + :type gru_bias_attr: ParameterAttribute|False|None + :param gru_layer_attr: Extra attribute of the gru layer. + :type gru_layer_attr: ExtraLayerAttribute :return: the gru group. :rtype: LayerOutput """ @@ -1071,8 +1076,8 @@ def simple_gru2(input, mixed_layer_attr=None, gru_cell_attr=None): """ - simple_gru2 is the same with simple_gru, but using grumemory instead - Please see grumemory in layers.py for more detail about the maths. + simple_gru2 is the same with simple_gru, but using grumemory instead. + Please refer to grumemory in layers.py for more detail about the math. simple_gru2 is faster than simple_gru. The example usage is: @@ -1081,22 +1086,23 @@ def simple_gru2(input, gru = simple_gru2(input=[layer1], size=256) - :param input: input layer name. + :param input: input layer. :type input: LayerOutput :param name: name of the gru group. :type name: basestring :param size: hidden size of the gru. :type size: int - :param reverse: whether to process the input data in a reverse order + :param reverse: process the input in a reverse order or not. :type reverse: bool - :param act: type of the activiation + :param act: activiation type of gru :type act: BaseActivation - :param gate_act: type of the gate activiation + :param gate_act: gate activiation type of gru :type gate_act: BaseActivation - :param gru_bias_attr: bias. False means no bias, None means default bias. - :type gru_bias_attr: ParameterAttribute|False - :param gru_layer_attr: Extra parameter attribute of the gru layer. - :type gru_layer_attr: ParameterAttribute|False + :param gru_bias_attr: bias parameter attribute of gru layer, + False means no bias, None means default bias. + :type gru_bias_attr: ParameterAttribute|False|None + :param gru_layer_attr: Extra attribute of the gru layer. + :type gru_layer_attr: ExtraLayerAttribute :return: the gru group. :rtype: LayerOutput """ @@ -1145,7 +1151,7 @@ def bidirectional_gru(input, concat_act=None): """ A bidirectional_gru is a recurrent unit that iterates over the input - sequence both in forward and bardward orders, and then concatenate two + sequence both in forward and backward orders, and then concatenate two outputs to form a final output. However, concatenation of two outputs is not the only way to form the final output, you can also, for example, just add them together. @@ -1162,11 +1168,10 @@ def bidirectional_gru(input, :type input: LayerOutput :param size: gru layer size. :type size: int - :param return_seq: If set False, outputs of the last time step are - concatenated and returned. - If set True, the entire output sequences that are - processed in forward and backward directions are + :param return_seq: If set False, the last time step of output are concatenated and returned. + If set True, the entire output sequences in forward + and backward directions are concatenated and returned. :type return_seq: bool :return: LayerOutput object. :rtype: LayerOutput @@ -1230,8 +1235,8 @@ def bidirectional_lstm(input, concat_act=None): """ A bidirectional_lstm is a recurrent unit that iterates over the input - sequence both in forward and bardward orders, and then concatenate two - outputs form a final output. However, concatenation of two outputs + sequence both in forward and backward orders, and then concatenate two + outputs to form a final output. However, concatenation of two outputs is not the only way to form the final output, you can also, for example, just add them together. @@ -1252,13 +1257,12 @@ def bidirectional_lstm(input, :type input: LayerOutput :param size: lstm layer size. :type size: int - :param return_seq: If set False, outputs of the last time step are - concatenated and returned. - If set True, the entire output sequences that are - processed in forward and backward directions are + :param return_seq: If set False, the last time step of output are concatenated and returned. + If set True, the entire output sequences in forward + and backward directions are concatenated and returned. :type return_seq: bool - :return: LayerOutput object accroding to the return_seq. + :return: LayerOutput object. :rtype: LayerOutput """ args = locals() @@ -1303,7 +1307,7 @@ def simple_attention(encoded_sequence, weight_act=None, name=None): """ - Calculate and then return a context vector by attention machanism. + Calculate and return a context vector with attention mechanism. Size of the context vector equals to size of the encoded_sequence. .. math:: @@ -1336,10 +1340,10 @@ def simple_attention(encoded_sequence, :param name: name of the attention model. :type name: basestring :param softmax_param_attr: parameter attribute of sequence softmax - that is used to produce attention weight + that is used to produce attention weight. :type softmax_param_attr: ParameterAttribute - :param weight_act: activation of the attention model - :type weight_act: Activation + :param weight_act: activation of the attention model. + :type weight_act: BaseActivation :param encoded_sequence: output of the encoder :type encoded_sequence: LayerOutput :param encoded_proj: attention weight is computed by a feed forward neural @@ -1411,7 +1415,7 @@ def inputs(layers, *args): def outputs(layers, *args): """ - Declare the outputs of network. If user have not defined the inputs of + Declare the outputs of network. If user has not defined the inputs of network, this method will calculate the input order by dfs travel. :param layers: Output layers. From 88b31422b3385f7aa1d61946660bf966f2173a43 Mon Sep 17 00:00:00 2001 From: hedaoyuan Date: Fri, 15 Sep 2017 17:59:02 +0800 Subject: [PATCH 105/115] Delete ExpandConvBaseLayer.cpp and ExpandConvBaseLayer.h --- paddle/gserver/layers/ExpandConvBaseLayer.cpp | 61 ------------------- paddle/gserver/layers/ExpandConvBaseLayer.h | 44 ------------- 2 files changed, 105 deletions(-) delete mode 100644 paddle/gserver/layers/ExpandConvBaseLayer.cpp delete mode 100644 paddle/gserver/layers/ExpandConvBaseLayer.h diff --git a/paddle/gserver/layers/ExpandConvBaseLayer.cpp b/paddle/gserver/layers/ExpandConvBaseLayer.cpp deleted file mode 100644 index 5cf7de41f9..0000000000 --- a/paddle/gserver/layers/ExpandConvBaseLayer.cpp +++ /dev/null @@ -1,61 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#include "ExpandConvBaseLayer.h" - -#include "paddle/utils/Logging.h" -namespace paddle { - -bool ExpandConvBaseLayer::init(const LayerMap &layerMap, - const ParameterMap ¶meterMap) { - /* Initialize the basic convolutional parent class */ - ConvBaseLayer::init(layerMap, parameterMap); - - int index = 0; - for (auto &inputConfig : config_.inputs()) { - const ConvConfig &conf = inputConfig.conv_conf(); - /* Consistent caffe mode for multiple input */ - caffeMode_ = conf.caffe_mode(); - - // create a new weight - size_t height, width; - height = filterPixels_[index] * filterChannels_[index]; - width = (!isDeconv_) ? numFilters_ : channels_[index]; - CHECK_EQ(parameters_[index]->getSize(), width * height); - Weight *w = new Weight(height, width, parameters_[index]); - weights_.emplace_back(w); - index++; - } - if (biasParameter_.get()) { - if (sharedBiases_) { - CHECK_EQ((size_t)numFilters_, biasParameter_->getSize()); - biases_ = - std::unique_ptr(new Weight(numFilters_, 1, biasParameter_)); - } else { - biases_ = - std::unique_ptr(new Weight(getSize(), 1, biasParameter_)); - } - } - getOutputSize(); - - return true; -} - -size_t ExpandConvBaseLayer::getOutputSize() { - CHECK_NE(inputLayers_.size(), 0UL); - size_t layerSize = ConvBaseLayer::calOutputSize(); - return layerSize; -} - -} // namespace paddle diff --git a/paddle/gserver/layers/ExpandConvBaseLayer.h b/paddle/gserver/layers/ExpandConvBaseLayer.h deleted file mode 100644 index 74b3296119..0000000000 --- a/paddle/gserver/layers/ExpandConvBaseLayer.h +++ /dev/null @@ -1,44 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once - -#include -#include "ConvBaseLayer.h" -#include "paddle/math/Matrix.h" - -namespace paddle { - -/** - * @brief A subclass of ConvBaseLayer that is a superclass of both - * ExpandConvLayer and ExpandConvTransLayer - */ -class ExpandConvBaseLayer : public ConvBaseLayer { -protected: - /// The transpose of output, which is an auxiliary matrix. - MatrixPtr transOutValue_; - -public: - explicit ExpandConvBaseLayer(const LayerConfig& config) - : ConvBaseLayer(config) {} - - ~ExpandConvBaseLayer() {} - - bool init(const LayerMap& layerMap, - const ParameterMap& parameterMap) override; - - size_t getOutputSize(); -}; - -} // namespace paddle From dae249b1cbb57e1e148788df5fb6b048404a4b00 Mon Sep 17 00:00:00 2001 From: Liu Yiqun Date: Fri, 15 Sep 2017 10:01:11 +0000 Subject: [PATCH 106/115] Delete USE_OP statements and add more ENFORCE statements to check the inputs and outputs in FCOp. --- paddle/operators/fc_op.cc | 23 +++++++++++++++++------ paddle/operators/identity_op.cc | 4 ++-- 2 files changed, 19 insertions(+), 8 deletions(-) diff --git a/paddle/operators/fc_op.cc b/paddle/operators/fc_op.cc index 5549a836c9..e5d0f3c372 100644 --- a/paddle/operators/fc_op.cc +++ b/paddle/operators/fc_op.cc @@ -24,6 +24,15 @@ class FCOp : public NetOp { const framework::VariableNameMap &outputs, const framework::AttributeMap &attrs) : NetOp(type, inputs, outputs, attrs) { + PADDLE_ENFORCE(!Inputs("X").empty(), + "Inputs(X) of FCOp should not be null."); + PADDLE_ENFORCE(!Inputs("W").empty(), + "Inputs(W) of FCOp should not be null."); + PADDLE_ENFORCE(!Outputs("MulOut").empty(), + "Outputs(MulOut) of FCOp should not be null."); + PADDLE_ENFORCE_NE(Output("Out"), framework::kEmptyVarName, + "Output(Out) of FCOp should not be null."); + auto x = Inputs("X"); auto w = Inputs("W"); auto mul_out = Outputs("MulOut"); @@ -68,6 +77,10 @@ class FCOp : public NetOp { // sum_out = X[0] * W[0] + ... + X[n-1] * W[n-1] auto sum_out = mul_out[0]; if (n > 1) { + PADDLE_ENFORCE_NE(Output("SumOut"), framework::kEmptyVarName, + "Output(SumOut) of FCOp should not be null when the " + "size of Inputs(X) > 1."); + sum_out = Output("SumOut"); AppendOp(framework::OpRegistry::CreateOp("sum", {{"X", {mul_out}}}, {{"Out", {sum_out}}}, {})); @@ -81,6 +94,10 @@ class FCOp : public NetOp { auto b = Input("B"); auto add_out = sum_out; if (b != framework::kEmptyVarName) { + PADDLE_ENFORCE_NE( + Output("AddOut"), framework::kEmptyVarName, + "Output(AddOut) of FCOp should not be null when Input(B) is set."); + add_out = Output("AddOut"); AppendOp(framework::OpRegistry::CreateOp( "rowwise_add", {{"X", {sum_out}}, {"b", {Input("B")}}}, @@ -176,11 +193,5 @@ Activation type can be set to `identity` (default), `sigmoid` or `softmax`. } // namespace operators } // namespace paddle -USE_OP(mul); -USE_OP(rowwise_add); -USE_NO_KERNEL_OP(identity); -USE_OP(sigmoid); -USE_OP(softmax); - namespace ops = paddle::operators; REGISTER_OP_WITHOUT_GRADIENT(fc, ops::FCOp, ops::FCOpMaker); diff --git a/paddle/operators/identity_op.cc b/paddle/operators/identity_op.cc index 5ab8c0fadc..2cc632205e 100644 --- a/paddle/operators/identity_op.cc +++ b/paddle/operators/identity_op.cc @@ -44,8 +44,8 @@ class IdentityOp : public NetOp { : NetOp(type, inputs, outputs, attrs) { PADDLE_ENFORCE_NE(Input("X"), framework::kEmptyVarName, "Input(X) of IdentityOp should not be null."); - PADDLE_ENFORCE_NE(Output("Out"), framework::kEmptyVarName, - "Output(Out) of IdentityOp should not be null."); + PADDLE_ENFORCE_NE(Output("Y"), framework::kEmptyVarName, + "Output(Y) of IdentityOp should not be null."); AppendOp(framework::OpRegistry::CreateOp( "scale", {{"X", {Input("X")}}}, {{"Out", {Output("Y")}}}, From e4c340e4e7dc7ec898c1ce0f2514c9579cba460d Mon Sep 17 00:00:00 2001 From: hedaoyuan Date: Fri, 15 Sep 2017 18:01:33 +0800 Subject: [PATCH 107/115] Bug fix. --- paddle/gserver/layers/ExpandConvLayer.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/gserver/layers/ExpandConvLayer.h b/paddle/gserver/layers/ExpandConvLayer.h index 698c37fb3f..a0873de192 100644 --- a/paddle/gserver/layers/ExpandConvLayer.h +++ b/paddle/gserver/layers/ExpandConvLayer.h @@ -15,7 +15,7 @@ limitations under the License. */ #pragma once #include -#include "ExpandConvBaseLayer.h" +#include "ConvBaseLayer.h" #include "paddle/math/Matrix.h" namespace paddle { From d6a0280eb99565b39cf6ba079676f465faccd3b6 Mon Sep 17 00:00:00 2001 From: dangqingqing Date: Fri, 15 Sep 2017 18:21:42 +0800 Subject: [PATCH 108/115] Enhance unit testing framework for operator with LoDTensor. --- paddle/operators/sequence_avg_pool_op.cc | 4 +++- paddle/operators/sequence_avg_pool_op.h | 13 ++++++++----- python/paddle/v2/framework/tests/op_test.py | 15 +++++++++++---- 3 files changed, 22 insertions(+), 10 deletions(-) diff --git a/paddle/operators/sequence_avg_pool_op.cc b/paddle/operators/sequence_avg_pool_op.cc index c15a5833de..00b5a6c697 100644 --- a/paddle/operators/sequence_avg_pool_op.cc +++ b/paddle/operators/sequence_avg_pool_op.cc @@ -60,7 +60,9 @@ class SequenceAvgPoolGradOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext& ctx) const override { PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")), - "Gradient of Out should not be null"); + "Gradient of Out should not be null."); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), + "The input X should not be null."); auto og_dims = ctx.Input(framework::GradVarName("Out"))->dims(); auto x_dims = ctx.Input("X")->dims(); diff --git a/paddle/operators/sequence_avg_pool_op.h b/paddle/operators/sequence_avg_pool_op.h index 6e343b87e2..ebe0956344 100644 --- a/paddle/operators/sequence_avg_pool_op.h +++ b/paddle/operators/sequence_avg_pool_op.h @@ -21,6 +21,9 @@ namespace operators { using Tensor = framework::Tensor; using LoDTensor = framework::LoDTensor; +template +using EigenVector = framework::EigenVector; template using EigenMatrix = framework::EigenMatrix; @@ -43,8 +46,8 @@ class SequenceAvgPoolKernel : public framework::OpKernel { static_cast(lod[0][i + 1])); Tensor out_t = out->Slice(i, i + 1); int64_t h = static_cast(lod[0][i + 1] - lod[0][i]); - auto in_e = EigenMatrix::From(in_t, {h, w}); - auto out_e = EigenMatrix::From(out_t, {h, w}); + auto in_e = EigenMatrix::From(in_t, framework::make_ddim({h, w})); + auto out_e = EigenVector::Flatten(out_t); out_e.device(place) = in_e.mean(Eigen::array({{0}})); } } @@ -54,9 +57,9 @@ template class SequenceAvgPoolGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { - auto* in = context.Output("X"); - auto* in_g = context.Output(framework::GradVarName("X")); + auto* in = context.Input("X"); auto* out_g = context.Input(framework::GradVarName("Out")); + auto* in_g = context.Output(framework::GradVarName("X")); auto dims = in->dims(); auto lod = in->lod(); @@ -71,7 +74,7 @@ class SequenceAvgPoolGradKernel : public framework::OpKernel { int64_t h = static_cast(lod[0][i + 1] - lod[0][i]); auto in_g_e = EigenMatrix::From(in_g_t, {h, w}); auto out_g_e = EigenMatrix::From(out_g_t, {1, w}); - Eigen::DSizes bcast(h, w); + Eigen::DSizes bcast(h, 1); in_g_e.device(place) = (out_g_e / static_cast(h)).broadcast(bcast); } } diff --git a/python/paddle/v2/framework/tests/op_test.py b/python/paddle/v2/framework/tests/op_test.py index 9936fd76ba..a0533efacd 100644 --- a/python/paddle/v2/framework/tests/op_test.py +++ b/python/paddle/v2/framework/tests/op_test.py @@ -47,17 +47,24 @@ def set_input(scope, op, inputs, place): if in_name in inputs: if in_dup: sub_in = inputs[in_name] - for sub_in_name, sub_in_array in sub_in: + for sub_in_name, sub_in_val in sub_in: var = scope.find_var(sub_in_name) tensor = var.get_tensor() + sub_in_array = sub_in_val[0] \ + if isinstance(sub_in_val, tuple) else sub_in_val tensor.set_dims(sub_in_array.shape) tensor.set(sub_in_array, place) + if isinstance(sub_in_val, tuple): + tensor.set_lod(sub_in_val[1]) else: var = scope.find_var(in_name) tensor = var.get_tensor() - arr = inputs[in_name] - tensor.set_dims(arr.shape) - tensor.set(arr, place) + in_val = inputs[in_name] + in_array = in_val[0] if isinstance(in_val, tuple) else in_val + tensor.set_dims(in_array.shape) + tensor.set(in_array, place) + if isinstance(in_val, tuple): + tensor.set_lod(in_val[1]) def set_output_grad(scope, op, outputs, place): From 05680d0d5a88adaae83d144558eeee5af1ce0c02 Mon Sep 17 00:00:00 2001 From: dangqingqing Date: Fri, 15 Sep 2017 18:31:40 +0800 Subject: [PATCH 109/115] Add unit testing for sequence average pool operator. --- .../v2/framework/tests/test_seq_pool.py | 51 +++++++++++++++++++ 1 file changed, 51 insertions(+) create mode 100644 python/paddle/v2/framework/tests/test_seq_pool.py diff --git a/python/paddle/v2/framework/tests/test_seq_pool.py b/python/paddle/v2/framework/tests/test_seq_pool.py new file mode 100644 index 0000000000..cf864936af --- /dev/null +++ b/python/paddle/v2/framework/tests/test_seq_pool.py @@ -0,0 +1,51 @@ +import unittest +import numpy as np +from op_test import OpTest + + +class TestSeqAvgPool1D(OpTest): + def setUp(self): + self.op_type = 'sequence_avg_pool' + # one level, batch size is 4 + x = np.random.uniform(0.1, 1, [11, 23]).astype('float32') + lod = [[0, 4, 5, 8, 11]] + + out = np.zeros((4, 23)).astype('float32') + for i in range(4): + sub_x = x[lod[0][i]:lod[0][i + 1], :] + out[i] = sub_x.mean(axis=0) + + self.inputs = {'X': (x, lod)} + self.outputs = {'Out': out} + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(["X"], "Out") + + +class TestSeqAvgPool2D(OpTest): + def setUp(self): + self.op_type = 'sequence_avg_pool' + # one level, batch size is 4 + x = np.random.uniform(0.1, 1, [13, 3, 17]).astype('float32') + lod = [[0, 4, 5, 8, 13]] + + out = np.zeros((4, 3, 17)).astype('float32') + for i in range(4): + sub_x = np.reshape(x[lod[0][i]:lod[0][i + 1], :], (-1, 3 * 17)) + out[i] = np.reshape(sub_x.mean(axis=0), (3, 17)) + + self.inputs = {'X': (x, lod)} + self.outputs = {'Out': out} + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(["X"], "Out") + + +if __name__ == '__main__': + unittest.main() From b1bca06667c1e7a05c6db5ad13799c5380225091 Mon Sep 17 00:00:00 2001 From: hedaoyuan Date: Fri, 15 Sep 2017 19:10:24 +0800 Subject: [PATCH 110/115] Refine the ExpandConvLayer. --- paddle/gserver/layers/ExpandConvLayer.cpp | 22 ++++++---------------- 1 file changed, 6 insertions(+), 16 deletions(-) diff --git a/paddle/gserver/layers/ExpandConvLayer.cpp b/paddle/gserver/layers/ExpandConvLayer.cpp index abc36aaef7..48dfcb49a4 100644 --- a/paddle/gserver/layers/ExpandConvLayer.cpp +++ b/paddle/gserver/layers/ExpandConvLayer.cpp @@ -53,14 +53,15 @@ bool ExpandConvLayer::init(const LayerMap &layerMap, weights_.emplace_back(w); index++; } + if (biasParameter_.get()) { if (sharedBiases_) { CHECK_EQ((size_t)numFilters_, biasParameter_->getSize()); - biases_ = - std::unique_ptr(new Weight(numFilters_, 1, biasParameter_)); + biases_ = std::unique_ptr( + new Weight(1, numFilters_, biasParameter_, 0)); } else { biases_ = - std::unique_ptr(new Weight(getSize(), 1, biasParameter_)); + std::unique_ptr(new Weight(1, getSize(), biasParameter_, 0)); } } @@ -189,12 +190,7 @@ void ExpandConvLayer::forward(PassType passType) { /* add the bias-vector */ if (biases_.get()) { - MatrixPtr bias = Matrix::create(biases_->getW()->getData(), - 1, - biases_->getW()->getElementCnt(), - false, - useGpu_); - output_.value->addBias(*bias, 1.0, sharedBiases_); + output_.value->addBias(*biases_->getW(), 1.0, sharedBiases_); } /* activation */ @@ -206,13 +202,7 @@ void ExpandConvLayer::backward(const UpdateCallback &callback) { MatrixPtr outGrad = getOutputGrad(); if (biases_ && biases_->getWGrad()) { - // bpropBiases(outGrad); - MatrixPtr bias = Matrix::create(biases_->getWGrad()->getData(), - 1, - biases_->getWGrad()->getElementCnt(), - false, - useGpu_); - bias->collectBias(*getOutputGrad(), 1, sharedBiases_); + biases_->getWGrad()->collectBias(*getOutputGrad(), 1, sharedBiases_); /* Increasing the number of gradient */ biases_->getParameterPtr()->incUpdate(callback); } From 17e1aa8d436bcd57fd707e518bc05648f64982f0 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Fri, 15 Sep 2017 18:54:00 -0700 Subject: [PATCH 111/115] Do not invoke GPU method when use_gpu=false --- .../gserver/gradientmachines/RecurrentGradientMachine.cpp | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/paddle/gserver/gradientmachines/RecurrentGradientMachine.cpp b/paddle/gserver/gradientmachines/RecurrentGradientMachine.cpp index 9f29b97466..b71431b907 100644 --- a/paddle/gserver/gradientmachines/RecurrentGradientMachine.cpp +++ b/paddle/gserver/gradientmachines/RecurrentGradientMachine.cpp @@ -18,6 +18,7 @@ limitations under the License. */ #include #include #include +#include #include "NeuralNetwork.h" #include "paddle/gserver/layers/AgentLayer.h" #include "paddle/utils/Flags.h" @@ -429,7 +430,11 @@ void RecurrentGradientMachine::reorganizeInput(PassType passType) { } { - AsyncGpuBlock asyncGpuBlock; + std::unique_ptr asyncBlock; + + if (useGpu_) { + asyncBlock.reset(new AsyncGpuBlock()); + } // inFrameLine select rows in real layer one time for (size_t i = 0; i < inFrameLines_.size(); i++) { From fc3b129b08795aecc2697ea4304855b44a5bb207 Mon Sep 17 00:00:00 2001 From: peterzhang2029 Date: Sun, 17 Sep 2017 11:14:15 +0800 Subject: [PATCH 112/115] delete the unused comments --- python/paddle/trainer_config_helpers/networks.py | 1 + 1 file changed, 1 insertion(+) diff --git a/python/paddle/trainer_config_helpers/networks.py b/python/paddle/trainer_config_helpers/networks.py index 6a9bfbd5bc..93e8ac173e 100644 --- a/python/paddle/trainer_config_helpers/networks.py +++ b/python/paddle/trainer_config_helpers/networks.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. + from activations import LinearActivation, ReluActivation, SoftmaxActivation, \ IdentityActivation, TanhActivation, SequenceSoftmaxActivation from attrs import ExtraAttr From b012013a5887d7d7b387e1369de94460039292a3 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Sat, 16 Sep 2017 20:24:35 -0700 Subject: [PATCH 113/115] Revert "Do not invoke GPU method when use_gpu=false" --- .../gserver/gradientmachines/RecurrentGradientMachine.cpp | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/paddle/gserver/gradientmachines/RecurrentGradientMachine.cpp b/paddle/gserver/gradientmachines/RecurrentGradientMachine.cpp index b71431b907..9f29b97466 100644 --- a/paddle/gserver/gradientmachines/RecurrentGradientMachine.cpp +++ b/paddle/gserver/gradientmachines/RecurrentGradientMachine.cpp @@ -18,7 +18,6 @@ limitations under the License. */ #include #include #include -#include #include "NeuralNetwork.h" #include "paddle/gserver/layers/AgentLayer.h" #include "paddle/utils/Flags.h" @@ -430,11 +429,7 @@ void RecurrentGradientMachine::reorganizeInput(PassType passType) { } { - std::unique_ptr asyncBlock; - - if (useGpu_) { - asyncBlock.reset(new AsyncGpuBlock()); - } + AsyncGpuBlock asyncGpuBlock; // inFrameLine select rows in real layer one time for (size_t i = 0; i < inFrameLines_.size(); i++) { From 59c48f9831e4a189ea042cd1ebb9be39e98b6d03 Mon Sep 17 00:00:00 2001 From: Yan Chunwei Date: Sun, 17 Sep 2017 02:09:54 -0400 Subject: [PATCH 114/115] block design (#3708) * add block --- doc/design/block.md | 338 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 338 insertions(+) create mode 100644 doc/design/block.md diff --git a/doc/design/block.md b/doc/design/block.md new file mode 100644 index 0000000000..be88001220 --- /dev/null +++ b/doc/design/block.md @@ -0,0 +1,338 @@ +# Design Doc: Block and Scope + +## The Representation of Computation + +Both deep learning systems and programming languages help users describe computation procedures. These systems use various representations of computation: + +- Caffe, Torch, and Paddle: sequences of layers. +- TensorFlow, Caffe2, Mxnet: graphs of operators. +- PaddlePaddle: nested blocks, like C++ and Java programs. + +## Block in Programming Languages and Deep Learning + +In programming languages, a block is a pair of curly braces that includes local variables definitions and a sequence of instructions, or operators. + +Blocks work with control flow structures like `if`, `else`, and `for`, which have equivalents in deep learning: + +| programming languages | PaddlePaddle | +|-----------------------|-----------------------| +| for, while loop | RNN, WhileOp | +| if, if-else, switch | IfElseOp, SwitchOp | +| sequential execution | a sequence of layers | + +A key difference is that a C++ program describes a one pass computation, whereas a deep learning program describes both the forward and backward passes. + +## Stack Frames and the Scope Hierarchy + +The existence of the backward makes the execution of a block of traditional programs and PaddlePaddle different to each other: + +| programming languages | PaddlePaddle | +|-----------------------|-------------------------------| +| stack | scope hierarchy | +| stack frame | scope | +| push at entering block| push at entering block | +| pop at leaving block | destroy at minibatch completes| + +1. In traditional programs: + + - When the execution enters the left curly brace of a block, the runtime pushes a frame into the stack, where it realizes local variables. + - After the execution leaves the right curly brace, the runtime pops the frame. + - The maximum number of frames in the stack is the maximum depth of nested blocks. + +1. In PaddlePaddle + + - When the execution enters a block, PaddlePaddle adds a new scope, where it realizes variables. + - PaddlePaddle doesn't pop a scope after the execution of the block because variables therein are to be used by the backward pass. So it has a stack forest known as a *scope hierarchy*. + - The height of the highest tree is the maximum depth of nested blocks. + - After the process of a minibatch, PaddlePaddle destroys the scope hierarchy. + +## Use Blocks in C++ and PaddlePaddle Programs + +Let us consolidate the discussion by presenting some examples. + +### Blocks with `if-else` and `IfElseOp` + +The following C++ programs shows how blocks are used with the `if-else` structure: + +```c++ +int x = 10; +int y = 20; +int out; +bool cond = false; +if (cond) { + int z = x + y; + out = softmax(z); +} else { + int z = fc(x); + out = z; +} +``` + +An equivalent PaddlePaddle program from the design doc of the [IfElseOp operator](./if_else_op.md) is as follows: + +```python +import paddle as pd + +x = var(10) +y = var(20) +cond = var(false) +ie = pd.create_ifelseop(inputs=[x], output_num=1) +with ie.true_block(): + x = ie.inputs(true, 0) + z = operator.add(x, y) + ie.set_output(true, 0, operator.softmax(z)) +with ie.false_block(): + x = ie.inputs(false, 0) + z = layer.fc(x) + ie.set_output(true, 0, operator.softmax(z)) +out = b(cond) +``` + +In both examples, the left branch computes `softmax(x+y)` and the right branch computes `fc(x)`. + +A difference is that variables in the C++ program contain scalar values, whereas those in the PaddlePaddle programs are mini-batches of instances. The `ie.input(true, 0)` invocation returns instances in the 0-th input, `x`, that corresponds to true values in `cond` as the local variable `x`, where `ie.input(false, 0)` returns instances corresponding to false values. + +### Blocks with `for` and `RNNOp` + +The following RNN model from the [RNN design doc](./rnn.md) + +```python +x = sequence([10, 20, 30]) +m = var(0) +W = tensor() +U = tensor() + +rnn = create_rnn(inputs=[input]) +with rnn.stepnet() as net: + x = net.set_inputs(0) + h = net.add_memory(init=m) + fc_out = pd.matmul(W, x) + hidden_out = pd.matmul(U, h.pre(n=1)) + sum = pd.add_two(fc_out, hidden_out) + act = pd.sigmoid(sum) + h.update(act) # update memory with act + net.set_outputs(0, act, hidden_out) # two outputs + +o1, o2 = rnn() +print o1, o2 +``` + +has its equivalent C++ program as follows + +```c++ +int* x = {10, 20, 30}; +int m = 0; +int W = some_value(); +int U = some_other_value(); + +int mem[sizeof(x) / sizeof(x[0]) + 1]; +int o1[sizeof(x) / sizeof(x[0]) + 1]; +int o2[sizeof(x) / sizeof(x[0]) + 1]; +for (int i = 1; i <= sizeof(x)/sizeof(x[0]); ++i) { + int x = x[i-1]; + if (i == 1) mem[0] = m; + int fc_out = W * x; + int hidden_out = Y * mem[i-1]; + int sum = fc_out + hidden_out; + int act = sigmoid(sum); + mem[i] = act; + o1[i] = act; + o2[i] = hidden_out; +} + +print_array(o1); +print_array(o2); +``` + + +## Compilation and Execution + +Like TensorFlow programs, a PaddlePaddle program is written in Python. The first part describes a neural network as a protobuf message, and the rest part executes the message for training or inference. + +The generation of this protobuf message is like what a compiler generates a binary executable file. The execution of the message that the OS executes the binary file. + +## The "Binary Executable File Format" + +The definition of the protobuf message is as follows: + +```protobuf +message BlockDesc { + repeated VarDesc vars = 1; + repeated OpDesc ops = 2; +} +``` + +The step net in above RNN example would look like + +``` +BlockDesc { + vars = { + VarDesc {...} // x + VarDesc {...} // h + VarDesc {...} // fc_out + VarDesc {...} // hidden_out + VarDesc {...} // sum + VarDesc {...} // act + } + ops = { + OpDesc {...} // matmul + OpDesc {...} // add_two + OpDesc {...} // sigmoid + } +}; +``` + +Also, the RNN operator in above example is serialized into a protobuf message of type `OpDesc` and would look like: + +``` +OpDesc { + inputs = {0} // the index of x + outputs = {5, 3} // indices of act and hidden_out + attrs { + "memories" : {1} // the index of h + "step_net" : + } +}; +``` + +This `OpDesc` value is in the `ops` field of the `BlockDesc` value representing the global block. + + +## The Compilation of Blocks + +During the generation of the Protobuf message, the Block should store VarDesc (the Protobuf message which describes Variable) and OpDesc (the Protobuf message which describes Operator). + +VarDesc in a block should have its name scope to avoid local variables affect parent block's name scope. +Child block's name scopes should inherit the parent's so that OpDesc in child block can reference a VarDesc that stored in parent block. For example + +```python +a = pd.Varaible(shape=[20, 20]) +b = pd.fc(a, params=["fc.w", "fc.b"]) + +rnn = pd.create_rnn() +with rnn.stepnet() as net: + x = net.set_inputs(a) + # reuse fc's parameter + fc_without_b = pd.get_variable("fc.w") + net.set_outputs(fc_without_b) + +out = rnn() +``` +the method `pd.get_variable` can help retrieve a Variable by a name, a Variable may store in a parent block, but might be retrieved in a child block, so block should have a variable scope that supports inheritance. + +In compiler design, the symbol table is a data structure created and maintained by compilers to store information about the occurrence of various entities such as variable names, function names, classes, etc. + +To store the definition of variables and operators, we define a C++ class `SymbolTable`, like the one used in compilers. + +`SymbolTable` can do the following stuff: + +- store the definitions (some names and attributes) of variables and operators, +- to verify if a variable was declared, +- to make it possible to implement type checking (offer Protobuf message pointers to `InferShape` handlers). + + +```c++ +// Information in SymbolTable is enough to trace the dependency graph. So maybe +// the Eval() interface takes a SymbolTable is enough. +class SymbolTable { + public: + SymbolTable(SymbolTable* parent) : parent_(parent) {} + + OpDesc* NewOp(const string& name=""); + + // TODO determine whether name is generated by python or C++ + // currently assume that a unique name will be generated by C++ if the + // argument name left default. + VarDesc* NewVar(const string& name=""); + + // find a VarDesc by name, if recursive true, find parent's SymbolTable + // recursively. + // this interface is introduced to support InferShape, find protobuf messages + // of variables and operators, pass pointers into InferShape. + // operator + // + // NOTE maybe some C++ classes such as VarDescBuilder and OpDescBuilder should + // be proposed and embedded into pybind to enable python operate on C++ pointers. + VarDesc* FindVar(const string& name, bool recursive=true); + + OpDesc* FindOp(const string& name); + + BlockDesc Compile() const; + + private: + SymbolTable* parent_; + + map ops_; + map vars_; +}; +``` + +After all the description of variables and operators is added into SymbolTable, +the block has enough information to run. + +The `Block` class takes a `BlockDesc` as input, and provide `Run` and `InferShape` functions. + + +```c++ +namespace { + +class Block : OperatorBase { +public: + Block(const BlockDesc& desc) desc_(desc) {} + + void InferShape(const framework::Scope& scope) const override { + if (!symbols_ready_) { + CreateVariables(scope); + CreateOperators(); + } + // should run InferShape first. + for (auto& op : runtime_table_.ops()) { + op->InferShape(scope); + } + } + + void Run(const framework::Scope& scope, + const platform::DeviceContext& dev_ctx) const override { + PADDLE_ENFORCE(symbols_ready_, "operators and variables should be created first."); + for (auto& op : runtime_table_.ops()) { + op->Run(scope, dev_ctx); + } + } + + void CreateVariables(const framework::Scope& scope); + void CreateOperators(); + + // some other necessary interfaces of NetOp are list below + // ... + +private: + BlockDesc desc_; + bool symbols_ready_{false}; +}; +``` + +## The Execution of Blocks + +Block inherits from OperatorBase, which has a Run method. +Block's Run method will run its operators sequentially. + +There is another important interface called `Eval`, which take some arguments called targets, and generate a minimal graph which takes targets as the end points and creates a new Block, +after `Run`, `Eval` will get the latest value and return the targets. + +The definition of Eval is as follows: + +```c++ +// clean a block description by targets using the corresponding dependency graph. +// return a new BlockDesc with minimal number of operators. +// NOTE not return a Block but the block's description so that this can be distributed +// to a cluster. +BlockDesc Prune(const BlockDesc& desc, vector targets); + +void Block::Eval(const vector& targets, + const framework::Scope& scope, + const platform::DeviceContext& dev_ctx) { + BlockDesc min_desc = Prune(desc_, targets); + Block min_block(min_desc); + min_block.Run(scope, dev_ctx); +} +``` From 8580dce308118c5903224c9ac40213c9105ad179 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=AD=A6=E6=AF=85?= Date: Mon, 18 Sep 2017 15:49:34 +0800 Subject: [PATCH 115/115] Refine accuracy_op CUDA kernel (#4097) * refind accuracy_op * follow comments * follow comments --- paddle/operators/accuracy_op.cu | 38 ++++++++++++------- paddle/platform/cuda_helper.h | 5 +++ .../v2/framework/tests/test_accuracy_op.py | 9 +++-- 3 files changed, 35 insertions(+), 17 deletions(-) diff --git a/paddle/operators/accuracy_op.cu b/paddle/operators/accuracy_op.cu index 4e6d1ef965..0a6a0fd15c 100644 --- a/paddle/operators/accuracy_op.cu +++ b/paddle/operators/accuracy_op.cu @@ -12,26 +12,38 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include +#include #include "paddle/operators/accuracy_op.h" +#include "paddle/platform/cuda_helper.h" namespace paddle { namespace operators { +using platform::PADDLE_CUDA_NUM_THREADS; -__global__ void AccuracySingleKernel(const int N, const int D, const int top_k, - const int* Xdata, const int* labelData, - float* accuracy) { - int correct = 0; - for (int row = 0; row < N; row++) { - const int label = labelData[row]; - for (int col = 0; col < D; col++) { - const int pred = Xdata[row * D + col]; - if (pred == label) { - ++correct; +template +__global__ void AccuracyCudaKernel(const int N, const int D, const int* Xdata, + const int* labeldata, float* accuracy) { + int count = 0; + __shared__ int total[BlockSize]; + + // support only 1 block + for (int i = threadIdx.x; i < (N); i += BlockSize) { + for (int j = 0; j < D; ++j) { + if (Xdata[i * D + j] == labeldata[i]) { + ++count; break; } } } - *accuracy = static_cast(correct) / static_cast(N); + total[threadIdx.x] = count; + __syncthreads(); + + // reduce the count with init value 0, and output accuracy. + int result = thrust::reduce(thrust::device, total, total + BlockSize, 0); + if (threadIdx.x == 0) { + *accuracy = static_cast(result) / static_cast(N); + } } template @@ -57,8 +69,8 @@ class AccuracyOpCUDAKernel : public framework::OpKernel { return; } - AccuracySingleKernel<<<1, 1>>>(num_samples, infer_width, 1, inference_data, - label_data, accuracy_data); + AccuracyCudaKernel<<<1, PADDLE_CUDA_NUM_THREADS>>>( + num_samples, infer_width, inference_data, label_data, accuracy_data); } }; diff --git a/paddle/platform/cuda_helper.h b/paddle/platform/cuda_helper.h index 6feec0d7f8..a7d99cde10 100644 --- a/paddle/platform/cuda_helper.h +++ b/paddle/platform/cuda_helper.h @@ -24,6 +24,11 @@ namespace platform { #define USE_CUDA_ATOMIC(op, T) \ CUDA_ATOMIC_WRAPPER(op, T) { return atomic##op(address, val); } +// Default thread count per block(or block size). +// TODO(typhoonzero): need to benchmark against setting this value +// to 1024. +constexpr int PADDLE_CUDA_NUM_THREADS = 512; + // For atomicAdd. USE_CUDA_ATOMIC(Add, float); diff --git a/python/paddle/v2/framework/tests/test_accuracy_op.py b/python/paddle/v2/framework/tests/test_accuracy_op.py index 43d60eb90d..b6f3a35d6f 100644 --- a/python/paddle/v2/framework/tests/test_accuracy_op.py +++ b/python/paddle/v2/framework/tests/test_accuracy_op.py @@ -6,16 +6,17 @@ from op_test import OpTest class TestAccuracyOp(OpTest): def setUp(self): self.op_type = "accuracy" - infer = np.random.randint(0, 2, (32, 1)).astype("int") - label = np.random.randint(0, 2, (32, )).astype("int") + n = 8192 + infer = np.random.randint(0, 2, (n, 1)).astype("int") + label = np.random.randint(0, 2, (n, )).astype("int") self.inputs = {'Inference': infer, "Label": label} num_correct = 0 - for rowid in xrange(32): + for rowid in xrange(n): for ele in infer[rowid]: if ele == label[rowid]: num_correct += 1 break - self.outputs = {'Accuracy': [num_correct / 32.0]} + self.outputs = {'Accuracy': [num_correct / float(n)]} def test_check_output(self): self.check_output()